language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | plotly__plotly.py | plotly/graph_objs/scatter3d/marker/colorbar/_tickfont.py | {
"start": 233,
"end": 9964
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.marker.colorbar"
_path_str = "scatter3d.marker.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter3d.mark
er.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.marker.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | networkx__networkx | networkx/classes/coreviews.py | {
"start": 4302,
"end": 5843
} | class ____(Mapping):
"""A read-only union of dict Adjacencies as a Map of Maps of Maps.
The two input dict-of-dict-of-dicts represent the union of
`G.succ` and `G.pred`. Return values are UnionAtlas
The inner level of dict is read-write. But the
middle and outer levels are read-only.
succ : a dict-of-dict-of-dict {node: nbrdict}
pred : a dict-of-dict-of-dict {node: nbrdict}
The keys for the two dicts should be the same
See Also
========
UnionAtlas: View into dict-of-dict
UnionMultiAdjacency: View into dict-of-dict-of-dict-of-dict
"""
__slots__ = ("_succ", "_pred")
def __getstate__(self):
return {"_succ": self._succ, "_pred": self._pred}
def __setstate__(self, state):
self._succ = state["_succ"]
self._pred = state["_pred"]
def __init__(self, succ, pred):
# keys must be the same for two input dicts
assert len(set(succ.keys()) ^ set(pred.keys())) == 0
self._succ = succ
self._pred = pred
def __len__(self):
return len(self._succ) # length of each dict should be the same
def __iter__(self):
return iter(self._succ)
def __getitem__(self, nbr):
return UnionAtlas(self._succ[nbr], self._pred[nbr])
def copy(self):
return {n: self[n].copy() for n in self._succ}
def __str__(self):
return str({nbr: self[nbr] for nbr in self})
def __repr__(self):
return f"{self.__class__.__name__}({self._succ!r}, {self._pred!r})"
| UnionAdjacency |
python | getsentry__sentry | src/sentry/integrations/messaging/message_builder.py | {
"start": 1136,
"end": 11308
} | class ____(TypedDict):
text: str
value: str
def _actor_text_and_value(actor: Team | RpcUser) -> tuple[str, str]:
if isinstance(actor, RpcUser):
return (actor.get_display_name(), f"user:{actor.id}")
elif isinstance(actor, Team):
return (f"#{actor.slug}", f"team:{actor.id}")
else:
raise AssertionError("unreachable")
def format_actor_option_non_slack(actor: Team | RpcUser) -> _NonSlackActorOption:
text, value = _actor_text_and_value(actor)
return {"text": text, "value": value}
def format_actor_options_non_slack(actors: Iterable[Team | RpcUser]) -> list[_NonSlackActorOption]:
return sorted(
(format_actor_option_non_slack(actor) for actor in actors), key=lambda dct: dct["text"]
)
def format_actor_option_slack(actor: Team | RpcUser) -> _SlackActorOption:
text, value = _actor_text_and_value(actor)
return {"text": {"type": "plain_text", "text": text}, "value": value}
def format_actor_options_slack(actors: Iterable[Team | RpcUser]) -> list[_SlackActorOption]:
return sorted(
(format_actor_option_slack(actor) for actor in actors), key=lambda dct: dct["text"]["text"]
)
def build_attachment_title(obj: Group | Event | GroupEvent) -> str:
ev_metadata = obj.get_event_metadata()
ev_type = obj.get_event_type()
title = obj.title
if ev_type == "error" and "type" in ev_metadata:
title = ev_metadata["type"]
elif ev_type == "csp":
title = f'{ev_metadata["directive"]} - {ev_metadata["uri"]}'
else:
if isinstance(obj, GroupEvent):
if obj.occurrence is not None:
title = obj.occurrence.issue_title
else:
if not isinstance(obj, Group):
group = obj.group
else:
group = obj
if group is not None:
event = group.get_latest_event()
if event is not None and event.occurrence is not None:
title = event.occurrence.issue_title
return title
def fetch_environment_name(rule_env: int) -> str | None:
try:
env = Environment.objects.get(id=rule_env)
except Environment.DoesNotExist:
return None
else:
return env.name
def get_rule_environment_param_from_rule(
rule_id: int, rule_environment_id: int | None, organization: Organization, type_id: int
) -> dict[str, str]:
from sentry.notifications.notification_action.utils import should_fire_workflow_actions
params = {}
if should_fire_workflow_actions(organization, type_id):
if (
rule_environment_id is not None
and (environment_name := fetch_environment_name(rule_environment_id)) is not None
):
params["environment"] = environment_name
else:
try:
rule = Rule.objects.get(id=rule_id)
except Rule.DoesNotExist:
rule_env = None
else:
rule_env = rule.environment_id
if (
rule_env is not None
and (environment_name := fetch_environment_name(rule_env)) is not None
):
params["environment"] = environment_name
return params
def get_title_link(
group: Group,
event: Event | GroupEvent | None,
link_to_event: bool,
issue_details: bool,
notification: BaseNotification | None,
provider: ExternalProviders,
rule_id: int | None = None,
rule_environment_id: int | None = None,
notification_uuid: str | None = None,
) -> str:
other_params = {}
# add in rule id if we have it
if rule_id:
other_params.update(
get_rule_environment_param_from_rule(
rule_id, rule_environment_id, group.organization, group.type
)
)
# hard code for issue alerts
other_params["alert_rule_id"] = str(rule_id)
other_params["alert_type"] = "issue"
if event and link_to_event:
url = group.get_absolute_url(
params={"referrer": EXTERNAL_PROVIDERS[provider], **other_params},
event_id=event.event_id,
)
elif issue_details and notification:
referrer = notification.get_referrer(provider)
notification_uuid = notification.notification_uuid
url = group.get_absolute_url(
params={"referrer": referrer, "notification_uuid": notification_uuid, **other_params}
)
elif notification_uuid:
url = group.get_absolute_url(
params={
"referrer": EXTERNAL_PROVIDERS[provider],
"notification_uuid": notification_uuid,
**other_params,
}
)
else:
url = group.get_absolute_url(
params={"referrer": EXTERNAL_PROVIDERS[provider], **other_params}
)
return url
def get_title_link_workflow_engine_ui(
group: Group,
event: Event | GroupEvent | None,
link_to_event: bool,
issue_details: bool,
notification: BaseNotification | None,
provider: ExternalProviders,
workflow_id: int | None = None,
environment_id: int | None = None,
notification_uuid: str | None = None,
) -> str:
other_params = {}
# add in rule id if we have it
if workflow_id:
if (
environment_id is not None
and (environment_name := fetch_environment_name(environment_id)) is not None
):
other_params["environment"] = environment_name
# hard code for issue alerts
other_params["workflow_id"] = str(workflow_id)
other_params["alert_type"] = "issue"
if event and link_to_event:
url = group.get_absolute_url(
params={"referrer": EXTERNAL_PROVIDERS[provider], **other_params},
event_id=event.event_id,
)
elif issue_details and notification:
referrer = notification.get_referrer(provider)
notification_uuid = notification.notification_uuid
url = group.get_absolute_url(
params={"referrer": referrer, "notification_uuid": notification_uuid, **other_params}
)
elif notification_uuid:
url = group.get_absolute_url(
params={
"referrer": EXTERNAL_PROVIDERS[provider],
"notification_uuid": notification_uuid,
**other_params,
}
)
else:
url = group.get_absolute_url(
params={"referrer": EXTERNAL_PROVIDERS[provider], **other_params}
)
return url
def build_attachment_text(group: Group, event: Event | GroupEvent | None = None) -> Any | None:
# Group and Event both implement get_event_{type,metadata}
obj = event if event is not None else group
ev_metadata = obj.get_event_metadata()
ev_type = obj.get_event_type()
if not event:
event = group.get_latest_event()
if isinstance(event, GroupEvent) and event.occurrence is not None:
important = event.occurrence.important_evidence_display
if important:
return important.value
elif ev_type == "error":
return ev_metadata.get("value") or ev_metadata.get("function")
return None
def build_attachment_replay_link(
group: Group, url_format: str, event: Event | GroupEvent | None = None
) -> str | None:
has_replay = features.has("organizations:session-replay", group.organization)
has_slack_links = features.has(
"organizations:session-replay-slack-new-issue", group.organization
)
if has_replay and has_slack_links and group.has_replays():
referrer = EXTERNAL_PROVIDERS[ExternalProviders.SLACK]
replay_url = f"{group.get_absolute_url()}replays/?referrer={referrer}"
return f"{url_format.format(text='View Replays', url=absolute_uri(replay_url))}"
return None
def build_rule_url(rule: Any, group: Group, project: Project) -> str:
from sentry.notifications.notification_action.utils import should_fire_workflow_actions
org_slug = group.organization.slug
project_slug = project.slug
if should_fire_workflow_actions(group.organization, group.type):
rule_id = get_key_from_rule_data(rule, "legacy_rule_id")
rule_url = f"/organizations/{org_slug}/alerts/rules/{project_slug}/{rule_id}/details/"
else:
rule_url = f"/organizations/{org_slug}/alerts/rules/{project_slug}/{rule.id}/details/"
return absolute_uri(rule_url)
def build_footer(
group: Group,
project: Project,
url_format: str,
rules: Sequence[Rule] | None = None,
) -> str:
footer = f"{group.qualified_short_id}"
if rules:
if features.has("organizations:workflow-engine-ui-links", group.organization):
rule_url = absolute_uri(
create_link_to_workflow(
group.organization.id, get_key_from_rule_data(rules[0], "workflow_id")
)
)
else:
rule_url = build_rule_url(rules[0], group, project)
# If this notification is triggered via the "Send Test Notification"
# button then the label is not defined, but the url works.
text = rules[0].label if rules[0].label else "Test Alert"
footer += f" via {url_format.format(text=text, url=rule_url)}"
if len(rules) > 1:
footer += f" (+{len(rules) - 1} other)"
return footer
def get_timestamp(group: Group, event: GroupEvent | None) -> float:
ts = group.last_seen
return (max(ts, event.datetime) if event else ts).timestamp()
def get_color(
event_for_tags: GroupEvent | None, notification: BaseNotification | None, group: Group
) -> str:
if notification:
if not isinstance(notification, AlertRuleNotification):
return "info"
if event_for_tags:
color: str | None = event_for_tags.get_tag("level")
if (
hasattr(event_for_tags, "occurrence")
and event_for_tags.occurrence is not None
and event_for_tags.occurrence.level is not None
):
color = event_for_tags.occurrence.level
if color and color in LEVEL_TO_COLOR.keys():
return color
return "error"
| _NonSlackActorOption |
python | huggingface__transformers | tests/models/mask2former/test_modeling_mask2former.py | {
"start": 1666,
"end": 7649
} | class ____:
def __init__(
self,
parent,
batch_size=2,
is_training=True,
use_auxiliary_loss=False,
num_queries=10,
num_channels=3,
min_size=32 * 8,
max_size=32 * 8,
num_labels=4,
hidden_dim=64,
num_attention_heads=4,
num_hidden_layers=2,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_auxiliary_loss = use_auxiliary_loss
self.num_queries = num_queries
self.num_channels = num_channels
self.min_size = min_size
self.max_size = max_size
self.num_labels = num_labels
self.hidden_dim = hidden_dim
self.mask_feature_size = hidden_dim
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
torch_device
)
pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
mask_labels = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
).float()
class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
config = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def get_config(self):
config = Mask2FormerConfig(
hidden_size=self.hidden_dim,
num_attention_heads=self.num_attention_heads,
num_hidden_layers=self.num_hidden_layers,
encoder_feedforward_dim=16,
dim_feedforward=32,
num_queries=self.num_queries,
num_labels=self.num_labels,
decoder_layers=2,
encoder_layers=2,
feature_size=16,
)
config.num_queries = self.num_queries
config.num_labels = self.num_labels
config.backbone_config.embed_dim = 16
config.backbone_config.depths = [1, 1, 1, 1]
config.backbone_config.hidden_size = 16
config.backbone_config.num_channels = self.num_channels
config.backbone_config.num_heads = [1, 1, 2, 2]
config.backbone = None
config.hidden_dim = self.hidden_dim
config.mask_feature_size = self.hidden_dim
config.feature_size = self.hidden_dim
return config
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def check_output_hidden_state(self, output, config):
encoder_hidden_states = output.encoder_hidden_states
pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
with torch.no_grad():
model = Mask2FormerModel(config=config)
model.to(torch_device)
model.eval()
output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
output = model(pixel_values, output_hidden_states=True)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape,
(self.batch_size, self.num_queries, self.hidden_dim),
)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(output, config)
def create_and_check_mask2former_instance_segmentation_head_model(
self, config, pixel_values, pixel_mask, mask_labels, class_labels
):
model = Mask2FormerForUniversalSegmentation(config=config)
model.to(torch_device)
model.eval()
def comm_check_on_output(result):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape,
(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
)
with torch.no_grad():
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
result = model(pixel_values)
comm_check_on_output(result)
result = model(
pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
)
comm_check_on_output(result)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape, torch.Size([]))
@require_torch
| Mask2FormerModelTester |
python | dask__distributed | distributed/diagnostics/tests/test_worker_plugin.py | {
"start": 14823,
"end": 15817
} | class ____(WorkerPlugin):
def setup(self, worker):
raise RuntimeError("test error")
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_register_plugin_with_broken_setup_to_existing_workers_raises(c, s, a):
with pytest.raises(RuntimeError, match="test error"):
with captured_logger("distributed.worker", level=logging.ERROR) as caplog:
await c.register_plugin(BrokenSetupPlugin(), name="TestPlugin1")
logs = caplog.getvalue()
assert "TestPlugin1 failed to setup" in logs
assert "test error" in logs
@gen_cluster(client=True, nthreads=[])
async def test_plugin_with_broken_setup_on_new_worker_logs(c, s):
await c.register_plugin(BrokenSetupPlugin(), name="TestPlugin1")
with captured_logger("distributed.worker", level=logging.ERROR) as caplog:
async with Worker(s.address):
pass
logs = caplog.getvalue()
assert "TestPlugin1 failed to setup" in logs
assert "test error" in logs
| BrokenSetupPlugin |
python | neetcode-gh__leetcode | python/0606-construct-string-from-binary-tree.py | {
"start": 0,
"end": 940
} | class ____:
def tree2str(self, root: Optional[TreeNode]) -> str:
# Solution with O(n) time and space complexity
res = []
self.dfs(root, res)
return "".join(res)
def dfs(self, t: TreeNode, res: list):
# If the current node is None, do nothing and return
if t is None:
return
res.append(str(t.val))
# If both left and right children are None, return as there are no more branches to explore
if t.left is None and t.right is None:
return
res.append('(')
# Recursively call the DFS function for the left child
self.dfs(t.left, res)
res.append(')')
# If the right child exists, process it
if t.right is not None:
res.append('(')
# Recursively call the DFS function for the right child
self.dfs(t.right, res)
res.append(')')
| Solution |
python | ray-project__ray | python/ray/llm/tests/batch/cpu/processor/test_processor_base.py | {
"start": 4209,
"end": 4393
} | class ____(StatefulStageUDF):
async def udf(self, batch: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]:
for row in batch:
yield row
| DummyStatefulStageUDF |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_incident_details.py | {
"start": 1398,
"end": 2121
} | class ____(BaseIncidentDetailsTest):
@freeze_time()
def test_simple(self) -> None:
incident = self.create_incident()
with self.feature("organizations:incidents"):
resp = self.get_success_response(incident.organization.slug, incident.identifier)
expected = serialize(incident)
assert resp.data["id"] == expected["id"]
assert resp.data["identifier"] == expected["identifier"]
assert resp.data["projects"] == expected["projects"]
assert resp.data["dateDetected"] == expected["dateDetected"]
assert resp.data["dateCreated"] == expected["dateCreated"]
assert resp.data["projects"] == expected["projects"]
| OrganizationIncidentDetailsTest |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 77380,
"end": 80707
} | class ____(NonStrictDataModel):
"""
:param section: Section that the parameter belongs to
:type section: str
:param name: Name of the parameter. The combination of section and name should
be unique
:type name: str
:param value: Value of the parameter
:type value: str
:param type: Type of the parameter. Optional
:type type: str
:param description: The parameter description. Optional
:type description: str
"""
_schema = {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self, section=None, name=None, value=None, type=None, description=None, **kwargs
):
super(ParamsItem, self).__init__(**kwargs)
self.section = section
self.name = name
self.value = value
self.type = type
self.description = description
@schema_property("section")
def section(self):
return self._property_section
@section.setter
def section(self, value):
if value is None:
self._property_section = None
return
self.assert_isinstance(value, "section", six.string_types)
self._property_section = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("value")
def value(self):
return self._property_value
@value.setter
def value(self, value):
if value is None:
self._property_value = None
return
self.assert_isinstance(value, "value", six.string_types)
self._property_value = value
@schema_property("type")
def type(self):
return self._property_type
@type.setter
def type(self, value):
if value is None:
self._property_type = None
return
self.assert_isinstance(value, "type", six.string_types)
self._property_type = value
@schema_property("description")
def description(self):
return self._property_description
@description.setter
def description(self, value):
if value is None:
self._property_description = None
return
self.assert_isinstance(value, "description", six.string_types)
self._property_description = value
| ParamsItem |
python | django__django | django/db/models/expressions.py | {
"start": 33261,
"end": 33582
} | class ____(F):
contains_aggregate = False
contains_over_clause = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
| OuterRef |
python | django__django | tests/aggregation_regress/models.py | {
"start": 2443,
"end": 2602
} | class ____(models.Model):
alfa = models.ForeignKey(Alfa, models.SET_NULL, null=True)
bravo = models.ForeignKey(Bravo, models.SET_NULL, null=True)
| Charlie |
python | falconry__falcon | tests/test_wsgi_errors.py | {
"start": 268,
"end": 454
} | class ____:
def on_get(self, req, resp):
req.log_error(unicode_message)
def on_head(self, req, resp):
req.log_error(unicode_message.encode('utf-8'))
| LoggerResource |
python | paramiko__paramiko | tests/test_channelfile.py | {
"start": 1425,
"end": 1904
} | class ____(ChannelFileBase):
klass = ChannelStdinFile
def test_close_calls_channel_shutdown_write(self):
chan = MagicMock()
cf = ChannelStdinFile(chan, mode="wb")
cf.flush = MagicMock()
cf.close()
# Sanity check that we still call BufferedFile.close()
cf.flush.assert_called_once_with()
assert cf._closed is True
# Actual point of test
chan.shutdown_write.assert_called_once_with()
| TestChannelStdinFile |
python | viewflow__viewflow | viewflow/views/actions.py | {
"start": 2864,
"end": 4071
} | class ____(BaseBulkActionView):
template_name = "viewflow/views/delete_action.html"
template_name_suffix = "_delete_action"
def get_deleted_objects(self, query):
collector = Collector(using=router.db_for_write(self.model))
collector.collect(query)
return [
(model_class, objects) for model_class, objects in collector.data.items()
]
def get_context_data(self, **kwargs):
"""Extend view context data.
`{{ deleted_objects }}` - list of related objects to delete
"""
if self.form.is_valid() and not self.form.cleaned_data.get("select_all"):
kwargs.setdefault(
"deleted_objects", self.get_deleted_objects(self.get_queryset())
)
return super(DeleteBulkActionView, self).get_context_data(**kwargs)
def form_valid(self, form):
self.get_queryset().delete()
self.message_user()
return HttpResponseRedirect(self.get_success_url())
def message_user(self):
message = "The objects were deleted successfully"
messages.add_message(
self.request, messages.SUCCESS, message, fail_silently=True
)
| DeleteBulkActionView |
python | has2k1__plotnine | plotnine/scales/scale_size.py | {
"start": 1743,
"end": 2234
} | class ____(scale_continuous[Literal["legend"] | None]):
"""
Continuous radius size scale
"""
_aesthetics = ["size"]
range: InitVar[tuple[float, float]] = (1, 6)
"""
Range ([Minimum, Maximum]) of the size.
"""
_: KW_ONLY
guide: Literal["legend"] | None = "legend"
def __post_init__(self, range):
from mizani.palettes import rescale_pal
super().__post_init__()
self.palette = rescale_pal(range)
@dataclass
| scale_size_radius |
python | facebook__pyre-check | client/commands/infer.py | {
"start": 2392,
"end": 2552
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin, RawAnnotation):
parent: str
annotation: str
@dataclasses.dataclass(frozen=True)
| RawAttributeAnnotation |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_match_powers_of_base.py | {
"start": 1188,
"end": 1737
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.match_powers_of_base"
condition_value_keys = ("base_integer",)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, base_integer, **kwargs):
return column.apply(lambda x: is_power_of_n(x, base_integer))
# This class defines the Expectation itself
| ColumnValuesMatchPowersOfBase |
python | langchain-ai__langchain | libs/langchain/langchain_classic/smith/evaluation/string_run_evaluator.py | {
"start": 1149,
"end": 1749
} | class ____(Serializable):
"""Extract items to evaluate from the run object."""
@property
def output_keys(self) -> list[str]:
"""The keys to extract from the run."""
return ["prediction", "input"]
@abstractmethod
def map(self, run: Run) -> dict[str, str]:
"""Maps the Run to a dictionary."""
def __call__(self, run: Run) -> dict[str, str]:
"""Maps the Run to a dictionary."""
if not run.outputs:
msg = f"Run {run.id} has no outputs to evaluate."
raise ValueError(msg)
return self.map(run)
| StringRunMapper |
python | apache__airflow | providers/http/tests/unit/http/triggers/test_http.py | {
"start": 3137,
"end": 6791
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="http_default", conn_type="http", host="test:8080/", extra='{"bearer": "test"}'
)
)
@staticmethod
def _mock_run_result(result_to_mock):
f = Future()
f.set_result(result_to_mock)
return f
def test_serialization(self, trigger):
"""
Asserts that the HttpTrigger correctly serializes its arguments
and classpath.
"""
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.http.triggers.http.HttpTrigger"
assert kwargs == {
"http_conn_id": TEST_CONN_ID,
"auth_type": TEST_AUTH_TYPE,
"method": TEST_METHOD,
"endpoint": TEST_ENDPOINT,
"headers": TEST_HEADERS,
"data": TEST_DATA,
"extra_options": TEST_EXTRA_OPTIONS,
}
@pytest.mark.asyncio
@mock.patch(HTTP_PATH.format("HttpAsyncHook"))
async def test_trigger_on_success_yield_successfully(self, mock_hook, trigger, client_response):
"""
Tests the HttpTrigger only fires once the job execution reaches a successful state.
"""
mock_hook.return_value.run.return_value = self._mock_run_result(client_response)
response = await HttpTrigger._convert_response(client_response)
generator = trigger.run()
actual = await generator.asend(None)
assert actual == TriggerEvent(
{
"status": "success",
"response": base64.standard_b64encode(pickle.dumps(response)).decode("ascii"),
}
)
@pytest.mark.asyncio
@mock.patch(HTTP_PATH.format("HttpAsyncHook"))
async def test_trigger_on_exec_yield_successfully(self, mock_hook, trigger):
"""
Test that HttpTrigger fires the correct event in case of an error.
"""
mock_hook.return_value.run.side_effect = Exception("Test exception")
generator = trigger.run()
actual = await generator.asend(None)
assert actual == TriggerEvent({"status": "error", "message": "Test exception"})
@pytest.mark.asyncio
async def test_convert_response(self, client_response):
"""
Assert convert aiohttp.client_reqrep.ClientResponse to requests.Response.
"""
response = await HttpTrigger._convert_response(client_response)
assert response.content == await client_response.read()
assert response.status_code == client_response.status
assert response.headers == CaseInsensitiveDict(client_response.headers)
assert response.url == str(client_response.url)
assert response.history == [HttpTrigger._convert_response(h) for h in client_response.history]
assert response.encoding == client_response.get_encoding()
assert response.reason == client_response.reason
assert dict(response.cookies) == dict(client_response.cookies)
@pytest.mark.asyncio
@mock.patch("aiohttp.client.ClientSession.post")
async def test_trigger_on_post_with_data(self, mock_http_post, trigger):
"""
Test that HttpTrigger fires the correct event in case of an error.
"""
generator = trigger.run()
await generator.asend(None)
mock_http_post.assert_called_once()
_, kwargs = mock_http_post.call_args
assert kwargs["data"] == TEST_DATA
assert kwargs["json"] is None
assert kwargs["params"] is None
| TestHttpTrigger |
python | google__pytype | pytype/rewrite/flow/conditions.py | {
"start": 822,
"end": 1802
} | class ____(Condition):
"""Composition of conditions."""
conditions: frozenset[Condition]
_ACCEPT: ClassVar[Condition]
_IGNORE: ClassVar[Condition]
_REPR: ClassVar[str]
@classmethod
def make(cls, *args: Condition) -> Condition:
"""Composes the input conditions."""
conditions = set()
for arg in args:
if arg is cls._IGNORE:
continue
if arg is cls._ACCEPT:
return arg
negation = Not(arg)
if negation in conditions:
return cls._ACCEPT
conditions.add(arg)
if not conditions:
return cls._IGNORE
if len(conditions) == 1:
return conditions.pop()
return cls(frozenset(conditions))
def __repr__(self):
conditions = []
for c in self.conditions:
if isinstance(c, _Composite):
conditions.append(f'({repr(c)})')
else:
conditions.append(repr(c))
return f' {self._REPR} '.join(conditions)
@dataclasses.dataclass(frozen=True, repr=False)
| _Composite |
python | spack__spack | var/spack/test_repos/spack_repo/duplicates_test/packages/gmake/package.py | {
"start": 216,
"end": 648
} | class ____(Package):
"""Simple build tool, with different versions"""
homepage = "http://www.example.com"
url = "http://www.example.com/tdep-1.0.tar.gz"
tags = ["build-tools"]
version("4.1", md5="0123456789abcdef0123456789abcdef")
version("4.0", md5="0123456789abcdef0123456789abcdef")
version("3.0", md5="0123456789abcdef0123456789abcdef")
version("2.0", md5="0123456789abcdef0123456789abcdef")
| Gmake |
python | huggingface__transformers | tests/models/vjepa2/test_modeling_vjepa2.py | {
"start": 4662,
"end": 7625
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as VJEPA2 does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
test_torch_exportable = True
all_model_classes = (VJEPA2Model, VJEPA2ForVideoClassification) if is_torch_available() else ()
pipeline_model_mapping = {}
test_resize_embeddings = False
def setUp(self):
self.model_tester = VJEPA2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=VJEPA2Config, has_text_modality=False, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="VJEPA2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="VJEPA2 does not support feedforward chunking yet")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = VJEPA2Model.from_pretrained(VJEPA_HF_MODEL)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
def prepare_random_video(image_size=256):
videos = prepare_video_inputs(
batch_size=1,
num_frames=16,
num_channels=3,
min_resolution=image_size,
max_resolution=image_size,
equal_resolution=True,
return_tensors="torch",
)
return videos
@require_torch
@require_vision
| VJEPA2ModelTest |
python | dask__distributed | distributed/shuffle/_rechunk.py | {
"start": 37675,
"end": 39270
} | class ____(ShuffleSpec[NDIndex]):
new: ChunkedAxes
old: ChunkedAxes
@property
def output_partitions(self) -> Generator[NDIndex]:
yield from product(*(range(len(c)) for c in self.new))
def pick_worker(self, partition: NDIndex, workers: Sequence[str]) -> str:
npartitions = 1
for c in self.new:
npartitions *= len(c)
ix = 0
for dim, pos in enumerate(partition):
if dim > 0:
ix += len(self.new[dim - 1]) * pos
else:
ix += pos
i = len(workers) * ix // npartitions
return workers[i]
def create_run_on_worker(
self,
run_id: int,
span_id: str | None,
worker_for: dict[NDIndex, str],
plugin: ShuffleWorkerPlugin,
) -> ShuffleRun:
return ArrayRechunkRun(
worker_for=worker_for,
old=self.old,
new=self.new,
id=self.id,
run_id=run_id,
span_id=span_id,
directory=os.path.join(
plugin.worker.local_directory,
f"shuffle-{self.id}-{run_id}",
),
executor=plugin._executor,
local_address=plugin.worker.address,
rpc=plugin.worker.rpc,
digest_metric=plugin.worker.digest_metric,
scheduler=plugin.worker.scheduler,
memory_limiter_disk=plugin.memory_limiter_disk,
memory_limiter_comms=plugin.memory_limiter_comms,
disk=self.disk,
loop=plugin.worker.loop,
)
| ArrayRechunkSpec |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 124568,
"end": 127100
} | class ____(TorchHigherOrderOperatorVariable):
@raise_hard_error_if_graph_break(
reason="strict_mode HOO doesn't work unless it is captured completely with torch.compile."
)
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
unpacked_sequence = args[1].unpack_var_sequence(tx)
# TODO (tmanlaibaatar) support pytree here
for arg in unpacked_sequence:
if isinstance(arg, (ListVariable, TupleVariable, ConstDictVariable)):
unimplemented(
gb_type="strict_mode: improper args",
context=f"args: {args}, kwargs: {kwargs}",
explanation="strict_mode higher order op expects flat inputs (list/tuple/dict)",
hints=[
*graph_break_hints.USER_ERROR,
],
)
if kwargs:
unimplemented(
gb_type="strict_mode: unexpected kwargs",
context=f"args: {args}, kwargs: {kwargs}",
explanation=f"strict_mode higher order op expects no keyword arguments (got {len(kwargs)}).",
hints=[
*graph_break_hints.USER_ERROR,
],
)
(
(ret_val, ret_spec),
ret_graph,
ret_lifted_freevars,
) = speculate_subgraph(
tx,
args[0],
unpacked_sequence,
{},
"strict_mode",
source_target=self.value,
should_flatten_outputs=True,
)
strict_mode_nn_modules = dict(tx.output.nn_modules)
strict_mode_name = tx.output.install_subgraph(
"strict_mode_body",
torch.fx.GraphModule(strict_mode_nn_modules, ret_graph),
)
strict_mode_node = make_attr(tx, strict_mode_name)
p_args = (
strict_mode_node,
tuple(ret_lifted_freevars.keys()),
)
flat_example_value = pytree.tree_map_only(
torch.fx.Proxy,
lambda a: a.node.meta["example_value"],
ret_val.as_proxy(),
)
return _call_function_and_unflatten_output(
tx,
torch.ops.higher_order.strict_mode,
p_args,
{},
flat_example_value,
ret_spec,
ret_val,
)
| StrictModeHigherOrderVariable |
python | huggingface__transformers | src/transformers/models/mobilevit/modeling_mobilevit.py | {
"start": 27630,
"end": 28459
} | class ____(nn.Module):
def __init__(self, config: MobileViTConfig, in_channels: int, out_channels: int) -> None:
super().__init__()
self.global_pool = nn.AdaptiveAvgPool2d(output_size=1)
self.conv_1x1 = MobileViTConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
use_normalization=True,
use_activation="relu",
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
spatial_size = features.shape[-2:]
features = self.global_pool(features)
features = self.conv_1x1(features)
features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False)
return features
| MobileViTASPPPooling |
python | wandb__wandb | wandb/sdk/lib/import_hooks.py | {
"start": 6729,
"end": 10271
} | class ____:
def __init__(self) -> None:
self.in_progress: Dict = {}
def find_module( # type: ignore
self,
fullname: str,
path: Optional[str] = None,
) -> Optional["_ImportHookChainedLoader"]:
# If the module being imported is not one we have registered
# post import hooks for, we can return immediately. We will
# take no further part in the importing of this module.
with _post_import_hooks_lock:
if fullname not in _post_import_hooks:
return None
# When we are interested in a specific module, we will call back
# into the import system a second time to defer to the import
# finder that is supposed to handle the importing of the module.
# We set an in progress flag for the target module so that on
# the second time through we don't trigger another call back
# into the import system and cause a infinite loop.
if fullname in self.in_progress:
return None
self.in_progress[fullname] = True
# Now call back into the import system again.
try:
# For Python 3 we need to use find_spec().loader
# from the importlib.util module. It doesn't actually
# import the target module and only finds the
# loader. If a loader is found, we need to return
# our own loader which will then in turn call the
# real loader to import the module and invoke the
# post import hooks.
loader = getattr(find_spec(fullname), "loader", None)
if loader and not isinstance(loader, _ImportHookChainedLoader):
return _ImportHookChainedLoader(loader)
finally:
del self.in_progress[fullname]
def find_spec(
self, fullname: str, path: Optional[str] = None, target: Any = None
) -> Any:
# Since Python 3.4, you are meant to implement find_spec() method
# instead of find_module() and since Python 3.10 you get deprecation
# warnings if you don't define find_spec().
# If the module being imported is not one we have registered
# post import hooks for, we can return immediately. We will
# take no further part in the importing of this module.
with _post_import_hooks_lock:
if fullname not in _post_import_hooks:
return None
# When we are interested in a specific module, we will call back
# into the import system a second time to defer to the import
# finder that is supposed to handle the importing of the module.
# We set an in progress flag for the target module so that on
# the second time through we don't trigger another call back
# into the import system and cause a infinite loop.
if fullname in self.in_progress:
return None
self.in_progress[fullname] = True
# Now call back into the import system again.
try:
# This should only be Python 3 so find_spec() should always
# exist so don't need to check.
spec = find_spec(fullname)
loader = getattr(spec, "loader", None)
if loader and not isinstance(loader, _ImportHookChainedLoader):
assert spec is not None
spec.loader = _ImportHookChainedLoader(loader) # type: ignore
return spec
finally:
del self.in_progress[fullname]
| ImportHookFinder |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/build_env_compiler_var_b/package.py | {
"start": 217,
"end": 776
} | class ____(Package):
"""Package with runtime variable that should be dropped in the parent's build environment."""
url = "https://www.example.com"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def setup_run_environment(self, env: EnvironmentModifications) -> None:
env.set("CC", "this-should-be-dropped")
env.set("CXX", "this-should-be-dropped")
env.set("FC", "this-should-be-dropped")
env.set("F77", "this-should-be-dropped")
env.set("ANOTHER_VAR", "this-should-be-present")
| BuildEnvCompilerVarB |
python | getsentry__sentry-python | sentry_sdk/integrations/quart.py | {
"start": 1481,
"end": 7413
} | class ____(Integration):
identifier = "quart"
origin = f"auto.http.{identifier}"
transaction_style = ""
def __init__(self, transaction_style="endpoint"):
# type: (str) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
# type: () -> None
request_started.connect(_request_websocket_started)
websocket_started.connect(_request_websocket_started)
got_background_exception.connect(_capture_exception)
got_request_exception.connect(_capture_exception)
got_websocket_exception.connect(_capture_exception)
patch_asgi_app()
patch_scaffold_route()
def patch_asgi_app():
# type: () -> None
old_app = Quart.__call__
async def sentry_patched_asgi_app(self, scope, receive, send):
# type: (Any, Any, Any, Any) -> Any
if sentry_sdk.get_client().get_integration(QuartIntegration) is None:
return await old_app(self, scope, receive, send)
middleware = SentryAsgiMiddleware(
lambda *a, **kw: old_app(self, *a, **kw),
span_origin=QuartIntegration.origin,
asgi_version=3,
)
return await middleware(scope, receive, send)
Quart.__call__ = sentry_patched_asgi_app
def patch_scaffold_route():
# type: () -> None
old_route = Scaffold.route
def _sentry_route(*args, **kwargs):
# type: (*Any, **Any) -> Any
old_decorator = old_route(*args, **kwargs)
def decorator(old_func):
# type: (Any) -> Any
if inspect.isfunction(old_func) and not asyncio.iscoroutinefunction(
old_func
):
@wraps(old_func)
@ensure_integration_enabled(QuartIntegration, old_func)
def _sentry_func(*args, **kwargs):
# type: (*Any, **Any) -> Any
current_scope = sentry_sdk.get_current_scope()
if current_scope.transaction is not None:
current_scope.transaction.update_active_thread()
sentry_scope = sentry_sdk.get_isolation_scope()
if sentry_scope.profile is not None:
sentry_scope.profile.update_active_thread_id()
return old_func(*args, **kwargs)
return old_decorator(_sentry_func)
return old_decorator(old_func)
return decorator
Scaffold.route = _sentry_route
def _set_transaction_name_and_source(scope, transaction_style, request):
# type: (sentry_sdk.Scope, str, Request) -> None
try:
name_for_style = {
"url": request.url_rule.rule,
"endpoint": request.url_rule.endpoint,
}
scope.set_transaction_name(
name_for_style[transaction_style],
source=SOURCE_FOR_STYLE[transaction_style],
)
except Exception:
pass
async def _request_websocket_started(app, **kwargs):
# type: (Quart, **Any) -> None
integration = sentry_sdk.get_client().get_integration(QuartIntegration)
if integration is None:
return
if has_request_context():
request_websocket = request._get_current_object()
if has_websocket_context():
request_websocket = websocket._get_current_object()
# Set the transaction name here, but rely on ASGI middleware
# to actually start the transaction
_set_transaction_name_and_source(
sentry_sdk.get_current_scope(), integration.transaction_style, request_websocket
)
scope = sentry_sdk.get_isolation_scope()
evt_processor = _make_request_event_processor(app, request_websocket, integration)
scope.add_event_processor(evt_processor)
def _make_request_event_processor(app, request, integration):
# type: (Quart, Request, QuartIntegration) -> EventProcessor
def inner(event, hint):
# type: (Event, dict[str, Any]) -> Event
# if the request is gone we are fine not logging the data from
# it. This might happen if the processor is pushed away to
# another thread.
if request is None:
return event
with capture_internal_exceptions():
# TODO: Figure out what to do with request body. Methods on request
# are async, but event processors are not.
request_info = event.setdefault("request", {})
request_info["url"] = request.url
request_info["query_string"] = request.query_string
request_info["method"] = request.method
request_info["headers"] = _filter_headers(dict(request.headers))
if should_send_default_pii():
request_info["env"] = {"REMOTE_ADDR": request.access_route[0]}
_add_user_to_event(event)
return event
return inner
async def _capture_exception(sender, exception, **kwargs):
# type: (Quart, Union[ValueError, BaseException], **Any) -> None
integration = sentry_sdk.get_client().get_integration(QuartIntegration)
if integration is None:
return
event, hint = event_from_exception(
exception,
client_options=sentry_sdk.get_client().options,
mechanism={"type": "quart", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
def _add_user_to_event(event):
# type: (Event) -> None
if quart_auth is None:
return
user = quart_auth.current_user
if user is None:
return
with capture_internal_exceptions():
user_info = event.setdefault("user", {})
user_info["id"] = quart_auth.current_user._auth_id
| QuartIntegration |
python | getsentry__sentry-python | sentry_sdk/integrations/openfeature.py | {
"start": 654,
"end": 1131
} | class ____(Hook):
def after(self, hook_context, details, hints):
# type: (Any, Any, Any) -> None
if isinstance(details.value, bool):
add_feature_flag(details.flag_key, details.value)
def error(self, hook_context, exception, hints):
# type: (HookContext, Exception, HookHints) -> None
if isinstance(hook_context.default_value, bool):
add_feature_flag(hook_context.flag_key, hook_context.default_value)
| OpenFeatureHook |
python | django__django | tests/admin_widgets/tests.py | {
"start": 76206,
"end": 79612
} | class ____(AdminWidgetSeleniumTestCase):
def test_ForeignKey_using_to_field(self):
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
self.admin_login(username="super", password="secret", login_url="/")
with self.wait_page_loaded():
self.selenium.get(
self.live_server_url + reverse("admin:admin_widgets_profile_add")
)
main_window = self.selenium.current_window_handle
# Click the Add User button to add new
self.selenium.find_element(By.ID, "add_id_user").click()
self.wait_for_and_switch_to_popup()
password_field = self.selenium.find_element(By.ID, "id_password")
password_field.send_keys("password")
username_field = self.selenium.find_element(By.ID, "id_username")
username_value = "newuser"
username_field.send_keys(username_value)
save_button_css_selector = ".submit-row > input[type=submit]"
self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element(By.CSS_SELECTOR, "#id_user option[value=newuser]")
self.selenium.find_element(By.ID, "view_id_user").click()
self.wait_for_value("#id_username", "newuser")
self.selenium.back()
# Chrome and Safari don't update related object links when selecting
# the same option as previously submitted. As a consequence, the
# "pencil" and "eye" buttons remain disable, so select "---------"
# first.
select = Select(self.selenium.find_element(By.ID, "id_user"))
select.select_by_index(0)
select.select_by_value("newuser")
# Click the Change User button to change it
self.selenium.find_element(By.ID, "change_id_user").click()
self.wait_for_and_switch_to_popup()
username_field = self.selenium.find_element(By.ID, "id_username")
username_value = "changednewuser"
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = ".submit-row > input[type=submit]"
self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element(
By.CSS_SELECTOR, "#id_user option[value=changednewuser]"
)
element = self.selenium.find_element(By.ID, "view_id_user")
ActionChains(self.selenium).move_to_element(element).click(element).perform()
self.wait_for_value("#id_username", "changednewuser")
self.selenium.back()
select = Select(self.selenium.find_element(By.ID, "id_user"))
select.select_by_value("changednewuser")
# Go ahead and submit the form to make sure it works
self.selenium.find_element(By.CSS_SELECTOR, save_button_css_selector).click()
self.wait_for_text(
"li.success", "The profile “changednewuser” was added successfully."
)
profiles = Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
@skipUnless(Image, "Pillow not installed")
| RelatedFieldWidgetSeleniumTests |
python | getsentry__sentry | src/sentry/integrations/api/bases/external_actor.py | {
"start": 1781,
"end": 1928
} | class ____(TypedDict):
id: int
external_id: str | None
external_name: str
provider: int
integration_id: int
| ExternalActorResponse |
python | django__django | tests/gis_tests/geo3d/tests.py | {
"start": 3151,
"end": 4519
} | class ____:
def _load_interstate_data(self):
# Interstate (2D / 3D and Geographic/Projected variants)
for name, line, exp_z in interstate_data:
line_3d = GEOSGeometry(line, srid=4269)
line_2d = LineString([coord[:2] for coord in line_3d.coords], srid=4269)
# Creating a geographic and projected version of the
# interstate in both 2D and 3D.
Interstate3D.objects.create(name=name, line=line_3d)
InterstateProj3D.objects.create(name=name, line=line_3d)
Interstate2D.objects.create(name=name, line=line_2d)
InterstateProj2D.objects.create(name=name, line=line_2d)
def _load_city_data(self):
for name, pnt_data in city_data:
City3D.objects.create(
name=name,
point=Point(*pnt_data, srid=4326),
pointg=Point(*pnt_data, srid=4326),
)
def _load_polygon_data(self):
bbox_wkt, bbox_z = bbox_data
bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
bbox_3d = Polygon(
tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140
)
Polygon2D.objects.create(name="2D BBox", poly=bbox_2d)
Polygon3D.objects.create(name="3D BBox", poly=bbox_3d)
@skipUnlessDBFeature("supports_3d_storage")
| Geo3DLoadingHelper |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 36439,
"end": 36627
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("SHA1", "SHA256", "SHA384", "SHA512")
| SamlDigestAlgorithm |
python | PrefectHQ__prefect | src/prefect/settings/models/server/api.py | {
"start": 262,
"end": 5202
} | class ____(PrefectBaseSettings):
"""
Settings for controlling API server behavior
"""
model_config: ClassVar[SettingsConfigDict] = build_settings_config(
("server", "api")
)
auth_string: Optional[SecretStr] = Field(
default=None,
description="A string to use for basic authentication with the API in the form 'user:password'.",
)
host: str = Field(
default="127.0.0.1",
description="The API's host address (defaults to `127.0.0.1`).",
)
port: int = Field(
default=4200,
description="The API's port address (defaults to `4200`).",
)
base_path: Optional[str] = Field(
default=None,
description="The base URL path to serve the API under.",
examples=["/v2/api"],
)
default_limit: int = Field(
default=200,
description="The default limit applied to queries that can return multiple objects, such as `POST /flow_runs/filter`.",
validation_alias=AliasChoices(
AliasPath("default_limit"),
"prefect_server_api_default_limit",
"prefect_api_default_limit",
),
)
keepalive_timeout: int = Field(
default=5,
description="""
The API's keep alive timeout (defaults to `5`).
Refer to https://www.uvicorn.org/settings/#timeouts for details.
When the API is hosted behind a load balancer, you may want to set this to a value
greater than the load balancer's idle timeout.
Note this setting only applies when calling `prefect server start`; if hosting the
API with another tool you will need to configure this there instead.
""",
)
csrf_protection_enabled: bool = Field(
default=False,
description="""
Controls the activation of CSRF protection for the Prefect server API.
When enabled (`True`), the server enforces CSRF validation checks on incoming
state-changing requests (POST, PUT, PATCH, DELETE), requiring a valid CSRF
token to be included in the request headers or body. This adds a layer of
security by preventing unauthorized or malicious sites from making requests on
behalf of authenticated users.
It is recommended to enable this setting in production environments where the
API is exposed to web clients to safeguard against CSRF attacks.
Note: Enabling this setting requires corresponding support in the client for
CSRF token management. See PREFECT_CLIENT_CSRF_SUPPORT_ENABLED for more.
""",
validation_alias=AliasChoices(
AliasPath("csrf_protection_enabled"),
"prefect_server_api_csrf_protection_enabled",
"prefect_server_csrf_protection_enabled",
),
)
csrf_token_expiration: timedelta = Field(
default=timedelta(hours=1),
description="""
Specifies the duration for which a CSRF token remains valid after being issued
by the server.
The default expiration time is set to 1 hour, which offers a reasonable
compromise. Adjust this setting based on your specific security requirements
and usage patterns.
""",
validation_alias=AliasChoices(
AliasPath("csrf_token_expiration"),
"prefect_server_api_csrf_token_expiration",
"prefect_server_csrf_token_expiration",
),
)
cors_allowed_origins: str = Field(
default="*",
description="""
A comma-separated list of origins that are authorized to make cross-origin requests to the API.
By default, this is set to `*`, which allows requests from all origins.
""",
validation_alias=AliasChoices(
AliasPath("cors_allowed_origins"),
"prefect_server_api_cors_allowed_origins",
"prefect_server_cors_allowed_origins",
),
)
cors_allowed_methods: str = Field(
default="*",
description="""
A comma-separated list of methods that are authorized to make cross-origin requests to the API.
By default, this is set to `*`, which allows requests from all methods.
""",
validation_alias=AliasChoices(
AliasPath("cors_allowed_methods"),
"prefect_server_api_cors_allowed_methods",
"prefect_server_cors_allowed_methods",
),
)
cors_allowed_headers: str = Field(
default="*",
description="""
A comma-separated list of headers that are authorized to make cross-origin requests to the API.
By default, this is set to `*`, which allows requests from all headers.
""",
validation_alias=AliasChoices(
AliasPath("cors_allowed_headers"),
"prefect_server_api_cors_allowed_headers",
"prefect_server_cors_allowed_headers",
),
)
| ServerAPISettings |
python | python-pillow__Pillow | src/PIL/ImageWin.py | {
"start": 1173,
"end": 6685
} | class ____:
"""
A Windows bitmap with the given mode and size. The mode can be one of "1",
"L", "P", or "RGB".
If the display requires a palette, this constructor creates a suitable
palette and associates it with the image. For an "L" image, 128 graylevels
are allocated. For an "RGB" image, a 6x6x6 colour cube is used, together
with 20 graylevels.
To make sure that palettes work properly under Windows, you must call the
``palette`` method upon certain events from Windows.
:param image: Either a PIL image, or a mode string. If a mode string is
used, a size must also be given. The mode can be one of "1",
"L", "P", or "RGB".
:param size: If the first argument is a mode string, this
defines the size of the image.
"""
def __init__(
self, image: Image.Image | str, size: tuple[int, int] | None = None
) -> None:
if isinstance(image, str):
mode = image
image = ""
if size is None:
msg = "If first argument is mode, size is required"
raise ValueError(msg)
else:
mode = image.mode
size = image.size
if mode not in ["1", "L", "P", "RGB"]:
mode = Image.getmodebase(mode)
self.image = Image.core.display(mode, size)
self.mode = mode
self.size = size
if image:
assert not isinstance(image, str)
self.paste(image)
def expose(self, handle: int | HDC | HWND) -> None:
"""
Copy the bitmap contents to a device context.
:param handle: Device context (HDC), cast to a Python integer, or an
HDC or HWND instance. In PythonWin, you can use
``CDC.GetHandleAttrib()`` to get a suitable handle.
"""
handle_int = int(handle)
if isinstance(handle, HWND):
dc = self.image.getdc(handle_int)
try:
self.image.expose(dc)
finally:
self.image.releasedc(handle_int, dc)
else:
self.image.expose(handle_int)
def draw(
self,
handle: int | HDC | HWND,
dst: tuple[int, int, int, int],
src: tuple[int, int, int, int] | None = None,
) -> None:
"""
Same as expose, but allows you to specify where to draw the image, and
what part of it to draw.
The destination and source areas are given as 4-tuple rectangles. If
the source is omitted, the entire image is copied. If the source and
the destination have different sizes, the image is resized as
necessary.
"""
if src is None:
src = (0, 0) + self.size
handle_int = int(handle)
if isinstance(handle, HWND):
dc = self.image.getdc(handle_int)
try:
self.image.draw(dc, dst, src)
finally:
self.image.releasedc(handle_int, dc)
else:
self.image.draw(handle_int, dst, src)
def query_palette(self, handle: int | HDC | HWND) -> int:
"""
Installs the palette associated with the image in the given device
context.
This method should be called upon **QUERYNEWPALETTE** and
**PALETTECHANGED** events from Windows. If this method returns a
non-zero value, one or more display palette entries were changed, and
the image should be redrawn.
:param handle: Device context (HDC), cast to a Python integer, or an
HDC or HWND instance.
:return: The number of entries that were changed (if one or more entries,
this indicates that the image should be redrawn).
"""
handle_int = int(handle)
if isinstance(handle, HWND):
handle = self.image.getdc(handle_int)
try:
result = self.image.query_palette(handle)
finally:
self.image.releasedc(handle, handle)
else:
result = self.image.query_palette(handle_int)
return result
def paste(
self, im: Image.Image, box: tuple[int, int, int, int] | None = None
) -> None:
"""
Paste a PIL image into the bitmap image.
:param im: A PIL image. The size must match the target region.
If the mode does not match, the image is converted to the
mode of the bitmap image.
:param box: A 4-tuple defining the left, upper, right, and
lower pixel coordinate. See :ref:`coordinate-system`. If
None is given instead of a tuple, all of the image is
assumed.
"""
im.load()
if self.mode != im.mode:
im = im.convert(self.mode)
if box:
self.image.paste(im.im, box)
else:
self.image.paste(im.im)
def frombytes(self, buffer: bytes) -> None:
"""
Load display memory contents from byte data.
:param buffer: A buffer containing display data (usually
data returned from :py:func:`~PIL.ImageWin.Dib.tobytes`)
"""
self.image.frombytes(buffer)
def tobytes(self) -> bytes:
"""
Copy display memory contents to bytes object.
:return: A bytes object containing display data.
"""
return self.image.tobytes()
| Dib |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/model_query_return_annotation_extends.py | {
"start": 857,
"end": 1159
} | class ____:
def test2_noalarm1(self) -> Test2_C1:
return _test_source()
def test2_noalarm2(self) -> Test2_C3:
return _test_source()
def test2_alarm1(self) -> Test2_C2:
return _test_source()
def test2_alarm2(self) -> Test2_C4:
return _test_source()
| Test2 |
python | django__django | tests/handlers/tests.py | {
"start": 7362,
"end": 10792
} | class ____(SimpleTestCase):
request_factory = RequestFactory()
def test_async_view(self):
"""Calling an async view down the normal synchronous path."""
response = self.client.get("/async_regular/")
self.assertEqual(response.status_code, 200)
def test_suspiciousop_in_view_returns_400(self):
response = self.client.get("/suspicious/")
self.assertEqual(response.status_code, 400)
def test_bad_request_in_view_returns_400(self):
response = self.client.get("/bad_request/")
self.assertEqual(response.status_code, 400)
def test_invalid_urls(self):
response = self.client.get("~%A9helloworld")
self.assertEqual(response.status_code, 404)
self.assertEqual(response.context["request_path"], "/~%25A9helloworld")
response = self.client.get("d%aao%aaw%aan%aal%aao%aaa%aad%aa/")
self.assertEqual(
response.context["request_path"],
"/d%25AAo%25AAw%25AAn%25AAl%25AAo%25AAa%25AAd%25AA",
)
response = self.client.get("/%E2%99%E2%99%A5/")
self.assertEqual(response.context["request_path"], "/%25E2%2599%E2%99%A5/")
response = self.client.get("/%E2%98%8E%E2%A9%E2%99%A5/")
self.assertEqual(
response.context["request_path"], "/%E2%98%8E%25E2%25A9%E2%99%A5/"
)
def test_environ_path_info_type(self):
environ = self.request_factory.get("/%E2%A8%87%87%A5%E2%A8%A0").environ
self.assertIsInstance(environ["PATH_INFO"], str)
def test_handle_accepts_httpstatus_enum_value(self):
def start_response(status, headers):
start_response.status = status
environ = self.request_factory.get("/httpstatus_enum/").environ
WSGIHandler()(environ, start_response)
self.assertEqual(start_response.status, "200 OK")
@override_settings(MIDDLEWARE=["handlers.tests.empty_middleware"])
def test_middleware_returns_none(self):
msg = "Middleware factory handlers.tests.empty_middleware returned None."
with self.assertRaisesMessage(ImproperlyConfigured, msg):
self.client.get("/")
def test_no_response(self):
msg = (
"The view %s didn't return an HttpResponse object. It returned None "
"instead."
)
tests = (
("/no_response_fbv/", "handlers.views.no_response"),
("/no_response_cbv/", "handlers.views.NoResponse.__call__"),
)
for url, view in tests:
with (
self.subTest(url=url),
self.assertRaisesMessage(ValueError, msg % view),
):
self.client.get(url)
def test_streaming(self):
response = self.client.get("/streaming/")
self.assertEqual(response.status_code, 200)
self.assertEqual(b"".join(list(response)), b"streaming content")
def test_async_streaming(self):
response = self.client.get("/async_streaming/")
self.assertEqual(response.status_code, 200)
msg = (
"StreamingHttpResponse must consume asynchronous iterators in order to "
"serve them synchronously. Use a synchronous iterator instead."
)
with self.assertWarnsMessage(Warning, msg) as ctx:
self.assertEqual(b"".join(list(response)), b"streaming content")
self.assertEqual(ctx.filename, __file__)
| HandlerRequestTests |
python | scrapy__scrapy | scrapy/extension.py | {
"start": 316,
"end": 555
} | class ____(MiddlewareManager):
component_name = "extension"
@classmethod
def _get_mwlist_from_settings(cls, settings: Settings) -> list[Any]:
return build_component_list(settings.getwithbase("EXTENSIONS"))
| ExtensionManager |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 2342,
"end": 2795
} | class ____(Flaky):
"""Internal error raised by the conjecture engine if flaky failures are
detected during replay.
Carries information allowing the runner to reconstruct the flakiness as
a FlakyFailure exception group for final presentation.
"""
def __init__(self, reason, interesting_origins=None):
super().__init__(reason)
self.reason = reason
self._interesting_origins = interesting_origins
| FlakyReplay |
python | fastai__fastai | fastai/data/transforms.py | {
"start": 9112,
"end": 9967
} | class ____(DisplayedTransform):
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr()
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) or not c in getattr(r, '_fields', []) else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs):
if len(self.cols) == 1: return self._do_one(o, self.cols[0])
return L(self._do_one(o, c) for c in self.cols)
# %% ../../nbs/05_data.transforms.ipynb 72
| ColReader |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/s3/file_manager.py | {
"start": 951,
"end": 3456
} | class ____(FileManager):
def __init__(self, s3_session, s3_bucket, s3_base_key):
self._s3_session = s3_session
self._s3_bucket = check.str_param(s3_bucket, "s3_bucket")
self._s3_base_key = check.str_param(s3_base_key, "s3_base_key")
self._local_handle_cache = {}
self._temp_file_manager = TempfileManager()
def copy_handle_to_local_temp(self, file_handle):
self._download_if_not_cached(file_handle)
return self._get_local_path(file_handle)
def _download_if_not_cached(self, file_handle):
if not self._file_handle_cached(file_handle):
# instigate download
temp_file_obj = self._temp_file_manager.tempfile()
temp_name = temp_file_obj.name
self._s3_session.download_file(
Bucket=file_handle.s3_bucket, Key=file_handle.s3_key, Filename=temp_name
)
self._local_handle_cache[file_handle.s3_path] = temp_name
return file_handle
@contextmanager
def read(self, file_handle, mode="rb"): # pyright: ignore[reportIncompatibleMethodOverride]
check.inst_param(file_handle, "file_handle", S3FileHandle)
check.str_param(mode, "mode")
check.param_invariant(mode in {"r", "rb"}, "mode")
self._download_if_not_cached(file_handle)
encoding = None if mode == "rb" else "utf-8"
with open(self._get_local_path(file_handle), mode, encoding=encoding) as file_obj:
yield file_obj
def _file_handle_cached(self, file_handle):
return file_handle.s3_path in self._local_handle_cache
def _get_local_path(self, file_handle):
return self._local_handle_cache[file_handle.s3_path]
def read_data(self, file_handle):
with self.read(file_handle, mode="rb") as file_obj:
return file_obj.read()
def write_data(self, data, ext=None):
check.inst_param(data, "data", bytes)
return self.write(io.BytesIO(data), mode="wb", ext=ext)
def write(self, file_obj, mode="wb", ext=None):
check_file_like_obj(file_obj)
s3_key = self.get_full_key(str(uuid.uuid4()) + (("." + ext) if ext is not None else ""))
self._s3_session.put_object(Body=file_obj, Bucket=self._s3_bucket, Key=s3_key)
return S3FileHandle(self._s3_bucket, s3_key)
def get_full_key(self, file_key):
return f"{self._s3_base_key}/{file_key}"
def delete_local_temp(self):
self._temp_file_manager.close()
| S3FileManager |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarDefaultClass1.py | {
"start": 3318,
"end": 3491
} | class ____(Generic[P6]): ...
assert_type(ClassE, type[ClassE[str, int]])
assert_type(ClassE(), ClassE[str, int])
assert_type(ClassE[[bool, bool]](), ClassE[bool, bool])
| ClassE |
python | Textualize__textual | tests/css/test_parse.py | {
"start": 563,
"end": 27293
} | class ____:
def test_simple_reference(self):
css = "$x: 1; #some-widget{border: $x;}"
variables = substitute_references(tokenize(css, ("", "")))
assert list(variables) == [
Token(
name="variable_name",
value="$x:",
read_from=("", ""),
code=css,
location=(0, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=None,
),
Token(
name="variable_value_end",
value=";",
read_from=("", ""),
code=css,
location=(0, 5),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 6),
referenced_by=None,
),
Token(
name="selector_start_id",
value="#some-widget",
read_from=("", ""),
code=css,
location=(0, 7),
referenced_by=None,
),
Token(
name="declaration_set_start",
value="{",
read_from=("", ""),
code=css,
location=(0, 19),
referenced_by=None,
),
Token(
name="declaration_name",
value="border:",
read_from=("", ""),
code=css,
location=(0, 20),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 27),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=ReferencedBy(
name="x", location=(0, 28), length=2, code=css
),
),
Token(
name="declaration_end",
value=";",
read_from=("", ""),
code=css,
location=(0, 30),
referenced_by=None,
),
Token(
name="declaration_set_end",
value="}",
read_from=("", ""),
code=css,
location=(0, 31),
referenced_by=None,
),
]
def test_simple_reference_no_whitespace(self):
css = "$x:1; #some-widget{border: $x;}"
variables = substitute_references(tokenize(css, ("", "")))
assert list(variables) == [
Token(
name="variable_name",
value="$x:",
read_from=("", ""),
code=css,
location=(0, 0),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=None,
),
Token(
name="variable_value_end",
value=";",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 5),
referenced_by=None,
),
Token(
name="selector_start_id",
value="#some-widget",
read_from=("", ""),
code=css,
location=(0, 6),
referenced_by=None,
),
Token(
name="declaration_set_start",
value="{",
read_from=("", ""),
code=css,
location=(0, 18),
referenced_by=None,
),
Token(
name="declaration_name",
value="border:",
read_from=("", ""),
code=css,
location=(0, 19),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 26),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=ReferencedBy(
name="x", location=(0, 27), length=2, code=css
),
),
Token(
name="declaration_end",
value=";",
read_from=("", ""),
code=css,
location=(0, 29),
referenced_by=None,
),
Token(
name="declaration_set_end",
value="}",
read_from=("", ""),
code=css,
location=(0, 30),
referenced_by=None,
),
]
def test_undefined_variable(self):
css = ".thing { border: $not-defined; }"
with pytest.raises(UnresolvedVariableError):
list(substitute_references(tokenize(css, ("", ""))))
def test_empty_variable(self):
css = "$x:\n* { background:$x; }"
result = list(substitute_references(tokenize(css, ("", ""))))
assert [(t.name, t.value) for t in result] == [
("variable_name", "$x:"),
("variable_value_end", "\n"),
("selector_start_universal", "*"),
("whitespace", " "),
("declaration_set_start", "{"),
("whitespace", " "),
("declaration_name", "background:"),
("declaration_end", ";"),
("whitespace", " "),
("declaration_set_end", "}"),
]
def test_transitive_reference(self):
css = "$x: 1\n$y: $x\n.thing { border: $y }"
assert list(substitute_references(tokenize(css, ("", "")))) == [
Token(
name="variable_name",
value="$x:",
read_from=("", ""),
code=css,
location=(0, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=None,
),
Token(
name="variable_value_end",
value="\n",
read_from=("", ""),
code=css,
location=(0, 5),
referenced_by=None,
),
Token(
name="variable_name",
value="$y:",
read_from=("", ""),
code=css,
location=(1, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 3),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=ReferencedBy(
name="x", location=(1, 4), length=2, code=css
),
),
Token(
name="variable_value_end",
value="\n",
read_from=("", ""),
code=css,
location=(1, 6),
referenced_by=None,
),
Token(
name="selector_start_class",
value=".thing",
read_from=("", ""),
code=css,
location=(2, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 6),
referenced_by=None,
),
Token(
name="declaration_set_start",
value="{",
read_from=("", ""),
code=css,
location=(2, 7),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 8),
referenced_by=None,
),
Token(
name="declaration_name",
value="border:",
read_from=("", ""),
code=css,
location=(2, 9),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 16),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 19),
referenced_by=None,
),
Token(
name="declaration_set_end",
value="}",
read_from=("", ""),
code=css,
location=(2, 20),
referenced_by=None,
),
]
def test_multi_value_variable(self):
css = "$x: 2 4\n$y: 6 $x 2\n.thing { border: $y }"
assert list(substitute_references(tokenize(css, ("", "")))) == [
Token(
name="variable_name",
value="$x:",
read_from=("", ""),
code=css,
location=(0, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=None,
),
Token(
name="number",
value="2",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 5),
referenced_by=None,
),
Token(
name="number",
value="4",
read_from=("", ""),
code=css,
location=(0, 6),
referenced_by=None,
),
Token(
name="variable_value_end",
value="\n",
read_from=("", ""),
code=css,
location=(0, 7),
referenced_by=None,
),
Token(
name="variable_name",
value="$y:",
read_from=("", ""),
code=css,
location=(1, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 3),
referenced_by=None,
),
Token(
name="number",
value="6",
read_from=("", ""),
code=css,
location=(1, 4),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 5),
referenced_by=None,
),
Token(
name="number",
value="2",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=ReferencedBy(
name="x", location=(1, 6), length=2, code=css
),
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 5),
referenced_by=ReferencedBy(
name="x", location=(1, 6), length=2, code=css
),
),
Token(
name="number",
value="4",
read_from=("", ""),
code=css,
location=(0, 6),
referenced_by=ReferencedBy(
name="x", location=(1, 6), length=2, code=css
),
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 8),
referenced_by=None,
),
Token(
name="number",
value="2",
read_from=("", ""),
code=css,
location=(1, 9),
referenced_by=None,
),
Token(
name="variable_value_end",
value="\n",
read_from=("", ""),
code=css,
location=(1, 10),
referenced_by=None,
),
Token(
name="selector_start_class",
value=".thing",
read_from=("", ""),
code=css,
location=(2, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 6),
referenced_by=None,
),
Token(
name="declaration_set_start",
value="{",
read_from=("", ""),
code=css,
location=(2, 7),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 8),
referenced_by=None,
),
Token(
name="declaration_name",
value="border:",
read_from=("", ""),
code=css,
location=(2, 9),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 16),
referenced_by=None,
),
Token(
name="number",
value="6",
read_from=("", ""),
code=css,
location=(1, 4),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 5),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="number",
value="2",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 5),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="number",
value="4",
read_from=("", ""),
code=css,
location=(0, 6),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 8),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="number",
value="2",
read_from=("", ""),
code=css,
location=(1, 9),
referenced_by=ReferencedBy(
name="y", location=(2, 17), length=2, code=css
),
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(2, 19),
referenced_by=None,
),
Token(
name="declaration_set_end",
value="}",
read_from=("", ""),
code=css,
location=(2, 20),
referenced_by=None,
),
]
def test_variable_used_inside_property_value(self):
css = "$x: red\n.thing { border: on $x; }"
assert list(substitute_references(tokenize(css, ("", "")))) == [
Token(
name="variable_name",
value="$x:",
read_from=("", ""),
code=css,
location=(0, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=None,
),
Token(
name="token",
value="red",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=None,
),
Token(
name="variable_value_end",
value="\n",
read_from=("", ""),
code=css,
location=(0, 7),
referenced_by=None,
),
Token(
name="selector_start_class",
value=".thing",
read_from=("", ""),
code=css,
location=(1, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 6),
referenced_by=None,
),
Token(
name="declaration_set_start",
value="{",
read_from=("", ""),
code=css,
location=(1, 7),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 8),
referenced_by=None,
),
Token(
name="declaration_name",
value="border:",
read_from=("", ""),
code=css,
location=(1, 9),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 16),
referenced_by=None,
),
Token(
name="token",
value="on",
read_from=("", ""),
code=css,
location=(1, 17),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 19),
referenced_by=None,
),
Token(
name="token",
value="red",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=ReferencedBy(
name="x", location=(1, 20), length=2, code=css
),
),
Token(
name="declaration_end",
value=";",
read_from=("", ""),
code=css,
location=(1, 22),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(1, 23),
referenced_by=None,
),
Token(
name="declaration_set_end",
value="}",
read_from=("", ""),
code=css,
location=(1, 24),
referenced_by=None,
),
]
def test_variable_definition_eof(self):
css = "$x: 1"
assert list(substitute_references(tokenize(css, ("", "")))) == [
Token(
name="variable_name",
value="$x:",
read_from=("", ""),
code=css,
location=(0, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=None,
),
Token(
name="number",
value="1",
read_from=("", ""),
code=css,
location=(0, 4),
referenced_by=None,
),
]
def test_variable_reference_whitespace_trimming(self):
css = "$x: 123;.thing{border: $x}"
assert list(substitute_references(tokenize(css, ("", "")))) == [
Token(
name="variable_name",
value="$x:",
read_from=("", ""),
code=css,
location=(0, 0),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 3),
referenced_by=None,
),
Token(
name="number",
value="123",
read_from=("", ""),
code=css,
location=(0, 7),
referenced_by=None,
),
Token(
name="variable_value_end",
value=";",
read_from=("", ""),
code=css,
location=(0, 10),
referenced_by=None,
),
Token(
name="selector_start_class",
value=".thing",
read_from=("", ""),
code=css,
location=(0, 11),
referenced_by=None,
),
Token(
name="declaration_set_start",
value="{",
read_from=("", ""),
code=css,
location=(0, 17),
referenced_by=None,
),
Token(
name="declaration_name",
value="border:",
read_from=("", ""),
code=css,
location=(0, 18),
referenced_by=None,
),
Token(
name="whitespace",
value=" ",
read_from=("", ""),
code=css,
location=(0, 25),
referenced_by=None,
),
Token(
name="number",
value="123",
read_from=("", ""),
code=css,
location=(0, 7),
referenced_by=ReferencedBy(
name="x", location=(0, 26), length=2, code=css
),
),
Token(
name="declaration_set_end",
value="}",
read_from=("", ""),
code=css,
location=(0, 28),
referenced_by=None,
),
]
| TestVariableReferenceSubstitution |
python | numpy__numpy | numpy/ma/tests/test_core.py | {
"start": 205123,
"end": 208555
} | class ____:
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
d = np.arange(24.0).reshape((2, 3, 4))
m = np.zeros(24, dtype=bool).reshape((2, 3, 4))
# mask out last element of last dimension
m[:, :, -1] = True
a = np.ma.array(d, mask=m)
def testaxis(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test axis arg
assert_equal(ma_f(a, axis=1)[..., :-1], numpy_f(d[..., :-1], axis=1))
assert_equal(ma_f(a, axis=(0, 1))[..., :-1],
numpy_f(d[..., :-1], axis=(0, 1)))
def testkeepdims(f, a, d):
numpy_f = numpy.__getattribute__(f)
ma_f = np.ma.__getattribute__(f)
# test keepdims arg
assert_equal(ma_f(a, keepdims=True).shape,
numpy_f(d, keepdims=True).shape)
assert_equal(ma_f(a, keepdims=False).shape,
numpy_f(d, keepdims=False).shape)
# test both at once
assert_equal(ma_f(a, axis=1, keepdims=True)[..., :-1],
numpy_f(d[..., :-1], axis=1, keepdims=True))
assert_equal(ma_f(a, axis=(0, 1), keepdims=True)[..., :-1],
numpy_f(d[..., :-1], axis=(0, 1), keepdims=True))
for f in ['sum', 'prod', 'mean', 'var', 'std']:
testaxis(f, a, d)
testkeepdims(f, a, d)
for f in ['min', 'max']:
testaxis(f, a, d)
d = (np.arange(24).reshape((2, 3, 4)) % 2 == 0)
a = np.ma.array(d, mask=m)
for f in ['all', 'any']:
testaxis(f, a, d)
testkeepdims(f, a, d)
def test_count(self):
# test np.ma.count specially
d = np.arange(24.0).reshape((2, 3, 4))
m = np.zeros(24, dtype=bool).reshape((2, 3, 4))
m[:, 0, :] = True
a = np.ma.array(d, mask=m)
assert_equal(count(a), 16)
assert_equal(count(a, axis=1), 2 * ones((2, 4)))
assert_equal(count(a, axis=(0, 1)), 4 * ones((4,)))
assert_equal(count(a, keepdims=True), 16 * ones((1, 1, 1)))
assert_equal(count(a, axis=1, keepdims=True), 2 * ones((2, 1, 4)))
assert_equal(count(a, axis=(0, 1), keepdims=True), 4 * ones((1, 1, 4)))
assert_equal(count(a, axis=-2), 2 * ones((2, 4)))
assert_raises(ValueError, count, a, axis=(1, 1))
assert_raises(AxisError, count, a, axis=3)
# check the 'nomask' path
a = np.ma.array(d, mask=nomask)
assert_equal(count(a), 24)
assert_equal(count(a, axis=1), 3 * ones((2, 4)))
assert_equal(count(a, axis=(0, 1)), 6 * ones((4,)))
assert_equal(count(a, keepdims=True), 24 * ones((1, 1, 1)))
assert_equal(np.ndim(count(a, keepdims=True)), 3)
assert_equal(count(a, axis=1, keepdims=True), 3 * ones((2, 1, 4)))
assert_equal(count(a, axis=(0, 1), keepdims=True), 6 * ones((1, 1, 4)))
assert_equal(count(a, axis=-2), 3 * ones((2, 4)))
assert_raises(ValueError, count, a, axis=(1, 1))
assert_raises(AxisError, count, a, axis=3)
# check the 'masked' singleton
assert_equal(count(np.ma.masked), 0)
# check 0-d arrays do not allow axis > 0
assert_raises(AxisError, count, np.ma.array(1), axis=1)
| TestOptionalArgs |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/quickly_change_tabs.py | {
"start": 186,
"end": 712
} | class ____(App[None]):
def compose(self) -> ComposeResult:
with TabbedContent():
with TabPane("one"):
yield Label("one")
with TabPane("two"):
yield Label("two")
with TabPane("three", id="three"):
yield Label("three")
def key_p(self) -> None:
self.query_one(Tabs).action_next_tab()
self.query_one(Tabs).action_next_tab()
app = QuicklyChangeTabsApp()
if __name__ == "__main__":
app.run()
| QuicklyChangeTabsApp |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/base.py | {
"start": 115414,
"end": 115860
} | class ____(
characteristics.ConnectionCharacteristic
):
transactional = True
def reset_characteristic(self, dialect, dbapi_conn):
dialect.set_deferrable(dbapi_conn, False)
def set_characteristic(self, dialect, dbapi_conn, value):
dialect.set_deferrable(dbapi_conn, value)
def get_characteristic(self, dialect, dbapi_conn):
return dialect.get_deferrable(dbapi_conn)
| PGDeferrableConnectionCharacteristic |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 11810,
"end": 12339
} | class ____(AnsibleSerializable, t.Generic[_T], metaclass=abc.ABCMeta):
__slots__ = ('_value',)
_wrapped_types: t.ClassVar[dict[type, type[AnsibleSerializable]]] = {}
_wrapped_type: t.ClassVar[type] = type(None)
def __init__(self, value: _T) -> None:
self._value: _T = value
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls._wrapped_type = t.get_args(cls.__orig_bases__[0])[0]
cls._wrapped_types[cls._wrapped_type] = cls
| AnsibleSerializableWrapper |
python | tqdm__tqdm | tqdm/tk.py | {
"start": 434,
"end": 6701
} | class ____(std_tqdm): # pragma: no cover
"""
Experimental Tkinter GUI version of tqdm!
Note: Window interactivity suffers if `tqdm_tk` is not running within
a Tkinter mainloop and values are generated infrequently. In this case,
consider calling `tqdm_tk.refresh()` frequently in the Tk thread.
"""
# TODO: @classmethod: write()?
def __init__(self, *args, **kwargs):
"""
This class accepts the following parameters *in addition* to
the parameters accepted by `tqdm`.
Parameters
----------
grab : bool, optional
Grab the input across all windows of the process.
tk_parent : `tkinter.Wm`, optional
Parent Tk window.
cancel_callback : Callable, optional
Create a cancel button and set `cancel_callback` to be called
when the cancel or window close button is clicked.
"""
kwargs = kwargs.copy()
kwargs['gui'] = True
# convert disable = None to False
kwargs['disable'] = bool(kwargs.get('disable', False))
self._warn_leave = 'leave' in kwargs
grab = kwargs.pop('grab', False)
tk_parent = kwargs.pop('tk_parent', None)
self._cancel_callback = kwargs.pop('cancel_callback', None)
super().__init__(*args, **kwargs)
if self.disable:
return
if tk_parent is None: # Discover parent widget
try:
tk_parent = tkinter._default_root
except AttributeError:
raise AttributeError(
"`tk_parent` required when using `tkinter.NoDefaultRoot()`")
if tk_parent is None: # use new default root window as display
self._tk_window = tkinter.Tk()
else: # some other windows already exist
self._tk_window = tkinter.Toplevel()
else:
self._tk_window = tkinter.Toplevel(tk_parent)
warn("GUI is experimental/alpha", TqdmExperimentalWarning, stacklevel=2)
self._tk_dispatching = self._tk_dispatching_helper()
self._tk_window.protocol("WM_DELETE_WINDOW", self.cancel)
self._tk_window.wm_title(self.desc)
self._tk_window.wm_attributes("-topmost", 1)
self._tk_window.after(0, lambda: self._tk_window.wm_attributes("-topmost", 0))
self._tk_n_var = tkinter.DoubleVar(self._tk_window, value=0)
self._tk_text_var = tkinter.StringVar(self._tk_window)
pbar_frame = ttk.Frame(self._tk_window, padding=5)
pbar_frame.pack()
_tk_label = ttk.Label(pbar_frame, textvariable=self._tk_text_var,
wraplength=600, anchor="center", justify="center")
_tk_label.pack()
self._tk_pbar = ttk.Progressbar(
pbar_frame, variable=self._tk_n_var, length=450)
if self.total is not None:
self._tk_pbar.configure(maximum=self.total)
else:
self._tk_pbar.configure(mode="indeterminate")
self._tk_pbar.pack()
if self._cancel_callback is not None:
_tk_button = ttk.Button(pbar_frame, text="Cancel", command=self.cancel)
_tk_button.pack()
if grab:
self._tk_window.grab_set()
def close(self):
if self.disable:
return
self.disable = True
with self.get_lock():
self._instances.remove(self)
def _close():
self._tk_window.after('idle', self._tk_window.destroy)
if not self._tk_dispatching:
self._tk_window.update()
self._tk_window.protocol("WM_DELETE_WINDOW", _close)
# if leave is set but we are self-dispatching, the left window is
# totally unresponsive unless the user manually dispatches
if not self.leave:
_close()
elif not self._tk_dispatching:
if self._warn_leave:
warn("leave flag ignored if not in tkinter mainloop",
TqdmWarning, stacklevel=2)
_close()
def clear(self, *_, **__):
pass
def display(self, *_, **__):
self._tk_n_var.set(self.n)
d = self.format_dict
# remove {bar}
d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace(
"{bar}", "<bar/>")
msg = self.format_meter(**d)
if '<bar/>' in msg:
msg = "".join(re.split(r'\|?<bar/>\|?', msg, maxsplit=1))
self._tk_text_var.set(msg)
if not self._tk_dispatching:
self._tk_window.update()
def set_description(self, desc=None, refresh=True):
self.set_description_str(desc, refresh)
def set_description_str(self, desc=None, refresh=True):
self.desc = desc
if not self.disable:
self._tk_window.wm_title(desc)
if refresh and not self._tk_dispatching:
self._tk_window.update()
def cancel(self):
"""
`cancel_callback()` followed by `close()`
when close/cancel buttons clicked.
"""
if self._cancel_callback is not None:
self._cancel_callback()
self.close()
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
if hasattr(self, '_tk_pbar'):
if total is None:
self._tk_pbar.configure(maximum=100, mode="indeterminate")
else:
self._tk_pbar.configure(maximum=total, mode="determinate")
super().reset(total=total)
@staticmethod
def _tk_dispatching_helper():
"""determine if Tkinter mainloop is dispatching events"""
codes = {tkinter.mainloop.__code__, tkinter.Misc.mainloop.__code__}
for frame in sys._current_frames().values():
while frame:
if frame.f_code in codes:
return True
frame = frame.f_back
return False
def ttkrange(*args, **kwargs):
"""Shortcut for `tqdm.tk.tqdm(range(*args), **kwargs)`."""
return tqdm_tk(range(*args), **kwargs)
# Aliases
tqdm = tqdm_tk
trange = ttkrange
| tqdm_tk |
python | encode__django-rest-framework | rest_framework/permissions.py | {
"start": 4100,
"end": 4298
} | class ____(BasePermission):
"""
Allows access only to admin users.
"""
def has_permission(self, request, view):
return bool(request.user and request.user.is_staff)
| IsAdminUser |
python | huggingface__transformers | src/transformers/models/rt_detr/image_processing_rt_detr.py | {
"start": 1904,
"end": 12900
} | class ____(ImagesKwargs, total=False):
r"""
format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`):
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
do_convert_annotations (`bool`, *optional*, defaults to `True`):
Controls whether to convert the annotations to the format expected by the RT_DETR model. Converts the
bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
return_segmentation_masks (`bool`, *optional*, defaults to `False`):
Whether to return segmentation masks.
annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
Annotations to transform according to the padding that is applied to the images.
masks_path (`str` or `pathlib.Path`, *optional*):
Path to the directory containing the segmentation masks.
"""
format: Union[str, AnnotationFormat]
do_convert_annotations: bool
return_segmentation_masks: bool
annotations: Optional[Union[AnnotationType, list[AnnotationType]]]
masks_path: Optional[Union[str, pathlib.Path]]
def get_resize_output_image_size(
input_image: np.ndarray,
size: Union[int, tuple[int, int], list[int]],
max_size: Optional[int] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
"""
Computes the output image size given the input image size and the desired output size. If the desired output size
is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
image size is computed by keeping the aspect ratio of the input image size.
Args:
input_image (`np.ndarray`):
The image to resize.
size (`int` or `tuple[int, int]` or `list[int]`):
The desired output size.
max_size (`int`, *optional*):
The maximum allowed output size.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input image.
"""
image_size = get_image_size(input_image, input_data_format)
if isinstance(size, (list, tuple)):
return size
return get_size_with_aspect_ratio(image_size, size, max_size)
# Copied from transformers.models.detr.image_processing_detr.get_image_size_for_max_height_width
def get_image_size_for_max_height_width(
input_image: np.ndarray,
max_height: int,
max_width: int,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
"""
Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio.
Important, even if image_height < max_height and image_width < max_width, the image will be resized
to at least one of the edges be equal to max_height or max_width.
For example:
- input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50)
- input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400)
Args:
input_image (`np.ndarray`):
The image to resize.
max_height (`int`):
The maximum allowed height.
max_width (`int`):
The maximum allowed width.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input image.
"""
image_size = get_image_size(input_image, input_data_format)
height, width = image_size
height_scale = max_height / height
width_scale = max_width / width
min_scale = min(height_scale, width_scale)
new_height = int(height * min_scale)
new_width = int(width * min_scale)
return new_height, new_width
# Copied from transformers.models.detr.image_processing_detr.safe_squeeze
def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
"""
Squeezes an array, but only if the axis specified has dim 1.
"""
if axis is None:
return arr.squeeze()
try:
return arr.squeeze(axis=axis)
except ValueError:
return arr
# Copied from transformers.models.detr.image_processing_detr.normalize_annotation
def normalize_annotation(annotation: dict, image_size: tuple[int, int]) -> dict:
image_height, image_width = image_size
norm_annotation = {}
for key, value in annotation.items():
if key == "boxes":
boxes = value
boxes = corners_to_center_format(boxes)
boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
norm_annotation[key] = boxes
else:
norm_annotation[key] = value
return norm_annotation
# Copied from transformers.models.detr.image_processing_detr.max_across_indices
def max_across_indices(values: Iterable[Any]) -> list[Any]:
"""
Return the maximum value across all indices of an iterable of values.
"""
return [max(values_i) for values_i in zip(*values)]
# Copied from transformers.models.detr.image_processing_detr.get_max_height_width
def get_max_height_width(
images: list[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> list[int]:
"""
Get the maximum height and width across all images in a batch.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if input_data_format == ChannelDimension.FIRST:
_, max_height, max_width = max_across_indices([img.shape for img in images])
elif input_data_format == ChannelDimension.LAST:
max_height, max_width, _ = max_across_indices([img.shape for img in images])
else:
raise ValueError(f"Invalid channel dimension format: {input_data_format}")
return (max_height, max_width)
# Copied from transformers.models.detr.image_processing_detr.make_pixel_mask
def make_pixel_mask(
image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
"""
Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
Args:
image (`np.ndarray`):
Image to make the pixel mask for.
output_size (`tuple[int, int]`):
Output size of the mask.
"""
input_height, input_width = get_image_size(image, channel_dim=input_data_format)
mask = np.zeros(output_size, dtype=np.int64)
mask[:input_height, :input_width] = 1
return mask
def prepare_coco_detection_annotation(
image,
target,
return_segmentation_masks: bool = False,
input_data_format: Optional[Union[ChannelDimension, str]] = None,
):
"""
Convert the target in COCO format into the format expected by RTDETR.
"""
image_height, image_width = get_image_size(image, channel_dim=input_data_format)
image_id = target["image_id"]
image_id = np.asarray([image_id], dtype=np.int64)
# Get all COCO annotations for the given image.
annotations = target["annotations"]
annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]
classes = [obj["category_id"] for obj in annotations]
classes = np.asarray(classes, dtype=np.int64)
# for conversion to coco api
area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
iscrowd = np.asarray([obj.get("iscrowd", 0) for obj in annotations], dtype=np.int64)
boxes = [obj["bbox"] for obj in annotations]
# guard against no boxes via resizing
boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
new_target = {}
new_target["image_id"] = image_id
new_target["class_labels"] = classes[keep]
new_target["boxes"] = boxes[keep]
new_target["area"] = area[keep]
new_target["iscrowd"] = iscrowd[keep]
new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)
if annotations and "keypoints" in annotations[0]:
keypoints = [obj["keypoints"] for obj in annotations]
# Converting the filtered keypoints list to a numpy array
keypoints = np.asarray(keypoints, dtype=np.float32)
# Apply the keep mask here to filter the relevant annotations
keypoints = keypoints[keep]
num_keypoints = keypoints.shape[0]
keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
new_target["keypoints"] = keypoints
return new_target
# Copied from transformers.models.detr.image_processing_detr.resize_annotation
def resize_annotation(
annotation: dict[str, Any],
orig_size: tuple[int, int],
target_size: tuple[int, int],
threshold: float = 0.5,
resample: PILImageResampling = PILImageResampling.NEAREST,
):
"""
Resizes an annotation to a target size.
Args:
annotation (`dict[str, Any]`):
The annotation dictionary.
orig_size (`tuple[int, int]`):
The original size of the input image.
target_size (`tuple[int, int]`):
The target size of the image, as returned by the preprocessing `resize` step.
threshold (`float`, *optional*, defaults to 0.5):
The threshold used to binarize the segmentation masks.
resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
The resampling filter to use when resizing the masks.
"""
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size))
ratio_height, ratio_width = ratios
new_annotation = {}
new_annotation["size"] = target_size
for key, value in annotation.items():
if key == "boxes":
boxes = value
scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32)
new_annotation["boxes"] = scaled_boxes
elif key == "area":
area = value
scaled_area = area * (ratio_width * ratio_height)
new_annotation["area"] = scaled_area
elif key == "masks":
masks = value[:, None]
masks = np.array([resize(mask, target_size, resample=resample) for mask in masks])
masks = masks.astype(np.float32)
masks = masks[:, 0] > threshold
new_annotation["masks"] = masks
elif key == "size":
new_annotation["size"] = target_size
else:
new_annotation[key] = value
return new_annotation
| RTDetrImageProcessorKwargs |
python | django-extensions__django-extensions | django_extensions/management/commands/list_model_info.py | {
"start": 376,
"end": 6651
} | class ____(BaseCommand):
"""A simple management command which lists model fields and methods."""
help = "List out the fields and methods for each model"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--field-class",
action="store_true",
default=None,
help="show class name of field.",
)
parser.add_argument(
"--db-type",
action="store_true",
default=None,
help="show database column type of field.",
)
parser.add_argument(
"--signature",
action="store_true",
default=None,
help="show the signature of method.",
)
parser.add_argument(
"--all-methods",
action="store_true",
default=None,
help="list all methods, including private and default.",
)
parser.add_argument(
"--model",
nargs="?",
type=str,
default=None,
help="list the details for a single model. "
"Input should be in the form appname.Modelname",
)
def list_model_info(self, options):
style = color_style()
INFO = getattr(style, "INFO", lambda x: x)
WARN = getattr(style, "WARN", lambda x: x)
BOLD = getattr(style, "BOLD", lambda x: x)
FIELD_CLASS = (
True
if options.get("field_class", None) is not None
else getattr(settings, "MODEL_INFO_FIELD_CLASS", False)
)
DB_TYPE = (
True
if options.get("db_type", None) is not None
else getattr(settings, "MODEL_INFO_DB_TYPE", False)
)
SIGNATURE = (
True
if options.get("signature", None) is not None
else getattr(settings, "MODEL_INFO_SIGNATURE", False)
)
ALL_METHODS = (
True
if options.get("all_methods", None) is not None
else getattr(settings, "MODEL_INFO_ALL_METHODS", False)
)
MODEL = (
options.get("model")
if options.get("model", None) is not None
else getattr(settings, "MODEL_INFO_MODEL", False)
)
default_methods = [
"adelete",
"arefresh_from_db",
"asave",
"check",
"clean",
"clean_fields",
"date_error_message",
"delete",
"from_db",
"full_clean",
"get_absolute_url",
"get_constraints",
"get_deferred_fields",
"prepare_database_save",
"refresh_from_db",
"save",
"save_base",
"serializable_value",
"unique_error_message",
"validate_constraints",
"validate_unique",
]
if MODEL:
model_list = [django_apps.get_model(MODEL)]
else:
model_list = sorted(
django_apps.get_models(),
key=lambda x: (x._meta.app_label, x._meta.object_name),
reverse=False,
)
for model in model_list:
self.stdout.write(
INFO(model._meta.app_label + "." + model._meta.object_name)
)
self.stdout.write(BOLD(HALFTAB + "Fields:"))
for field in model._meta.get_fields():
field_info = TAB + field.name + " -"
if FIELD_CLASS:
try:
field_info += " " + field.__class__.__name__
except TypeError:
field_info += WARN(" TypeError (field_class)")
except AttributeError:
field_info += WARN(" AttributeError (field_class)")
if FIELD_CLASS and DB_TYPE:
field_info += ","
if DB_TYPE:
try:
field_info += " " + field.db_type(connection=connection)
except TypeError:
field_info += WARN(" TypeError (db_type)")
except AttributeError:
field_info += WARN(" AttributeError (db_type)")
self.stdout.write(field_info)
if ALL_METHODS:
self.stdout.write(BOLD(HALFTAB + "Methods (all):"))
else:
self.stdout.write(BOLD(HALFTAB + "Methods (non-private/internal):"))
for method_name in dir(model):
try:
method = getattr(model, method_name)
if ALL_METHODS:
if callable(method) and not method_name[0].isupper():
if SIGNATURE:
signature = inspect.signature(method)
else:
signature = "()"
self.stdout.write(TAB + method_name + str(signature))
else:
if (
callable(method)
and not method_name.startswith("_")
and method_name not in default_methods
and not method_name[0].isupper()
):
if SIGNATURE:
signature = inspect.signature(method)
else:
signature = "()"
self.stdout.write(TAB + method_name + str(signature))
except AttributeError:
self.stdout.write(TAB + method_name + WARN(" - AttributeError"))
except ValueError:
self.stdout.write(
TAB
+ method_name
+ WARN(" - ValueError (could not identify signature)")
)
self.stdout.write("\n")
self.stdout.write(INFO("Total Models Listed: %d" % len(model_list)))
@signalcommand
def handle(self, *args, **options):
self.list_model_info(options)
| Command |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/worker_api/datamodels_ui.py | {
"start": 2304,
"end": 2427
} | class ____(BaseModel):
"""Job Collection serializer."""
jobs: list[Job]
total_entries: int
| JobCollectionResponse |
python | kamyu104__LeetCode-Solutions | Python/consecutive-numbers-sum.py | {
"start": 35,
"end": 987
} | class ____(object):
def consecutiveNumbersSum(self, N):
"""
:type N: int
:rtype: int
"""
# x + x+1 + x+2 + ... + x+l-1 = N = 2^k * M, where k >= 0 and M is odd
# => l*x + (l-1)*l/2 = 2^k * M
# => x = (2^k * M - (l-1)*l/2) / l = 2^k * M/l - (l-1)/2 is a positive integer
# => l is either 2^(k+1)*M/f or f (f is factors of M)
# => the answer is the number of all odd factors of M
# if prime factorization of N is 2^k * p1^a * p2^b * ...
# => answer is the number of all odd factors = (a+1) * (b+1) * ...
result = 1
while N % 2 == 0:
N /= 2
i = 3
while i*i <= N:
count = 0
while N % i == 0:
N /= i
count += 1
result *= count+1
i += 2
if N != 1: # N is the last prime if N != 1
result *= 1+1
return result
| Solution |
python | numba__numba | numba/core/inline_closurecall.py | {
"start": 12301,
"end": 62248
} | class ____(object):
""" A worker class for inlining, this is a more advanced version of
`inline_closure_call` in that it permits inlining from function type, Numba
IR and code object. It also, runs the entire untyped compiler pipeline on
the inlinee to ensure that it is transformed as though it were compiled
directly.
"""
def __init__(self,
typingctx=None,
targetctx=None,
locals=None,
pipeline=None,
flags=None,
validator=callee_ir_validator,
typemap=None,
calltypes=None):
"""
Instantiate a new InlineWorker, all arguments are optional though some
must be supplied together for certain use cases. The methods will refuse
to run if the object isn't configured in the manner needed. Args are the
same as those in a numba.core.Compiler.state, except the validator which
is a function taking Numba IR and validating it for use when inlining
(this is optional and really to just provide better error messages about
things which the inliner cannot handle like yield in closure).
"""
def check(arg, name):
if arg is None:
raise TypeError("{} must not be None".format(name))
from numba.core.compiler import DefaultPassBuilder
# check the stuff needed to run the more advanced compilation pipeline
# is valid if any of it is provided
compiler_args = (targetctx, locals, pipeline, flags)
compiler_group = [x is not None for x in compiler_args]
if any(compiler_group) and not all(compiler_group):
check(targetctx, 'targetctx')
check(locals, 'locals')
check(pipeline, 'pipeline')
check(flags, 'flags')
elif all(compiler_group):
check(typingctx, 'typingctx')
self._compiler_pipeline = DefaultPassBuilder.define_untyped_pipeline
self.typingctx = typingctx
self.targetctx = targetctx
self.locals = locals
self.pipeline = pipeline
self.flags = flags
self.validator = validator
self.debug_print = _make_debug_print("InlineWorker")
# check whether this inliner can also support typemap and calltypes
# update and if what's provided is valid
pair = (typemap, calltypes)
pair_is_none = [x is None for x in pair]
if any(pair_is_none) and not all(pair_is_none):
msg = ("typemap and calltypes must both be either None or have a "
"value, got: %s, %s")
raise TypeError(msg % pair)
self._permit_update_type_and_call_maps = not all(pair_is_none)
self.typemap = typemap
self.calltypes = calltypes
def inline_ir(self, caller_ir, block, i, callee_ir, callee_freevars,
arg_typs=None):
""" Inlines the callee_ir in the caller_ir at statement index i of block
`block`, callee_freevars are the free variables for the callee_ir. If
the callee_ir is derived from a function `func` then this is
`func.__code__.co_freevars`. If `arg_typs` is given and the InlineWorker
instance was initialized with a typemap and calltypes then they will be
appropriately updated based on the arg_typs.
"""
# Always copy the callee IR, it gets mutated
def copy_ir(the_ir):
kernel_copy = the_ir.copy()
kernel_copy.blocks = {}
for block_label, block in the_ir.blocks.items():
new_block = copy.deepcopy(the_ir.blocks[block_label])
kernel_copy.blocks[block_label] = new_block
return kernel_copy
callee_ir = copy_ir(callee_ir)
# check that the contents of the callee IR is something that can be
# inlined if a validator is present
if self.validator is not None:
self.validator(callee_ir)
# save an unmutated copy of the callee_ir to return
callee_ir_original = copy_ir(callee_ir)
scope = block.scope
instr = block.body[i]
call_expr = instr.value
callee_blocks = callee_ir.blocks
# 1. relabel callee_ir by adding an offset
max_label = max(
ir_utils._the_max_label.next(),
max(caller_ir.blocks.keys()),
)
callee_blocks = add_offset_to_labels(callee_blocks, max_label + 1)
callee_blocks = simplify_CFG(callee_blocks)
callee_ir.blocks = callee_blocks
min_label = min(callee_blocks.keys())
max_label = max(callee_blocks.keys())
# reset globals in ir_utils before we use it
ir_utils._the_max_label.update(max_label)
self.debug_print("After relabel")
_debug_dump(callee_ir)
# 2. rename all local variables in callee_ir with new locals created in
# caller_ir
callee_scopes = _get_all_scopes(callee_blocks)
self.debug_print("callee_scopes = ", callee_scopes)
# one function should only have one local scope
assert (len(callee_scopes) == 1)
callee_scope = callee_scopes[0]
var_dict = {}
for var in tuple(callee_scope.localvars._con.values()):
if not (var.name in callee_freevars):
inlined_name = _created_inlined_var_name(
callee_ir.func_id.unique_name, var.name)
# Update the caller scope with the new names
new_var = scope.redefine(inlined_name, loc=var.loc)
# Also update the callee scope with the new names. Should the
# type and call maps need updating (which requires SSA form) the
# transformation to SSA is valid as the IR object is internally
# consistent.
callee_scope.redefine(inlined_name, loc=var.loc)
var_dict[var.name] = new_var
self.debug_print("var_dict = ", var_dict)
replace_vars(callee_blocks, var_dict)
self.debug_print("After local var rename")
_debug_dump(callee_ir)
# 3. replace formal parameters with actual arguments
callee_func = callee_ir.func_id.func
args = _get_callee_args(call_expr, callee_func, block.body[i].loc,
caller_ir)
# 4. Update typemap
if self._permit_update_type_and_call_maps:
if arg_typs is None:
raise TypeError('arg_typs should have a value not None')
self.update_type_and_call_maps(callee_ir, arg_typs)
# update_type_and_call_maps replaces blocks
callee_blocks = callee_ir.blocks
self.debug_print("After arguments rename: ")
_debug_dump(callee_ir)
_replace_args_with(callee_blocks, args)
# 5. split caller blocks into two
new_blocks = []
new_block = ir.Block(scope, block.loc)
new_block.body = block.body[i + 1:]
new_label = next_label()
caller_ir.blocks[new_label] = new_block
new_blocks.append((new_label, new_block))
block.body = block.body[:i]
block.body.append(ir.Jump(min_label, instr.loc))
# 6. replace Return with assignment to LHS
topo_order = find_topo_order(callee_blocks)
_replace_returns(callee_blocks, instr.target, new_label)
# remove the old definition of instr.target too
if (instr.target.name in caller_ir._definitions
and call_expr in caller_ir._definitions[instr.target.name]):
# NOTE: target can have multiple definitions due to control flow
caller_ir._definitions[instr.target.name].remove(call_expr)
# 7. insert all new blocks, and add back definitions
for label in topo_order:
# block scope must point to parent's
block = callee_blocks[label]
block.scope = scope
_add_definitions(caller_ir, block)
caller_ir.blocks[label] = block
new_blocks.append((label, block))
self.debug_print("After merge in")
_debug_dump(caller_ir)
return callee_ir_original, callee_blocks, var_dict, new_blocks
def inline_function(self, caller_ir, block, i, function, arg_typs=None):
""" Inlines the function in the caller_ir at statement index i of block
`block`. If `arg_typs` is given and the InlineWorker instance was
initialized with a typemap and calltypes then they will be appropriately
updated based on the arg_typs.
"""
callee_ir = self.run_untyped_passes(function)
freevars = function.__code__.co_freevars
return self.inline_ir(caller_ir, block, i, callee_ir, freevars,
arg_typs=arg_typs)
def run_untyped_passes(self, func, enable_ssa=False):
"""
Run the compiler frontend's untyped passes over the given Python
function, and return the function's canonical Numba IR.
Disable SSA transformation by default, since the call site won't be in
SSA form and self.inline_ir depends on this being the case.
"""
from numba.core.compiler import StateDict, _CompileStatus
from numba.core.untyped_passes import ExtractByteCode
from numba.core import bytecode
from numba.parfors.parfor import ParforDiagnostics
state = StateDict()
state.func_ir = None
state.typingctx = self.typingctx
state.targetctx = self.targetctx
state.locals = self.locals
state.pipeline = self.pipeline
state.flags = self.flags
state.flags.enable_ssa = enable_ssa
state.func_id = bytecode.FunctionIdentity.from_function(func)
state.typemap = None
state.calltypes = None
state.type_annotation = None
state.status = _CompileStatus(False)
state.return_type = None
state.parfor_diagnostics = ParforDiagnostics()
state.metadata = {}
ExtractByteCode().run_pass(state)
# This is a lie, just need *some* args for the case where an obj mode
# with lift is needed
state.args = len(state.bc.func_id.pysig.parameters) * (types.pyobject,)
pm = self._compiler_pipeline(state)
pm.finalize()
pm.run(state)
return state.func_ir
def update_type_and_call_maps(self, callee_ir, arg_typs):
""" Updates the type and call maps based on calling callee_ir with
arguments from arg_typs"""
from numba.core.ssa import reconstruct_ssa
from numba.core.typed_passes import PreLowerStripPhis
if not self._permit_update_type_and_call_maps:
msg = ("InlineWorker instance not configured correctly, typemap or "
"calltypes missing in initialization.")
raise ValueError(msg)
from numba.core import typed_passes
# call branch pruning to simplify IR and avoid inference errors
callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
numba.core.analysis.dead_branch_prune(callee_ir, arg_typs)
# callee's typing may require SSA
callee_ir = reconstruct_ssa(callee_ir)
callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
[f_typemap,
_f_return_type,
f_calltypes, _] = typed_passes.type_inference_stage(
self.typingctx, self.targetctx, callee_ir, arg_typs, None,
)
callee_ir = PreLowerStripPhis()._strip_phi_nodes(callee_ir)
callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
canonicalize_array_math(callee_ir, f_typemap,
f_calltypes, self.typingctx)
# remove argument entries like arg.a from typemap
arg_names = [vname for vname in f_typemap if vname.startswith("arg.")]
for a in arg_names:
f_typemap.pop(a)
self.typemap.update(f_typemap)
self.calltypes.update(f_calltypes)
def inline_closure_call(func_ir, glbls, block, i, callee, typingctx=None,
targetctx=None, arg_typs=None, typemap=None,
calltypes=None, work_list=None, callee_validator=None,
replace_freevars=True):
"""Inline the body of `callee` at its callsite (`i`-th instruction of
`block`)
`func_ir` is the func_ir object of the caller function and `glbls` is its
global variable environment (func_ir.func_id.func.__globals__).
`block` is the IR block of the callsite and `i` is the index of the
callsite's node. `callee` is either the called function or a
make_function node. `typingctx`, `typemap` and `calltypes` are typing
data structures of the caller, available if we are in a typed pass.
`arg_typs` includes the types of the arguments at the callsite.
`callee_validator` is an optional callable which can be used to validate the
IR of the callee to ensure that it contains IR supported for inlining, it
takes one argument, the func_ir of the callee
Returns IR blocks of the callee and the variable renaming dictionary used
for them to facilitate further processing of new blocks.
"""
scope = block.scope
instr = block.body[i]
call_expr = instr.value
debug_print = _make_debug_print("inline_closure_call")
debug_print("Found closure call: ", instr, " with callee = ", callee)
# support both function object and make_function Expr
callee_code = callee.code if hasattr(callee, 'code') else callee.__code__
callee_closure = (callee.closure
if hasattr(callee, 'closure') else callee.__closure__)
# first, get the IR of the callee
if isinstance(callee, pytypes.FunctionType):
from numba.core import compiler
callee_ir = compiler.run_frontend(callee, inline_closures=True)
else:
callee_ir = get_ir_of_code(glbls, callee_code)
# check that the contents of the callee IR is something that can be inlined
# if a validator is supplied
if callee_validator is not None:
callee_validator(callee_ir)
callee_blocks = callee_ir.blocks
# 1. relabel callee_ir by adding an offset
max_label = max(ir_utils._the_max_label.next(), max(func_ir.blocks.keys()))
callee_blocks = add_offset_to_labels(callee_blocks, max_label + 1)
callee_blocks = simplify_CFG(callee_blocks)
callee_ir.blocks = callee_blocks
min_label = min(callee_blocks.keys())
max_label = max(callee_blocks.keys())
# reset globals in ir_utils before we use it
ir_utils._the_max_label.update(max_label)
debug_print("After relabel")
_debug_dump(callee_ir)
# 2. rename all local variables in callee_ir with new locals created in
# func_ir
callee_scopes = _get_all_scopes(callee_blocks)
debug_print("callee_scopes = ", callee_scopes)
# one function should only have one local scope
assert (len(callee_scopes) == 1)
callee_scope = callee_scopes[0]
var_dict = {}
for var in callee_scope.localvars._con.values():
if not (var.name in callee_code.co_freevars):
inlined_name = _created_inlined_var_name(
callee_ir.func_id.unique_name, var.name)
new_var = scope.redefine(inlined_name, loc=var.loc)
var_dict[var.name] = new_var
debug_print("var_dict = ", var_dict)
replace_vars(callee_blocks, var_dict)
debug_print("After local var rename")
_debug_dump(callee_ir)
# 3. replace formal parameters with actual arguments
args = _get_callee_args(call_expr, callee, block.body[i].loc, func_ir)
debug_print("After arguments rename: ")
_debug_dump(callee_ir)
# 4. replace freevar with actual closure var
if callee_closure and replace_freevars:
closure = func_ir.get_definition(callee_closure)
debug_print("callee's closure = ", closure)
if isinstance(closure, tuple):
cellget = ctypes.pythonapi.PyCell_Get
cellget.restype = ctypes.py_object
cellget.argtypes = (ctypes.py_object,)
items = tuple(cellget(x) for x in closure)
else:
assert (isinstance(closure, ir.Expr)
and closure.op == 'build_tuple')
items = closure.items
assert (len(callee_code.co_freevars) == len(items))
_replace_freevars(callee_blocks, items)
debug_print("After closure rename")
_debug_dump(callee_ir)
if typingctx:
from numba.core import typed_passes
# call branch pruning to simplify IR and avoid inference errors
callee_ir._definitions = ir_utils.build_definitions(callee_ir.blocks)
numba.core.analysis.dead_branch_prune(callee_ir, arg_typs)
try:
[f_typemap, f_return_type,
f_calltypes, _] = typed_passes.type_inference_stage(
typingctx, targetctx, callee_ir, arg_typs, None)
except Exception:
[f_typemap, f_return_type,
f_calltypes, _] = typed_passes.type_inference_stage(
typingctx, targetctx, callee_ir, arg_typs, None)
canonicalize_array_math(callee_ir, f_typemap,
f_calltypes, typingctx)
# remove argument entries like arg.a from typemap
arg_names = [vname for vname in f_typemap if vname.startswith("arg.")]
for a in arg_names:
f_typemap.pop(a)
typemap.update(f_typemap)
calltypes.update(f_calltypes)
_replace_args_with(callee_blocks, args)
# 5. split caller blocks into two
new_blocks = []
new_block = ir.Block(scope, block.loc)
new_block.body = block.body[i + 1:]
new_label = next_label()
func_ir.blocks[new_label] = new_block
new_blocks.append((new_label, new_block))
block.body = block.body[:i]
block.body.append(ir.Jump(min_label, instr.loc))
# 6. replace Return with assignment to LHS
topo_order = find_topo_order(callee_blocks)
_replace_returns(callee_blocks, instr.target, new_label)
# remove the old definition of instr.target too
if (instr.target.name in func_ir._definitions
and call_expr in func_ir._definitions[instr.target.name]):
# NOTE: target can have multiple definitions due to control flow
func_ir._definitions[instr.target.name].remove(call_expr)
# 7. insert all new blocks, and add back definitions
for label in topo_order:
# block scope must point to parent's
block = callee_blocks[label]
block.scope = scope
_add_definitions(func_ir, block)
func_ir.blocks[label] = block
new_blocks.append((label, block))
debug_print("After merge in")
_debug_dump(func_ir)
if work_list is not None:
for block in new_blocks:
work_list.append(block)
return callee_blocks, var_dict
def _get_callee_args(call_expr, callee, loc, func_ir):
"""Get arguments for calling 'callee', including the default arguments.
keyword arguments are currently only handled when 'callee' is a function.
"""
if call_expr.op == 'call':
args = list(call_expr.args)
if call_expr.vararg:
msg = "Calling a closure with *args is unsupported."
raise errors.UnsupportedError(msg, call_expr.loc)
elif call_expr.op == 'getattr':
args = [call_expr.value]
elif ir_utils.is_operator_or_getitem(call_expr):
args = call_expr.list_vars()
else:
raise TypeError("Unsupported ir.Expr.{}".format(call_expr.op))
debug_print = _make_debug_print("inline_closure_call default handling")
# handle defaults and kw arguments using pysignature if callee is function
if isinstance(callee, pytypes.FunctionType):
pysig = numba.core.utils.pysignature(callee)
normal_handler = lambda index, param, default: default
default_handler = lambda index, param, default: ir.Const(default, loc)
# Throw error for stararg
# TODO: handle stararg
def stararg_handler(index, param, default):
raise NotImplementedError(
"Stararg not supported in inliner for arg {} {}".format(
index, param))
if call_expr.op == 'call':
kws = dict(call_expr.kws)
else:
kws = {}
return numba.core.typing.fold_arguments(
pysig, args, kws, normal_handler, default_handler,
stararg_handler)
else:
# TODO: handle arguments for make_function case similar to function
# case above
callee_defaults = (callee.defaults if hasattr(callee, 'defaults')
else callee.__defaults__)
if callee_defaults:
debug_print("defaults = ", callee_defaults)
if isinstance(callee_defaults, tuple): # Python 3.5
defaults_list = []
for x in callee_defaults:
if isinstance(x, ir.Var):
defaults_list.append(x)
else:
# this branch is predominantly for kwargs from
# inlinable functions
defaults_list.append(ir.Const(value=x, loc=loc))
args = args + defaults_list
elif (isinstance(callee_defaults, ir.Var)
or isinstance(callee_defaults, str)):
default_tuple = func_ir.get_definition(callee_defaults)
assert (isinstance(default_tuple, ir.Expr))
assert (default_tuple.op == "build_tuple")
const_vals = [func_ir.get_definition(x) for
x in default_tuple.items]
args = args + const_vals
else:
raise NotImplementedError(
"Unsupported defaults to make_function: {}".format(
callee_defaults))
return args
def _make_debug_print(prefix):
def debug_print(*args):
if config.DEBUG_INLINE_CLOSURE:
print(prefix + ": " + "".join(str(x) for x in args))
return debug_print
def _debug_dump(func_ir):
if config.DEBUG_INLINE_CLOSURE:
func_ir.dump()
def _get_all_scopes(blocks):
"""Get all block-local scopes from an IR.
"""
all_scopes = []
for label, block in blocks.items():
if not (block.scope in all_scopes):
all_scopes.append(block.scope)
return all_scopes
def _replace_args_with(blocks, args):
"""
Replace ir.Arg(...) with real arguments from call site
"""
for label, block in blocks.items():
assigns = block.find_insts(ir.Assign)
for stmt in assigns:
if isinstance(stmt.value, ir.Arg):
idx = stmt.value.index
assert (idx < len(args))
stmt.value = args[idx]
def _replace_freevars(blocks, args):
"""
Replace ir.FreeVar(...) with real variables from parent function
"""
for label, block in blocks.items():
assigns = block.find_insts(ir.Assign)
for stmt in assigns:
if isinstance(stmt.value, ir.FreeVar):
idx = stmt.value.index
assert (idx < len(args))
if isinstance(args[idx], ir.Var):
stmt.value = args[idx]
else:
stmt.value = ir.Const(args[idx], stmt.loc)
def _replace_returns(blocks, target, return_label):
"""
Return return statement by assigning directly to target, and a jump.
"""
for label, block in blocks.items():
casts = []
for i in range(len(block.body)):
stmt = block.body[i]
if isinstance(stmt, ir.Return):
assert (i + 1 == len(block.body))
block.body[i] = ir.Assign(stmt.value, target, stmt.loc)
block.body.append(ir.Jump(return_label, stmt.loc))
# remove cast of the returned value
for cast in casts:
if cast.target.name == stmt.value.name:
cast.value = cast.value.value
elif (isinstance(stmt, ir.Assign) and
isinstance(stmt.value, ir.Expr) and
stmt.value.op == 'cast'):
casts.append(stmt)
def _add_definitions(func_ir, block):
"""
Add variable definitions found in a block to parent func_ir.
"""
definitions = func_ir._definitions
assigns = block.find_insts(ir.Assign)
for stmt in assigns:
definitions[stmt.target.name].append(stmt.value)
def _find_arraycall(func_ir, block):
"""Look for statement like "x = numpy.array(y)" or "x[..] = y"
immediately after the closure call that creates list y (the i-th
statement in block). Return the statement index if found, or
raise GuardException.
"""
array_var = None
list_var_dead_after_array_call = False
list_var = None
i = 0
while i < len(block.body):
instr = block.body[i]
if isinstance(instr, ir.Del):
# Stop the process if list_var becomes dead
if list_var and array_var and instr.value == list_var.name:
list_var_dead_after_array_call = True
break
pass
elif isinstance(instr, ir.Assign):
# Found array_var = array(list_var)
lhs = instr.target
expr = instr.value
if (guard(find_callname, func_ir, expr) == ('array', 'numpy') and
isinstance(expr.args[0], ir.Var)):
list_var = expr.args[0]
array_var = lhs
array_stmt_index = i
array_kws = dict(expr.kws)
elif (isinstance(instr, ir.SetItem) and
isinstance(instr.value, ir.Var) and
not list_var):
list_var = instr.value
# Found array_var[..] = list_var, the case for nested array
array_var = instr.target
array_def = get_definition(func_ir, array_var)
require(guard(_find_unsafe_empty_inferred, func_ir, array_def))
array_stmt_index = i
array_kws = {}
else:
# Bail out otherwise
break
i = i + 1
# require array_var is found, and list_var is dead after array_call.
require(array_var and list_var_dead_after_array_call)
_make_debug_print("find_array_call")(block.body[array_stmt_index])
return list_var, array_stmt_index, array_kws
def _find_iter_range(func_ir, range_iter_var, swapped):
"""Find the iterator's actual range if it is either range(n), or
range(m, n), otherwise return raise GuardException.
"""
debug_print = _make_debug_print("find_iter_range")
range_iter_def = get_definition(func_ir, range_iter_var)
debug_print("range_iter_var = ", range_iter_var, " def = ", range_iter_def)
require(isinstance(range_iter_def, ir.Expr) and
range_iter_def.op == 'getiter')
range_var = range_iter_def.value
range_def = get_definition(func_ir, range_var)
debug_print("range_var = ", range_var, " range_def = ", range_def)
require(isinstance(range_def, ir.Expr) and range_def.op == 'call')
func_var = range_def.func
func_def = get_definition(func_ir, func_var)
debug_print("func_var = ", func_var, " func_def = ", func_def)
require(isinstance(func_def, ir.Global) and
(func_def.value == range or
func_def.value == numba.misc.special.prange))
nargs = len(range_def.args)
swapping = [('"array comprehension"', 'closure of'), range_def.func.loc]
if nargs == 1:
swapped[range_def.func.name] = swapping
stop = get_definition(func_ir, range_def.args[0], lhs_only=True)
return (0, range_def.args[0], func_def)
elif nargs == 2:
swapped[range_def.func.name] = swapping
start = get_definition(func_ir, range_def.args[0], lhs_only=True)
stop = get_definition(func_ir, range_def.args[1], lhs_only=True)
return (start, stop, func_def)
else:
raise GuardException
@intrinsic
def length_of_iterator(typingctx, val):
"""
An implementation of len(iter) for internal use.
Primary use is for array comprehensions (see inline_closurecall).
"""
if isinstance(val, types.RangeIteratorType):
val_type = val.yield_type
def codegen(context, builder, sig, args):
(value,) = args
iter_type = range_impl_map[val_type][1]
iterobj = cgutils.create_struct_proxy(iter_type)(context, builder,
value)
int_type = iterobj.count.type
return impl_ret_untracked(context, builder, int_type,
builder.load(iterobj.count))
return signature(val_type, val), codegen
elif isinstance(val, types.ListIter):
def codegen(context, builder, sig, args):
(value,) = args
intp_t = context.get_value_type(types.intp)
iterobj = ListIterInstance(context, builder, sig.args[0], value)
return impl_ret_untracked(context, builder, intp_t, iterobj.size)
return signature(types.intp, val), codegen
elif isinstance(val, types.ArrayIterator):
def codegen(context, builder, sig, args):
(iterty,) = sig.args
(value,) = args
intp_t = context.get_value_type(types.intp)
iterobj = context.make_helper(builder, iterty, value=value)
arrayty = iterty.array_type
ary = make_array(arrayty)(context, builder, value=iterobj.array)
shape = cgutils.unpack_tuple(builder, ary.shape)
# array iterates along the outer dimension
return impl_ret_untracked(context, builder, intp_t, shape[0])
return signature(types.intp, val), codegen
elif isinstance(val, types.UniTupleIter):
def codegen(context, builder, sig, args):
(iterty,) = sig.args
tuplety = iterty.container
intp_t = context.get_value_type(types.intp)
count_const = intp_t(tuplety.count)
return impl_ret_untracked(context, builder, intp_t, count_const)
return signature(types.intp, val), codegen
elif isinstance(val, types.ListTypeIteratorType):
def codegen(context, builder, sig, args):
(value,) = args
intp_t = context.get_value_type(types.intp)
from numba.typed.listobject import ListIterInstance
iterobj = ListIterInstance(context, builder, sig.args[0], value)
return impl_ret_untracked(context, builder, intp_t, iterobj.size)
return signature(types.intp, val), codegen
else:
msg = ('Unsupported iterator found in array comprehension, try '
'preallocating the array and filling manually.')
raise errors.TypingError(msg)
def _inline_arraycall(func_ir, cfg, visited, loop, swapped, enable_prange=False,
typed=False):
"""Look for array(list) call in the exit block of a given loop, and turn
list operations into array operations in the loop if the following
conditions are met:
1. The exit block contains an array call on the list;
2. The list variable is no longer live after array call;
3. The list is created in the loop entry block;
4. The loop is created from an range iterator whose length is known prior
to the loop;
5. There is only one list_append operation on the list variable in the
loop body;
6. The block that contains list_append dominates the loop head, which
ensures list length is the same as loop length;
If any condition check fails, no modification will be made to the incoming
IR.
"""
debug_print = _make_debug_print("inline_arraycall")
# There should only be one loop exit
require(len(loop.exits) == 1)
exit_block = next(iter(loop.exits))
list_var, array_call_index, array_kws = _find_arraycall(
func_ir, func_ir.blocks[exit_block],
)
# check if dtype is present in array call
dtype_def = None
dtype_mod_def = None
if 'dtype' in array_kws:
require(isinstance(array_kws['dtype'], ir.Var))
# We require that dtype argument to be a constant of getattr Expr, and
# we'll remember its definition for later use.
dtype_def = get_definition(func_ir, array_kws['dtype'])
require(isinstance(dtype_def, ir.Expr) and dtype_def.op == 'getattr')
dtype_mod_def = get_definition(func_ir, dtype_def.value)
list_var_def = get_definition(func_ir, list_var)
debug_print("list_var = ", list_var, " def = ", list_var_def)
if isinstance(list_var_def, ir.Expr) and list_var_def.op == 'cast':
list_var_def = get_definition(func_ir, list_var_def.value)
# Check if the definition is a build_list
require(isinstance(list_var_def, ir.Expr) and
list_var_def.op == 'build_list')
# The build_list must be empty
require(len(list_var_def.items) == 0)
# Look for list_append in "last" block in loop body, which should be a block
# that is a post-dominator of the loop header.
list_append_stmts = []
for label in loop.body:
# We have to consider blocks of this loop, but not sub-loops.
# To achieve this, we require the set of "in_loops" of "label" to be
# visited loops.
in_visited_loops = [l.header in visited for l in cfg.in_loops(label)]
if not all(in_visited_loops):
continue
block = func_ir.blocks[label]
debug_print("check loop body block ", label)
for stmt in block.find_insts(ir.Assign):
expr = stmt.value
if isinstance(expr, ir.Expr) and expr.op == 'call':
func_def = get_definition(func_ir, expr.func)
if (isinstance(func_def, ir.Expr) and func_def.op == 'getattr'
and func_def.attr == 'append'):
list_def = get_definition(func_ir, func_def.value)
debug_print("list_def = ", list_def,
list_def is list_var_def)
if list_def is list_var_def:
# found matching append call
list_append_stmts.append((label, block, stmt))
# Require only one list_append, otherwise we won't know the indices
require(len(list_append_stmts) == 1)
append_block_label, append_block, append_stmt = list_append_stmts[0]
# Check if append_block (besides loop entry) dominates loop header.
# Since CFG doesn't give us this info without loop entry, we approximate
# by checking if the predecessor set of the header block is the same
# as loop_entries plus append_block, which is certainly more restrictive
# than necessary, and can be relaxed if needed.
preds = set(l for l, b in cfg.predecessors(loop.header))
debug_print("preds = ", preds, (loop.entries | set([append_block_label])))
require(preds == (loop.entries | set([append_block_label])))
# Find iterator in loop header
iter_vars = []
iter_first_vars = []
loop_header = func_ir.blocks[loop.header]
for stmt in loop_header.find_insts(ir.Assign):
expr = stmt.value
if isinstance(expr, ir.Expr):
if expr.op == 'iternext':
iter_def = get_definition(func_ir, expr.value)
debug_print("iter_def = ", iter_def)
iter_vars.append(expr.value)
elif expr.op == 'pair_first':
iter_first_vars.append(stmt.target)
# Require only one iterator in loop header
require(len(iter_vars) == 1 and len(iter_first_vars) == 1)
# variable that holds the iterator object
iter_var = iter_vars[0]
# variable that holds the value out of iterator
iter_first_var = iter_first_vars[0]
# Final requirement: only one loop entry, and we're going to modify it by:
# 1. replacing the list definition with an array definition;
# 2. adding a counter for the array iteration.
require(len(loop.entries) == 1)
loop_entry = func_ir.blocks[next(iter(loop.entries))]
terminator = loop_entry.terminator
scope = loop_entry.scope
loc = loop_entry.loc
stmts = []
removed = []
def is_removed(val, removed):
if isinstance(val, ir.Var):
for x in removed:
if x.name == val.name:
return True
return False
# Skip list construction and skip terminator, add the rest to stmts
for i in range(len(loop_entry.body) - 1):
stmt = loop_entry.body[i]
if (isinstance(stmt, ir.Assign) and
(stmt.value is list_def or is_removed(stmt.value, removed))):
removed.append(stmt.target)
else:
stmts.append(stmt)
debug_print("removed variables: ", removed)
# Define an index_var to index the array.
# If the range happens to be single step ranges like range(n), or
# range(m, n), then the index_var correlates to iterator index; otherwise
# we'll have to define a new counter.
range_def = guard(_find_iter_range, func_ir, iter_var, swapped)
index_var = scope.redefine("index", loc)
if range_def and range_def[0] == 0:
# iterator starts with 0, index_var can just be iter_first_var
index_var = iter_first_var
else:
# index_var = -1 # starting the index with -1 since it will incremented
# in loop header
stmts.append(_new_definition(func_ir, index_var,
ir.Const(value=-1, loc=loc), loc))
# Insert statement to get the size of the loop iterator
size_var = scope.redefine("size", loc)
if range_def:
start, stop, range_func_def = range_def
if start == 0:
size_val = stop
else:
size_val = ir.Expr.binop(fn=operator.sub, lhs=stop, rhs=start,
loc=loc)
# we can parallelize this loop if enable_prange = True, by changing
# range function from range, to prange.
if enable_prange and isinstance(range_func_def, ir.Global):
range_func_def.name = 'internal_prange'
range_func_def.value = internal_prange
else:
# this doesn't work in objmode as it's effectively untyped
if typed:
len_func_var = scope.redefine("len_func", loc)
stmts.append(_new_definition(func_ir, len_func_var,
ir.Global('length_of_iterator',
length_of_iterator,
loc=loc),
loc))
size_val = ir.Expr.call(len_func_var, (iter_var,), (), loc=loc)
else:
raise GuardException
stmts.append(_new_definition(func_ir, size_var, size_val, loc))
size_tuple_var = scope.redefine("size_tuple", loc)
stmts.append(_new_definition(func_ir, size_tuple_var,
ir.Expr.build_tuple(items=[size_var], loc=loc), loc))
# Insert array allocation
array_var = scope.redefine("array", loc)
empty_func = scope.redefine("empty_func", loc)
if dtype_def and dtype_mod_def:
# when dtype is present, we'll call empty with dtype
dtype_mod_var = scope.redefine("dtype_mod", loc)
dtype_var = scope.redefine("dtype", loc)
stmts.append(_new_definition(
func_ir, dtype_mod_var, dtype_mod_def, loc))
stmts.append(_new_definition(
func_ir, dtype_var,
ir.Expr.getattr(dtype_mod_var, dtype_def.attr, loc), loc))
stmts.append(_new_definition(
func_ir, empty_func, ir.Global('empty', np.empty, loc=loc), loc))
array_kws = [('dtype', dtype_var)]
else:
# this doesn't work in objmode as it's effectively untyped
if typed:
# otherwise we'll call unsafe_empty_inferred
stmts.append(_new_definition(
func_ir, empty_func,
ir.Global('unsafe_empty_inferred', unsafe_empty_inferred,
loc=loc),
loc))
array_kws = []
else:
raise GuardException
# array_var = empty_func(size_tuple_var)
stmts.append(_new_definition(func_ir, array_var,
ir.Expr.call(empty_func, (size_tuple_var,), list(array_kws),
loc=loc), loc))
# Add back removed just in case they are used by something else
for var in removed:
stmts.append(_new_definition(func_ir, var, array_var, loc))
# Add back terminator
stmts.append(terminator)
# Modify loop_entry
loop_entry.body = stmts
if range_def:
if range_def[0] != 0:
# when range doesn't start from 0, index_var becomes loop index
# (iter_first_var) minus an offset (range_def[0])
terminator = loop_header.terminator
assert (isinstance(terminator, ir.Branch))
# find the block in the loop body that header jumps to
block_id = terminator.truebr
blk = func_ir.blocks[block_id]
loc = blk.loc
blk.body.insert(
0,
_new_definition(
func_ir, index_var,
ir.Expr.binop(fn=operator.sub, lhs=iter_first_var,
rhs=range_def[0], loc=loc),
loc
)
)
else:
# Insert index_var increment to the end of loop header
loc = loop_header.loc
terminator = loop_header.terminator
stmts = loop_header.body[0:-1]
next_index_var = scope.redefine("next_index", loc)
one = scope.redefine("one", loc)
# one = 1
stmts.append(_new_definition(func_ir, one,
ir.Const(value=1,loc=loc), loc))
# next_index_var = index_var + 1
stmts.append(_new_definition(func_ir, next_index_var,
ir.Expr.binop(fn=operator.add, lhs=index_var, rhs=one,
loc=loc), loc))
# index_var = next_index_var
stmts.append(_new_definition(func_ir, index_var, next_index_var, loc))
stmts.append(terminator)
loop_header.body = stmts
# In append_block, change list_append into array assign
for i in range(len(append_block.body)):
if append_block.body[i] is append_stmt:
debug_print("Replace append with SetItem")
append_block.body[i] = ir.SetItem(
target=array_var, index=index_var,
value=append_stmt.value.args[0], loc=append_stmt.loc)
# replace array call, by changing "a = array(b)" to "a = b"
stmt = func_ir.blocks[exit_block].body[array_call_index]
# stmt can be either array call or SetItem, we only replace array call
if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
stmt.value = array_var
func_ir._definitions[stmt.target.name] = [stmt.value]
return True
def _find_unsafe_empty_inferred(func_ir, expr):
unsafe_empty_inferred
require(isinstance(expr, ir.Expr) and expr.op == 'call')
callee = expr.func
callee_def = get_definition(func_ir, callee)
require(isinstance(callee_def, ir.Global))
_make_debug_print("_find_unsafe_empty_inferred")(callee_def.value)
return callee_def.value == unsafe_empty_inferred
def _fix_nested_array(func_ir):
"""Look for assignment like: a[..] = b, where both a and b are numpy arrays,
and try to eliminate array b by expanding a with an extra dimension.
"""
blocks = func_ir.blocks
cfg = compute_cfg_from_blocks(blocks)
usedefs = compute_use_defs(blocks)
empty_deadmap = dict([(label, set()) for label in blocks.keys()])
livemap = compute_live_variables(cfg, blocks, usedefs.defmap, empty_deadmap)
def find_array_def(arr):
"""Find numpy array definition such as
arr = numba.unsafe.ndarray.empty_inferred(...).
If it is arr = b[...], find array definition of b recursively.
"""
arr_def = get_definition(func_ir, arr)
_make_debug_print("find_array_def")(arr, arr_def)
if isinstance(arr_def, ir.Expr):
if guard(_find_unsafe_empty_inferred, func_ir, arr_def):
return arr_def
elif arr_def.op == 'getitem':
return find_array_def(arr_def.value)
raise GuardException
def fix_dependencies(expr, varlist):
"""Double check if all variables in varlist are defined before
expr is used. Try to move constant definition when the check fails.
Bails out by raising GuardException if it can't be moved.
"""
debug_print = _make_debug_print("fix_dependencies")
for label, block in blocks.items():
scope = block.scope
body = block.body
defined = set()
for i in range(len(body)):
inst = body[i]
if isinstance(inst, ir.Assign):
defined.add(inst.target.name)
if inst.value is expr:
new_varlist = []
for var in varlist:
# var must be defined before this inst, or live
# and not later defined.
if (var.name in defined or
(var.name in livemap[label] and
not (var.name in usedefs.defmap[label]))):
debug_print(var.name, " already defined")
new_varlist.append(var)
else:
debug_print(var.name, " not yet defined")
var_def = get_definition(func_ir, var.name)
if isinstance(var_def, ir.Const):
loc = var.loc
new_var = scope.redefine("new_var", loc)
new_const = ir.Const(var_def.value, loc)
new_vardef = _new_definition(
func_ir, new_var, new_const, loc)
new_body = []
new_body.extend(body[:i])
new_body.append(new_vardef)
new_body.extend(body[i:])
block.body = new_body
new_varlist.append(new_var)
else:
raise GuardException
return new_varlist
# when expr is not found in block
raise GuardException
def fix_array_assign(stmt):
"""For assignment like lhs[idx] = rhs, where both lhs and rhs are
arrays, do the following:
1. find the definition of rhs, which has to be a call to
numba.unsafe.ndarray.empty_inferred
2. find the source array creation for lhs, insert an extra dimension of
size of b.
3. replace the definition of
rhs = numba.unsafe.ndarray.empty_inferred(...) with rhs = lhs[idx]
"""
require(isinstance(stmt, ir.SetItem))
require(isinstance(stmt.value, ir.Var))
debug_print = _make_debug_print("fix_array_assign")
debug_print("found SetItem: ", stmt)
lhs = stmt.target
# Find the source array creation of lhs
lhs_def = find_array_def(lhs)
debug_print("found lhs_def: ", lhs_def)
rhs_def = get_definition(func_ir, stmt.value)
debug_print("found rhs_def: ", rhs_def)
require(isinstance(rhs_def, ir.Expr))
if rhs_def.op == 'cast':
rhs_def = get_definition(func_ir, rhs_def.value)
require(isinstance(rhs_def, ir.Expr))
require(_find_unsafe_empty_inferred(func_ir, rhs_def))
# Find the array dimension of rhs
dim_def = get_definition(func_ir, rhs_def.args[0])
require(isinstance(dim_def, ir.Expr) and dim_def.op == 'build_tuple')
debug_print("dim_def = ", dim_def)
extra_dims = [get_definition(func_ir, x, lhs_only=True)
for x in dim_def.items ]
debug_print("extra_dims = ", extra_dims)
# Expand size tuple when creating lhs_def with extra_dims
size_tuple_def = get_definition(func_ir, lhs_def.args[0])
require(isinstance(size_tuple_def, ir.Expr) and
size_tuple_def.op == 'build_tuple')
debug_print("size_tuple_def = ", size_tuple_def)
extra_dims = fix_dependencies(size_tuple_def, extra_dims)
size_tuple_def.items += extra_dims
# In-place modify rhs_def to be getitem
rhs_def.op = 'getitem'
rhs_def.fn = operator.getitem
rhs_def.value = get_definition(func_ir, lhs, lhs_only=True)
rhs_def.index = stmt.index
del rhs_def._kws['func']
del rhs_def._kws['args']
del rhs_def._kws['vararg']
del rhs_def._kws['kws']
# success
return True
for label in find_topo_order(func_ir.blocks):
block = func_ir.blocks[label]
for stmt in block.body:
if guard(fix_array_assign, stmt):
block.body.remove(stmt)
def _new_definition(func_ir, var, value, loc):
func_ir._definitions[var.name] = [value]
return ir.Assign(value=value, target=var, loc=loc)
@rewrites.register_rewrite('after-inference')
| InlineWorker |
python | jpadilla__pyjwt | jwt/api_jwk.py | {
"start": 335,
"end": 4558
} | class ____:
def __init__(self, jwk_data: JWKDict, algorithm: str | None = None) -> None:
"""A class that represents a `JSON Web Key <https://www.rfc-editor.org/rfc/rfc7517>`_.
:param jwk_data: The decoded JWK data.
:type jwk_data: dict[str, typing.Any]
:param algorithm: The key algorithm. If not specified, the key's ``alg`` will be used.
:type algorithm: str or None
:raises InvalidKeyError: If the key type (``kty``) is not found or unsupported, or if the curve (``crv``) is not found or unsupported.
:raises MissingCryptographyError: If the algorithm requires ``cryptography`` to be installed and it is not available.
:raises PyJWKError: If unable to find an algorithm for the key.
"""
self._algorithms = get_default_algorithms()
self._jwk_data = jwk_data
kty = self._jwk_data.get("kty", None)
if not kty:
raise InvalidKeyError(f"kty is not found: {self._jwk_data}")
if not algorithm and isinstance(self._jwk_data, dict):
algorithm = self._jwk_data.get("alg", None)
if not algorithm:
# Determine alg with kty (and crv).
crv = self._jwk_data.get("crv", None)
if kty == "EC":
if crv == "P-256" or not crv:
algorithm = "ES256"
elif crv == "P-384":
algorithm = "ES384"
elif crv == "P-521":
algorithm = "ES512"
elif crv == "secp256k1":
algorithm = "ES256K"
else:
raise InvalidKeyError(f"Unsupported crv: {crv}")
elif kty == "RSA":
algorithm = "RS256"
elif kty == "oct":
algorithm = "HS256"
elif kty == "OKP":
if not crv:
raise InvalidKeyError(f"crv is not found: {self._jwk_data}")
if crv == "Ed25519":
algorithm = "EdDSA"
else:
raise InvalidKeyError(f"Unsupported crv: {crv}")
else:
raise InvalidKeyError(f"Unsupported kty: {kty}")
if not has_crypto and algorithm in requires_cryptography:
raise MissingCryptographyError(
f"{algorithm} requires 'cryptography' to be installed."
)
self.algorithm_name = algorithm
if algorithm in self._algorithms:
self.Algorithm = self._algorithms[algorithm]
else:
raise PyJWKError(f"Unable to find an algorithm for key: {self._jwk_data}")
self.key = self.Algorithm.from_jwk(self._jwk_data)
@staticmethod
def from_dict(obj: JWKDict, algorithm: str | None = None) -> PyJWK:
"""Creates a :class:`PyJWK` object from a JSON-like dictionary.
:param obj: The JWK data, as a dictionary
:type obj: dict[str, typing.Any]
:param algorithm: The key algorithm. If not specified, the key's ``alg`` will be used.
:type algorithm: str or None
:rtype: PyJWK
"""
return PyJWK(obj, algorithm)
@staticmethod
def from_json(data: str, algorithm: None = None) -> PyJWK:
"""Create a :class:`PyJWK` object from a JSON string.
Implicitly calls :meth:`PyJWK.from_dict()`.
:param str data: The JWK data, as a JSON string.
:param algorithm: The key algorithm. If not specific, the key's ``alg`` will be used.
:type algorithm: str or None
:rtype: PyJWK
"""
obj = json.loads(data)
return PyJWK.from_dict(obj, algorithm)
@property
def key_type(self) -> str | None:
"""The `kty` property from the JWK.
:rtype: str or None
"""
return self._jwk_data.get("kty", None)
@property
def key_id(self) -> str | None:
"""The `kid` property from the JWK.
:rtype: str or None
"""
return self._jwk_data.get("kid", None)
@property
def public_key_use(self) -> str | None:
"""The `use` property from the JWK.
:rtype: str or None
"""
return self._jwk_data.get("use", None)
| PyJWK |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/llms.py | {
"start": 829,
"end": 4628
} | class ____(BaseLanguageModel):
client: Any = None
async_client: Any = None
model: str = Field(default="claude-sonnet-4-5", alias="model_name")
"""Model name to use."""
max_tokens: int = Field(default=1024, alias="max_tokens_to_sample")
"""Denotes the number of tokens to predict per generation."""
temperature: float | None = None
"""A non-negative float that tunes the degree of randomness in generation."""
top_k: int | None = None
"""Number of most likely tokens to consider at each step."""
top_p: float | None = None
"""Total probability mass of tokens to consider at each step."""
streaming: bool = False
"""Whether to stream the results."""
default_request_timeout: float | None = None
"""Timeout for requests to Anthropic Completion API. Default is 600 seconds."""
max_retries: int = 2
"""Number of retries allowed for requests sent to the Anthropic Completion API."""
anthropic_api_url: str | None = Field(
alias="base_url",
default_factory=from_env(
"ANTHROPIC_API_URL",
default="https://api.anthropic.com",
),
)
"""Base URL for API requests. Only specify if using a proxy or service emulator.
If a value isn't passed in, will attempt to read the value from
`ANTHROPIC_API_URL`. If not set, the default value `https://api.anthropic.com`
will be used.
"""
anthropic_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env("ANTHROPIC_API_KEY", default=""),
)
"""Automatically read from env var `ANTHROPIC_API_KEY` if not provided."""
HUMAN_PROMPT: str | None = None
AI_PROMPT: str | None = None
count_tokens: Callable[[str], int] | None = None
model_kwargs: dict[str, Any] = Field(default_factory=dict)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: dict) -> Any:
all_required_field_names = get_pydantic_field_names(cls)
return _build_model_kwargs(values, all_required_field_names)
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
self.client = anthropic.Anthropic(
base_url=self.anthropic_api_url,
api_key=self.anthropic_api_key.get_secret_value(),
timeout=self.default_request_timeout,
max_retries=self.max_retries,
)
self.async_client = anthropic.AsyncAnthropic(
base_url=self.anthropic_api_url,
api_key=self.anthropic_api_key.get_secret_value(),
timeout=self.default_request_timeout,
max_retries=self.max_retries,
)
# Keep for backward compatibility but not used in Messages API
self.HUMAN_PROMPT = getattr(anthropic, "HUMAN_PROMPT", None)
self.AI_PROMPT = getattr(anthropic, "AI_PROMPT", None)
return self
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling Anthropic API."""
d = {
"max_tokens": self.max_tokens,
"model": self.model,
}
if self.temperature is not None:
d["temperature"] = self.temperature
if self.top_k is not None:
d["top_k"] = self.top_k
if self.top_p is not None:
d["top_p"] = self.top_p
return {**d, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**self._default_params}
def _get_anthropic_stop(self, stop: list[str] | None = None) -> list[str]:
if stop is None:
stop = []
return stop
| _AnthropicCommon |
python | walkccc__LeetCode | solutions/2640. Find the Score of All Prefixes of an Array/2640.py | {
"start": 0,
"end": 219
} | class ____:
def findPrefixScore(self, nums: list[int]) -> list[int]:
conver = []
mx = 0
for num in nums:
mx = max(mx, num)
conver.append(num + mx)
return itertools.accumulate(conver)
| Solution |
python | python-attrs__attrs | tests/test_next_gen.py | {
"start": 331,
"end": 364
} | class ____:
x: str
y: int
| C |
python | huggingface__transformers | src/transformers/models/musicgen/processing_musicgen.py | {
"start": 777,
"end": 4232
} | class ____(ProcessorMixin):
r"""
Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor
class.
[`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`TTokenizer`]. See
[`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
Args:
feature_extractor (`EncodecFeatureExtractor`):
An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`T5Tokenizer`):
An instance of [`T5Tokenizer`]. The tokenizer is a required input.
"""
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, *args, **kwargs):
"""
Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text`
argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
if len(args) > 0:
kwargs["audio"] = args[0]
return super().__call__(*args, **kwargs)
def batch_decode(self, *args, **kwargs):
"""
This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
[`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
"""
audio_values = kwargs.pop("audio", None)
padding_mask = kwargs.pop("padding_mask", None)
if len(args) > 0:
audio_values = args[0]
args = args[1:]
if audio_values is not None:
return self._decode_audio(audio_values, padding_mask=padding_mask)
else:
return self.tokenizer.batch_decode(*args, **kwargs)
def _decode_audio(self, audio_values, padding_mask: Any = None) -> list[np.ndarray]:
"""
This method strips any padding from the audio values to return a list of numpy audio arrays.
"""
audio_values = to_numpy(audio_values)
bsz, channels, seq_len = audio_values.shape
if padding_mask is None:
return list(audio_values)
padding_mask = to_numpy(padding_mask)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
difference = seq_len - padding_mask.shape[-1]
padding_value = 1 - self.feature_extractor.padding_value
padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
audio_values = audio_values.tolist()
for i in range(bsz):
sliced_audio = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
audio_values[i] = sliced_audio.reshape(channels, -1)
return audio_values
__all__ = ["MusicgenProcessor"]
| MusicgenProcessor |
python | huggingface__transformers | src/transformers/models/vitpose/modeling_vitpose.py | {
"start": 7734,
"end": 11520
} | class ____(VitPosePreTrainedModel):
def __init__(self, config: VitPoseConfig):
super().__init__(config)
self.backbone = load_backbone(config)
# add backbone attributes
if not hasattr(self.backbone.config, "hidden_size"):
raise ValueError("The backbone should have a hidden_size attribute")
if not hasattr(self.backbone.config, "image_size"):
raise ValueError("The backbone should have an image_size attribute")
if not hasattr(self.backbone.config, "patch_size"):
raise ValueError("The backbone should have a patch_size attribute")
self.head = VitPoseSimpleDecoder(config) if config.use_simple_decoder else VitPoseClassicDecoder(config)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
dataset_index: Optional[torch.Tensor] = None,
flip_pairs: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> VitPoseEstimatorOutput:
r"""
dataset_index (`torch.Tensor` of shape `(batch_size,)`):
Index to use in the Mixture-of-Experts (MoE) blocks of the backbone.
This corresponds to the dataset index used during training, e.g. For the single dataset index 0 refers to the corresponding dataset. For the multiple datasets index 0 refers to dataset A (e.g. MPII) and index 1 refers to dataset B (e.g. CrowdPose).
flip_pairs (`torch.tensor`, *optional*):
Whether to mirror pairs of keypoints (for example, left ear -- right ear).
Examples:
```python
>>> from transformers import AutoImageProcessor, VitPoseForPoseEstimation
>>> import torch
>>> from PIL import Image
>>> import requests
>>> processor = AutoImageProcessor.from_pretrained("usyd-community/vitpose-base-simple")
>>> model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-base-simple")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> boxes = [[[412.8, 157.61, 53.05, 138.01], [384.43, 172.21, 15.12, 35.74]]]
>>> inputs = processor(image, boxes=boxes, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> heatmaps = outputs.heatmaps
```"""
loss = None
if labels is not None:
raise NotImplementedError("Training is not yet supported")
outputs: BackboneOutput = self.backbone.forward_with_filtered_kwargs(
pixel_values,
dataset_index=dataset_index,
**kwargs,
)
# Turn output hidden states in tensor of shape (batch_size, num_channels, height, width)
sequence_output = outputs.feature_maps[-1]
batch_size = sequence_output.shape[0]
patch_height = self.config.backbone_config.image_size[0] // self.config.backbone_config.patch_size[0]
patch_width = self.config.backbone_config.image_size[1] // self.config.backbone_config.patch_size[1]
sequence_output = sequence_output.permute(0, 2, 1)
sequence_output = sequence_output.reshape(batch_size, -1, patch_height, patch_width).contiguous()
heatmaps = self.head(sequence_output, flip_pairs=flip_pairs)
return VitPoseEstimatorOutput(
loss=loss,
heatmaps=heatmaps,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["VitPosePreTrainedModel", "VitPoseForPoseEstimation"]
| VitPoseForPoseEstimation |
python | kamyu104__LeetCode-Solutions | Python/make-string-anti-palindrome.py | {
"start": 800,
"end": 1559
} | class ____(object):
def makeAntiPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
cnt = [0]*26
for x in s:
cnt[ord(x)-ord('a')] += 1
if max(cnt) > len(s)//2:
return "-1"
result = [i for i, x in enumerate(cnt) for _ in xrange(x)]
left = len(s)//2
right = left+1
while right < len(s) and result[right] == result[left]:
right += 1
while result[left] == result[len(s)-1-left]:
result[left] , result[right] = result[right], result[left]
left += 1
right += 1
return "".join(map(lambda x: chr(ord('a')+x), result))
# Time: O(n * 26)
# Space: O(26)
# freq table, greedy
| Solution2 |
python | scikit-learn__scikit-learn | sklearn/linear_model/_perceptron.py | {
"start": 245,
"end": 7593
} | class ____(BaseSGDClassifier):
"""Linear perceptron classifier.
The implementation is a wrapper around :class:`~sklearn.linear_model.SGDClassifier`
by fixing the `loss` and `learning_rate` parameters as::
SGDClassifier(loss="perceptron", learning_rate="constant")
Other available parameters are described below and are forwarded to
:class:`~sklearn.linear_model.SGDClassifier`.
Read more in the :ref:`User Guide <perceptron>`.
Parameters
----------
penalty : {'l2','l1','elasticnet'}, default=None
The penalty (aka regularization term) to be used.
alpha : float, default=0.0001
Constant that multiplies the regularization term if regularization is
used.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with `0 <= l1_ratio <= 1`.
`l1_ratio=0` corresponds to L2 penalty, `l1_ratio=1` to L1.
Only used if `penalty='elasticnet'`.
.. versionadded:: 0.24
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float or None, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol).
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
eta0 : float, default=1
Constant by which the updates are multiplied.
n_jobs : int, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, default=0
Used to shuffle the training data, when ``shuffle`` is set to
``True``. Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score is not improving by at least `tol` for
`n_iter_no_change` consecutive epochs.
.. versionadded:: 0.20
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if early_stopping is True.
.. versionadded:: 0.20
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before early stopping.
.. versionadded:: 0.20
class_weight : dict, {class_label: weight} or "balanced", default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution. See
:term:`the Glossary <warm_start>`.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The unique classes labels.
coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
(n_classes, n_features)
Weights assigned to the features.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples + 1)``.
See Also
--------
sklearn.linear_model.SGDClassifier : Linear classifiers
(SVM, logistic regression, etc.) with SGD training.
Notes
-----
``Perceptron`` is a classification algorithm which shares the same
underlying implementation with ``SGDClassifier``. In fact,
``Perceptron()`` is equivalent to `SGDClassifier(loss="perceptron",
eta0=1, learning_rate="constant", penalty=None)`.
References
----------
https://en.wikipedia.org/wiki/Perceptron and references therein.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.linear_model import Perceptron
>>> X, y = load_digits(return_X_y=True)
>>> clf = Perceptron(tol=1e-3, random_state=0)
>>> clf.fit(X, y)
Perceptron()
>>> clf.score(X, y)
0.939...
"""
_parameter_constraints: dict = {**BaseSGDClassifier._parameter_constraints}
_parameter_constraints.pop("loss")
_parameter_constraints.pop("average")
_parameter_constraints.update(
{
"penalty": [StrOptions({"l2", "l1", "elasticnet"}), None],
"alpha": [Interval(Real, 0, None, closed="left")],
"l1_ratio": [Interval(Real, 0, 1, closed="both")],
"eta0": [Interval(Real, 0, None, closed="neither")],
}
)
def __init__(
self,
*,
penalty=None,
alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
eta0=1.0,
n_jobs=None,
random_state=0,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
class_weight=None,
warm_start=False,
):
super().__init__(
loss="perceptron",
penalty=penalty,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
learning_rate="constant",
eta0=eta0,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
power_t=0.5,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs,
)
| Perceptron |
python | numpy__numpy | numpy/typing/tests/test_isfile.py | {
"start": 868,
"end": 1047
} | class ____:
def test_isfile(self):
"""Test if all ``.pyi`` files are properly installed."""
for file in FILES:
assert_(os.path.isfile(file))
| TestIsFile |
python | zarr-developers__zarr-python | src/zarr/core/indexing.py | {
"start": 49782,
"end": 56380
} | class ____(Generic[T_ArrayMetadata]):
array: AsyncArray[T_ArrayMetadata]
# TODO: develop Array generic and move zarr.Array[np.intp] | zarr.Array[np.bool_] to ArrayOfIntOrBool
async def getitem(
self, selection: CoordinateSelection | MaskSelection | AnyArray
) -> NDArrayLikeOrScalar:
# TODO deduplicate these internals with the sync version of getitem
# TODO requires solving this circular sync issue: https://github.com/zarr-developers/zarr-python/pull/3083#discussion_r2230737448
from zarr.core.array import Array
# if input is a Zarr array, we materialize it now.
if isinstance(selection, Array):
selection = _zarr_array_to_int_or_bool_array(selection)
fields, new_selection = pop_fields(selection)
new_selection = ensure_tuple(new_selection)
new_selection = replace_lists(new_selection)
if is_coordinate_selection(new_selection, self.array.shape):
return await self.array.get_coordinate_selection(new_selection, fields=fields)
elif is_mask_selection(new_selection, self.array.shape):
return await self.array.get_mask_selection(new_selection, fields=fields)
else:
msg = (
"unsupported selection type for vectorized indexing; only "
"coordinate selection (tuple of integer arrays) and mask selection "
f"(single Boolean array) are supported; got {new_selection!r}"
)
raise VindexInvalidSelectionError(msg)
def check_fields(fields: Fields | None, dtype: np.dtype[Any]) -> np.dtype[Any]:
# early out
if fields is None:
return dtype
# check type
if not isinstance(fields, str | list | tuple):
raise IndexError(
f"'fields' argument must be a string or list of strings; found {type(fields)!r}"
)
if fields:
if dtype.names is None:
raise IndexError("invalid 'fields' argument, array does not have any fields")
try:
if isinstance(fields, str):
# single field selection
out_dtype = dtype[fields]
else:
# multiple field selection
out_dtype = np.dtype([(f, dtype[f]) for f in fields])
except KeyError as e:
raise IndexError(f"invalid 'fields' argument, field not found: {e!r}") from e
else:
return out_dtype
else:
return dtype
def check_no_multi_fields(fields: Fields | None) -> Fields | None:
if isinstance(fields, list):
if len(fields) == 1:
return fields[0]
elif len(fields) > 1:
raise IndexError("multiple fields are not supported for this operation")
return fields
def pop_fields(selection: SelectionWithFields) -> tuple[Fields | None, Selection]:
if isinstance(selection, str):
# single field selection
return selection, ()
elif not isinstance(selection, tuple):
# single selection item, no fields
# leave selection as-is
return None, cast("Selection", selection)
else:
# multiple items, split fields from selection items
fields: Fields = [f for f in selection if isinstance(f, str)]
fields = fields[0] if len(fields) == 1 else fields
selection_tuple = tuple(s for s in selection if not isinstance(s, str))
selection = cast(
"Selection", selection_tuple[0] if len(selection_tuple) == 1 else selection_tuple
)
return fields, selection
def make_slice_selection(selection: Any) -> list[slice]:
ls: list[slice] = []
for dim_selection in selection:
if is_integer(dim_selection):
ls.append(slice(int(dim_selection), int(dim_selection) + 1, 1))
elif isinstance(dim_selection, np.ndarray):
if len(dim_selection) == 1:
ls.append(slice(int(dim_selection[0]), int(dim_selection[0]) + 1, 1))
else:
raise ArrayIndexError
else:
ls.append(dim_selection)
return ls
def decode_morton(z: int, chunk_shape: tuple[int, ...]) -> tuple[int, ...]:
# Inspired by compressed morton code as implemented in Neuroglancer
# https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/volume.md#compressed-morton-code
bits = tuple(math.ceil(math.log2(c)) for c in chunk_shape)
max_coords_bits = max(bits)
input_bit = 0
input_value = z
out = [0] * len(chunk_shape)
for coord_bit in range(max_coords_bits):
for dim in range(len(chunk_shape)):
if coord_bit < bits[dim]:
bit = (input_value >> input_bit) & 1
out[dim] |= bit << coord_bit
input_bit += 1
return tuple(out)
def morton_order_iter(chunk_shape: tuple[int, ...]) -> Iterator[tuple[int, ...]]:
i = 0
order: list[tuple[int, ...]] = []
while len(order) < product(chunk_shape):
m = decode_morton(i, chunk_shape)
if m not in order and all(x < y for x, y in zip(m, chunk_shape, strict=False)):
order.append(m)
i += 1
for j in range(product(chunk_shape)):
yield order[j]
def c_order_iter(chunks_per_shard: tuple[int, ...]) -> Iterator[tuple[int, ...]]:
return itertools.product(*(range(x) for x in chunks_per_shard))
def get_indexer(
selection: SelectionWithFields, shape: tuple[int, ...], chunk_grid: ChunkGrid
) -> Indexer:
_, pure_selection = pop_fields(selection)
if is_pure_fancy_indexing(pure_selection, len(shape)):
new_selection = ensure_tuple(selection)
new_selection = replace_lists(new_selection)
if is_coordinate_selection(new_selection, shape):
return CoordinateIndexer(cast("CoordinateSelection", selection), shape, chunk_grid)
elif is_mask_selection(new_selection, shape):
return MaskIndexer(cast("MaskSelection", selection), shape, chunk_grid)
else:
msg = (
"unsupported selection type for vectorized indexing; only "
"coordinate selection (tuple of integer arrays) and mask selection "
f"(single Boolean array) are supported; got {new_selection!r}"
)
raise VindexInvalidSelectionError(msg)
elif is_pure_orthogonal_indexing(pure_selection, len(shape)):
return OrthogonalIndexer(cast("OrthogonalSelection", selection), shape, chunk_grid)
else:
return BasicIndexer(cast("BasicSelection", selection), shape, chunk_grid)
| AsyncVIndex |
python | pypa__setuptools | setuptools/_vendor/more_itertools/more.py | {
"start": 6386,
"end": 27019
} | class ____:
"""Wrap an iterator to allow lookahead and prepending elements.
Call :meth:`peek` on the result to get the value that will be returned
by :func:`next`. This won't advance the iterator:
>>> p = peekable(['a', 'b'])
>>> p.peek()
'a'
>>> next(p)
'a'
Pass :meth:`peek` a default value to return that instead of raising
``StopIteration`` when the iterator is exhausted.
>>> p = peekable([])
>>> p.peek('hi')
'hi'
peekables also offer a :meth:`prepend` method, which "inserts" items
at the head of the iterable:
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> p.peek()
11
>>> list(p)
[11, 12, 1, 2, 3]
peekables can be indexed. Index 0 is the item that will be returned by
:func:`next`, index 1 is the item after that, and so on:
The values up to the given index will be cached.
>>> p = peekable(['a', 'b', 'c', 'd'])
>>> p[0]
'a'
>>> p[1]
'b'
>>> next(p)
'a'
Negative indexes are supported, but be aware that they will cache the
remaining items in the source iterator, which may require significant
storage.
To check whether a peekable is exhausted, check its truth value:
>>> p = peekable(['a', 'b'])
>>> if p: # peekable has items
... list(p)
['a', 'b']
>>> if not p: # peekable is exhausted
... list(p)
[]
"""
def __init__(self, iterable):
self._it = iter(iterable)
self._cache = deque()
def __iter__(self):
return self
def __bool__(self):
try:
self.peek()
except StopIteration:
return False
return True
def peek(self, default=_marker):
"""Return the item that will be next returned from ``next()``.
Return ``default`` if there are no items left. If ``default`` is not
provided, raise ``StopIteration``.
"""
if not self._cache:
try:
self._cache.append(next(self._it))
except StopIteration:
if default is _marker:
raise
return default
return self._cache[0]
def prepend(self, *items):
"""Stack up items to be the next ones returned from ``next()`` or
``self.peek()``. The items will be returned in
first in, first out order::
>>> p = peekable([1, 2, 3])
>>> p.prepend(10, 11, 12)
>>> next(p)
10
>>> list(p)
[11, 12, 1, 2, 3]
It is possible, by prepending items, to "resurrect" a peekable that
previously raised ``StopIteration``.
>>> p = peekable([])
>>> next(p)
Traceback (most recent call last):
...
StopIteration
>>> p.prepend(1)
>>> next(p)
1
>>> next(p)
Traceback (most recent call last):
...
StopIteration
"""
self._cache.extendleft(reversed(items))
def __next__(self):
if self._cache:
return self._cache.popleft()
return next(self._it)
def _get_slice(self, index):
# Normalize the slice's arguments
step = 1 if (index.step is None) else index.step
if step > 0:
start = 0 if (index.start is None) else index.start
stop = maxsize if (index.stop is None) else index.stop
elif step < 0:
start = -1 if (index.start is None) else index.start
stop = (-maxsize - 1) if (index.stop is None) else index.stop
else:
raise ValueError('slice step cannot be zero')
# If either the start or stop index is negative, we'll need to cache
# the rest of the iterable in order to slice from the right side.
if (start < 0) or (stop < 0):
self._cache.extend(self._it)
# Otherwise we'll need to find the rightmost index and cache to that
# point.
else:
n = min(max(start, stop) + 1, maxsize)
cache_len = len(self._cache)
if n >= cache_len:
self._cache.extend(islice(self._it, n - cache_len))
return list(self._cache)[index]
def __getitem__(self, index):
if isinstance(index, slice):
return self._get_slice(index)
cache_len = len(self._cache)
if index < 0:
self._cache.extend(self._it)
elif index >= cache_len:
self._cache.extend(islice(self._it, index + 1 - cache_len))
return self._cache[index]
def consumer(func):
"""Decorator that automatically advances a PEP-342-style "reverse iterator"
to its first yield point so you don't have to call ``next()`` on it
manually.
>>> @consumer
... def tally():
... i = 0
... while True:
... print('Thing number %s is %s.' % (i, (yield)))
... i += 1
...
>>> t = tally()
>>> t.send('red')
Thing number 0 is red.
>>> t.send('fish')
Thing number 1 is fish.
Without the decorator, you would have to call ``next(t)`` before
``t.send()`` could be used.
"""
@wraps(func)
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
return gen
return wrapper
def ilen(iterable):
"""Return the number of items in *iterable*.
>>> ilen(x for x in range(1000000) if x % 3 == 0)
333334
This consumes the iterable, so handle with care.
"""
# This approach was selected because benchmarks showed it's likely the
# fastest of the known implementations at the time of writing.
# See GitHub tracker: #236, #230.
counter = count()
deque(zip(iterable, counter), maxlen=0)
return next(counter)
def iterate(func, start):
"""Return ``start``, ``func(start)``, ``func(func(start))``, ...
>>> from itertools import islice
>>> list(islice(iterate(lambda x: 2*x, 1), 10))
[1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
"""
while True:
yield start
try:
start = func(start)
except StopIteration:
break
def with_iter(context_manager):
"""Wrap an iterable in a ``with`` statement, so it closes once exhausted.
For example, this will close the file when the iterator is exhausted::
upper_lines = (line.upper() for line in with_iter(open('foo')))
Any context manager which returns an iterable is a candidate for
``with_iter``.
"""
with context_manager as iterable:
yield from iterable
def one(iterable, too_short=None, too_long=None):
"""Return the first item from *iterable*, which is expected to contain only
that item. Raise an exception if *iterable* is empty or has more than one
item.
:func:`one` is useful for ensuring that an iterable contains only one item.
For example, it can be used to retrieve the result of a database query
that is expected to return a single row.
If *iterable* is empty, ``ValueError`` will be raised. You may specify a
different exception with the *too_short* keyword:
>>> it = []
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (expected 1)'
>>> too_short = IndexError('too few items')
>>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
IndexError: too few items
Similarly, if *iterable* contains more than one item, ``ValueError`` will
be raised. You may specify a different exception with the *too_long*
keyword:
>>> it = ['too', 'many']
>>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Expected exactly one item in iterable, but got 'too',
'many', and perhaps more.
>>> too_long = RuntimeError
>>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
Note that :func:`one` attempts to advance *iterable* twice to ensure there
is only one item. See :func:`spy` or :func:`peekable` to check iterable
contents less destructively.
"""
it = iter(iterable)
try:
first_value = next(it)
except StopIteration as exc:
raise (
too_short or ValueError('too few items in iterable (expected 1)')
) from exc
try:
second_value = next(it)
except StopIteration:
pass
else:
msg = (
'Expected exactly one item in iterable, but got {!r}, {!r}, '
'and perhaps more.'.format(first_value, second_value)
)
raise too_long or ValueError(msg)
return first_value
def raise_(exception, *args):
raise exception(*args)
def strictly_n(iterable, n, too_short=None, too_long=None):
"""Validate that *iterable* has exactly *n* items and return them if
it does. If it has fewer than *n* items, call function *too_short*
with those items. If it has more than *n* items, call function
*too_long* with the first ``n + 1`` items.
>>> iterable = ['a', 'b', 'c', 'd']
>>> n = 4
>>> list(strictly_n(iterable, n))
['a', 'b', 'c', 'd']
Note that the returned iterable must be consumed in order for the check to
be made.
By default, *too_short* and *too_long* are functions that raise
``ValueError``.
>>> list(strictly_n('ab', 3)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too few items in iterable (got 2)
>>> list(strictly_n('abc', 2)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: too many items in iterable (got at least 3)
You can instead supply functions that do something else.
*too_short* will be called with the number of items in *iterable*.
*too_long* will be called with `n + 1`.
>>> def too_short(item_count):
... raise RuntimeError
>>> it = strictly_n('abcd', 6, too_short=too_short)
>>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
RuntimeError
>>> def too_long(item_count):
... print('The boss is going to hear about this')
>>> it = strictly_n('abcdef', 4, too_long=too_long)
>>> list(it)
The boss is going to hear about this
['a', 'b', 'c', 'd']
"""
if too_short is None:
too_short = lambda item_count: raise_(
ValueError,
'Too few items in iterable (got {})'.format(item_count),
)
if too_long is None:
too_long = lambda item_count: raise_(
ValueError,
'Too many items in iterable (got at least {})'.format(item_count),
)
it = iter(iterable)
for i in range(n):
try:
item = next(it)
except StopIteration:
too_short(i)
return
else:
yield item
try:
next(it)
except StopIteration:
pass
else:
too_long(n + 1)
def distinct_permutations(iterable, r=None):
"""Yield successive distinct permutations of the elements in *iterable*.
>>> sorted(distinct_permutations([1, 0, 1]))
[(0, 1, 1), (1, 0, 1), (1, 1, 0)]
Equivalent to ``set(permutations(iterable))``, except duplicates are not
generated and thrown away. For larger input sequences this is much more
efficient.
Duplicate permutations arise when there are duplicated elements in the
input iterable. The number of items returned is
`n! / (x_1! * x_2! * ... * x_n!)`, where `n` is the total number of
items input, and each `x_i` is the count of a distinct item in the input
sequence.
If *r* is given, only the *r*-length permutations are yielded.
>>> sorted(distinct_permutations([1, 0, 1], r=2))
[(0, 1), (1, 0), (1, 1)]
>>> sorted(distinct_permutations(range(3), r=2))
[(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
"""
# Algorithm: https://w.wiki/Qai
def _full(A):
while True:
# Yield the permutation we have
yield tuple(A)
# Find the largest index i such that A[i] < A[i + 1]
for i in range(size - 2, -1, -1):
if A[i] < A[i + 1]:
break
# If no such index exists, this permutation is the last one
else:
return
# Find the largest index j greater than j such that A[i] < A[j]
for j in range(size - 1, i, -1):
if A[i] < A[j]:
break
# Swap the value of A[i] with that of A[j], then reverse the
# sequence from A[i + 1] to form the new permutation
A[i], A[j] = A[j], A[i]
A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
# Algorithm: modified from the above
def _partial(A, r):
# Split A into the first r items and the last r items
head, tail = A[:r], A[r:]
right_head_indexes = range(r - 1, -1, -1)
left_tail_indexes = range(len(tail))
while True:
# Yield the permutation we have
yield tuple(head)
# Starting from the right, find the first index of the head with
# value smaller than the maximum value of the tail - call it i.
pivot = tail[-1]
for i in right_head_indexes:
if head[i] < pivot:
break
pivot = head[i]
else:
return
# Starting from the left, find the first value of the tail
# with a value greater than head[i] and swap.
for j in left_tail_indexes:
if tail[j] > head[i]:
head[i], tail[j] = tail[j], head[i]
break
# If we didn't find one, start from the right and find the first
# index of the head with a value greater than head[i] and swap.
else:
for j in right_head_indexes:
if head[j] > head[i]:
head[i], head[j] = head[j], head[i]
break
# Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
tail += head[: i - r : -1] # head[i + 1:][::-1]
i += 1
head[i:], tail[:] = tail[: r - i], tail[r - i :]
items = sorted(iterable)
size = len(items)
if r is None:
r = size
if 0 < r <= size:
return _full(items) if (r == size) else _partial(items, r)
return iter(() if r else ((),))
def intersperse(e, iterable, n=1):
"""Intersperse filler element *e* among the items in *iterable*, leaving
*n* items between each filler element.
>>> list(intersperse('!', [1, 2, 3, 4, 5]))
[1, '!', 2, '!', 3, '!', 4, '!', 5]
>>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
[1, 2, None, 3, 4, None, 5]
"""
if n == 0:
raise ValueError('n must be > 0')
elif n == 1:
# interleave(repeat(e), iterable) -> e, x_0, e, x_1, e, x_2...
# islice(..., 1, None) -> x_0, e, x_1, e, x_2...
return islice(interleave(repeat(e), iterable), 1, None)
else:
# interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
# islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
# flatten(...) -> x_0, x_1, e, x_2, x_3...
filler = repeat([e])
chunks = chunked(iterable, n)
return flatten(islice(interleave(filler, chunks), 1, None))
def unique_to_each(*iterables):
"""Return the elements from each of the input iterables that aren't in the
other input iterables.
For example, suppose you have a set of packages, each with a set of
dependencies::
{'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
If you remove one package, which dependencies can also be removed?
If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
``pkg_2``, and ``D`` is only needed for ``pkg_3``::
>>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
[['A'], ['C'], ['D']]
If there are duplicates in one input iterable that aren't in the others
they will be duplicated in the output. Input order is preserved::
>>> unique_to_each("mississippi", "missouri")
[['p', 'p'], ['o', 'u', 'r']]
It is assumed that the elements of each iterable are hashable.
"""
pool = [list(it) for it in iterables]
counts = Counter(chain.from_iterable(map(set, pool)))
uniques = {element for element in counts if counts[element] == 1}
return [list(filter(uniques.__contains__, it)) for it in pool]
def windowed(seq, n, fillvalue=None, step=1):
"""Return a sliding window of width *n* over the given iterable.
>>> all_windows = windowed([1, 2, 3, 4, 5], 3)
>>> list(all_windows)
[(1, 2, 3), (2, 3, 4), (3, 4, 5)]
When the window is larger than the iterable, *fillvalue* is used in place
of missing values:
>>> list(windowed([1, 2, 3], 4))
[(1, 2, 3, None)]
Each window will advance in increments of *step*:
>>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
[(1, 2, 3), (3, 4, 5), (5, 6, '!')]
To slide into the iterable's items, use :func:`chain` to add filler items
to the left:
>>> iterable = [1, 2, 3, 4]
>>> n = 3
>>> padding = [None] * (n - 1)
>>> list(windowed(chain(padding, iterable), 3))
[(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
if n < 0:
raise ValueError('n must be >= 0')
if n == 0:
yield ()
return
if step < 1:
raise ValueError('step must be >= 1')
iterable = iter(seq)
# Generate first window
window = deque(islice(iterable, n), maxlen=n)
# Deal with the first window not being full
if not window:
return
if len(window) < n:
yield tuple(window) + ((fillvalue,) * (n - len(window)))
return
yield tuple(window)
# Create the filler for the next windows. The padding ensures
# we have just enough elements to fill the last window.
padding = (fillvalue,) * (n - 1 if step >= n else step - 1)
filler = map(window.append, chain(iterable, padding))
# Generate the rest of the windows
for _ in islice(filler, step - 1, None, step):
yield tuple(window)
def substrings(iterable):
"""Yield all of the substrings of *iterable*.
>>> [''.join(s) for s in substrings('more')]
['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
Note that non-string iterables can also be subdivided.
>>> list(substrings([0, 1, 2]))
[(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
"""
# The length-1 substrings
seq = []
for item in iter(iterable):
seq.append(item)
yield (item,)
seq = tuple(seq)
item_count = len(seq)
# And the rest
for n in range(2, item_count + 1):
for i in range(item_count - n + 1):
yield seq[i : i + n]
def substrings_indexes(seq, reverse=False):
"""Yield all substrings and their positions in *seq*
The items yielded will be a tuple of the form ``(substr, i, j)``, where
``substr == seq[i:j]``.
This function only works for iterables that support slicing, such as
``str`` objects.
>>> for item in substrings_indexes('more'):
... print(item)
('m', 0, 1)
('o', 1, 2)
('r', 2, 3)
('e', 3, 4)
('mo', 0, 2)
('or', 1, 3)
('re', 2, 4)
('mor', 0, 3)
('ore', 1, 4)
('more', 0, 4)
Set *reverse* to ``True`` to yield the same items in the opposite order.
"""
r = range(1, len(seq) + 1)
if reverse:
r = reversed(r)
return (
(seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
)
| peekable |
python | huggingface__transformers | src/transformers/models/esm/modeling_esm.py | {
"start": 5816,
"end": 11454
} | class ____(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
if config.emb_layer_norm_before:
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
else:
self.layer_norm = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.padding_idx = config.pad_token_id
if self.position_embedding_type == "absolute":
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
self.token_dropout = config.token_dropout
self.mask_token_id = config.mask_token_id
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
inputs_embeds=None,
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# Note that if we want to support ESM-1 (not 1b!) in future then we need to support an
# embedding_scale factor here.
embeddings = inputs_embeds
# Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout
# flag is False then it is handled in the same was as BERT/RoBERTa. If it is set to True, however,
# masked tokens are treated as if they were selected for input dropout and zeroed out.
# This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by
# a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample).
# This is analogous to the way that dropout layers scale down outputs during evaluation when not
# actually dropping out values (or, equivalently, scale up their un-dropped outputs in training).
if self.token_dropout and input_ids is not None:
embeddings = embeddings.masked_fill((input_ids == self.mask_token_id).unsqueeze(-1), 0.0)
mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs
src_lengths = attention_mask.sum(-1) if attention_mask is not None else input_ids.shape[1]
mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths
embeddings = (embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]).to(
embeddings.dtype
)
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
if self.layer_norm is not None:
embeddings = self.layer_norm(embeddings)
if attention_mask is not None:
embeddings = (embeddings * attention_mask.unsqueeze(-1)).to(embeddings.dtype)
# Matt: I think this line was copied incorrectly from BERT, disabling it for now.
# embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| EsmEmbeddings |
python | doocs__leetcode | solution/0900-0999/0914.X of a Kind in a Deck of Cards/Solution.py | {
"start": 0,
"end": 145
} | class ____:
def hasGroupsSizeX(self, deck: List[int]) -> bool:
cnt = Counter(deck)
return reduce(gcd, cnt.values()) >= 2
| Solution |
python | joke2k__faker | tests/providers/test_barcode.py | {
"start": 9752,
"end": 9992
} | class ____(_LocaleNorthAmericaMixin):
"""Tests en_US barcode provider"""
num_samples = 1000
@staticmethod
def get_provider_class():
from faker.providers.barcode.en_US import Provider
return Provider
| TestEnUs |
python | spack__spack | lib/spack/spack/url_buildcache.py | {
"start": 54688,
"end": 54834
} | class ____(spack.error.SpackError):
"""Raised when spack encounters a spec file it cannot understand or process"""
pass
| InvalidMetadataFile |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_reverse_geocoded_lat_lon_to_contain.py | {
"start": 1010,
"end": 1990
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.reverse_geocoded_lat_lon_contain"
condition_value_keys = (
"word",
"provider",
)
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, word, provider=None, **kwargs):
column = column.apply(Point)
def reverse(point):
# lat lon to lon lat for reverse_geocode
return Point(point.y, point.x)
column = column.apply(reverse)
reverse_geocoded_column = column.apply(geopandas.tools.reverse_geocode)
# check if lowercase reverse geocoded string contains word
return reverse_geocoded_column.apply(lambda x: word in x["address"].values[0].lower())
# This class defines the Expectation itself
| ColumnValuesReverseGeocodedLatLonContain |
python | optuna__optuna | optuna/artifacts/_boto3.py | {
"start": 381,
"end": 3538
} | class ____:
"""An artifact backend for Boto3.
Args:
bucket_name:
The name of the bucket to store artifacts.
client:
A Boto3 client to use for storage operations. If not specified, a new client will
be created.
avoid_buf_copy:
If True, skip procedure to copy the content of the source file object to a buffer
before uploading it to S3 ins. This is default to False because using
``upload_fileobj()`` method of Boto3 client might close the source file object.
Example:
.. code-block:: python
import optuna
from optuna.artifacts import upload_artifact
from optuna.artifacts import Boto3ArtifactStore
artifact_store = Boto3ArtifactStore("my-bucket")
def objective(trial: optuna.Trial) -> float:
... = trial.suggest_float("x", -10, 10)
file_path = generate_example(...)
upload_artifact(
artifact_store=artifact_store,
file_path=file_path,
study_or_trial=trial,
)
return ...
"""
def __init__(
self, bucket_name: str, client: S3Client | None = None, *, avoid_buf_copy: bool = False
) -> None:
_imports.check()
self.bucket = bucket_name
self.client = client or boto3.client("s3")
# This flag is added to avoid that upload_fileobj() method of Boto3 client may close the
# source file object. See https://github.com/boto/boto3/issues/929.
self._avoid_buf_copy = avoid_buf_copy
def open_reader(self, artifact_id: str) -> BinaryIO:
try:
obj = self.client.get_object(Bucket=self.bucket, Key=artifact_id)
except ClientError as e:
if _is_not_found_error(e):
raise ArtifactNotFound(
f"Artifact storage with bucket: {self.bucket}, artifact_id: {artifact_id} was"
" not found"
) from e
raise
body = obj.get("Body")
assert body is not None
return body
def write(self, artifact_id: str, content_body: BinaryIO) -> None:
fsrc: BinaryIO = content_body
if not self._avoid_buf_copy:
buf = io.BytesIO()
shutil.copyfileobj(content_body, buf)
buf.seek(0)
fsrc = buf
self.client.upload_fileobj(fsrc, self.bucket, artifact_id)
def remove(self, artifact_id: str) -> None:
self.client.delete_object(Bucket=self.bucket, Key=artifact_id)
def _is_not_found_error(e: ClientError) -> bool:
error_code = e.response.get("Error", {}).get("Code")
http_status_code = e.response.get("ResponseMetadata", {}).get("HTTPStatusCode")
return error_code == "NoSuchKey" or http_status_code == 404
if TYPE_CHECKING:
# A mypy-runtime assertion to ensure that Boto3ArtifactStore implements all abstract methods
# in ArtifactStore.
from optuna.artifacts._protocol import ArtifactStore
_: ArtifactStore = Boto3ArtifactStore("")
| Boto3ArtifactStore |
python | kamyu104__LeetCode-Solutions | Python/map-of-highest-peak.py | {
"start": 992,
"end": 1979
} | class ____(object):
def highestPeak(self, isWater):
"""
:type isWater: List[List[int]]
:rtype: List[List[int]]
"""
directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
q, heights = [], [[-1]*len(isWater[0]) for _ in xrange(len(isWater))]
for r, row in enumerate(isWater):
for c, cell in enumerate(row):
if not cell:
continue
heights[r][c] = 0
q.append((r, c))
while q:
new_q = []
for r, c in q:
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(isWater) and
0 <= nc < len(isWater[0]) and
heights[nr][nc] == -1):
continue
heights[nr][nc] = heights[r][c]+1
q.append((nr, nc))
q = new_q
return heights
| Solution2 |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/worker_api/datamodels.py | {
"start": 4027,
"end": 4235
} | class ____(WorkerQueuesBase):
"""Queues that a worker supports to run jobs on."""
free_concurrency: Annotated[int, Field(description="Number of free concurrency slots on the worker.")]
| WorkerQueuesBody |
python | pypa__hatch | src/hatch/config/constants.py | {
"start": 327,
"end": 466
} | class ____:
PROJECT = "HATCH_PROJECT"
DATA = "HATCH_DATA_DIR"
CACHE = "HATCH_CACHE_DIR"
CONFIG = "HATCH_CONFIG"
| ConfigEnvVars |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/executors/kubernetes_executor_types.py | {
"start": 1488,
"end": 1751
} | class ____(NamedTuple):
"""Results from Kubernetes task execution."""
key: TaskInstanceKey
state: TaskInstanceState | str | None
pod_name: str
namespace: str
resource_version: str
failure_details: FailureDetails | None
| KubernetesResults |
python | langchain-ai__langchain | libs/core/langchain_core/vectorstores/base.py | {
"start": 35483,
"end": 40769
} | class ____(BaseRetriever):
"""Base Retriever class for VectorStore."""
vectorstore: VectorStore
"""VectorStore to use for retrieval."""
search_type: str = "similarity"
"""Type of search to perform."""
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass to the search function."""
allowed_search_types: ClassVar[Collection[str]] = (
"similarity",
"similarity_score_threshold",
"mmr",
)
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@model_validator(mode="before")
@classmethod
def validate_search_type(cls, values: dict) -> Any:
"""Validate search type.
Args:
values: Values to validate.
Returns:
Validated values.
Raises:
ValueError: If `search_type` is not one of the allowed search types.
ValueError: If `score_threshold` is not specified with a float value(`0~1`)
"""
search_type = values.get("search_type", "similarity")
if search_type not in cls.allowed_search_types:
msg = (
f"search_type of {search_type} not allowed. Valid values are: "
f"{cls.allowed_search_types}"
)
raise ValueError(msg)
if search_type == "similarity_score_threshold":
score_threshold = values.get("search_kwargs", {}).get("score_threshold")
if (score_threshold is None) or (not isinstance(score_threshold, float)):
msg = (
"`score_threshold` is not specified with a float value(0~1) "
"in `search_kwargs`."
)
raise ValueError(msg)
return values
def _get_ls_params(self, **kwargs: Any) -> LangSmithRetrieverParams:
"""Get standard params for tracing."""
kwargs_ = self.search_kwargs | kwargs
ls_params = super()._get_ls_params(**kwargs_)
ls_params["ls_vector_store_provider"] = self.vectorstore.__class__.__name__
if self.vectorstore.embeddings:
ls_params["ls_embedding_provider"] = (
self.vectorstore.embeddings.__class__.__name__
)
elif hasattr(self.vectorstore, "embedding") and isinstance(
self.vectorstore.embedding, Embeddings
):
ls_params["ls_embedding_provider"] = (
self.vectorstore.embedding.__class__.__name__
)
return ls_params
@override
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun, **kwargs: Any
) -> list[Document]:
kwargs_ = self.search_kwargs | kwargs
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, **kwargs_)
elif self.search_type == "similarity_score_threshold":
docs_and_similarities = (
self.vectorstore.similarity_search_with_relevance_scores(
query, **kwargs_
)
)
docs = [doc for doc, _ in docs_and_similarities]
elif self.search_type == "mmr":
docs = self.vectorstore.max_marginal_relevance_search(query, **kwargs_)
else:
msg = f"search_type of {self.search_type} not allowed."
raise ValueError(msg)
return docs
@override
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> list[Document]:
kwargs_ = self.search_kwargs | kwargs
if self.search_type == "similarity":
docs = await self.vectorstore.asimilarity_search(query, **kwargs_)
elif self.search_type == "similarity_score_threshold":
docs_and_similarities = (
await self.vectorstore.asimilarity_search_with_relevance_scores(
query, **kwargs_
)
)
docs = [doc for doc, _ in docs_and_similarities]
elif self.search_type == "mmr":
docs = await self.vectorstore.amax_marginal_relevance_search(
query, **kwargs_
)
else:
msg = f"search_type of {self.search_type} not allowed."
raise ValueError(msg)
return docs
def add_documents(self, documents: list[Document], **kwargs: Any) -> list[str]:
"""Add documents to the `VectorStore`.
Args:
documents: Documents to add to the `VectorStore`.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
List of IDs of the added texts.
"""
return self.vectorstore.add_documents(documents, **kwargs)
async def aadd_documents(
self, documents: list[Document], **kwargs: Any
) -> list[str]:
"""Async add documents to the `VectorStore`.
Args:
documents: Documents to add to the `VectorStore`.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
List of IDs of the added texts.
"""
return await self.vectorstore.aadd_documents(documents, **kwargs)
| VectorStoreRetriever |
python | pypa__warehouse | tests/unit/integration/vulnerabilities/osv/test_package.py | {
"start": 202,
"end": 16809
} | class ____:
def test_init(self, metrics):
session = pretend.stub()
cache = integrations.PublicKeysCache(cache_time=12)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
session=session,
metrics=metrics,
public_keys_cache=cache,
)
# assert vuln_report_verifier._session is session
assert vuln_report_verifier._metrics is metrics
assert vuln_report_verifier._public_keys_cache is cache
def test_verify_cache_miss(self, metrics):
# Example taken from
# https://gist.github.com/ewjoachim/7dde11c31d9686ed6b4431c3ca166da2
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1c2S+CINXEihVeXz95He1bmWfhPc\n"
"ri7XBJXSEtW2IuZZyrlQP7wDXVupMZ3OsGsZaNX0SL4/nOx2S4OTrF1miA==\n"
"-----END PUBLIC KEY-----\n",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=lambda *a, **k: response)
cache = integrations.PublicKeysCache(cache_time=12)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=metrics,
public_keys_cache=cache,
)
key_id = "90a421169f0a406205f1563a953312f0be898d3c7b6c06b681aa86a874555f4a"
signature = (
"MEUCIQDz4wvDZjrX2YHsWhmu5Cvvp0gny6xYMD0AGrwEhTHGRAIgXCSvx"
"Tl2SdnaY7fImXFRSKhbw3IRf68g1LMaQRetM80="
)
payload = (
b'[{"project":"vuln_project",'
b'"versions":["v1","v2"],'
b'"id":"vuln_id",'
b'"link":"vulns.com/vuln_id",'
b'"aliases":["vuln_alias"]}]'
)
assert (
vuln_report_verifier.verify(
payload=payload, key_id=key_id, signature=signature
)
is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.vulnerabilities.osv.auth.cache.miss"),
pretend.call("warehouse.vulnerabilities.osv.auth.success"),
]
def test_verify_cache_hit(self, metrics):
session = pretend.stub()
cache = integrations.PublicKeysCache(cache_time=12)
cache.cached_at = time.time()
cache.cache = [
{
"key_id": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1c2S+CINXEihVeXz95He1bmWfhPc\n"
"ri7XBJXSEtW2IuZZyrlQP7wDXVupMZ3OsGsZaNX0SL4/nOx2S4OTrF1miA==\n"
"-----END PUBLIC KEY-----\n",
}
]
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=metrics,
public_keys_cache=cache,
)
key_id = "90a421169f0a406205f1563a953312f0be898d3c7b6c06b681aa86a874555f4a"
signature = (
"MEUCIQDz4wvDZjrX2YHsWhmu5Cvvp0gny6xYMD0AGrwEhTHGRAIgXCSvx"
"Tl2SdnaY7fImXFRSKhbw3IRf68g1LMaQRetM80="
)
payload = (
b'[{"project":"vuln_project",'
b'"versions":["v1","v2"],'
b'"id":"vuln_id",'
b'"link":"vulns.com/vuln_id",'
b'"aliases":["vuln_alias"]}]'
)
assert (
vuln_report_verifier.verify(
payload=payload, key_id=key_id, signature=signature
)
is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.vulnerabilities.osv.auth.cache.hit"),
pretend.call("warehouse.vulnerabilities.osv.auth.success"),
]
def test_verify_error(self, metrics):
cache = integrations.PublicKeysCache(cache_time=12)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=metrics,
public_keys_cache=cache,
)
vuln_report_verifier.retrieve_public_key_payload = pretend.raiser(
integrations.InvalidPayloadSignatureError("Bla", "bla")
)
assert (
vuln_report_verifier.verify(payload={}, key_id="a", signature="a") is False
)
assert metrics.increment.calls == [
pretend.call("warehouse.vulnerabilities.osv.auth.cache.miss"),
pretend.call("warehouse.vulnerabilities.osv.auth.error.bla"),
]
def test_retrieve_public_key_payload(self):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1c2S+CINXEihVeXz95He1bmWfhPc\n"
"ri7XBJXSEtW2IuZZyrlQP7wDXVupMZ3OsGsZaNX0SL4/nOx2S4OTrF1miA==\n"
"-----END PUBLIC KEY-----\n",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=pretend.call_recorder(lambda *a, **k: response))
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
assert vuln_report_verifier.retrieve_public_key_payload() == meta_payload
assert session.get.calls == [
pretend.call(
"http://foo",
)
]
def test_get_cached_public_key_cache_hit(self):
session = pretend.stub()
cache = integrations.PublicKeysCache(cache_time=12)
cache_value = pretend.stub()
cache.set(now=time.time(), value=cache_value)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=cache,
)
assert vuln_report_verifier._get_cached_public_keys() is cache_value
def test_get_cached_public_key_cache_miss_no_cache(self):
session = pretend.stub()
cache = integrations.PublicKeysCache(cache_time=12)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=cache,
)
with pytest.raises(integrations.CacheMissError):
vuln_report_verifier._get_cached_public_keys()
def test_retrieve_public_key_payload_http_error(self):
response = pretend.stub(
status_code=418,
text="I'm a teapot",
raise_for_status=pretend.raiser(requests.HTTPError),
)
session = pretend.stub(
get=lambda *a, **k: response,
)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(osv.OSVPublicKeyAPIError) as exc:
vuln_report_verifier.retrieve_public_key_payload()
assert str(exc.value) == "Invalid response code 418: I'm a teapot"
assert exc.value.reason == "public_key_api.status.418"
def test_retrieve_public_key_payload_json_error(self):
response = pretend.stub(
text="Still a non-json teapot",
json=pretend.raiser(json.JSONDecodeError("", "", 3)),
raise_for_status=lambda: None,
)
session = pretend.stub(get=lambda *a, **k: response)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(osv.OSVPublicKeyAPIError) as exc:
vuln_report_verifier.retrieve_public_key_payload()
assert str(exc.value) == "Non-JSON response received: Still a non-json teapot"
assert exc.value.reason == "public_key_api.invalid_json"
def test_retrieve_public_key_payload_connection_error(self):
session = pretend.stub(get=pretend.raiser(requests.ConnectionError))
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(osv.OSVPublicKeyAPIError) as exc:
vuln_report_verifier.retrieve_public_key_payload()
assert str(exc.value) == "Could not connect to OSV"
assert exc.value.reason == "public_key_api.network_error"
def test_extract_public_keys(self):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1c2S+CINXEihVeXz95He1bmWfhPc\n"
"ri7XBJXSEtW2IuZZyrlQP7wDXVupMZ3OsGsZaNX0SL4/nOx2S4OTrF1miA==\n"
"-----END PUBLIC KEY-----\n",
"is_current": True,
}
]
}
cache = integrations.PublicKeysCache(cache_time=12)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
keys = vuln_report_verifier.extract_public_keys(pubkey_api_data=meta_payload)
assert keys == [
{
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1c2S+CINXEihVeXz95He1bmWfhPc\n"
"ri7XBJXSEtW2IuZZyrlQP7wDXVupMZ3OsGsZaNX0SL4/nOx2S4OTrF1miA==\n"
"-----END PUBLIC KEY-----\n",
"key_id": "90a421169f0a406205f1563a953312f0be"
"898d3c7b6c06b681aa86a874555f4a",
}
]
assert cache.cache == keys
@pytest.mark.parametrize(
("payload", "expected"),
[
([], "Payload is not a dict but: []"),
({}, "Payload misses 'public_keys' attribute"),
({"public_keys": None}, "Payload 'public_keys' attribute is not a list"),
({"public_keys": [None]}, "Key is not a dict but: None"),
(
{"public_keys": [{}]},
"Missing attribute in key: ['key', 'key_identifier']",
),
(
{"public_keys": [{"key": "a"}]},
"Missing attribute in key: ['key_identifier']",
),
(
{"public_keys": [{"key_identifier": "a"}]},
"Missing attribute in key: ['key']",
),
],
)
def test_extract_public_keys_error(self, payload, expected):
cache = integrations.PublicKeysCache(cache_time=12)
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
with pytest.raises(osv.OSVPublicKeyAPIError) as exc:
list(vuln_report_verifier.extract_public_keys(pubkey_api_data=payload))
assert exc.value.reason == "public_key_api.format_error"
assert str(exc.value) == expected
assert cache.cache is None
def test_check_public_key(self):
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
keys = [
{"key_id": "a", "key": "b"},
{"key_id": "c", "key": "d"},
]
assert (
vuln_report_verifier._check_public_key(public_keys=keys, key_id="c") == "d"
)
def test_check_public_key_error(self):
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(integrations.InvalidPayloadSignatureError) as exc:
vuln_report_verifier._check_public_key(public_keys=[], key_id="c")
assert str(exc.value) == "Key c not found in public keys"
assert exc.value.reason == "wrong_key_id"
def test_check_signature(self):
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = (
"-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1c2S+CINXEihVeXz95He1bmWfhPc\n"
"ri7XBJXSEtW2IuZZyrlQP7wDXVupMZ3OsGsZaNX0SL4/nOx2S4OTrF1miA==\n"
"-----END PUBLIC KEY-----\n"
)
signature = (
"MEUCIQDz4wvDZjrX2YHsWhmu5Cvvp0gny6xYMD0AGrwEhTHGRAIgXCSvx"
"Tl2SdnaY7fImXFRSKhbw3IRf68g1LMaQRetM80="
)
payload = (
b'[{"project":"vuln_project",'
b'"versions":["v1","v2"],'
b'"id":"vuln_id",'
b'"link":"vulns.com/vuln_id",'
b'"aliases":["vuln_alias"]}]'
)
assert (
vuln_report_verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
is None
)
def test_check_signature_invalid_signature(self):
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = (
"-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1c2S+CINXEihVeXz95He1bmWfhPc\n"
"ri7XBJXSEtW2IuZZyrlQP7wDXVupMZ3OsGsZaNX0SL4/nOx2S4OTrF1miA==\n"
"-----END PUBLIC KEY-----\n"
)
signature = (
"MEUCIQDz4wvDZjrX2YHsWd34db33f0gny6xYMD0AGrwEhTHGRAIgXCSvx"
"Tl2SdnaY7fImXFRSKhbw3IRf68g1LMaQRetM80="
)
payload = (
b'[{"project":"vuln_project",'
b'"versions":["v1","v2"],'
b'"id":"vuln_id",'
b'"link":"vulns.com/vuln_id",'
b'"aliases":["vuln_alias"]}]'
)
with pytest.raises(integrations.InvalidPayloadSignatureError) as exc:
vuln_report_verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
assert str(exc.value) == "Invalid signature"
assert exc.value.reason == "invalid_signature"
def test_check_signature_invalid_crypto(self):
vuln_report_verifier = osv.VulnerabilityReportVerifier(
public_keys_api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = ""
signature = ""
payload = "yeah, nope, that won't pass"
with pytest.raises(integrations.InvalidPayloadSignatureError) as exc:
vuln_report_verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
assert str(exc.value) == "Invalid cryptographic values"
assert exc.value.reason == "invalid_crypto"
| TestVulnerabilityReportVerifier |
python | sympy__sympy | sympy/series/sequences.py | {
"start": 20637,
"end": 27276
} | class ____(SeqBase):
"""
A finite degree recursive sequence.
Explanation
===========
That is, a sequence a(n) that depends on a fixed, finite number of its
previous values. The general form is
a(n) = f(a(n - 1), a(n - 2), ..., a(n - d))
for some fixed, positive integer d, where f is some function defined by a
SymPy expression.
Parameters
==========
recurrence : SymPy expression defining recurrence
This is *not* an equality, only the expression that the nth term is
equal to. For example, if :code:`a(n) = f(a(n - 1), ..., a(n - d))`,
then the expression should be :code:`f(a(n - 1), ..., a(n - d))`.
yn : applied undefined function
Represents the nth term of the sequence as e.g. :code:`y(n)` where
:code:`y` is an undefined function and `n` is the sequence index.
n : symbolic argument
The name of the variable that the recurrence is in, e.g., :code:`n` if
the recurrence function is :code:`y(n)`.
initial : iterable with length equal to the degree of the recurrence
The initial values of the recurrence.
start : start value of sequence (inclusive)
Examples
========
>>> from sympy import Function, symbols
>>> from sympy.series.sequences import RecursiveSeq
>>> y = Function("y")
>>> n = symbols("n")
>>> fib = RecursiveSeq(y(n - 1) + y(n - 2), y(n), n, [0, 1])
>>> fib.coeff(3) # Value at a particular point
2
>>> fib[:6] # supports slicing
[0, 1, 1, 2, 3, 5]
>>> fib.recurrence # inspect recurrence
Eq(y(n), y(n - 2) + y(n - 1))
>>> fib.degree # automatically determine degree
2
>>> for x in zip(range(10), fib): # supports iteration
... print(x)
(0, 0)
(1, 1)
(2, 1)
(3, 2)
(4, 3)
(5, 5)
(6, 8)
(7, 13)
(8, 21)
(9, 34)
See Also
========
sympy.series.sequences.SeqFormula
"""
def __new__(cls, recurrence, yn, n, initial=None, start=0):
if not isinstance(yn, AppliedUndef):
raise TypeError("recurrence sequence must be an applied undefined function"
", found `{}`".format(yn))
if not isinstance(n, Basic) or not n.is_symbol:
raise TypeError("recurrence variable must be a symbol"
", found `{}`".format(n))
if yn.args != (n,):
raise TypeError("recurrence sequence does not match symbol")
y = yn.func
k = Wild("k", exclude=(n,))
degree = 0
# Find all applications of y in the recurrence and check that:
# 1. The function y is only being used with a single argument; and
# 2. All arguments are n + k for constant negative integers k.
prev_ys = recurrence.find(y)
for prev_y in prev_ys:
if len(prev_y.args) != 1:
raise TypeError("Recurrence should be in a single variable")
shift = prev_y.args[0].match(n + k)[k]
if not (shift.is_constant() and shift.is_integer and shift < 0):
raise TypeError("Recurrence should have constant,"
" negative, integer shifts"
" (found {})".format(prev_y))
if -shift > degree:
degree = -shift
if not initial:
initial = [Dummy("c_{}".format(k)) for k in range(degree)]
if len(initial) != degree:
raise ValueError("Number of initial terms must equal degree")
degree = Integer(degree)
start = sympify(start)
initial = Tuple(*(sympify(x) for x in initial))
seq = Basic.__new__(cls, recurrence, yn, n, initial, start)
seq.cache = {y(start + k): init for k, init in enumerate(initial)}
seq.degree = degree
return seq
@property
def _recurrence(self):
"""Equation defining recurrence."""
return self.args[0]
@property
def recurrence(self):
"""Equation defining recurrence."""
return Eq(self.yn, self.args[0])
@property
def yn(self):
"""Applied function representing the nth term"""
return self.args[1]
@property
def y(self):
"""Undefined function for the nth term of the sequence"""
return self.yn.func
@property
def n(self):
"""Sequence index symbol"""
return self.args[2]
@property
def initial(self):
"""The initial values of the sequence"""
return self.args[3]
@property
def start(self):
"""The starting point of the sequence. This point is included"""
return self.args[4]
@property
def stop(self):
"""The ending point of the sequence. (oo)"""
return S.Infinity
@property
def interval(self):
"""Interval on which sequence is defined."""
return (self.start, S.Infinity)
def _eval_coeff(self, index):
if index - self.start < len(self.cache):
return self.cache[self.y(index)]
for current in range(len(self.cache), index + 1):
# Use xreplace over subs for performance.
# See issue #10697.
seq_index = self.start + current
current_recurrence = self._recurrence.xreplace({self.n: seq_index})
new_term = current_recurrence.xreplace(self.cache)
self.cache[self.y(seq_index)] = new_term
return self.cache[self.y(self.start + current)]
def __iter__(self):
index = self.start
while True:
yield self._eval_coeff(index)
index += 1
def sequence(seq, limits=None):
"""
Returns appropriate sequence object.
Explanation
===========
If ``seq`` is a SymPy sequence, returns :class:`SeqPer` object
otherwise returns :class:`SeqFormula` object.
Examples
========
>>> from sympy import sequence
>>> from sympy.abc import n
>>> sequence(n**2, (n, 0, 5))
SeqFormula(n**2, (n, 0, 5))
>>> sequence((1, 2, 3), (n, 0, 5))
SeqPer((1, 2, 3), (n, 0, 5))
See Also
========
sympy.series.sequences.SeqPer
sympy.series.sequences.SeqFormula
"""
seq = sympify(seq)
if is_sequence(seq, Tuple):
return SeqPer(seq, limits)
else:
return SeqFormula(seq, limits)
###############################################################################
# OPERATIONS #
###############################################################################
| RecursiveSeq |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 4156,
"end": 4317
} | class ____(ParentD):
def f(self):
super: "Any"
__class__ # Python injects __class__ into scope
builtins.super(ChildD6, self).f()
| ChildD6 |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fragments.py | {
"start": 704,
"end": 1057
} | class ____(GQLResult):
typename__: Typename[Literal["ArtifactSequence", "ArtifactPortfolio"]]
id: GQLId
name: str
description: Optional[str]
created_at: str = Field(alias="createdAt")
project: Optional[ProjectInfoFragment]
type: ArtifactCollectionFragmentType
tags: ArtifactCollectionFragmentTags
| ArtifactCollectionFragment |
python | huggingface__transformers | src/transformers/models/prophetnet/modeling_prophetnet.py | {
"start": 26875,
"end": 27867
} | class ____(nn.Module):
"""
This is the residual two feed-forward layer block based on the original Transformer implementation.
"""
def __init__(self, config: ProphetNetConfig, ffn_dim: int):
super().__init__()
self.activation_fn = ACT2FN[config.activation_function]
self.intermediate = nn.Linear(config.hidden_size, ffn_dim)
self.output = nn.Linear(ffn_dim, config.hidden_size)
self.activation_dropout = config.activation_dropout
self.dropout = config.dropout
def forward(self, hidden_states):
hidden_states = self.intermediate(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.output(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
| ProphetNetFeedForward |
python | sympy__sympy | sympy/stats/matrix_distributions.py | {
"start": 11675,
"end": 14311
} | class ____(MatrixDistribution):
_argnames = ('n', 'scale_matrix')
@staticmethod
def check(n, scale_matrix):
if not isinstance(scale_matrix, MatrixSymbol):
_value_check(scale_matrix.is_positive_definite, "The shape "
"matrix must be positive definite.")
_value_check(scale_matrix.is_square, "Should "
"be square matrix")
_value_check(n.is_positive, "Shape parameter should be positive.")
@property
def set(self):
k = self.scale_matrix.shape[0]
return MatrixSet(k, k, S.Reals)
@property
def dimension(self):
return self.scale_matrix.shape
def pdf(self, x):
n, scale_matrix = self.n, self.scale_matrix
p = scale_matrix.shape[0]
if isinstance(x, list):
x = ImmutableMatrix(x)
if not isinstance(x, (MatrixBase, MatrixSymbol)):
raise ValueError("%s should be an isinstance of Matrix "
"or MatrixSymbol" % str(x))
sigma_inv_x = - Inverse(scale_matrix)*x / S(2)
term1 = exp(Trace(sigma_inv_x))/((2**(p*n/S(2))) * multigamma(n/S(2), p))
term2 = (Determinant(scale_matrix))**(-n/S(2))
term3 = (Determinant(x))**(S(n - p - 1)/2)
return term1 * term2 * term3
def Wishart(symbol, n, scale_matrix):
"""
Creates a random variable with Wishart Distribution.
The density of the said distribution can be found at [1].
Parameters
==========
n: Positive Real number
Represents degrees of freedom
scale_matrix: Positive definite real square matrix
Scale Matrix
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, Wishart
>>> from sympy import MatrixSymbol, symbols
>>> n = symbols('n', positive=True)
>>> W = Wishart('W', n, [[2, 1], [1, 2]])
>>> X = MatrixSymbol('X', 2, 2)
>>> density(W)(X).doit()
exp(Trace(Matrix([
[-1/3, 1/6],
[ 1/6, -1/3]])*X))*Determinant(X)**(n/2 - 3/2)/(2**n*3**(n/2)*sqrt(pi)*gamma(n/2)*gamma(n/2 - 1/2))
>>> density(W)([[1, 0], [0, 1]]).doit()
exp(-2/3)/(2**n*3**(n/2)*sqrt(pi)*gamma(n/2)*gamma(n/2 - 1/2))
References
==========
.. [1] https://en.wikipedia.org/wiki/Wishart_distribution
"""
if isinstance(scale_matrix, list):
scale_matrix = ImmutableMatrix(scale_matrix)
return rv(symbol, WishartDistribution, (n, scale_matrix))
#-------------------------------------------------------------------------------
# Matrix Normal distribution ---------------------------------------------------
| WishartDistribution |
python | fsspec__filesystem_spec | fsspec/asyn.py | {
"start": 33110,
"end": 36578
} | class ____(AbstractBufferedFile):
# no read buffering, and always auto-commit
# TODO: readahead might still be useful here, but needs async version
async def read(self, length=-1):
"""
Return data from cache, or fetch pieces as necessary
Parameters
----------
length: int (-1)
Number of bytes to read; if <0, all remaining bytes.
"""
length = -1 if length is None else int(length)
if self.mode != "rb":
raise ValueError("File not in read mode")
if length < 0:
length = self.size - self.loc
if self.closed:
raise ValueError("I/O operation on closed file.")
if length == 0:
# don't even bother calling fetch
return b""
out = await self._fetch_range(self.loc, self.loc + length)
self.loc += len(out)
return out
async def write(self, data):
"""
Write data to buffer.
Buffer only sent on flush() or if buffer is greater than
or equal to blocksize.
Parameters
----------
data: bytes
Set of bytes to be written.
"""
if self.mode not in {"wb", "ab"}:
raise ValueError("File not in write mode")
if self.closed:
raise ValueError("I/O operation on closed file.")
if self.forced:
raise ValueError("This file has been force-flushed, can only close")
out = self.buffer.write(data)
self.loc += out
if self.buffer.tell() >= self.blocksize:
await self.flush()
return out
async def close(self):
"""Close file
Finalizes writes, discards cache
"""
if getattr(self, "_unclosable", False):
return
if self.closed:
return
if self.mode == "rb":
self.cache = None
else:
if not self.forced:
await self.flush(force=True)
if self.fs is not None:
self.fs.invalidate_cache(self.path)
self.fs.invalidate_cache(self.fs._parent(self.path))
self.closed = True
async def flush(self, force=False):
if self.closed:
raise ValueError("Flush on closed file")
if force and self.forced:
raise ValueError("Force flush cannot be called more than once")
if force:
self.forced = True
if self.mode not in {"wb", "ab"}:
# no-op to flush on read-mode
return
if not force and self.buffer.tell() < self.blocksize:
# Defer write on small block
return
if self.offset is None:
# Initialize a multipart upload
self.offset = 0
try:
await self._initiate_upload()
except:
self.closed = True
raise
if await self._upload_chunk(final=force) is not False:
self.offset += self.buffer.seek(0, 2)
self.buffer = io.BytesIO()
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
async def _fetch_range(self, start, end):
raise NotImplementedError
async def _initiate_upload(self):
pass
async def _upload_chunk(self, final=False):
raise NotImplementedError
| AbstractAsyncStreamedFile |
python | getsentry__sentry | src/sentry/sentry_apps/models/sentry_app_installation_token.py | {
"start": 550,
"end": 2430
} | class ____(BaseManager["SentryAppInstallationToken"]):
def get_token(self, organization_id: int, provider: str) -> str | None:
"""Find a token associated with the installation so we can use it for authentication."""
sentry_app_installation_tokens = self.select_related("api_token").filter(
sentry_app_installation__sentryappinstallationforprovider__organization_id=organization_id,
sentry_app_installation__sentryappinstallationforprovider__provider=provider,
)
if not sentry_app_installation_tokens:
return None
return sentry_app_installation_tokens[0].api_token.token
def _get_token(self, token: ApiToken | AuthenticatedToken) -> SentryAppInstallationToken | None:
if isinstance(token, ApiToken):
id = token.id
elif token.kind == "api_token" and token.entity_id is not None:
id = token.entity_id
else:
return None
try:
return self.select_related("sentry_app_installation").get(api_token_id=id)
except SentryAppInstallationToken.DoesNotExist:
pass
return None
def get_projects(self, token: ApiToken) -> QuerySet:
from sentry.models.project import Project
install_token = self._get_token(token)
if not install_token:
return Project.objects.none()
return Project.objects.filter(
organization_id=install_token.sentry_app_installation.organization_id
)
def has_organization_access(
self, token: ApiToken | AuthenticatedToken, organization_id: int
) -> bool:
install_token = self._get_token(token)
if not install_token:
return False
return install_token.sentry_app_installation.organization_id == organization_id
@control_silo_model
| SentryAppInstallationTokenManager |
python | numpy__numpy | numpy/lib/tests/test_nanfunctions.py | {
"start": 15726,
"end": 19832
} | class ____:
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with warnings.catch_warnings():
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
warnings.simplefilter('ignore', ComplexWarning)
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with warnings.catch_warnings():
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
warnings.simplefilter('ignore', ComplexWarning)
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, f"res {res}, tgt {tgt}")
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_subclass(self):
class MyNDArray(np.ndarray):
pass
# Check that it works and that type and
# shape are preserved
array = np.eye(3)
mine = array.view(MyNDArray)
for f in self.nanfuncs:
expected_shape = f(array, axis=0).shape
res = f(mine, axis=0)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == expected_shape)
expected_shape = f(array, axis=1).shape
res = f(mine, axis=1)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == expected_shape)
expected_shape = f(array).shape
res = f(mine)
assert_(isinstance(res, MyNDArray))
assert_(res.shape == expected_shape)
| SharedNanFunctionsTestsMixin |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/auto_ml.py | {
"start": 11422,
"end": 15407
} | class ____(AutoMLTrainingJobBaseOperator):
"""Create Auto ML Image Training job."""
template_fields = (
"parent_model",
"dataset_id",
"region",
"impersonation_chain",
)
operator_extra_links = (VertexAIModelLink(), VertexAITrainingLink())
def __init__(
self,
*,
dataset_id: str,
prediction_type: str = "classification",
multi_label: bool = False,
model_type: str = "CLOUD",
base_model: Model | None = None,
validation_fraction_split: float | None = None,
training_filter_split: str | None = None,
validation_filter_split: str | None = None,
test_filter_split: str | None = None,
budget_milli_node_hours: int | None = None,
disable_early_stopping: bool = False,
region: str,
impersonation_chain: str | Sequence[str] | None = None,
parent_model: str | None = None,
**kwargs,
) -> None:
super().__init__(
region=region, impersonation_chain=impersonation_chain, parent_model=parent_model, **kwargs
)
self.dataset_id = dataset_id
self.prediction_type = prediction_type
self.multi_label = multi_label
self.model_type = model_type
self.base_model = base_model
self.validation_fraction_split = validation_fraction_split
self.training_filter_split = training_filter_split
self.validation_filter_split = validation_filter_split
self.test_filter_split = test_filter_split
self.budget_milli_node_hours = budget_milli_node_hours
self.disable_early_stopping = disable_early_stopping
def execute(self, context: Context):
self.hook = AutoMLHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.parent_model = self.parent_model.split("@")[0] if self.parent_model else None
model, training_id = self.hook.create_auto_ml_image_training_job(
project_id=self.project_id,
region=self.region,
display_name=self.display_name,
dataset=datasets.ImageDataset(dataset_name=self.dataset_id),
parent_model=self.parent_model,
is_default_version=self.is_default_version,
model_version_aliases=self.model_version_aliases,
model_version_description=self.model_version_description,
prediction_type=self.prediction_type,
multi_label=self.multi_label,
model_type=self.model_type,
base_model=self.base_model,
labels=self.labels,
training_encryption_spec_key_name=self.training_encryption_spec_key_name,
model_encryption_spec_key_name=self.model_encryption_spec_key_name,
training_fraction_split=self.training_fraction_split,
validation_fraction_split=self.validation_fraction_split,
test_fraction_split=self.test_fraction_split,
training_filter_split=self.training_filter_split,
validation_filter_split=self.validation_filter_split,
test_filter_split=self.test_filter_split,
budget_milli_node_hours=self.budget_milli_node_hours,
model_display_name=self.model_display_name,
model_labels=self.model_labels,
disable_early_stopping=self.disable_early_stopping,
sync=self.sync,
)
if model:
result = Model.to_dict(model)
model_id = self.hook.extract_model_id(result)
context["ti"].xcom_push(key="model_id", value=model_id)
VertexAIModelLink.persist(context=context, model_id=model_id)
else:
result = model # type: ignore
context["ti"].xcom_push(key="training_id", value=training_id)
VertexAITrainingLink.persist(context=context, training_id=training_id)
return result
| CreateAutoMLImageTrainingJobOperator |
python | astropy__astropy | astropy/cosmology/_src/flrw/base.py | {
"start": 2705,
"end": 3997
} | class ____(NamedTuple):
"""A container for neutrino information.
This is Private API - internal to FLRW cosmologies.
"""
n_nu: int
"""Number of neutrino species (floor of Neff)."""
neff_per_nu: float | None
"""Number of effective neutrino species per neutrino.
We are going to share Neff between the neutrinos equally. In detail this is not
correct, but it is a standard assumption because properly calculating it is a)
complicated b) depends on the details of the massive neutrinos (e.g., their weak
interactions, which could be unusual if one is considering sterile neutrinos).
"""
has_massive_nu: bool
"""Boolean of which neutrinos are massive."""
n_massive_nu: int
"""Number of massive neutrinos."""
n_massless_nu: int
"""Number of massless neutrinos."""
nu_y: NDArray[np.floating] | None
"""The ratio m_nu / (kB T_nu) for each massive neutrino."""
nu_y_list: list[float] | None
"""The ratio m_nu / (kB T_nu) for each massive neutrino as a list."""
##############################################################################
ParameterOde0 = Parameter(
doc="Omega dark energy; dark energy density/critical density at z=0.",
fvalidate="float",
)
@dataclass_decorator
| NeutrinoInfo |
python | kamyu104__LeetCode-Solutions | Python/water-and-jug-problem.py | {
"start": 57,
"end": 522
} | class ____(object):
def canMeasureWater(self, x, y, z):
"""
:type x: int
:type y: int
:type z: int
:rtype: bool
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
# The problem is to solve:
# - check z <= x + y
# - check if there is any (a, b) integers s.t. ax + by = z
return z == 0 or ((z <= x + y) and (z % gcd(x, y) == 0))
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 40971,
"end": 42161
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
idempotency_token: Optional[str] = Field(
None,
description=(
"An optional token to guarantee the idempotency of job run requests. If a"
" run with the provided token already exists, the request does not create a"
" new run but returns the ID of the existing run instead. If a run with the"
" provided token is deleted, an error is returned.\n\nIf you specify the"
" idempotency token, upon failure you can retry until the request succeeds."
" Databricks guarantees that exactly one run is launched with that"
" idempotency token.\n\nThis token must have at most 64 characters.\n\nFor"
" more information, see [How to ensure idempotency for"
" jobs](https://kb.databricks.com/jobs/jobs-idempotency.html)."
),
examples=["8f018174-4792-40d5-bcbc-3e6a527352c8"],
)
job_id: Optional[int] = Field(
None, description="The ID of the job to be executed", examples=[11223344]
)
| RunNowInput |
python | PrefectHQ__prefect | src/prefect/events/schemas/automations.py | {
"start": 10985,
"end": 12235
} | class ____(CompositeTrigger):
"""A composite trigger that requires some number of triggers to have
fired within the given time period"""
type: Literal["compound"] = "compound"
require: Union[int, Literal["any", "all"]]
@model_validator(mode="after")
def validate_require(self) -> Self:
if isinstance(self.require, int):
if self.require < 1:
raise ValueError("require must be at least 1")
if self.require > len(self.triggers):
raise ValueError(
"require must be less than or equal to the number of triggers"
)
return self
def describe_for_cli(self, indent: int = 0) -> str:
"""Return a human-readable description of this trigger for the CLI"""
return textwrap.indent(
"\n".join(
[
f"{str(self.require).capitalize()} of:",
"\n".join(
[
trigger.describe_for_cli(indent=indent + 1)
for trigger in self.triggers
]
),
]
),
prefix=" " * indent,
)
| CompoundTrigger |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes9.py | {
"start": 535,
"end": 566
} | class ____(D0, D1, C):
pass
| D |
python | doocs__leetcode | solution/0100-0199/0146.LRU Cache/Solution.py | {
"start": 162,
"end": 1584
} | class ____:
def __init__(self, capacity: int):
self.size = 0
self.capacity = capacity
self.cache = {}
self.head = Node()
self.tail = Node()
self.head.next = self.tail
self.tail.prev = self.head
def get(self, key: int) -> int:
if key not in self.cache:
return -1
node = self.cache[key]
self.remove_node(node)
self.add_to_head(node)
return node.val
def put(self, key: int, value: int) -> None:
if key in self.cache:
node = self.cache[key]
self.remove_node(node)
node.val = value
self.add_to_head(node)
else:
node = Node(key, value)
self.cache[key] = node
self.add_to_head(node)
self.size += 1
if self.size > self.capacity:
node = self.tail.prev
self.cache.pop(node.key)
self.remove_node(node)
self.size -= 1
def remove_node(self, node):
node.prev.next = node.next
node.next.prev = node.prev
def add_to_head(self, node):
node.next = self.head.next
node.prev = self.head
self.head.next = node
node.next.prev = node
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| LRUCache |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 18678,
"end": 21111
} | class ____(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than
torchvision.models.resnet[18,34,50,101] produce nans.
"""
def __init__(self, n):
super().__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it user-friendly
weight = self.weight.reshape(1, -1, 1, 1)
bias = self.bias.reshape(1, -1, 1, 1)
running_var = self.running_var.reshape(1, -1, 1, 1)
running_mean = self.running_mean.reshape(1, -1, 1, 1)
epsilon = 1e-5
scale = weight * (running_var + epsilon).rsqrt()
bias = bias - running_mean * scale
return x * scale + bias
# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->GroundingDino
def replace_batch_norm(model):
r"""
Recursively replace all `torch.nn.BatchNorm2d` with `GroundingDinoFrozenBatchNorm2d`.
Args:
model (torch.nn.Module):
input model
"""
for name, module in model.named_children():
if isinstance(module, nn.BatchNorm2d):
new_module = GroundingDinoFrozenBatchNorm2d(module.num_features)
if module.weight.device != torch.device("meta"):
new_module.weight.copy_(module.weight)
new_module.bias.copy_(module.bias)
new_module.running_mean.copy_(module.running_mean)
new_module.running_var.copy_(module.running_var)
model._modules[name] = new_module
if len(list(module.children())) > 0:
replace_batch_norm(module)
| GroundingDinoFrozenBatchNorm2d |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/initsubclass1.py | {
"start": 1097,
"end": 1143
} | class ____:
__init_subclass__ = func2
| ClassG |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 109849,
"end": 121691
} | class ____(test.TestCase):
dtypes_ = [dtypes.float16, dtypes.float32]
def testBasic(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
optimizer = gradient_descent.GradientDescentOptimizer(3.0)
sgd_op = optimizer.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)
)
self.assertEqual(0, len(optimizer.variables()))
def testBasicResourceVariable(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1])
)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)
)
def testBasicCallableParams(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
lr = lambda: 3.0
sgd_op = gradient_descent.GradientDescentOptimizer(lr).apply_gradients(
zip([grads0, grads1], [var0, var1])
)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)
)
def testMinimizeResourceVariable(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(var0, x) + var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
resources.initialize_resources([var0, var1]).run()
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0)
)
self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1))
def testMinimizeSparseResourceVariable(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
pred += var1
loss = pred * pred
sgd_op = gradient_descent.GradientDescentOptimizer(1.0).minimize(loss)
# TODO(apassos) calling initialize_resources on all resources here
# doesn't work because the sessions and graph are reused across unit
# tests and this would mean trying to reinitialize variables. Figure out
# a long-term solution for this.
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
np_grad = 2 * np_pred
self.assertAllCloseAccordingToType(
[[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], self.evaluate(var0)
)
self.assertAllCloseAccordingToType([3.0 - np_grad], self.evaluate(var1))
def testTensorLearningRate(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
lrate = constant_op.constant(3.0)
sgd_op = gradient_descent.GradientDescentOptimizer(
lrate
).apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)
)
def testGradWrtRef(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
opt = gradient_descent.GradientDescentOptimizer(3.0)
values = [1.0, 3.0]
vars_ = [variables.Variable([v], dtype=dtype) for v in values]
grads_and_vars = opt.compute_gradients(vars_[0] + vars_[1], vars_)
self.evaluate(variables.global_variables_initializer())
for grad, _ in grads_and_vars:
self.assertAllCloseAccordingToType([1.0], self.evaluate(grad))
def testWithGlobalStep(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
global_step = variables.Variable(0, trainable=False)
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step
)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([3.0, 4.0], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params and global_step
self.assertAllCloseAccordingToType(
[1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
[3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], self.evaluate(var1)
)
self.assertAllCloseAccordingToType(1, self.evaluate(global_step))
def testSparseBasic(self):
for dtype in self.dtypes_:
# train.GradientDescentOptimizer is V1 only API.
with ops.Graph().as_default(), self.cached_session():
var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
var1 = variables.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = indexed_slices.IndexedSlices(
constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
constant_op.constant([0]),
constant_op.constant([2, 1]),
)
grads1 = indexed_slices.IndexedSlices(
constant_op.constant([0.01], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]),
)
sgd_op = gradient_descent.GradientDescentOptimizer(3.0).apply_gradients(
zip([grads0, grads1], [var0, var1])
)
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0], [2.0]], self.evaluate(var0))
self.assertAllCloseAccordingToType([[3.0], [4.0]], self.evaluate(var1))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType(
[[1.0 - 3.0 * 0.1], [2.0]], self.evaluate(var0)
)
self.assertAllCloseAccordingToType(
[[3.0], [4.0 - 3.0 * 0.01]], self.evaluate(var1)
)
| GradientDescentOptimizerTest |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 36343,
"end": 41721
} | class ____(NonStrictDataModel):
"""
Sequential Iteration API configuration
:param order: Input frames order. Values: 'sequential', 'random' In Sequential
mode frames will be returned according to the order in which the frames were
added to the dataset.
:type order: str
:param jump: Jump entry
:type jump: Jump
:param min_sequence: Length (in ms) of video clips to return. This is used in
random order, and in sequential order only if jumping is provided and only for
video frames
:type min_sequence: int
:param infinite: Infinite iteration
:type infinite: bool
:param limit: Maximum frames per task. If not passed, frames will end when no
more matching frames are found, unless infinite is True.
:type limit: int
:param random_seed: Random seed used during iteration
:type random_seed: int
"""
_schema = {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are found,"
" unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential order"
" only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In Sequential mode"
" frames will be returned according to the order in which the frames were added to the dataset."
),
"type": ["string", "null"],
},
"random_seed": {
"description": "Random seed used during iteration",
"type": "integer",
},
},
"required": ["random_seed"],
"type": "object",
}
def __init__(
self,
random_seed,
order=None,
jump=None,
min_sequence=None,
infinite=None,
limit=None,
**kwargs
):
super(Iteration, self).__init__(**kwargs)
self.order = order
self.jump = jump
self.min_sequence = min_sequence
self.infinite = infinite
self.limit = limit
self.random_seed = random_seed
@schema_property("order")
def order(self):
return self._property_order
@order.setter
def order(self, value):
if value is None:
self._property_order = None
return
self.assert_isinstance(value, "order", six.string_types)
self._property_order = value
@schema_property("jump")
def jump(self):
return self._property_jump
@jump.setter
def jump(self, value):
if value is None:
self._property_jump = None
return
if isinstance(value, dict):
value = Jump.from_dict(value)
else:
self.assert_isinstance(value, "jump", Jump)
self._property_jump = value
@schema_property("min_sequence")
def min_sequence(self):
return self._property_min_sequence
@min_sequence.setter
def min_sequence(self, value):
if value is None:
self._property_min_sequence = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "min_sequence", six.integer_types)
self._property_min_sequence = value
@schema_property("infinite")
def infinite(self):
return self._property_infinite
@infinite.setter
def infinite(self, value):
if value is None:
self._property_infinite = None
return
self.assert_isinstance(value, "infinite", (bool,))
self._property_infinite = value
@schema_property("limit")
def limit(self):
return self._property_limit
@limit.setter
def limit(self, value):
if value is None:
self._property_limit = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "limit", six.integer_types)
self._property_limit = value
@schema_property("random_seed")
def random_seed(self):
return self._property_random_seed
@random_seed.setter
def random_seed(self, value):
if value is None:
self._property_random_seed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "random_seed", six.integer_types)
self._property_random_seed = value
| Iteration |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 995076,
"end": 995784
} | class ____(sgqlc.types.Type):
"""A suggestion to review a pull request based on a user's commit
history and review comments.
"""
__schema__ = github_schema
__field_names__ = ("is_author", "is_commenter", "reviewer")
is_author = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isAuthor")
"""Is this suggestion based on past commits?"""
is_commenter = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isCommenter")
"""Is this suggestion based on past review comments?"""
reviewer = sgqlc.types.Field(sgqlc.types.non_null("User"), graphql_name="reviewer")
"""Identifies the user suggested to review the pull request."""
| SuggestedReviewer |
python | numba__numba | numba/core/postproc.py | {
"start": 1627,
"end": 9318
} | class ____(object):
"""
A post-processor for Numba IR.
"""
def __init__(self, func_ir):
self.func_ir = func_ir
def run(self, emit_dels: bool = False, extend_lifetimes: bool = False):
"""
Run the following passes over Numba IR:
- canonicalize the CFG
- emit explicit `del` instructions for variables
- compute lifetime of variables
- compute generator info (if function is a generator function)
"""
self.func_ir.blocks = transforms.canonicalize_cfg(self.func_ir.blocks)
vlt = VariableLifetime(self.func_ir.blocks)
self.func_ir.variable_lifetime = vlt
bev = analysis.compute_live_variables(vlt.cfg, self.func_ir.blocks,
vlt.usedefs.defmap,
vlt.deadmaps.combined)
for offset, ir_block in self.func_ir.blocks.items():
self.func_ir.block_entry_vars[ir_block] = bev[offset]
if self.func_ir.is_generator:
self.func_ir.generator_info = GeneratorInfo()
self._compute_generator_info()
else:
self.func_ir.generator_info = None
# Emit del nodes, do this last as the generator info parsing generates
# and then strips dels as part of its analysis.
if emit_dels:
self._insert_var_dels(extend_lifetimes=extend_lifetimes)
def _populate_generator_info(self):
"""
Fill `index` for the Yield instruction and create YieldPoints.
"""
dct = self.func_ir.generator_info.yield_points
assert not dct, 'rerunning _populate_generator_info'
for block in self.func_ir.blocks.values():
for inst in block.body:
if isinstance(inst, ir.Assign):
yieldinst = inst.value
if isinstance(yieldinst, ir.Yield):
index = len(dct) + 1
yieldinst.index = index
yp = YieldPoint(block, yieldinst)
dct[yieldinst.index] = yp
def _compute_generator_info(self):
"""
Compute the generator's state variables as the union of live variables
at all yield points.
"""
# generate del info, it's used in analysis here, strip it out at the end
self._insert_var_dels()
self._populate_generator_info()
gi = self.func_ir.generator_info
for yp in gi.get_yield_points():
live_vars = set(self.func_ir.get_block_entry_vars(yp.block))
weak_live_vars = set()
stmts = iter(yp.block.body)
for stmt in stmts:
if isinstance(stmt, ir.Assign):
if stmt.value is yp.inst:
break
live_vars.add(stmt.target.name)
elif isinstance(stmt, ir.Del):
live_vars.remove(stmt.value)
else:
assert 0, "couldn't find yield point"
# Try to optimize out any live vars that are deleted immediately
# after the yield point.
for stmt in stmts:
if isinstance(stmt, ir.Del):
name = stmt.value
if name in live_vars:
live_vars.remove(name)
weak_live_vars.add(name)
else:
break
yp.live_vars = live_vars
yp.weak_live_vars = weak_live_vars
st = set()
for yp in gi.get_yield_points():
st |= yp.live_vars
st |= yp.weak_live_vars
gi.state_vars = sorted(st)
self.remove_dels()
def _insert_var_dels(self, extend_lifetimes=False):
"""
Insert del statements for each variable.
Returns a 2-tuple of (variable definition map, variable deletion map)
which indicates variables defined and deleted in each block.
The algorithm avoids relying on explicit knowledge on loops and
distinguish between variables that are defined locally vs variables that
come from incoming blocks.
We start with simple usage (variable reference) and definition (variable
creation) maps on each block. Propagate the liveness info to predecessor
blocks until it stabilize, at which point we know which variables must
exist before entering each block. Then, we compute the end of variable
lives and insert del statements accordingly. Variables are deleted after
the last use. Variable referenced by terminators (e.g. conditional
branch and return) are deleted by the successors or the caller.
"""
vlt = self.func_ir.variable_lifetime
self._patch_var_dels(vlt.deadmaps.internal, vlt.deadmaps.escaping,
extend_lifetimes=extend_lifetimes)
def _patch_var_dels(self, internal_dead_map, escaping_dead_map,
extend_lifetimes=False):
"""
Insert delete in each block
"""
for offset, ir_block in self.func_ir.blocks.items():
# for each internal var, insert delete after the last use
internal_dead_set = internal_dead_map[offset].copy()
delete_pts = []
# for each statement in reverse order
for stmt in reversed(ir_block.body[:-1]):
# internal vars that are used here
live_set = set(v.name for v in stmt.list_vars())
dead_set = live_set & internal_dead_set
for T, def_func in ir_extension_insert_dels.items():
if isinstance(stmt, T):
done_dels = def_func(stmt, dead_set)
dead_set -= done_dels
internal_dead_set -= done_dels
# used here but not afterwards
delete_pts.append((stmt, dead_set))
internal_dead_set -= dead_set
# rewrite body and insert dels
body = []
lastloc = ir_block.loc
del_store = []
for stmt, delete_set in reversed(delete_pts):
# If using extended lifetimes then the Dels are all put at the
# block end just ahead of the terminator, so associate their
# location with the terminator.
if extend_lifetimes:
lastloc = ir_block.body[-1].loc
else:
lastloc = stmt.loc
# Ignore dels (assuming no user inserted deletes)
if not isinstance(stmt, ir.Del):
body.append(stmt)
# note: the reverse sort is not necessary for correctness
# it is just to minimize changes to test for now
for var_name in sorted(delete_set, reverse=True):
delnode = ir.Del(var_name, loc=lastloc)
if extend_lifetimes:
del_store.append(delnode)
else:
body.append(delnode)
if extend_lifetimes:
body.extend(del_store)
body.append(ir_block.body[-1]) # terminator
ir_block.body = body
# vars to delete at the start
escape_dead_set = escaping_dead_map[offset]
for var_name in sorted(escape_dead_set):
ir_block.prepend(ir.Del(var_name, loc=ir_block.body[0].loc))
def remove_dels(self):
"""
Strips the IR of Del nodes
"""
ir_utils.remove_dels(self.func_ir.blocks)
| PostProcessor |
python | sympy__sympy | sympy/stats/drv_types.py | {
"start": 16889,
"end": 18577
} | class ____(SingleDiscreteDistribution):
_argnames = ('rho',)
set = S.Naturals
@staticmethod
def check(rho):
_value_check(rho > 0, 'rho should be positive')
def pdf(self, k):
rho = self.rho
return rho * beta(k, rho + 1)
def _cdf(self, x):
return Piecewise((1 - floor(x) * beta(floor(x), self.rho + 1), x >= 1), (0, True))
def _characteristic_function(self, t):
rho = self.rho
return rho * hyper((1, 1), (rho + 2,), exp(I*t)) * exp(I*t) / (rho + 1)
def _moment_generating_function(self, t):
rho = self.rho
return rho * hyper((1, 1), (rho + 2,), exp(t)) * exp(t) / (rho + 1)
def YuleSimon(name, rho):
r"""
Create a discrete random variable with a Yule-Simon distribution.
Explanation
===========
The density of the Yule-Simon distribution is given by
.. math::
f(k) := \rho B(k, \rho + 1)
Parameters
==========
rho : A positive value
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import YuleSimon, density, E, variance
>>> from sympy import Symbol, simplify
>>> p = 5
>>> z = Symbol("z")
>>> X = YuleSimon("x", p)
>>> density(X)(z)
5*beta(z, 6)
>>> simplify(E(X))
5/4
>>> simplify(variance(X))
25/48
References
==========
.. [1] https://en.wikipedia.org/wiki/Yule%E2%80%93Simon_distribution
"""
return rv(name, YuleSimonDistribution, rho)
#-------------------------------------------------------------------------------
# Zeta distribution ------------------------------------------------------------
| YuleSimonDistribution |
python | jmcnamara__XlsxWriter | xlsxwriter/worksheet.py | {
"start": 8456,
"end": 288567
} | class ____(xmlwriter.XMLwriter):
"""
A class for writing the Excel XLSX Worksheet file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.name = None
self.index = None
self.str_table = None
self.palette = None
self.constant_memory = 0
self.tmpdir = None
self.is_chartsheet = False
self.ext_sheets = []
self.fileclosed = 0
self.excel_version = 2007
self.excel2003_style = False
self.xls_rowmax = 1048576
self.xls_colmax = 16384
self.xls_strmax = 32767
self.dim_rowmin = None
self.dim_rowmax = None
self.dim_colmin = None
self.dim_colmax = None
self.col_info: Dict[int, ColumnInfo] = {}
self.row_info: Dict[int, RowInfo] = {}
self.default_row_height: int = 20
self.default_col_width: int = 64
self.cell_padding: int = 5
self.original_row_height: int = 20
self.max_digit_width: int = 7
self.max_col_width: int = 1790
self.default_date_width = 68
self.default_row_zeroed = 0
self.selections = []
self.hidden = 0
self.active = 0
self.tab_color = 0
self.top_left_cell = ""
self.panes = []
self.active_pane = 3
self.selected = 0
self.page_setup_changed = False
self.paper_size = 0
self.orientation = 1
self.print_options_changed = False
self.hcenter = False
self.vcenter = False
self.print_gridlines = False
self.screen_gridlines = True
self.print_headers = False
self.row_col_headers = False
self.header_footer_changed = False
self.header = ""
self.footer = ""
self.header_footer_aligns = True
self.header_footer_scales = True
self.header_images = []
self.footer_images = []
self.header_images_list = []
self.margin_left = 0.7
self.margin_right = 0.7
self.margin_top = 0.75
self.margin_bottom = 0.75
self.margin_header = 0.3
self.margin_footer = 0.3
self.repeat_row_range = ""
self.repeat_col_range = ""
self.print_area_range = ""
self.page_order = 0
self.black_white = 0
self.draft_quality = 0
self.print_comments = 0
self.page_start = 0
self.fit_page = 0
self.fit_width = 0
self.fit_height = 0
self.hbreaks = []
self.vbreaks = []
self.protect_options = {}
self.protected_ranges = []
self.num_protected_ranges = 0
self.zoom = 100
self.zoom_scale_normal = True
self.zoom_to_fit = False
self.print_scale = 100
self.is_right_to_left = False
self.show_zeros = 1
self.leading_zeros = 0
self.outline_row_level = 0
self.outline_col_level = 0
self.outline_style = 0
self.outline_below = 1
self.outline_right = 1
self.outline_on = 1
self.outline_changed = False
self.names = {}
self.write_match = []
self.table = defaultdict(dict)
self.merge = []
self.merged_cells = {}
self.table_cells = {}
self.row_spans = {}
self.has_vml = False
self.has_header_vml = False
self.has_comments = False
self.comments = defaultdict(dict)
self.comments_list = []
self.comments_author = ""
self.comments_visible = False
self.vml_shape_id = 1024
self.buttons_list = []
self.vml_header_id = 0
self.autofilter_area = ""
self.autofilter_ref = None
self.filter_range = [0, 9]
self.filter_on = 0
self.filter_cols = {}
self.filter_type = {}
self.filter_cells = {}
self.row_sizes = {}
self.col_size_changed = False
self.row_size_changed = False
self.last_shape_id = 1
self.rel_count = 0
self.hlink_count = 0
self.hlink_refs = []
self.external_hyper_links = []
self.external_drawing_links = []
self.external_comment_links = []
self.external_vml_links = []
self.external_table_links = []
self.external_background_links = []
self.drawing_links = []
self.vml_drawing_links = []
self.charts = []
self.images = []
self.tables = []
self.sparklines = []
self.shapes = []
self.shape_hash = {}
self.drawing = 0
self.drawing_rels = {}
self.drawing_rels_id = 0
self.vml_drawing_rels = {}
self.vml_drawing_rels_id = 0
self.background_image = None
self.rstring = ""
self.previous_row = 0
self.validations = []
self.cond_formats = {}
self.data_bars_2010 = []
self.use_data_bars_2010 = False
self.dxf_priority = 1
self.page_view = 0
self.vba_codename = None
self.date_1904 = False
self.hyperlinks = defaultdict(dict)
self.strings_to_numbers = False
self.strings_to_urls = True
self.nan_inf_to_errors = False
self.strings_to_formulas = True
self.default_date_format = None
self.default_url_format = None
self.default_checkbox_format = None
self.workbook_add_format = None
self.remove_timezone = False
self.max_url_length = 2079
self.row_data_filename = None
self.row_data_fh = None
self.worksheet_meta = None
self.vml_data_id = None
self.vml_shape_id = None
self.row_data_filename = None
self.row_data_fh = None
self.row_data_fh_closed = False
self.vertical_dpi = 0
self.horizontal_dpi = 0
self.write_handlers = {}
self.ignored_errors = None
self.has_dynamic_arrays = False
self.use_future_functions = False
self.ignore_write_string = False
self.embedded_images = None
# Utility function for writing different types of strings.
def _write_token_as_string(self, token, row: int, col: int, *args):
# Map the data to the appropriate write_*() method.
if token == "":
return self._write_blank(row, col, *args)
if self.strings_to_formulas and token.startswith("="):
return self._write_formula(row, col, *args)
if token.startswith("{=") and token.endswith("}"):
return self._write_formula(row, col, *args)
# pylint: disable=too-many-boolean-expressions
if (
":" in token
and self.strings_to_urls
and (
re.match("(ftp|http)s?://", token)
or re.match("mailto:", token)
or re.match("(in|ex)ternal:", token)
or re.match("file://", token)
)
):
return self._write_url(row, col, *args)
if self.strings_to_numbers:
try:
f = float(token)
if self.nan_inf_to_errors or (not isnan(f) and not isinf(f)):
return self._write_number(row, col, f, *args[1:])
except ValueError:
# Not a number, write as a string.
pass
return self._write_string(row, col, *args)
# We have a plain string.
return self._write_string(row, col, *args)
@convert_cell_args
def write(self, row: int, col: int, *args) -> Union[Literal[0, -1], Any]:
"""
Write data to a worksheet cell by calling the appropriate write_*()
method based on the type of data being passed.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
*args: Args to pass to sub functions.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
other: Return value of called method.
"""
return self._write(row, col, *args)
# Undecorated version of write().
def _write(self, row: int, col: int, *args):
# pylint: disable=raise-missing-from
# Check the number of args passed.
if not args:
raise TypeError("write() takes at least 4 arguments (3 given)")
# The first arg should be the token for all write calls.
token = args[0]
# Avoid isinstance() for better performance.
token_type = token.__class__
# Check for any user defined type handlers with callback functions.
if token_type in self.write_handlers:
write_handler = self.write_handlers[token_type]
function_return = write_handler(self, row, col, *args)
# If the return value is None then the callback has returned
# control to this function and we should continue as
# normal. Otherwise we return the value to the caller and exit.
if function_return is None:
pass
else:
return function_return
# Write None as a blank cell.
if token is None:
return self._write_blank(row, col, *args)
# Check for standard Python types.
if token_type is bool:
return self._write_boolean(row, col, *args)
if token_type in (float, int, Decimal, Fraction):
return self._write_number(row, col, *args)
if token_type is str:
return self._write_token_as_string(token, row, col, *args)
if token_type in (
datetime.datetime,
datetime.date,
datetime.time,
datetime.timedelta,
):
return self._write_datetime(row, col, *args)
# Resort to isinstance() for subclassed primitives.
# Write number types.
if isinstance(token, (float, int, Decimal, Fraction)):
return self._write_number(row, col, *args)
# Write string types.
if isinstance(token, str):
return self._write_token_as_string(token, row, col, *args)
# Write boolean types.
if isinstance(token, bool):
return self._write_boolean(row, col, *args)
# Write datetime objects.
if _supported_datetime(token):
return self._write_datetime(row, col, *args)
# Write Url type.
if isinstance(token, Url):
return self._write_url(row, col, *args)
# We haven't matched a supported type. Try float.
try:
f = float(token)
return self._write_number(row, col, f, *args[1:])
except ValueError:
pass
except TypeError:
raise TypeError(f"Unsupported type {type(token)} in write()")
# Finally try string.
try:
str(token)
return self._write_string(row, col, *args)
except ValueError:
raise TypeError(f"Unsupported type {type(token)} in write()")
@convert_cell_args
def write_string(
self, row: int, col: int, string: str, cell_format: Optional[Format] = None
) -> Literal[0, -1, -2]:
"""
Write a string to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string: Cell data. Str.
format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
"""
return self._write_string(row, col, string, cell_format)
# Undecorated version of write_string().
def _write_string(
self, row: int, col: int, string: str, cell_format: Optional[Format] = None
) -> Literal[0, -1, -2]:
str_error = 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Check that the string is < 32767 chars.
if len(string) > self.xls_strmax:
string = string[: self.xls_strmax]
str_error = -2
# Write a shared string or an in-line string in constant_memory mode.
if not self.constant_memory:
string_index = self.str_table._get_shared_string_index(string)
else:
string_index = string
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = CellStringTuple(string_index, cell_format)
return str_error
@convert_cell_args
def write_number(
self,
row: int,
col: int,
number: Union[int, float, Fraction],
cell_format: Optional[Format] = None,
) -> Literal[0, -1]:
"""
Write a number to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
number: Cell data. Int or float.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_number(row, col, number, cell_format)
# Undecorated version of write_number().
def _write_number(
self,
row: int,
col: int,
number: Union[int, float, Fraction],
cell_format: Optional[Format] = None,
) -> Literal[0, -1]:
if isnan(number) or isinf(number):
if self.nan_inf_to_errors:
if isnan(number):
return self._write_formula(row, col, "#NUM!", cell_format, "#NUM!")
if number == math.inf:
return self._write_formula(row, col, "1/0", cell_format, "#DIV/0!")
if number == -math.inf:
return self._write_formula(row, col, "-1/0", cell_format, "#DIV/0!")
else:
raise TypeError(
"NAN/INF not supported in write_number() "
"without 'nan_inf_to_errors' Workbook() option"
)
if number.__class__ is Fraction:
number = float(number)
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = CellNumberTuple(number, cell_format)
return 0
@convert_cell_args
def write_blank(
self, row: int, col: int, blank: Any, cell_format: Optional[Format] = None
):
"""
Write a blank cell with formatting to a worksheet cell. The blank
token is ignored and the format only is written to the cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
blank: Any value. It is ignored.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_blank(row, col, blank, cell_format)
# Undecorated version of write_blank().
def _write_blank(
self, row: int, col: int, _, cell_format: Optional[Format] = None
) -> Literal[0, -1]:
# Don't write a blank cell unless it has a format.
if cell_format is None:
return 0
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = CellBlankTuple(cell_format)
return 0
@convert_cell_args
def write_formula(
self,
row: int,
col: int,
formula: str,
cell_format: Optional[Format] = None,
value=0,
) -> Literal[0, -1, -2]:
"""
Write a formula to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: Formula can't be None or empty.
"""
# Check that row and col are valid and store max and min values.
return self._write_formula(row, col, formula, cell_format, value)
# Undecorated version of write_formula().
def _write_formula(
self,
row: int,
col: int,
formula: str,
cell_format: Optional[Format] = None,
value=0,
) -> Literal[0, -1, -2]:
if self._check_dimensions(row, col):
return -1
if formula is None or formula == "":
warn("Formula can't be None or empty")
return -1
# Check for dynamic array functions.
if re_dynamic_function.search(formula):
return self.write_dynamic_array_formula(
row, col, row, col, formula, cell_format, value
)
# Hand off array formulas.
if formula.startswith("{") and formula.endswith("}"):
return self._write_array_formula(
row, col, row, col, formula, cell_format, value
)
# Modify the formula string, as needed.
formula = self._prepare_formula(formula)
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Store the cell data in the worksheet data table.
self.table[row][col] = CellFormulaTuple(formula, cell_format, value)
return 0
@convert_range_args
def write_array_formula(
self,
first_row: int,
first_col: int,
last_row: int,
last_col: int,
formula: str,
cell_format: Optional[Format] = None,
value=0,
) -> Literal[0, -1]:
"""
Write a formula to a worksheet cell/range.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check for dynamic array functions.
if re_dynamic_function.search(formula):
return self.write_dynamic_array_formula(
first_row, first_col, last_row, last_col, formula, cell_format, value
)
return self._write_array_formula(
first_row,
first_col,
last_row,
last_col,
formula,
cell_format,
value,
"static",
)
@convert_range_args
def write_dynamic_array_formula(
self,
first_row: int,
first_col: int,
last_row: int,
last_col: int,
formula: str,
cell_format: Optional[Format] = None,
value=0,
) -> Literal[0, -1]:
"""
Write a dynamic array formula to a worksheet cell/range.
Args:
first_row: The first row of the cell range. (zero indexed).
first_col: The first column of the cell range.
last_row: The last row of the cell range. (zero indexed).
last_col: The last column of the cell range.
formula: Cell formula.
cell_format: An optional cell Format object.
value: An optional value for the formula. Default is 0.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
error = self._write_array_formula(
first_row,
first_col,
last_row,
last_col,
formula,
cell_format,
value,
"dynamic",
)
if error == 0:
self.has_dynamic_arrays = True
return error
# Utility method to strip equal sign and array braces from a formula and
# also expand out future and dynamic array formulas.
def _prepare_formula(self, formula, expand_future_functions=False):
# Remove array formula braces and the leading =.
if formula.startswith("{"):
formula = formula[1:]
if formula.startswith("="):
formula = formula[1:]
if formula.endswith("}"):
formula = formula[:-1]
# Check if formula is already expanded by the user.
if "_xlfn." in formula:
return formula
# Expand dynamic formulas.
formula = re.sub(r"\bANCHORARRAY\(", "_xlfn.ANCHORARRAY(", formula)
formula = re.sub(r"\bBYCOL\(", "_xlfn.BYCOL(", formula)
formula = re.sub(r"\bBYROW\(", "_xlfn.BYROW(", formula)
formula = re.sub(r"\bCHOOSECOLS\(", "_xlfn.CHOOSECOLS(", formula)
formula = re.sub(r"\bCHOOSEROWS\(", "_xlfn.CHOOSEROWS(", formula)
formula = re.sub(r"\bDROP\(", "_xlfn.DROP(", formula)
formula = re.sub(r"\bEXPAND\(", "_xlfn.EXPAND(", formula)
formula = re.sub(r"\bFILTER\(", "_xlfn._xlws.FILTER(", formula)
formula = re.sub(r"\bHSTACK\(", "_xlfn.HSTACK(", formula)
formula = re.sub(r"\bLAMBDA\(", "_xlfn.LAMBDA(", formula)
formula = re.sub(r"\bMAKEARRAY\(", "_xlfn.MAKEARRAY(", formula)
formula = re.sub(r"\bMAP\(", "_xlfn.MAP(", formula)
formula = re.sub(r"\bRANDARRAY\(", "_xlfn.RANDARRAY(", formula)
formula = re.sub(r"\bREDUCE\(", "_xlfn.REDUCE(", formula)
formula = re.sub(r"\bSCAN\(", "_xlfn.SCAN(", formula)
formula = re.sub(r"\SINGLE\(", "_xlfn.SINGLE(", formula)
formula = re.sub(r"\bSEQUENCE\(", "_xlfn.SEQUENCE(", formula)
formula = re.sub(r"\bSORT\(", "_xlfn._xlws.SORT(", formula)
formula = re.sub(r"\bSORTBY\(", "_xlfn.SORTBY(", formula)
formula = re.sub(r"\bSWITCH\(", "_xlfn.SWITCH(", formula)
formula = re.sub(r"\bTAKE\(", "_xlfn.TAKE(", formula)
formula = re.sub(r"\bTEXTSPLIT\(", "_xlfn.TEXTSPLIT(", formula)
formula = re.sub(r"\bTOCOL\(", "_xlfn.TOCOL(", formula)
formula = re.sub(r"\bTOROW\(", "_xlfn.TOROW(", formula)
formula = re.sub(r"\bUNIQUE\(", "_xlfn.UNIQUE(", formula)
formula = re.sub(r"\bVSTACK\(", "_xlfn.VSTACK(", formula)
formula = re.sub(r"\bWRAPCOLS\(", "_xlfn.WRAPCOLS(", formula)
formula = re.sub(r"\bWRAPROWS\(", "_xlfn.WRAPROWS(", formula)
formula = re.sub(r"\bXLOOKUP\(", "_xlfn.XLOOKUP(", formula)
if not self.use_future_functions and not expand_future_functions:
return formula
formula = re.sub(r"\bACOTH\(", "_xlfn.ACOTH(", formula)
formula = re.sub(r"\bACOT\(", "_xlfn.ACOT(", formula)
formula = re.sub(r"\bAGGREGATE\(", "_xlfn.AGGREGATE(", formula)
formula = re.sub(r"\bARABIC\(", "_xlfn.ARABIC(", formula)
formula = re.sub(r"\bARRAYTOTEXT\(", "_xlfn.ARRAYTOTEXT(", formula)
formula = re.sub(r"\bBASE\(", "_xlfn.BASE(", formula)
formula = re.sub(r"\bBETA.DIST\(", "_xlfn.BETA.DIST(", formula)
formula = re.sub(r"\bBETA.INV\(", "_xlfn.BETA.INV(", formula)
formula = re.sub(r"\bBINOM.DIST.RANGE\(", "_xlfn.BINOM.DIST.RANGE(", formula)
formula = re.sub(r"\bBINOM.DIST\(", "_xlfn.BINOM.DIST(", formula)
formula = re.sub(r"\bBINOM.INV\(", "_xlfn.BINOM.INV(", formula)
formula = re.sub(r"\bBITAND\(", "_xlfn.BITAND(", formula)
formula = re.sub(r"\bBITLSHIFT\(", "_xlfn.BITLSHIFT(", formula)
formula = re.sub(r"\bBITOR\(", "_xlfn.BITOR(", formula)
formula = re.sub(r"\bBITRSHIFT\(", "_xlfn.BITRSHIFT(", formula)
formula = re.sub(r"\bBITXOR\(", "_xlfn.BITXOR(", formula)
formula = re.sub(r"\bCEILING.MATH\(", "_xlfn.CEILING.MATH(", formula)
formula = re.sub(r"\bCEILING.PRECISE\(", "_xlfn.CEILING.PRECISE(", formula)
formula = re.sub(r"\bCHISQ.DIST.RT\(", "_xlfn.CHISQ.DIST.RT(", formula)
formula = re.sub(r"\bCHISQ.DIST\(", "_xlfn.CHISQ.DIST(", formula)
formula = re.sub(r"\bCHISQ.INV.RT\(", "_xlfn.CHISQ.INV.RT(", formula)
formula = re.sub(r"\bCHISQ.INV\(", "_xlfn.CHISQ.INV(", formula)
formula = re.sub(r"\bCHISQ.TEST\(", "_xlfn.CHISQ.TEST(", formula)
formula = re.sub(r"\bCOMBINA\(", "_xlfn.COMBINA(", formula)
formula = re.sub(r"\bCONCAT\(", "_xlfn.CONCAT(", formula)
formula = re.sub(r"\bCONFIDENCE.NORM\(", "_xlfn.CONFIDENCE.NORM(", formula)
formula = re.sub(r"\bCONFIDENCE.T\(", "_xlfn.CONFIDENCE.T(", formula)
formula = re.sub(r"\bCOTH\(", "_xlfn.COTH(", formula)
formula = re.sub(r"\bCOT\(", "_xlfn.COT(", formula)
formula = re.sub(r"\bCOVARIANCE.P\(", "_xlfn.COVARIANCE.P(", formula)
formula = re.sub(r"\bCOVARIANCE.S\(", "_xlfn.COVARIANCE.S(", formula)
formula = re.sub(r"\bCSCH\(", "_xlfn.CSCH(", formula)
formula = re.sub(r"\bCSC\(", "_xlfn.CSC(", formula)
formula = re.sub(r"\bDAYS\(", "_xlfn.DAYS(", formula)
formula = re.sub(r"\bDECIMAL\(", "_xlfn.DECIMAL(", formula)
formula = re.sub(r"\bERF.PRECISE\(", "_xlfn.ERF.PRECISE(", formula)
formula = re.sub(r"\bERFC.PRECISE\(", "_xlfn.ERFC.PRECISE(", formula)
formula = re.sub(r"\bEXPON.DIST\(", "_xlfn.EXPON.DIST(", formula)
formula = re.sub(r"\bF.DIST.RT\(", "_xlfn.F.DIST.RT(", formula)
formula = re.sub(r"\bF.DIST\(", "_xlfn.F.DIST(", formula)
formula = re.sub(r"\bF.INV.RT\(", "_xlfn.F.INV.RT(", formula)
formula = re.sub(r"\bF.INV\(", "_xlfn.F.INV(", formula)
formula = re.sub(r"\bF.TEST\(", "_xlfn.F.TEST(", formula)
formula = re.sub(r"\bFILTERXML\(", "_xlfn.FILTERXML(", formula)
formula = re.sub(r"\bFLOOR.MATH\(", "_xlfn.FLOOR.MATH(", formula)
formula = re.sub(r"\bFLOOR.PRECISE\(", "_xlfn.FLOOR.PRECISE(", formula)
formula = re.sub(
r"\bFORECAST.ETS.CONFINT\(", "_xlfn.FORECAST.ETS.CONFINT(", formula
)
formula = re.sub(
r"\bFORECAST.ETS.SEASONALITY\(", "_xlfn.FORECAST.ETS.SEASONALITY(", formula
)
formula = re.sub(r"\bFORECAST.ETS.STAT\(", "_xlfn.FORECAST.ETS.STAT(", formula)
formula = re.sub(r"\bFORECAST.ETS\(", "_xlfn.FORECAST.ETS(", formula)
formula = re.sub(r"\bFORECAST.LINEAR\(", "_xlfn.FORECAST.LINEAR(", formula)
formula = re.sub(r"\bFORMULATEXT\(", "_xlfn.FORMULATEXT(", formula)
formula = re.sub(r"\bGAMMA.DIST\(", "_xlfn.GAMMA.DIST(", formula)
formula = re.sub(r"\bGAMMA.INV\(", "_xlfn.GAMMA.INV(", formula)
formula = re.sub(r"\bGAMMALN.PRECISE\(", "_xlfn.GAMMALN.PRECISE(", formula)
formula = re.sub(r"\bGAMMA\(", "_xlfn.GAMMA(", formula)
formula = re.sub(r"\bGAUSS\(", "_xlfn.GAUSS(", formula)
formula = re.sub(r"\bHYPGEOM.DIST\(", "_xlfn.HYPGEOM.DIST(", formula)
formula = re.sub(r"\bIFNA\(", "_xlfn.IFNA(", formula)
formula = re.sub(r"\bIFS\(", "_xlfn.IFS(", formula)
formula = re.sub(r"\bIMAGE\(", "_xlfn.IMAGE(", formula)
formula = re.sub(r"\bIMCOSH\(", "_xlfn.IMCOSH(", formula)
formula = re.sub(r"\bIMCOT\(", "_xlfn.IMCOT(", formula)
formula = re.sub(r"\bIMCSCH\(", "_xlfn.IMCSCH(", formula)
formula = re.sub(r"\bIMCSC\(", "_xlfn.IMCSC(", formula)
formula = re.sub(r"\bIMSECH\(", "_xlfn.IMSECH(", formula)
formula = re.sub(r"\bIMSEC\(", "_xlfn.IMSEC(", formula)
formula = re.sub(r"\bIMSINH\(", "_xlfn.IMSINH(", formula)
formula = re.sub(r"\bIMTAN\(", "_xlfn.IMTAN(", formula)
formula = re.sub(r"\bISFORMULA\(", "_xlfn.ISFORMULA(", formula)
formula = re.sub(r"\bISOMITTED\(", "_xlfn.ISOMITTED(", formula)
formula = re.sub(r"\bISOWEEKNUM\(", "_xlfn.ISOWEEKNUM(", formula)
formula = re.sub(r"\bLET\(", "_xlfn.LET(", formula)
formula = re.sub(r"\bLOGNORM.DIST\(", "_xlfn.LOGNORM.DIST(", formula)
formula = re.sub(r"\bLOGNORM.INV\(", "_xlfn.LOGNORM.INV(", formula)
formula = re.sub(r"\bMAXIFS\(", "_xlfn.MAXIFS(", formula)
formula = re.sub(r"\bMINIFS\(", "_xlfn.MINIFS(", formula)
formula = re.sub(r"\bMODE.MULT\(", "_xlfn.MODE.MULT(", formula)
formula = re.sub(r"\bMODE.SNGL\(", "_xlfn.MODE.SNGL(", formula)
formula = re.sub(r"\bMUNIT\(", "_xlfn.MUNIT(", formula)
formula = re.sub(r"\bNEGBINOM.DIST\(", "_xlfn.NEGBINOM.DIST(", formula)
formula = re.sub(r"\bNORM.DIST\(", "_xlfn.NORM.DIST(", formula)
formula = re.sub(r"\bNORM.INV\(", "_xlfn.NORM.INV(", formula)
formula = re.sub(r"\bNORM.S.DIST\(", "_xlfn.NORM.S.DIST(", formula)
formula = re.sub(r"\bNORM.S.INV\(", "_xlfn.NORM.S.INV(", formula)
formula = re.sub(r"\bNUMBERVALUE\(", "_xlfn.NUMBERVALUE(", formula)
formula = re.sub(r"\bPDURATION\(", "_xlfn.PDURATION(", formula)
formula = re.sub(r"\bPERCENTILE.EXC\(", "_xlfn.PERCENTILE.EXC(", formula)
formula = re.sub(r"\bPERCENTILE.INC\(", "_xlfn.PERCENTILE.INC(", formula)
formula = re.sub(r"\bPERCENTRANK.EXC\(", "_xlfn.PERCENTRANK.EXC(", formula)
formula = re.sub(r"\bPERCENTRANK.INC\(", "_xlfn.PERCENTRANK.INC(", formula)
formula = re.sub(r"\bPERMUTATIONA\(", "_xlfn.PERMUTATIONA(", formula)
formula = re.sub(r"\bPHI\(", "_xlfn.PHI(", formula)
formula = re.sub(r"\bPOISSON.DIST\(", "_xlfn.POISSON.DIST(", formula)
formula = re.sub(r"\bQUARTILE.EXC\(", "_xlfn.QUARTILE.EXC(", formula)
formula = re.sub(r"\bQUARTILE.INC\(", "_xlfn.QUARTILE.INC(", formula)
formula = re.sub(r"\bQUERYSTRING\(", "_xlfn.QUERYSTRING(", formula)
formula = re.sub(r"\bRANK.AVG\(", "_xlfn.RANK.AVG(", formula)
formula = re.sub(r"\bRANK.EQ\(", "_xlfn.RANK.EQ(", formula)
formula = re.sub(r"\bRRI\(", "_xlfn.RRI(", formula)
formula = re.sub(r"\bSECH\(", "_xlfn.SECH(", formula)
formula = re.sub(r"\bSEC\(", "_xlfn.SEC(", formula)
formula = re.sub(r"\bSHEETS\(", "_xlfn.SHEETS(", formula)
formula = re.sub(r"\bSHEET\(", "_xlfn.SHEET(", formula)
formula = re.sub(r"\bSKEW.P\(", "_xlfn.SKEW.P(", formula)
formula = re.sub(r"\bSTDEV.P\(", "_xlfn.STDEV.P(", formula)
formula = re.sub(r"\bSTDEV.S\(", "_xlfn.STDEV.S(", formula)
formula = re.sub(r"\bT.DIST.2T\(", "_xlfn.T.DIST.2T(", formula)
formula = re.sub(r"\bT.DIST.RT\(", "_xlfn.T.DIST.RT(", formula)
formula = re.sub(r"\bT.DIST\(", "_xlfn.T.DIST(", formula)
formula = re.sub(r"\bT.INV.2T\(", "_xlfn.T.INV.2T(", formula)
formula = re.sub(r"\bT.INV\(", "_xlfn.T.INV(", formula)
formula = re.sub(r"\bT.TEST\(", "_xlfn.T.TEST(", formula)
formula = re.sub(r"\bTEXTAFTER\(", "_xlfn.TEXTAFTER(", formula)
formula = re.sub(r"\bTEXTBEFORE\(", "_xlfn.TEXTBEFORE(", formula)
formula = re.sub(r"\bTEXTJOIN\(", "_xlfn.TEXTJOIN(", formula)
formula = re.sub(r"\bUNICHAR\(", "_xlfn.UNICHAR(", formula)
formula = re.sub(r"\bUNICODE\(", "_xlfn.UNICODE(", formula)
formula = re.sub(r"\bVALUETOTEXT\(", "_xlfn.VALUETOTEXT(", formula)
formula = re.sub(r"\bVAR.P\(", "_xlfn.VAR.P(", formula)
formula = re.sub(r"\bVAR.S\(", "_xlfn.VAR.S(", formula)
formula = re.sub(r"\bWEBSERVICE\(", "_xlfn.WEBSERVICE(", formula)
formula = re.sub(r"\bWEIBULL.DIST\(", "_xlfn.WEIBULL.DIST(", formula)
formula = re.sub(r"\bXMATCH\(", "_xlfn.XMATCH(", formula)
formula = re.sub(r"\bXOR\(", "_xlfn.XOR(", formula)
formula = re.sub(r"\bZ.TEST\(", "_xlfn.Z.TEST(", formula)
return formula
# Escape/expand table functions. This mainly involves converting Excel 2010
# "@" table ref to 2007 "[#This Row],". We parse the string to avoid
# replacements in string literals within the formula.
@staticmethod
def _prepare_table_formula(formula):
if "@" not in formula:
# No escaping required.
return formula
escaped_formula = []
in_string_literal = False
for char in formula:
# Match the start/end of string literals to avoid escaping
# references in strings.
if char == '"':
in_string_literal = not in_string_literal
# Copy the string literal.
if in_string_literal:
escaped_formula.append(char)
continue
# Replace table reference.
if char == "@":
escaped_formula.append("[#This Row],")
else:
escaped_formula.append(char)
return ("").join(escaped_formula)
# Undecorated version of write_array_formula() and
# write_dynamic_array_formula().
def _write_array_formula(
self,
first_row,
first_col,
last_row,
last_col,
formula,
cell_format=None,
value=0,
atype="static",
) -> Literal[0, -1]:
# Swap last row/col with first row/col as necessary.
if first_row > last_row:
first_row, last_row = last_row, first_row
if first_col > last_col:
first_col, last_col = last_col, first_col
# Check that row and col are valid and store max and min values.
if self._check_dimensions(first_row, first_col):
return -1
if self._check_dimensions(last_row, last_col):
return -1
# Define array range
if first_row == last_row and first_col == last_col:
cell_range = xl_rowcol_to_cell(first_row, first_col)
else:
cell_range = (
xl_rowcol_to_cell(first_row, first_col)
+ ":"
+ xl_rowcol_to_cell(last_row, last_col)
)
# Modify the formula string, as needed.
formula = self._prepare_formula(formula)
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and first_row > self.previous_row:
self._write_single_row(first_row)
# Store the cell data in the worksheet data table.
self.table[first_row][first_col] = CellArrayFormulaTuple(
formula, cell_format, value, cell_range, atype
)
# Pad out the rest of the area with formatted zeroes.
if not self.constant_memory:
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row != first_row or col != first_col:
self._write_number(row, col, 0, cell_format)
return 0
@convert_cell_args
def write_datetime(
self,
row: int,
col: int,
date: datetime.datetime,
cell_format: Optional[Format] = None,
) -> Literal[0, -1]:
"""
Write a date or time to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
date: Date and/or time as a datetime object.
cell_format: A cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_datetime(row, col, date, cell_format)
# Undecorated version of write_datetime().
def _write_datetime(self, row: int, col: int, date, cell_format=None) -> int:
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
# Convert datetime to an Excel date.
number = self._convert_date_time(date)
# Add the default date format.
if cell_format is None:
cell_format = self.default_date_format
# Store the cell data in the worksheet data table.
self.table[row][col] = CellDatetimeTuple(number, cell_format)
return 0
@convert_cell_args
def write_boolean(
self, row: int, col: int, boolean: bool, cell_format: Optional[Format] = None
):
"""
Write a boolean value to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
boolean: Cell data. bool type.
cell_format: An optional cell Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
return self._write_boolean(row, col, boolean, cell_format)
# Undecorated version of write_boolean().
def _write_boolean(self, row: int, col: int, boolean, cell_format=None) -> int:
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Write previous row if in in-line string constant_memory mode.
if self.constant_memory and row > self.previous_row:
self._write_single_row(row)
if boolean:
value = 1
else:
value = 0
# Store the cell data in the worksheet data table.
self.table[row][col] = CellBooleanTuple(value, cell_format)
return 0
# Write a hyperlink. This is comprised of two elements: the displayed
# string and the non-displayed link. The displayed string is the same as
# the link unless an alternative string is specified. The display string
# is written using the write_string() method. Therefore the max characters
# string limit applies.
#
# The hyperlink can be to a http, ftp, mail, internal sheet, or external
# directory urls.
@convert_cell_args
def write_url(
self,
row: int,
col: int,
url: str,
cell_format: Optional[Format] = None,
string: Optional[str] = None,
tip: Optional[str] = None,
):
"""
Write a hyperlink to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
url: Hyperlink url.
format: An optional cell Format object.
string: An optional display string for the hyperlink.
tip: An optional tooltip.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32767 characters.
-3: URL longer than Excel limit of 255 characters.
-4: Exceeds Excel limit of 65,530 urls per worksheet.
"""
return self._write_url(row, col, url, cell_format, string, tip)
    # Undecorated version of write_url().
    def _write_url(
        self, row: int, col: int, url, cell_format=None, string=None, tip=None
    ) -> int:
        """
        Write a hyperlink to a cell. See write_url() for the error codes.

        The url argument may be a plain string or an already constructed
        Url object; strings are converted to Url objects here.
        """
        # Check that row and col are valid and store max and min values
        if self._check_dimensions(row, col):
            return -1
        # If the URL is a string convert it to a Url object.
        if not isinstance(url, Url):
            # For backwards compatibility check if the string URL exceeds the
            # Excel character limit for URLs and ignore it with a warning.
            max_url = self.max_url_length
            # The link and the location/anchor (after "#") are limited
            # separately, so split them before measuring.
            if "#" in url:
                url_str, anchor_str = url.split("#", 1)
            else:
                url_str = url
                anchor_str = ""
            if len(url_str) > max_url or len(anchor_str) > max_url:
                warn(
                    f"Ignoring URL '{url}' with link or location/anchor > {max_url} "
                    f"characters since it exceeds Excel's limit for URLs."
                )
                return -3
            url = Url(url)
            # Explicit display string/tooltip arguments override the Url
            # object's internal values.
            if string is not None:
                url._text = string
            if tip is not None:
                url._tip = tip
        # Check the limit of URLs per worksheet.
        self.hlink_count += 1
        if self.hlink_count > 65530:
            warn(
                f"Ignoring URL '{url._original_url}' since it exceeds Excel's limit of "
                f"65,530 URLs per worksheet."
            )
            return -4
        # Add the default URL format.
        if cell_format is None:
            cell_format = self.default_url_format
        # ignore_write_string is set by callers (e.g. embed_image()) that
        # supply the cell content themselves.
        if not self.ignore_write_string:
            # Write previous row if in in-line string constant_memory mode.
            if self.constant_memory and row > self.previous_row:
                self._write_single_row(row)
            # Write the hyperlink string.
            self._write_string(row, col, url.text, cell_format)
        # Store the hyperlink data in a separate structure.
        self.hyperlinks[row][col] = url
        return 0
@convert_cell_args
def write_rich_string(
self, row: int, col: int, *args: Union[str, Format]
) -> Literal[0, -1, -2, -3, -4, -5]:
"""
Write a "rich" string with multiple formats to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
string_parts: String and format pairs.
cell_format: Optional Format object.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String truncated to 32k characters.
-3: 2 consecutive formats used.
-4: Empty string used.
-5: Insufficient parameters.
"""
return self._write_rich_string(row, col, *args)
    # Undecorated version of write_rich_string().
    def _write_rich_string(self, row: int, col: int, *args) -> int:
        """
        Write a rich (multi-format) string to a cell.

        The args are interleaved string fragments and Format objects,
        optionally ending with a Format for the cell itself. See
        write_rich_string() for the error return codes.
        """
        tokens = list(args)
        cell_format = None
        string_index = 0
        raw_string = ""
        # Check that row and col are valid and store max and min values
        if self._check_dimensions(row, col):
            return -1
        # If the last arg is a format we use it as the cell format.
        if isinstance(tokens[-1], Format):
            cell_format = tokens.pop()
        # Create a temp XMLWriter object and use it to write the rich string
        # XML to a string.
        fh = StringIO()
        self.rstring = XMLwriter()
        self.rstring._set_filehandle(fh)
        # Create a temp format with the default font for unformatted fragments.
        default = Format()
        # Convert list of format, string tokens to pairs of (format, string)
        # except for the first string fragment which doesn't require a default
        # formatting run. Use the default for strings without a leading format.
        fragments = []
        previous = "format"
        pos = 0
        # A valid rich string needs at least string, format, string.
        if len(tokens) <= 2:
            warn(
                "You must specify more than 2 format/fragments for rich "
                "strings. Ignoring input in write_rich_string()."
            )
            return -5
        for token in tokens:
            if not isinstance(token, Format):
                # Token is a string.
                if previous != "format":
                    # If previous token wasn't a format add one before string.
                    fragments.append(default)
                    fragments.append(token)
                else:
                    # If previous token was a format just add the string.
                    fragments.append(token)
                if token == "":
                    warn(
                        "Excel doesn't allow empty strings in rich strings. "
                        "Ignoring input in write_rich_string()."
                    )
                    return -4
                # Keep track of unformatted string.
                raw_string += token
                previous = "string"
            else:
                # Can't allow 2 formats in a row.
                if previous == "format" and pos > 0:
                    warn(
                        "Excel doesn't allow 2 consecutive formats in rich "
                        "strings. Ignoring input in write_rich_string()."
                    )
                    return -3
                # Token is a format object. Add it to the fragment list.
                fragments.append(token)
                previous = "format"
            pos += 1
        # If the first token is a string start the <r> element.
        if not isinstance(fragments[0], Format):
            self.rstring._xml_start_tag("r")
        # Write the XML elements for the $format $string fragments.
        for token in fragments:
            if isinstance(token, Format):
                # Write the font run.
                self.rstring._xml_start_tag("r")
                self._write_font(token)
            else:
                # Write the string fragment part, with whitespace handling.
                attributes = []
                if _preserve_whitespace(token):
                    attributes.append(("xml:space", "preserve"))
                self.rstring._xml_data_element("t", token, attributes)
                self.rstring._xml_end_tag("r")
        # Read the in-memory string.
        string = self.rstring.fh.getvalue()
        # Check that the string is < 32767 chars.
        if len(raw_string) > self.xls_strmax:
            warn(
                "String length must be less than or equal to Excel's limit "
                "of 32,767 characters in write_rich_string()."
            )
            return -2
        # Write a shared string or an in-line string in constant_memory mode.
        if not self.constant_memory:
            string_index = self.str_table._get_shared_string_index(string)
        else:
            string_index = string
        # Write previous row if in in-line string constant_memory mode.
        if self.constant_memory and row > self.previous_row:
            self._write_single_row(row)
        # Store the cell data in the worksheet data table.
        self.table[row][col] = CellRichStringTuple(
            string_index, cell_format, raw_string
        )
        return 0
def add_write_handler(self, user_type, user_function) -> None:
"""
Add a callback function to the write() method to handle user defined
types.
Args:
user_type: The user type() to match on.
user_function: The user defined function to write the type data.
Returns:
Nothing.
"""
self.write_handlers[user_type] = user_function
@convert_cell_args
def write_row(
self, row: int, col: int, data, cell_format: Optional[Format] = None
) -> Union[Literal[0], Any]:
"""
Write a row of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
"""
for token in data:
error = self._write(row, col, token, cell_format)
if error:
return error
col += 1
return 0
@convert_cell_args
def write_column(
self, row: int, col: int, data, cell_format: Optional[Format] = None
) -> Union[Literal[0], Any]:
"""
Write a column of data starting from (row, col).
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
data: A list of tokens to be written with write().
format: An optional cell Format object.
Returns:
0: Success.
other: Return value of write() method.
"""
for token in data:
error = self._write(row, col, token, cell_format)
if error:
return error
row += 1
return 0
@convert_cell_args
def insert_image(
self,
row: int,
col: int,
source: Union[str, BytesIO, Image],
options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1]:
"""
Insert an image with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
source: Filename, BytesIO, or Image object.
options: Position, scale, url and data stream of the image.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn(f"Cannot insert image at ({row}, {col}).")
return -1
# Convert the source to an Image object.
image = self._image_from_source(source, options)
image._row = row
image._col = col
image._set_user_options(options)
self.images.append(image)
return 0
@convert_cell_args
def embed_image(
self,
row: int,
col: int,
source: Union[str, BytesIO, Image],
options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1]:
"""
Embed an image in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
source: Filename, BytesIO, or Image object.
options: Url and data stream of the image.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col):
warn(f"Cannot embed image at ({row}, {col}).")
return -1
if options is None:
options = {}
# Convert the source to an Image object.
image = self._image_from_source(source, options)
image._set_user_options(options)
cell_format = options.get("cell_format", None)
if image.url:
if cell_format is None:
cell_format = self.default_url_format
self.ignore_write_string = True
self.write_url(row, col, image.url, cell_format)
self.ignore_write_string = False
image_index = self.embedded_images.get_image_index(image)
# Store the cell error and image index in the worksheet data table.
self.table[row][col] = CellErrorTuple("#VALUE!", cell_format, image_index)
return 0
@convert_cell_args
def insert_textbox(
self, row: int, col: int, text: str, options: Optional[Dict[str, Any]] = None
) -> Literal[0, -1]:
"""
Insert an textbox with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
text: The text for the textbox.
options: Textbox options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn(f"Cannot insert textbox at ({row}, {col}).")
return -1
if text is None:
text = ""
if options is None:
options = {}
x_offset = options.get("x_offset", 0)
y_offset = options.get("y_offset", 0)
x_scale = options.get("x_scale", 1)
y_scale = options.get("y_scale", 1)
anchor = options.get("object_position", 1)
description = options.get("description", None)
decorative = options.get("decorative", False)
self.shapes.append(
[
row,
col,
x_offset,
y_offset,
x_scale,
y_scale,
text,
anchor,
options,
description,
decorative,
]
)
return 0
@convert_cell_args
def insert_chart(
self, row: int, col: int, chart: Chart, options: Optional[Dict[str, Any]] = None
) -> Literal[0, -1, -2]:
"""
Insert an chart with its top-left corner in a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
chart: Chart object.
options: Position and scale of the chart.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
"""
# Check insert (row, col) without storing.
if self._check_dimensions(row, col, True, True):
warn(f"Cannot insert chart at ({row}, {col}).")
return -1
if options is None:
options = {}
# Ensure a chart isn't inserted more than once.
if chart.already_inserted or chart.combined and chart.combined.already_inserted:
warn("Chart cannot be inserted in a worksheet more than once.")
return -2
chart.already_inserted = True
if chart.combined:
chart.combined.already_inserted = True
x_offset = options.get("x_offset", 0)
y_offset = options.get("y_offset", 0)
x_scale = options.get("x_scale", 1)
y_scale = options.get("y_scale", 1)
anchor = options.get("object_position", 1)
description = options.get("description", None)
decorative = options.get("decorative", False)
# Allow Chart to override the scale and offset.
if chart.x_scale != 1:
x_scale = chart.x_scale
if chart.y_scale != 1:
y_scale = chart.y_scale
if chart.x_offset:
x_offset = chart.x_offset
if chart.y_offset:
y_offset = chart.y_offset
self.charts.append(
[
row,
col,
chart,
x_offset,
y_offset,
x_scale,
y_scale,
anchor,
description,
decorative,
]
)
return 0
@convert_cell_args
def write_comment(
self, row: int, col: int, comment: str, options: Optional[Dict[str, Any]] = None
) -> Literal[0, -1, -2]:
"""
Write a comment to a worksheet cell.
Args:
row: The cell row (zero indexed).
col: The cell column (zero indexed).
comment: Cell comment. Str.
options: Comment formatting options.
Returns:
0: Success.
-1: Row or column is out of worksheet bounds.
-2: String longer than 32k characters.
"""
# Check that row and col are valid and store max and min values
if self._check_dimensions(row, col):
return -1
# Check that the comment string is < 32767 chars.
if len(comment) > self.xls_strmax:
return -2
self.has_vml = True
self.has_comments = True
# Store the options of the cell comment, to process on file close.
comment = CommentType(row, col, comment, options)
self.comments[row][col] = comment
return 0
def show_comments(self) -> None:
"""
Make any comments in the worksheet visible.
Args:
None.
Returns:
Nothing.
"""
self.comments_visible = True
def set_background(
self, source: Union[str, BytesIO, Image], is_byte_stream: bool = False
) -> Literal[0]:
"""
Set a background image for a worksheet.
Args:
source: Filename, BytesIO, or Image object.
is_byte_stream: Deprecated. Use a BytesIO object instead.
Returns:
0: Success.
"""
# Convert the source to an Image object.
image = self._image_from_source(source)
self.background_image = image
if is_byte_stream:
warn(
"The `is_byte_stream` parameter in `set_background()` is deprecated. "
"This argument can be omitted if you are using a BytesIO object."
)
return 0
def set_comments_author(self, author) -> None:
"""
Set the default author of the cell comments.
Args:
author: Comment author name. String.
Returns:
Nothing.
"""
self.comments_author = author
def get_name(self):
"""
Retrieve the worksheet name.
Args:
None.
Returns:
Nothing.
"""
# There is no set_name() method. Name must be set in add_worksheet().
return self.name
def activate(self) -> None:
"""
Set this worksheet as the active worksheet, i.e. the worksheet that is
displayed when the workbook is opened. Also set it as selected.
Note: An active worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0
self.selected = 1
self.worksheet_meta.activesheet = self.index
def select(self) -> None:
"""
Set current worksheet as a selected worksheet, i.e. the worksheet
has its tab highlighted.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.selected = 1
self.hidden = 0
def hide(self) -> None:
"""
Hide the current worksheet.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 1
# A hidden worksheet shouldn't be active or selected.
self.selected = 0
def very_hidden(self) -> None:
"""
Hide the current worksheet. This can only be unhidden by VBA.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 2
# A hidden worksheet shouldn't be active or selected.
self.selected = 0
def set_first_sheet(self) -> None:
"""
Set current worksheet as the first visible sheet. This is necessary
when there are a large number of worksheets and the activated
worksheet is not visible on the screen.
Note: A selected worksheet cannot be hidden.
Args:
None.
Returns:
Nothing.
"""
self.hidden = 0 # Active worksheet can't be hidden.
self.worksheet_meta.firstsheet = self.index
@convert_column_args
def set_column(
self,
first_col: int,
last_col: int,
width: Optional[float] = None,
cell_format: Optional[Format] = None,
options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1]:
"""
Set the width, and other properties of a single column or a
range of columns.
Args:
first_col: First column (zero-indexed).
last_col: Last column (zero-indexed). Can be same as first_col.
width: Column width. (optional).
cell_format: Column cell_format. (optional).
options: Dict of options such as hidden and level.
Returns:
0: Success.
-1: Column number is out of worksheet bounds.
"""
# Convert from Excel character width to pixels. The conversion is
# different below 1 character widths.
if width is None:
width_pixels = None
elif width == 0.0:
width_pixels = 0
elif width < 1.0:
width_pixels = round(width * (self.max_digit_width + self.cell_padding))
else:
width_pixels = round(width * self.max_digit_width) + self.cell_padding
return self.set_column_pixels(
first_col, last_col, width_pixels, cell_format, options
)
@convert_column_args
def set_column_pixels(
    self,
    first_col: int,
    last_col: int,
    width: Optional[float] = None,
    cell_format: Optional[Format] = None,
    options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1]:
    """
    Set the width, and other properties of a single column or a
    range of columns, where the column width is given in pixels.

    Args:
        first_col: First column (zero-indexed).
        last_col: Last column (zero-indexed). Can be same as first_col.
        width: Column width in pixels. (optional).
        cell_format: Column cell_format. (optional).
        options: Dict of options such as hidden and level.

    Returns:
        0: Success.
        -1: Column number is out of worksheet bounds.

    """
    if options is None:
        options = {}

    # Normalize the range so that first_col <= last_col.
    if first_col > last_col:
        first_col, last_col = last_col, first_col

    # Row dimensions are not modified while checking the columns.
    ignore_row = True

    # Optional column properties.
    hidden = options.get("hidden", False)
    level = options.get("level", 0)
    collapsed = options.get("collapsed", False)

    # Only store the column dimensions under certain conditions.
    ignore_col = not (cell_format or (width and hidden))

    # Validate both ends of the range and record the max/min values.
    for col in (last_col, first_col):
        if self._check_dimensions(0, col, ignore_row, ignore_col):
            return -1

    # Clamp the outline level to Excel's allowed range (0 <= x <= 7).
    level = min(max(level, 0), 7)
    self.outline_col_level = max(self.outline_col_level, level)

    # Record the properties for every column in the range.
    for col in range(first_col, last_col + 1):
        self.col_info[col] = ColumnInfo(
            width=width,
            column_format=cell_format,
            hidden=hidden,
            level=level,
            collapsed=collapsed,
        )

    # Flag the change to allow later optimizations.
    self.col_size_changed = True

    return 0
def autofit(self, max_width: Optional[int] = None) -> None:
    """
    Simulate autofit based on the data, and datatypes in each column.

    Args:
        max_width (optional): max column width to autofit, in pixels.
            Clamped to the Excel maximum column width.

    Returns:
        Nothing.

    """
    # pylint: disable=too-many-nested-blocks
    if self.constant_memory:
        warn("Autofit is not supported in constant_memory mode.")
        return

    # No data written to the target sheet; nothing to autofit
    if self.dim_rowmax is None:
        return

    # Store the max pixel width for each column.
    col_width_max = {}

    # Convert the autofit maximum pixel width to a column/character width,
    # but limit it to the Excel max limit.
    if max_width is None:
        max_width = self.max_col_width
    max_width = min(max_width, self.max_col_width)

    # Create a reverse lookup for the share strings table so we can convert
    # the string id back to the original string.
    strings = sorted(
        self.str_table.string_table, key=self.str_table.string_table.__getitem__
    )

    for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
        if not self.table.get(row_num):
            continue

        for col_num in range(self.dim_colmin, self.dim_colmax + 1):
            if col_num in self.table[row_num]:
                cell = self.table[row_num][col_num]
                cell_type = cell.__class__.__name__
                length = 0

                if cell_type in ("String", "RichString"):
                    # Handle strings and rich strings.
                    #
                    # For standard shared strings we do a reverse lookup
                    # from the shared string id to the actual string. For
                    # rich strings we use the unformatted string. We also
                    # split multi-line strings and handle each part
                    # separately.
                    if cell_type == "String":
                        string_id = cell.string
                        string = strings[string_id]
                    else:
                        string = cell.raw_string

                    if "\n" not in string:
                        # Single line string.
                        length = xl_pixel_width(string)
                    else:
                        # Handle multi-line strings.
                        for string in string.split("\n"):
                            seg_length = xl_pixel_width(string)
                            length = max(length, seg_length)

                elif cell_type == "Number":
                    # Handle numbers.
                    #
                    # We use a workaround/optimization for numbers since
                    # digits all have a pixel width of 7. This gives a
                    # slightly greater width for the decimal place and
                    # minus sign but only by a few pixels and
                    # over-estimation is okay.
                    length = 7 * len(str(cell.number))

                elif cell_type == "Datetime":
                    # Handle dates.
                    #
                    # The following uses the default width for mm/dd/yyyy
                    # dates. It isn't feasible to parse the number format
                    # to get the actual string width for all format types.
                    length = self.default_date_width

                elif cell_type == "Boolean":
                    # Handle boolean values.
                    #
                    # Use the Excel standard widths for TRUE and FALSE.
                    if cell.boolean:
                        length = 31
                    else:
                        length = 36

                elif cell_type in ("Formula", "ArrayFormula"):
                    # Handle formulas.
                    #
                    # We only try to autofit a formula if it has a
                    # non-zero value.
                    if isinstance(cell.value, (float, int)):
                        if cell.value > 0:
                            length = 7 * len(str(cell.value))

                    elif isinstance(cell.value, str):
                        length = xl_pixel_width(cell.value)

                    elif isinstance(cell.value, bool):
                        if cell.value:
                            length = 31
                        else:
                            length = 36

                # If the cell is in an autofilter header we add an
                # additional 16 pixels for the dropdown arrow.
                if self.filter_cells.get((row_num, col_num)) and length > 0:
                    length += 16

                # Add the string length to the lookup table.
                width_max = col_width_max.get(col_num, 0)
                if length > width_max:
                    col_width_max[col_num] = length

    # Apply the width to the column.
    for col_num, width in col_width_max.items():
        # Add a 7 pixels padding, like Excel.
        width += 7

        # Limit the width to the maximum user or Excel value.
        width = min(width, max_width)

        # Add the width to an existing col info structure or add a new one.
        if self.col_info.get(col_num):
            # We only update the width for an existing column if it is
            # greater than the user defined value. This allows the user
            # to pre-load a minimum col width.
            col_info = self.col_info.get(col_num)
            user_width = col_info.width
            hidden = col_info.hidden
            if user_width is not None and not hidden:
                # Col info is user defined.
                if width > user_width:
                    self.col_info[col_num].width = width
                    # Bug fix: mark the column as autofitted. The previous
                    # code set "hidden = True" here, which wrongly hid the
                    # column instead of recording the autofit state.
                    self.col_info[col_num].autofit = True
            else:
                self.col_info[col_num].width = width
                self.col_info[col_num].autofit = True
        else:
            self.col_info[col_num] = ColumnInfo(
                width=width,
                autofit=True,
            )
def set_row(
self,
row: int,
height: Optional[float] = None,
cell_format: Optional[Format] = None,
options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1]:
"""
Set the width, and other properties of a row.
Args:
row: Row number (zero-indexed).
height: Row height. (optional).
cell_format: Row cell_format. (optional).
options: Dict of options such as hidden, level and collapsed.
Returns:
0: Success.
-1: Row number is out of worksheet bounds.
"""
if height is not None:
pixel_height = round(height * 4.0 / 3.0)
else:
pixel_height = None
return self.set_row_pixels(row, pixel_height, cell_format, options)
def set_row_pixels(
    self,
    row: int,
    height: Optional[float] = None,
    cell_format: Optional[Format] = None,
    options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1]:
    """
    Set the height (in pixels), and other properties of a row.

    Args:
        row: Row number (zero-indexed).
        height: Row height in pixels. (optional).
        cell_format: Row cell_format. (optional).
        options: Dict of options such as hidden, level and collapsed.

    Returns:
        0: Success.
        -1: Row number is out of worksheet bounds.

    """
    if options is None:
        options = {}

    # Use the minimum known column for the dimension check, if any.
    min_col = self.dim_colmin if self.dim_colmin is not None else 0

    # Bail out if the row is outside the worksheet bounds.
    if self._check_dimensions(row, min_col):
        return -1

    if height is None:
        height = self.default_row_height

    # Optional row properties.
    hidden = options.get("hidden", False)
    level = options.get("level", 0)
    collapsed = options.get("collapsed", False)

    # A zero height means a hidden row at the default height.
    if height == 0:
        hidden = True
        height = self.default_row_height

    # Clamp the outline level to Excel's allowed range (0 <= x <= 7).
    level = min(max(level, 0), 7)
    self.outline_row_level = max(self.outline_row_level, level)

    # Record the row properties.
    self.row_info[row] = RowInfo(
        height=height,
        row_format=cell_format,
        hidden=hidden,
        level=level,
        collapsed=collapsed,
    )

    # Flag the change to allow later optimizations.
    self.row_size_changed = True

    # Keep the row size for later image vertex calculations.
    self.row_sizes[row] = [height, hidden]

    return 0
def set_default_row(
    self, height: Optional[float] = None, hide_unused_rows: bool = False
) -> None:
    """
    Set the default row properties.

    Args:
        height: Default height. Optional, defaults to 15.
        hide_unused_rows: Hide unused rows. Optional, defaults to False.

    Returns:
        Nothing.

    """
    # Convert the character-unit height to pixels (ratio 4/3), or keep
    # the current default when no height is supplied.
    if height is None:
        pixel_height = self.default_row_height
    else:
        pixel_height = int(round(height * 4.0 / 3.0))

    if pixel_height != self.original_row_height:
        # Flag the change to allow later optimizations.
        self.row_size_changed = True
        self.default_row_height = pixel_height

    if hide_unused_rows:
        self.default_row_zeroed = 1
@convert_range_args
def merge_range(
    self,
    first_row: int,
    first_col: int,
    last_row: int,
    last_col: int,
    data: Any,
    cell_format: Optional[Format] = None,
) -> int:
    """
    Merge a range of cells. The first cell holds the data and the rest
    of the merged area is padded with blank cells that share the same
    format.

    Args:
        first_row: The first row of the cell range. (zero indexed).
        first_col: The first column of the cell range.
        last_row: The last row of the cell range. (zero indexed).
        last_col: The last column of the cell range.
        data: Cell data.
        cell_format: Cell Format object.

    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
        other: Return value of write().

    """
    # Excel doesn't allow a single cell to be merged
    if first_row == last_row and first_col == last_col:
        warn("Can't merge single cell")
        return -1

    # Normalize the range so first <= last for both rows and columns.
    if first_row > last_row:
        first_row, last_row = last_row, first_row
    if first_col > last_col:
        first_col, last_col = last_col, first_col

    # Validate both corners and record the max and min dimensions.
    for row, col in ((first_row, first_col), (last_row, last_col)):
        if self._check_dimensions(row, col):
            return -1

    # Reject any overlap with a previously merged or table range since
    # that is a critical file corruption error in Excel.
    cell_range = xl_range(first_row, first_col, last_row, last_col)

    for row in range(first_row, last_row + 1):
        for col in range(first_col, last_col + 1):
            previous_range = self.merged_cells.get((row, col))
            if previous_range:
                raise OverlappingRange(
                    f"Merge range '{cell_range}' overlaps previous merge "
                    f"range '{previous_range}'."
                )

            previous_range = self.table_cells.get((row, col))
            if previous_range:
                raise OverlappingRange(
                    f"Merge range '{cell_range}' overlaps previous table "
                    f"range '{previous_range}'."
                )

            self.merged_cells[(row, col)] = cell_range

    # Store the merge range.
    self.merge.append([first_row, first_col, last_row, last_col])

    # Write the first cell
    self._write(first_row, first_col, data, cell_format)

    # Pad out the rest of the area with formatted blank cells.
    for row in range(first_row, last_row + 1):
        for col in range(first_col, last_col + 1):
            if row == first_row and col == first_col:
                continue
            self._write_blank(row, col, "", cell_format)

    return 0
@convert_range_args
def autofilter(
    self, first_row: int, first_col: int, last_row: int, last_col: int
) -> None:
    """
    Set the autofilter area in the worksheet.

    Args:
        first_row: The first row of the cell range. (zero indexed).
        first_col: The first column of the cell range.
        last_row: The last row of the cell range. (zero indexed).
        last_col: The last column of the cell range.

    Returns:
        Nothing.

    """
    # Normalize the range so first <= last for both rows and columns.
    if last_row < first_row:
        first_row, last_row = last_row, first_row
    if last_col < first_col:
        first_col, last_col = last_col, first_col

    # Build the autofilter area range, e.g. "Sheet1!$A$1:$C$13".
    area = self._convert_name_area(first_row, first_col, last_row, last_col)
    ref = xl_range(first_row, first_col, last_row, last_col)

    self.autofilter_area = area
    self.autofilter_ref = ref
    self.filter_range = [first_col, last_col]

    # Record the filter cell positions for the autofit calculation and
    # reject any overlap with an existing table autofilter.
    for col in range(first_col, last_col + 1):
        existing = self.filter_cells.get((first_row, col))
        if existing:
            filter_type, filter_range = existing
            if filter_type == "table":
                raise OverlappingRange(
                    f"Worksheet autofilter range '{ref}' overlaps previous "
                    f"Table autofilter range '{filter_range}'."
                )

        self.filter_cells[(first_row, col)] = ("worksheet", ref)
def filter_column(self, col: int, criteria: str) -> None:
    """
    Set the column filter criteria.

    Args:
        col: Filter column (zero-indexed).
        criteria: Filter criteria.

    Returns:
        Nothing.

    """
    if not self.autofilter_area:
        warn("Must call autofilter() before filter_column()")
        return

    # Allow a column reference in A1 notation and convert it.
    try:
        int(col)
    except ValueError:
        # Convert col ref to a cell ref and then to a col number.
        col_letter = col
        (_, col) = xl_cell_to_rowcol(col + "1")

        if col >= self.xls_colmax:
            warn(f"Invalid column '{col_letter}'")
            return

    col_first, col_last = self.filter_range

    # Reject a column outside the autofilter range.
    if col < col_first or col > col_last:
        warn(
            f"Column '{col}' outside autofilter() column "
            f"range ({col_first}, {col_last})"
        )
        return

    tokens = self._extract_filter_tokens(criteria)

    if len(tokens) not in (3, 7):
        warn(f"Incorrect number of tokens in criteria '{criteria}'")

    tokens = self._parse_filter_expression(criteria, tokens)

    # Excel treats single and double equality custom filters as default
    # list filters, so detect those cases and dispatch accordingly.
    if len(tokens) == 2 and tokens[0] == 2:
        # Single equality.
        self.filter_column_list(col, [tokens[1]])
    elif len(tokens) == 5 and tokens[0] == 2 and tokens[2] == 1 and tokens[3] == 2:
        # Double equality with "or" operator.
        self.filter_column_list(col, [tokens[1], tokens[4]])
    else:
        # Non default custom filter.
        self.filter_cols[col] = tokens
        self.filter_type[col] = 0

    self.filter_on = 1
def filter_column_list(self, col: int, filters: List[str]) -> None:
    """
    Set the column filter criteria in Excel 2007 list style.

    Args:
        col: Filter column (zero-indexed).
        filters: List of filter criteria to match.

    Returns:
        Nothing.

    """
    if not self.autofilter_area:
        # Fix: this warning previously named filter_column(), which was
        # misleading since the user actually called filter_column_list().
        warn("Must call autofilter() before filter_column_list()")
        return

    # Check for a column reference in A1 notation and substitute.
    try:
        int(col)
    except ValueError:
        # Convert col ref to a cell ref and then to a col number.
        col_letter = col
        (_, col) = xl_cell_to_rowcol(col + "1")

        if col >= self.xls_colmax:
            warn(f"Invalid column '{col_letter}'")
            return

    (col_first, col_last) = self.filter_range

    # Reject column if it is outside filter range.
    if col < col_first or col > col_last:
        warn(
            f"Column '{col}' outside autofilter() column range "
            f"({col_first},{col_last})"
        )
        return

    self.filter_cols[col] = filters
    self.filter_type[col] = 1
    self.filter_on = 1
@convert_range_args
def data_validation(
    self,
    first_row: int,
    first_col: int,
    last_row: int,
    last_col: int,
    options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1, -2]:
    """
    Add a data validation to a worksheet.

    The user supplied options are validated and normalized in place (on
    a copy): friendly names such as "integer" or "not between" are
    mapped to their file-format equivalents, date/time values are
    converted to Excel serial numbers, and Excel's length limits on
    titles/messages are enforced. The resulting options dict is stored
    until the worksheet is written out at close time.

    Args:
        first_row: The first row of the cell range. (zero indexed).
        first_col: The first column of the cell range.
        last_row: The last row of the cell range. (zero indexed).
        last_col: The last column of the cell range.
        options: Data validation options.

    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
        -2: Incorrect parameter or option.
    """
    # Check that row and col are valid without storing the values.
    if self._check_dimensions(first_row, first_col, True, True):
        return -1
    if self._check_dimensions(last_row, last_col, True, True):
        return -1

    if options is None:
        options = {}
    else:
        # Copy the user defined options so they aren't modified.
        options = options.copy()

    # Valid input parameters.
    valid_parameters = {
        "validate",
        "criteria",
        "value",
        "source",
        "minimum",
        "maximum",
        "ignore_blank",
        "dropdown",
        "show_input",
        "input_title",
        "input_message",
        "show_error",
        "error_title",
        "error_message",
        "error_type",
        "other_cells",
        "multi_range",
    }

    # Check for valid input parameters.
    for param_key in options.keys():
        if param_key not in valid_parameters:
            warn(f"Unknown parameter '{param_key}' in data_validation()")
            return -2

    # Map alternative parameter names 'source' or 'minimum' to 'value'.
    if "source" in options:
        options["value"] = options["source"]
    if "minimum" in options:
        options["value"] = options["minimum"]

    # 'validate' is a required parameter.
    if "validate" not in options:
        warn("Parameter 'validate' is required in data_validation()")
        return -2

    # List of valid validation types.
    valid_types = {
        "any": "none",
        "any value": "none",
        "whole number": "whole",
        "whole": "whole",
        "integer": "whole",
        "decimal": "decimal",
        "list": "list",
        "date": "date",
        "time": "time",
        "text length": "textLength",
        "length": "textLength",
        "custom": "custom",
    }

    # Check for valid validation types.
    if options["validate"] not in valid_types:
        warn(
            f"Unknown validation type '{options['validate']}' for parameter "
            f"'validate' in data_validation()"
        )
        return -2

    options["validate"] = valid_types[options["validate"]]

    # No action is required for validation type 'any' if there are no
    # input messages to display.
    if (
        options["validate"] == "none"
        and options.get("input_title") is None
        and options.get("input_message") is None
    ):
        return -2

    # The any, list and custom validations don't have a criteria so we use
    # a default of 'between'.
    if (
        options["validate"] == "none"
        or options["validate"] == "list"
        or options["validate"] == "custom"
    ):
        options["criteria"] = "between"
        options["maximum"] = None

    # 'criteria' is a required parameter.
    if "criteria" not in options:
        warn("Parameter 'criteria' is required in data_validation()")
        return -2

    # Valid criteria types mapped to their file-format names.
    criteria_types = {
        "between": "between",
        "not between": "notBetween",
        "equal to": "equal",
        "=": "equal",
        "==": "equal",
        "not equal to": "notEqual",
        "!=": "notEqual",
        "<>": "notEqual",
        "greater than": "greaterThan",
        ">": "greaterThan",
        "less than": "lessThan",
        "<": "lessThan",
        "greater than or equal to": "greaterThanOrEqual",
        ">=": "greaterThanOrEqual",
        "less than or equal to": "lessThanOrEqual",
        "<=": "lessThanOrEqual",
    }

    # Check for valid criteria types.
    if options["criteria"] not in criteria_types:
        warn(
            f"Unknown criteria type '{options['criteria']}' for parameter "
            f"'criteria' in data_validation()"
        )
        return -2

    options["criteria"] = criteria_types[options["criteria"]]

    # 'Between' and 'Not between' criteria require 2 values.
    if options["criteria"] == "between" or options["criteria"] == "notBetween":
        if "maximum" not in options:
            warn(
                "Parameter 'maximum' is required in data_validation() "
                "when using 'between' or 'not between' criteria"
            )
            return -2
    else:
        options["maximum"] = None

    # Valid error dialog types.
    error_types = {
        "stop": 0,
        "warning": 1,
        "information": 2,
    }

    # Check for valid error dialog types.
    if "error_type" not in options:
        options["error_type"] = 0
    elif options["error_type"] not in error_types:
        warn(
            f"Unknown criteria type '{options['error_type']}' "
            f"for parameter 'error_type'."
        )
        return -2
    else:
        options["error_type"] = error_types[options["error_type"]]

    # Convert date/times value if required.
    # NOTE(review): options["value"] is indexed directly here; a
    # date/time validation supplied without a value/source/minimum
    # option would raise KeyError rather than warn -- confirm intended.
    if (
        options["validate"] in ("date", "time")
        and options["value"]
        and _supported_datetime(options["value"])
    ):
        date_time = self._convert_date_time(options["value"])
        # Format date number to the same precision as Excel.
        options["value"] = f"{date_time:.16g}"

        if options["maximum"] and _supported_datetime(options["maximum"]):
            date_time = self._convert_date_time(options["maximum"])
            options["maximum"] = f"{date_time:.16g}"

    # Check that the input title doesn't exceed the maximum length.
    if options.get("input_title") and len(options["input_title"]) > 32:
        warn(
            f"Length of input title '{options['input_title']}' "
            f"exceeds Excel's limit of 32"
        )
        return -2

    # Check that the error title doesn't exceed the maximum length.
    if options.get("error_title") and len(options["error_title"]) > 32:
        warn(
            f"Length of error title '{options['error_title']}' "
            f"exceeds Excel's limit of 32"
        )
        return -2

    # Check that the input message doesn't exceed the maximum length.
    if options.get("input_message") and len(options["input_message"]) > 255:
        warn(
            f"Length of input message '{options['input_message']}' "
            f"exceeds Excel's limit of 255"
        )
        return -2

    # Check that the error message doesn't exceed the maximum length.
    if options.get("error_message") and len(options["error_message"]) > 255:
        warn(
            f"Length of error message '{options['error_message']}' "
            f"exceeds Excel's limit of 255"
        )
        return -2

    # Check that the input list doesn't exceed the maximum length.
    if options["validate"] == "list" and isinstance(options["value"], list):
        formula = self._csv_join(*options["value"])
        if len(formula) > 255:
            warn(
                f"Length of list items '{formula}' exceeds Excel's limit of "
                f"255, use a formula range instead"
            )
            return -2

    # Set some defaults if they haven't been defined by the user.
    if "ignore_blank" not in options:
        options["ignore_blank"] = 1
    if "dropdown" not in options:
        options["dropdown"] = 1
    if "show_input" not in options:
        options["show_input"] = 1
    if "show_error" not in options:
        options["show_error"] = 1

    # These are the cells to which the validation is applied.
    options["cells"] = [[first_row, first_col, last_row, last_col]]

    # A (for now) undocumented parameter to pass additional cell ranges.
    if "other_cells" in options:
        options["cells"].extend(options["other_cells"])

    # Override with user defined multiple range if provided.
    if "multi_range" in options:
        options["multi_range"] = options["multi_range"].replace("$", "")

    # Store the validation information until we close the worksheet.
    self.validations.append(options)

    return 0
@convert_range_args
def conditional_format(
    self,
    first_row: int,
    first_col: int,
    last_row: int,
    last_col: int,
    options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1, -2, -3]:
    """
    Add a conditional format to a worksheet.

    Args:
        first_row: The first row of the cell range. (zero indexed).
        first_col: The first column of the cell range.
        last_row: The last row of the cell range. (zero indexed).
        last_col: The last column of the cell range.
        options: Conditional format options.

    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
        -2: Incorrect parameter or option.
        -3: Missing 'icon_style' parameter for an 'icon_set' type.

    """
    # Check that row and col are valid without storing the values.
    if self._check_dimensions(first_row, first_col, True, True):
        return -1
    if self._check_dimensions(last_row, last_col, True, True):
        return -1

    if options is None:
        options = {}
    else:
        # Copy the user defined options so they aren't modified.
        options = options.copy()

    # Valid input parameters.
    valid_parameter = {
        "type",
        "format",
        "criteria",
        "value",
        "minimum",
        "maximum",
        "stop_if_true",
        "min_type",
        "mid_type",
        "max_type",
        "min_value",
        "mid_value",
        "max_value",
        "min_color",
        "mid_color",
        "max_color",
        "min_length",
        "max_length",
        "multi_range",
        "bar_color",
        "bar_negative_color",
        "bar_negative_color_same",
        "bar_solid",
        "bar_border_color",
        "bar_negative_border_color",
        "bar_negative_border_color_same",
        "bar_no_border",
        "bar_direction",
        "bar_axis_position",
        "bar_axis_color",
        "bar_only",
        "data_bar_2010",
        "icon_style",
        "reverse_icons",
        "icons_only",
        "icons",
    }

    # Check for valid input parameters.
    for param_key in options.keys():
        if param_key not in valid_parameter:
            warn(f"Unknown parameter '{param_key}' in conditional_format()")
            return -2

    # 'type' is a required parameter.
    if "type" not in options:
        warn("Parameter 'type' is required in conditional_format()")
        return -2

    # Valid types.
    valid_type = {
        "cell": "cellIs",
        "date": "date",
        "time": "time",
        "average": "aboveAverage",
        "duplicate": "duplicateValues",
        "unique": "uniqueValues",
        "top": "top10",
        "bottom": "top10",
        "text": "text",
        "time_period": "timePeriod",
        "blanks": "containsBlanks",
        "no_blanks": "notContainsBlanks",
        "errors": "containsErrors",
        "no_errors": "notContainsErrors",
        "2_color_scale": "2_color_scale",
        "3_color_scale": "3_color_scale",
        "data_bar": "dataBar",
        "formula": "expression",
        "icon_set": "iconSet",
    }

    # Check for valid types.
    if options["type"] not in valid_type:
        warn(
            f"Unknown value '{options['type']}' for parameter 'type' "
            f"in conditional_format()"
        )
        return -2

    if options["type"] == "bottom":
        options["direction"] = "bottom"
    options["type"] = valid_type[options["type"]]

    # Valid criteria types.
    criteria_type = {
        "between": "between",
        "not between": "notBetween",
        "equal to": "equal",
        "=": "equal",
        "==": "equal",
        "not equal to": "notEqual",
        "!=": "notEqual",
        "<>": "notEqual",
        "greater than": "greaterThan",
        ">": "greaterThan",
        "less than": "lessThan",
        "<": "lessThan",
        "greater than or equal to": "greaterThanOrEqual",
        ">=": "greaterThanOrEqual",
        "less than or equal to": "lessThanOrEqual",
        "<=": "lessThanOrEqual",
        "containing": "containsText",
        "not containing": "notContains",
        "begins with": "beginsWith",
        "ends with": "endsWith",
        "yesterday": "yesterday",
        "today": "today",
        "last 7 days": "last7Days",
        "last week": "lastWeek",
        "this week": "thisWeek",
        "next week": "nextWeek",
        "last month": "lastMonth",
        "this month": "thisMonth",
        "next month": "nextMonth",
        # For legacy, but incorrect, support.
        "continue week": "nextWeek",
        "continue month": "nextMonth",
    }

    # Check for valid criteria types.
    if "criteria" in options and options["criteria"] in criteria_type:
        options["criteria"] = criteria_type[options["criteria"]]

    # Convert boolean values if required.
    if "value" in options and isinstance(options["value"], bool):
        options["value"] = str(options["value"]).upper()

    # Convert date/times value if required.
    if options["type"] in ("date", "time"):
        options["type"] = "cellIs"

        if "value" in options:
            if not _supported_datetime(options["value"]):
                warn("Conditional format 'value' must be a datetime object.")
                return -2

            date_time = self._convert_date_time(options["value"])
            # Format date number to the same precision as Excel.
            options["value"] = f"{date_time:.16g}"

        if "minimum" in options:
            if not _supported_datetime(options["minimum"]):
                warn("Conditional format 'minimum' must be a datetime object.")
                return -2

            date_time = self._convert_date_time(options["minimum"])
            options["minimum"] = f"{date_time:.16g}"

        if "maximum" in options:
            if not _supported_datetime(options["maximum"]):
                warn("Conditional format 'maximum' must be a datetime object.")
                return -2

            date_time = self._convert_date_time(options["maximum"])
            options["maximum"] = f"{date_time:.16g}"

    # Valid icon styles.
    valid_icons = {
        "3_arrows": "3Arrows",  # 1
        "3_flags": "3Flags",  # 2
        "3_traffic_lights_rimmed": "3TrafficLights2",  # 3
        "3_symbols_circled": "3Symbols",  # 4
        "4_arrows": "4Arrows",  # 5
        "4_red_to_black": "4RedToBlack",  # 6
        "4_traffic_lights": "4TrafficLights",  # 7
        "5_arrows_gray": "5ArrowsGray",  # 8
        "5_quarters": "5Quarters",  # 9
        "3_arrows_gray": "3ArrowsGray",  # 10
        "3_traffic_lights": "3TrafficLights",  # 11
        "3_signs": "3Signs",  # 12
        "3_symbols": "3Symbols2",  # 13
        "4_arrows_gray": "4ArrowsGray",  # 14
        "4_ratings": "4Rating",  # 15
        "5_arrows": "5Arrows",  # 16
        "5_ratings": "5Rating",  # 17
    }

    # Set the icon set properties.
    if options["type"] == "iconSet":
        # An icon_set must have an icon style.
        if not options.get("icon_style"):
            warn(
                "The 'icon_style' parameter must be specified when "
                "'type' == 'icon_set' in conditional_format()."
            )
            return -3

        # Check for valid icon styles.
        if options["icon_style"] not in valid_icons:
            warn(
                f"Unknown icon_style '{options['icon_style']}' "
                f"in conditional_format()."
            )
            return -2

        options["icon_style"] = valid_icons[options["icon_style"]]

        # Set the number of icons for the icon style.
        options["total_icons"] = 3
        if options["icon_style"].startswith("4"):
            options["total_icons"] = 4
        elif options["icon_style"].startswith("5"):
            options["total_icons"] = 5

        options["icons"] = self._set_icon_props(
            options.get("total_icons"), options.get("icons")
        )

    # Swap last row/col for first row/col as necessary
    if first_row > last_row:
        first_row, last_row = last_row, first_row
    if first_col > last_col:
        first_col, last_col = last_col, first_col

    # Set the formatting range.
    cell_range = xl_range(first_row, first_col, last_row, last_col)
    start_cell = xl_rowcol_to_cell(first_row, first_col)

    # Override with user defined multiple range if provided.
    if "multi_range" in options:
        cell_range = options["multi_range"]
        cell_range = cell_range.replace("$", "")

    # Get the dxf format index.
    if "format" in options and options["format"]:
        options["format"] = options["format"]._get_dxf_index()

    # Set the priority based on the order of adding.
    options["priority"] = self.dxf_priority
    self.dxf_priority += 1

    # Check for 2010 style data_bar parameters.
    # pylint: disable=too-many-boolean-expressions
    if (
        self.use_data_bars_2010
        or options.get("data_bar_2010")
        or options.get("bar_solid")
        or options.get("bar_border_color")
        or options.get("bar_negative_color")
        or options.get("bar_negative_color_same")
        or options.get("bar_negative_border_color")
        or options.get("bar_negative_border_color_same")
        or options.get("bar_no_border")
        or options.get("bar_axis_position")
        or options.get("bar_axis_color")
        or options.get("bar_direction")
    ):
        options["is_data_bar_2010"] = True

    # Special handling of text criteria.
    if options["type"] == "text":
        value = options["value"]
        length = len(value)
        criteria = options["criteria"]

        if options["criteria"] == "containsText":
            options["type"] = "containsText"
            options["formula"] = f'NOT(ISERROR(SEARCH("{value}",{start_cell})))'
        elif options["criteria"] == "notContains":
            options["type"] = "notContainsText"
            options["formula"] = f'ISERROR(SEARCH("{value}",{start_cell}))'
        elif options["criteria"] == "beginsWith":
            options["type"] = "beginsWith"
            options["formula"] = f'LEFT({start_cell},{length})="{value}"'
        elif options["criteria"] == "endsWith":
            options["type"] = "endsWith"
            options["formula"] = f'RIGHT({start_cell},{length})="{value}"'
        else:
            warn(f"Invalid text criteria '{criteria}' in conditional_format()")

    # Special handling of time time_period criteria.
    if options["type"] == "timePeriod":
        if options["criteria"] == "yesterday":
            options["formula"] = f"FLOOR({start_cell},1)=TODAY()-1"
        elif options["criteria"] == "today":
            options["formula"] = f"FLOOR({start_cell},1)=TODAY()"
        elif options["criteria"] == "tomorrow":
            options["formula"] = f"FLOOR({start_cell},1)=TODAY()+1"
        # fmt: off
        elif options["criteria"] == "last7Days":
            options["formula"] = (
                f"AND(TODAY()-FLOOR({start_cell},1)<=6,"
                f"FLOOR({start_cell},1)<=TODAY())"
            )
        # fmt: on
        elif options["criteria"] == "lastWeek":
            options["formula"] = (
                f"AND(TODAY()-ROUNDDOWN({start_cell},0)>=(WEEKDAY(TODAY())),"
                f"TODAY()-ROUNDDOWN({start_cell},0)<(WEEKDAY(TODAY())+7))"
            )
        elif options["criteria"] == "thisWeek":
            options["formula"] = (
                f"AND(TODAY()-ROUNDDOWN({start_cell},0)<=WEEKDAY(TODAY())-1,"
                f"ROUNDDOWN({start_cell},0)-TODAY()<=7-WEEKDAY(TODAY()))"
            )
        elif options["criteria"] == "nextWeek":
            options["formula"] = (
                f"AND(ROUNDDOWN({start_cell},0)-TODAY()>(7-WEEKDAY(TODAY())),"
                f"ROUNDDOWN({start_cell},0)-TODAY()<(15-WEEKDAY(TODAY())))"
            )
        elif options["criteria"] == "lastMonth":
            # Bug fix: the final YEAR() test previously hard-coded "A1"
            # instead of the range's start cell, which broke the formula
            # for any range not anchored at A1. All four cell references
            # now use start_cell, matching the other timePeriod formulas.
            options["formula"] = (
                f"AND(MONTH({start_cell})=MONTH(TODAY())-1,"
                f"OR(YEAR({start_cell})=YEAR("
                f"TODAY()),AND(MONTH({start_cell})=1,"
                f"YEAR({start_cell})=YEAR(TODAY())-1)))"
            )
        # fmt: off
        elif options["criteria"] == "thisMonth":
            options["formula"] = (
                f"AND(MONTH({start_cell})=MONTH(TODAY()),"
                f"YEAR({start_cell})=YEAR(TODAY()))"
            )
        # fmt: on
        elif options["criteria"] == "nextMonth":
            options["formula"] = (
                f"AND(MONTH({start_cell})=MONTH(TODAY())+1,"
                f"OR(YEAR({start_cell})=YEAR("
                f"TODAY()),AND(MONTH({start_cell})=12,"
                f"YEAR({start_cell})=YEAR(TODAY())+1)))"
            )
        else:
            warn(
                f"Invalid time_period criteria '{options['criteria']}' "
                f"in conditional_format()"
            )

    # Special handling of blanks/error types.
    if options["type"] == "containsBlanks":
        options["formula"] = f"LEN(TRIM({start_cell}))=0"

    if options["type"] == "notContainsBlanks":
        options["formula"] = f"LEN(TRIM({start_cell}))>0"

    if options["type"] == "containsErrors":
        options["formula"] = f"ISERROR({start_cell})"

    if options["type"] == "notContainsErrors":
        options["formula"] = f"NOT(ISERROR({start_cell}))"

    # Special handling for 2 color scale.
    if options["type"] == "2_color_scale":
        options["type"] = "colorScale"

        # Color scales don't use any additional formatting.
        options["format"] = None

        # Turn off 3 color parameters.
        options["mid_type"] = None
        options["mid_color"] = None

        options.setdefault("min_type", "min")
        options.setdefault("max_type", "max")
        options.setdefault("min_value", 0)
        options.setdefault("max_value", 0)
        options.setdefault("min_color", Color("#FF7128"))
        options.setdefault("max_color", Color("#FFEF9C"))

        options["min_color"] = Color._from_value(options["min_color"])
        options["max_color"] = Color._from_value(options["max_color"])

    # Special handling for 3 color scale.
    if options["type"] == "3_color_scale":
        options["type"] = "colorScale"

        # Color scales don't use any additional formatting.
        options["format"] = None

        options.setdefault("min_type", "min")
        options.setdefault("mid_type", "percentile")
        options.setdefault("max_type", "max")
        options.setdefault("min_value", 0)
        options.setdefault("max_value", 0)
        options.setdefault("min_color", Color("#F8696B"))
        options.setdefault("mid_color", Color("#FFEB84"))
        options.setdefault("max_color", Color("#63BE7B"))

        options["min_color"] = Color._from_value(options["min_color"])
        options["mid_color"] = Color._from_value(options["mid_color"])
        options["max_color"] = Color._from_value(options["max_color"])

        # Set a default mid value.
        if "mid_value" not in options:
            options["mid_value"] = 50

    # Special handling for data bar.
    if options["type"] == "dataBar":
        # Color scales don't use any additional formatting.
        options["format"] = None

        if not options.get("min_type"):
            options["min_type"] = "min"
            options["x14_min_type"] = "autoMin"
        else:
            options["x14_min_type"] = options["min_type"]

        if not options.get("max_type"):
            options["max_type"] = "max"
            options["x14_max_type"] = "autoMax"
        else:
            options["x14_max_type"] = options["max_type"]

        options.setdefault("min_value", 0)
        options.setdefault("max_value", 0)
        options.setdefault("bar_color", Color("#638EC6"))
        options.setdefault("bar_border_color", options["bar_color"])
        options.setdefault("bar_only", False)
        options.setdefault("bar_no_border", False)
        options.setdefault("bar_solid", False)
        options.setdefault("bar_direction", "")
        options.setdefault("bar_negative_color", Color("#FF0000"))
        options.setdefault("bar_negative_border_color", Color("#FF0000"))
        options.setdefault("bar_negative_color_same", False)
        options.setdefault("bar_negative_border_color_same", False)
        options.setdefault("bar_axis_position", "")
        options.setdefault("bar_axis_color", Color("#000000"))

        options["bar_color"] = Color._from_value(options["bar_color"])
        options["bar_border_color"] = Color._from_value(options["bar_border_color"])
        options["bar_axis_color"] = Color._from_value(options["bar_axis_color"])
        options["bar_negative_color"] = Color._from_value(
            options["bar_negative_color"]
        )
        options["bar_negative_border_color"] = Color._from_value(
            options["bar_negative_border_color"]
        )

        # Adjust for 2010 style data_bar parameters.
        if options.get("is_data_bar_2010"):
            self.excel_version = 2010

            if options["min_type"] == "min" and options["min_value"] == 0:
                options["min_value"] = None

            if options["max_type"] == "max" and options["max_value"] == 0:
                options["max_value"] = None

            options["range"] = cell_range

    # Strip the leading = from formulas.
    try:
        options["min_value"] = options["min_value"].lstrip("=")
    except (KeyError, AttributeError):
        pass
    try:
        options["mid_value"] = options["mid_value"].lstrip("=")
    except (KeyError, AttributeError):
        pass
    try:
        options["max_value"] = options["max_value"].lstrip("=")
    except (KeyError, AttributeError):
        pass

    # Store the conditional format until we close the worksheet.
    if cell_range in self.cond_formats:
        self.cond_formats[cell_range].append(options)
    else:
        self.cond_formats[cell_range] = [options]

    return 0
@convert_range_args
def add_table(
    self,
    first_row: int,
    first_col: int,
    last_row: int,
    last_col: int,
    options: Optional[Dict[str, Any]] = None,
) -> Literal[0, -1, -2, -3]:
    """
    Add an Excel table to a worksheet.
    Args:
        first_row: The first row of the cell range. (zero indexed).
        first_col: The first column of the cell range.
        last_row: The last row of the cell range. (zero indexed).
        last_col: The last column of the cell range.
        options: Table format options. (Optional)
    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
        -2: Incorrect parameter or option.
        -3: Not supported in constant_memory mode.
    Raises:
        OverlappingRange: if the table range overlaps a previous merge
            or table range. This is a file corruption error in Excel.
    """
    table = {}
    col_formats = {}
    if options is None:
        options = {}
    else:
        # Copy the user defined options so they aren't modified.
        options = options.copy()
    if self.constant_memory:
        warn("add_table() isn't supported in 'constant_memory' mode")
        return -3
    # Check that row and col are valid without storing the values.
    if self._check_dimensions(first_row, first_col, True, True):
        return -1
    if self._check_dimensions(last_row, last_col, True, True):
        return -1
    # Swap last row/col for first row/col as necessary.
    if first_row > last_row:
        (first_row, last_row) = (last_row, first_row)
    if first_col > last_col:
        (first_col, last_col) = (last_col, first_col)
    # Check if the table range overlaps a previous merged or table range.
    # This is a critical file corruption error in Excel.
    cell_range = xl_range(first_row, first_col, last_row, last_col)
    for row in range(first_row, last_row + 1):
        for col in range(first_col, last_col + 1):
            if self.table_cells.get((row, col)):
                previous_range = self.table_cells.get((row, col))
                raise OverlappingRange(
                    f"Table range '{cell_range}' overlaps previous "
                    f"table range '{previous_range}'."
                )
            if self.merged_cells.get((row, col)):
                previous_range = self.merged_cells.get((row, col))
                raise OverlappingRange(
                    f"Table range '{cell_range}' overlaps previous "
                    f"merge range '{previous_range}'."
                )
            self.table_cells[(row, col)] = cell_range
    # Valid input parameters.
    valid_parameter = {
        "autofilter",
        "banded_columns",
        "banded_rows",
        "columns",
        "data",
        "first_column",
        "header_row",
        "last_column",
        "name",
        "style",
        "total_row",
        "description",
        "title",
    }
    # Check for valid input parameters.
    for param_key in options.keys():
        if param_key not in valid_parameter:
            warn(f"Unknown parameter '{param_key}' in add_table()")
            return -2
    # Turn on Excel's defaults.
    options["banded_rows"] = options.get("banded_rows", True)
    options["header_row"] = options.get("header_row", True)
    options["autofilter"] = options.get("autofilter", True)
    # Check that there are enough rows.
    num_rows = last_row - first_row
    if options["header_row"]:
        num_rows -= 1
    if num_rows < 0:
        # Fixed typo in warning message (previously "in in add_table()").
        warn("Must have at least one data row in add_table()")
        return -2
    # Set the table options.
    table["show_first_col"] = options.get("first_column", False)
    table["show_last_col"] = options.get("last_column", False)
    table["show_row_stripes"] = options.get("banded_rows", False)
    table["show_col_stripes"] = options.get("banded_columns", False)
    table["header_row_count"] = options.get("header_row", 0)
    table["totals_row_shown"] = options.get("total_row", False)
    table["description"] = options.get("description")
    table["title"] = options.get("title")
    # Set the table name.
    if "name" in options:
        name = options["name"]
        table["name"] = name
        if " " in name:
            warn(f"Name '{name}' in add_table() cannot contain spaces")
            return -2
        # Warn if the name contains invalid chars as defined by Excel.
        if not re.match(r"^[\w\\][\w\\.]*$", name, re.UNICODE) or re.match(
            r"^\d", name
        ):
            warn(f"Invalid Excel characters in add_table(): '{name}'")
            return -2
        # Warn if the name looks like a cell name.
        if re.match(r"^[a-zA-Z][a-zA-Z]?[a-dA-D]?\d+$", name):
            warn(f"Name looks like a cell name in add_table(): '{name}'")
            return -2
        # Warn if the name looks like a R1C1 cell reference.
        if re.match(r"^[rcRC]$", name) or re.match(r"^[rcRC]\d+[rcRC]\d+$", name):
            warn(f"Invalid name '{name}' like a RC cell ref in add_table()")
            return -2
    # Set the table style.
    if "style" in options:
        table["style"] = options["style"]
        if table["style"] is None:
            table["style"] = ""
        # Remove whitespace from style name.
        table["style"] = table["style"].replace(" ", "")
    else:
        table["style"] = "TableStyleMedium9"
    # Set the data range rows (without the header and footer).
    first_data_row = first_row
    last_data_row = last_row
    if options.get("header_row"):
        first_data_row += 1
    if options.get("total_row"):
        last_data_row -= 1
    # Set the table and autofilter ranges.
    table["range"] = xl_range(first_row, first_col, last_row, last_col)
    table["a_range"] = xl_range(first_row, first_col, last_data_row, last_col)
    # If the header row is off the default is to turn autofilter off.
    if not options["header_row"]:
        options["autofilter"] = 0
    # Set the autofilter range.
    if options["autofilter"]:
        table["autofilter"] = table["a_range"]
    # Add the table columns.
    col_id = 1
    table["columns"] = []
    seen_names = {}
    for col_num in range(first_col, last_col + 1):
        # Set up the default column data.
        col_data = {
            "id": col_id,
            "name": "Column" + str(col_id),
            "total_string": "",
            "total_function": "",
            "custom_total": "",
            "total_value": 0,
            "formula": "",
            "format": None,
            "name_format": None,
        }
        # Overwrite the defaults with any user defined values.
        if "columns" in options:
            # Check if there are user defined values for this column.
            if col_id <= len(options["columns"]):
                user_data = options["columns"][col_id - 1]
            else:
                user_data = None
            if user_data:
                # Get the column format.
                xformat = user_data.get("format", None)
                # Map user defined values to internal values.
                if user_data.get("header"):
                    col_data["name"] = user_data["header"]
                # Excel requires unique case insensitive header names.
                header_name = col_data["name"]
                name = header_name.lower()
                if name in seen_names:
                    warn(f"Duplicate header name in add_table(): '{name}'")
                    return -2
                seen_names[name] = True
                col_data["name_format"] = user_data.get("header_format")
                # Handle the column formula.
                if "formula" in user_data and user_data["formula"]:
                    formula = user_data["formula"]
                    # Remove the formula '=' sign if it exists.
                    if formula.startswith("="):
                        formula = formula.lstrip("=")
                    # Convert Excel 2010 "@" ref to 2007 "#This Row".
                    formula = self._prepare_table_formula(formula)
                    # Escape any future functions.
                    formula = self._prepare_formula(formula, True)
                    col_data["formula"] = formula
                    # We write the formulas below after the table data.
                # Handle the function for the total row.
                if user_data.get("total_function"):
                    function = user_data["total_function"]
                    if function == "count_nums":
                        function = "countNums"
                    if function == "std_dev":
                        function = "stdDev"
                    subtotals = set(
                        [
                            "average",
                            "countNums",
                            "count",
                            "max",
                            "min",
                            "stdDev",
                            "sum",
                            "var",
                        ]
                    )
                    if function in subtotals:
                        formula = self._table_function_to_formula(
                            function, col_data["name"]
                        )
                    else:
                        formula = self._prepare_formula(function, True)
                        col_data["custom_total"] = formula
                        function = "custom"
                    col_data["total_function"] = function
                    value = user_data.get("total_value", 0)
                    self._write_formula(last_row, col_num, formula, xformat, value)
                elif user_data.get("total_string"):
                    # Total label only (not a function).
                    total_string = user_data["total_string"]
                    col_data["total_string"] = total_string
                    self._write_string(
                        last_row, col_num, total_string, user_data.get("format")
                    )
                # Get the dxf format index.
                if xformat is not None:
                    col_data["format"] = xformat._get_dxf_index()
                # Store the column format for writing the cell data.
                # It doesn't matter if it is undefined.
                col_formats[col_id - 1] = xformat
        # Store the column data.
        table["columns"].append(col_data)
        # Write the column headers to the worksheet.
        if options["header_row"]:
            self._write_string(
                first_row, col_num, col_data["name"], col_data["name_format"]
            )
        col_id += 1
    # Write the cell data if supplied.
    if "data" in options:
        data = options["data"]
        i = 0  # For indexing the row data.
        for row in range(first_data_row, last_data_row + 1):
            j = 0  # For indexing the col data.
            for col in range(first_col, last_col + 1):
                if i < len(data) and j < len(data[i]):
                    token = data[i][j]
                    if j in col_formats:
                        self._write(row, col, token, col_formats[j])
                    else:
                        self._write(row, col, token, None)
                j += 1
            i += 1
    # Write any columns formulas after the user supplied table data to
    # overwrite it if required.
    for col_id, col_num in enumerate(range(first_col, last_col + 1)):
        column_data = table["columns"][col_id]
        if column_data and column_data["formula"]:
            formula_format = col_formats.get(col_id)
            formula = column_data["formula"]
            for row in range(first_data_row, last_data_row + 1):
                self._write_formula(row, col_num, formula, formula_format)
    # Store the table data.
    self.tables.append(table)
    # Store the filter cell positions for use in the autofit calculation.
    if options["autofilter"]:
        for col in range(first_col, last_col + 1):
            # Check that the table autofilter doesn't overlap a worksheet filter.
            if self.filter_cells.get((first_row, col)):
                filter_type, filter_range = self.filter_cells.get((first_row, col))
                if filter_type == "worksheet":
                    raise OverlappingRange(
                        f"Table autofilter range '{cell_range}' overlaps previous "
                        f"Worksheet autofilter range '{filter_range}'."
                    )
            self.filter_cells[(first_row, col)] = ("table", cell_range)
    return 0
@convert_cell_args
def add_sparkline(
    self, row: int, col: int, options: Optional[Dict[str, Any]] = None
) -> Literal[0, -1, -2]:
    """
    Add sparklines to the worksheet.
    Args:
        row: The cell row (zero indexed).
        col: The cell column (zero indexed).
        options: Sparkline formatting options.
    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
        -2: Incorrect parameter or option.
    """
    # Check that row and col are valid without storing the values.
    if self._check_dimensions(row, col, True, True):
        return -1
    # Default location is the cell the sparkline is inserted into; it can
    # be overridden (and extended to multiple cells) via 'location' below.
    sparkline = {"locations": [xl_rowcol_to_cell(row, col)]}
    if options is None:
        options = {}
    # Valid input parameters.
    valid_parameters = {
        "location",
        "range",
        "type",
        "high_point",
        "low_point",
        "negative_points",
        "first_point",
        "last_point",
        "markers",
        "style",
        "series_color",
        "negative_color",
        "markers_color",
        "first_color",
        "last_color",
        "high_color",
        "low_color",
        "max",
        "min",
        "axis",
        "reverse",
        "empty_cells",
        "show_hidden",
        "plot_hidden",
        "date_axis",
        "weight",
    }
    # Check for valid input parameters.
    # NOTE(review): an unknown parameter returns -1 here although the
    # docstring reserves -2 for parameter errors; long-standing behavior,
    # so callers may rely on it — confirm before changing.
    for param_key in options.keys():
        if param_key not in valid_parameters:
            warn(f"Unknown parameter '{param_key}' in add_sparkline()")
            return -1
    # 'range' is a required parameter.
    if "range" not in options:
        warn("Parameter 'range' is required in add_sparkline()")
        return -2
    # Handle the sparkline type.
    spark_type = options.get("type", "line")
    if spark_type not in ("line", "column", "win_loss"):
        warn(
            "Parameter 'type' must be 'line', 'column' "
            "or 'win_loss' in add_sparkline()"
        )
        return -2
    # Excel's internal name for the win/loss type is "stacked".
    if spark_type == "win_loss":
        spark_type = "stacked"
    sparkline["type"] = spark_type
    # We handle single location/range values or list of values.
    if "location" in options:
        if isinstance(options["location"], list):
            sparkline["locations"] = options["location"]
        else:
            sparkline["locations"] = [options["location"]]
    if isinstance(options["range"], list):
        sparkline["ranges"] = options["range"]
    else:
        sparkline["ranges"] = [options["range"]]
    range_count = len(sparkline["ranges"])
    location_count = len(sparkline["locations"])
    # The ranges and locations must match.
    if range_count != location_count:
        warn(
            "Must have the same number of location and range "
            "parameters in add_sparkline()"
        )
        return -2
    # Store the count.
    sparkline["count"] = len(sparkline["locations"])
    # Get the worksheet name for the range conversion below.
    sheetname = quote_sheetname(self.name)
    # Cleanup the input ranges.
    new_ranges = []
    for spark_range in sparkline["ranges"]:
        # Remove the absolute reference $ symbols.
        spark_range = spark_range.replace("$", "")
        # Remove the = from formula.
        spark_range = spark_range.lstrip("=")
        # Convert a simple range into a full Sheet1!A1:D1 range.
        if "!" not in spark_range:
            spark_range = sheetname + "!" + spark_range
        new_ranges.append(spark_range)
    sparkline["ranges"] = new_ranges
    # Cleanup the input locations.
    new_locations = []
    for location in sparkline["locations"]:
        location = location.replace("$", "")
        new_locations.append(location)
    sparkline["locations"] = new_locations
    # Map options.
    sparkline["high"] = options.get("high_point")
    sparkline["low"] = options.get("low_point")
    sparkline["negative"] = options.get("negative_points")
    sparkline["first"] = options.get("first_point")
    sparkline["last"] = options.get("last_point")
    sparkline["markers"] = options.get("markers")
    sparkline["min"] = options.get("min")
    sparkline["max"] = options.get("max")
    sparkline["axis"] = options.get("axis")
    sparkline["reverse"] = options.get("reverse")
    sparkline["hidden"] = options.get("show_hidden")
    sparkline["weight"] = options.get("weight")
    # Map empty cells options to Excel's internal names:
    # "zero" -> 0, "connect" -> "span", anything else -> "gap".
    empty = options.get("empty_cells", "")
    if empty == "zero":
        sparkline["empty"] = 0
    elif empty == "connect":
        sparkline["empty"] = "span"
    else:
        sparkline["empty"] = "gap"
    # Map the date axis range.  Like the data ranges, a bare range is
    # qualified with the worksheet name.
    date_range = options.get("date_axis")
    if date_range and "!" not in date_range:
        date_range = sheetname + "!" + date_range
    sparkline["date_axis"] = date_range
    # Set the sparkline styles from the built-in style table.
    style_id = options.get("style", 0)
    style = _get_sparkline_style(style_id)
    sparkline["series_color"] = style["series"]
    sparkline["negative_color"] = style["negative"]
    sparkline["markers_color"] = style["markers"]
    sparkline["first_color"] = style["first"]
    sparkline["last_color"] = style["last"]
    sparkline["high_color"] = style["high"]
    sparkline["low_color"] = style["low"]
    # Override the style colors with user defined colors.
    self._set_spark_color(sparkline, options, "series_color")
    self._set_spark_color(sparkline, options, "negative_color")
    self._set_spark_color(sparkline, options, "markers_color")
    self._set_spark_color(sparkline, options, "first_color")
    self._set_spark_color(sparkline, options, "last_color")
    self._set_spark_color(sparkline, options, "high_color")
    self._set_spark_color(sparkline, options, "low_color")
    self.sparklines.append(sparkline)
    return 0
@convert_range_args
def set_selection(
    self, first_row: int, first_col: int, last_row: int, last_col: int
) -> None:
    """
    Set the selected cell or cells in a worksheet.
    Args:
        first_row: The first row of the cell range. (zero indexed).
        first_col: The first column of the cell range.
        last_row: The last row of the cell range. (zero indexed).
        last_col: The last column of the cell range.
    Returns:
        0: Nothing.
    """
    # Take the active cell from the original first corner, before any
    # normalization, so a reversed selection direction is preserved.
    active_cell = xl_rowcol_to_cell(first_row, first_col)
    # Normalize the range so first <= last in both dimensions.
    if last_row < first_row:
        first_row, last_row = last_row, first_row
    if last_col < first_col:
        first_col, last_col = last_col, first_col
    sqref = xl_range(first_row, first_col, last_row, last_col)
    # Excel doesn't store an explicit selection for the default cell A1.
    if sqref == "A1":
        return
    self.selections = [[None, active_cell, sqref]]
@convert_cell_args
def set_top_left_cell(self, row: int = 0, col: int = 0) -> None:
    """
    Set the first visible cell at the top left of a worksheet.
    Args:
        row: The cell row (zero indexed).
        col: The cell column (zero indexed).
    Returns:
        0: Nothing.
    """
    # A1 is already the default top left cell so nothing to store.
    if (row, col) == (0, 0):
        return
    self.top_left_cell = xl_rowcol_to_cell(row, col)
def outline_settings(
    self,
    visible: bool = 1,
    symbols_below: bool = 1,
    symbols_right: bool = 1,
    auto_style: bool = 0,
) -> None:
    """
    Control the worksheet outline/grouping settings.
    Args:
        visible: Outlines are visible. Optional, defaults to True.
        symbols_below: Show row outline symbols below the outline bar.
            Optional, defaults to True.
        symbols_right: Show column outline symbols to the right of the
            outline bar. Optional, defaults to True.
        auto_style: Use Automatic style. Optional, defaults to False.
    Returns:
        0: Nothing.
    """
    # Record the settings and flag that they differ from the defaults so
    # they are written out with the worksheet.
    self.outline_changed = True
    self.outline_on = visible
    self.outline_style = auto_style
    self.outline_below = symbols_below
    self.outline_right = symbols_right
@convert_cell_args
def freeze_panes(
    self,
    row: int,
    col: int,
    top_row: Optional[int] = None,
    left_col: Optional[int] = None,
    pane_type: int = 0,
) -> None:
    """
    Create worksheet panes and mark them as frozen.
    Args:
        row: The cell row (zero indexed).
        col: The cell column (zero indexed).
        top_row: Topmost visible row in scrolling region of pane.
        left_col: Leftmost visible column in scrolling region of pane.
    Returns:
        0: Nothing.
    """
    # The scrolling region defaults to starting at the frozen cell.
    top_row = row if top_row is None else top_row
    left_col = col if left_col is None else left_col
    self.panes = [row, col, top_row, left_col, pane_type]
@convert_cell_args
def split_panes(
    self,
    x: float,
    y: float,
    top_row: Optional[int] = None,
    left_col: Optional[int] = None,
) -> None:
    """
    Create worksheet panes and mark them as split.
    Args:
        x: The position for the vertical split.
        y: The position for the horizontal split.
        top_row: Topmost visible row in scrolling region of pane.
        left_col: Leftmost visible row in scrolling region of pane.
    Returns:
        0: Nothing.
    """
    # A split pane is stored exactly like a frozen pane, with pane type 2.
    self.freeze_panes(x, y, top_row, left_col, pane_type=2)
def set_zoom(self, zoom: int = 100) -> None:
    """
    Set the worksheet zoom factor.
    Args:
        zoom: Scale factor: 10 <= zoom <= 400.
    Returns:
        Nothing.
    """
    # Out-of-range values fall back to the default of 100%.
    if not 10 <= zoom <= 400:
        warn(f"Zoom factor '{zoom}' outside range: 10 <= zoom <= 400")
        zoom = 100
    self.zoom = int(zoom)
def set_zoom_to_fit(self) -> None:
    """
    Set the worksheet zoom to selection/fit.
    Note: this only has an effect for chartsheets.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.zoom_to_fit = True
def right_to_left(self) -> None:
    """
    Display the worksheet right to left, for locales that support it in
    some versions of Excel.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.is_right_to_left = True
def hide_zero(self) -> None:
    """
    Hide zero values in worksheet cells.
    Args:
        None.
    Returns:
        Nothing.
    """
    # Implemented by turning off the "show zeros" worksheet view option.
    self.show_zeros = 0
def set_tab_color(self, color: Union[str, Color]) -> None:
    """
    Set the color of the worksheet tab.
    Args:
        color: A #RGB color string or a Color instance.
    Returns:
        Nothing.
    """
    # Color._from_value() normalizes either a string or Color input.
    self.tab_color = Color._from_value(color)
def protect(
    self, password: str = "", options: Optional[Dict[str, Any]] = None
) -> None:
    """
    Set the password and protection options of the worksheet.
    Args:
        password: An optional password string.
        options: A dictionary of worksheet objects to protect.
    Returns:
        Nothing.
    """
    # A non-empty password is stored in Excel's hashed form.
    if password != "":
        password = self._encode_password(password)
    options = options or {}
    # Default values for objects that can be protected.
    defaults = {
        "sheet": True,
        "content": False,
        "objects": False,
        "scenarios": False,
        "format_cells": False,
        "format_columns": False,
        "format_rows": False,
        "insert_columns": False,
        "insert_rows": False,
        "insert_hyperlinks": False,
        "delete_columns": False,
        "delete_rows": False,
        "select_locked_cells": True,
        "sort": False,
        "autofilter": False,
        "pivot_tables": False,
        "select_unlocked_cells": True,
    }
    # Overwrite the defaults with user specified values; warn about any
    # option name that isn't a known protection object.
    for key, value in options.items():
        if key in defaults:
            defaults[key] = value
        else:
            warn(f"Unknown protection object: '{key}'")
    # Set the password after the user defined values.
    defaults["password"] = password
    self.protect_options = defaults
def unprotect_range(
    self,
    cell_range: str,
    range_name: Optional[str] = None,
    password: Optional[str] = None,
) -> int:
    """
    Unprotect ranges within a protected worksheet.
    Args:
        cell_range: The cell or cell range to unprotect.
        range_name: An optional name for the range.
        password: An optional password string. (undocumented)
    Returns:
        0: Success.
        -1: Parameter error.
    """
    if cell_range is None:
        warn("Cell range must be specified in unprotect_range()")
        return -1
    # Sanitize: strip a leading '=' and any absolute '$' markers.
    cell_range = cell_range.lstrip("=").replace("$", "")
    self.num_protected_ranges += 1
    # Default names are numbered sequentially: Range1, Range2, ...
    if range_name is None:
        range_name = f"Range{self.num_protected_ranges}"
    if password:
        password = self._encode_password(password)
    self.protected_ranges.append((cell_range, range_name, password))
    return 0
@convert_cell_args
def insert_button(
    self, row: int, col: int, options: Optional[Dict[str, Any]] = None
) -> Literal[0, -1]:
    """
    Insert a button form object into the worksheet.
    Args:
        row: The cell row (zero indexed).
        col: The cell column (zero indexed).
        options: Button formatting options.
    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
    """
    # Check insert (row, col) without storing.
    if self._check_dimensions(row, col, True, True):
        warn(f"Cannot insert button at ({row}, {col}).")
        return -1
    if options is None:
        options = {}
    # Buttons are numbered sequentially in insertion order and sized to
    # the default cell dimensions.
    button = ButtonType(
        row,
        col,
        self.default_row_height,
        self.default_col_width,
        1 + len(self.buttons_list),
        options,
    )
    self.buttons_list.append(button)
    # Buttons are written to the VML part of the workbook file.
    self.has_vml = True
    return 0
@convert_cell_args
def insert_checkbox(
    self, row: int, col: int, boolean: bool, cell_format: Optional[Format] = None
):
    """
    Insert a boolean checkbox in a worksheet cell.
    Args:
        row: The cell row (zero indexed).
        col: The cell column (zero indexed).
        boolean: The boolean value to display as a checkbox.
        cell_format: Cell Format object. (optional)
    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
    """
    # A user supplied format must have the checkbox property enabled.
    if cell_format and not cell_format.checkbox:
        # This needs to be fixed with a clone.
        cell_format.set_checkbox()
    if not cell_format:
        # No format supplied: lazily create and reuse a default
        # checkbox format for the worksheet.
        if not self.default_checkbox_format:
            self.default_checkbox_format = self.workbook_add_format()
            self.default_checkbox_format.set_checkbox()
        cell_format = self.default_checkbox_format
    return self._write_boolean(row, col, boolean, cell_format)
###########################################################################
#
# Public API. Page Setup methods.
#
###########################################################################
def set_landscape(self) -> None:
    """
    Set the page orientation as landscape.
    Args:
        None.
    Returns:
        Nothing.
    """
    # Orientation 0 == landscape; flag the change so page setup is written.
    self.orientation = 0
    self.page_setup_changed = True
def set_portrait(self) -> None:
    """
    Set the page orientation as portrait.
    Args:
        None.
    Returns:
        Nothing.
    """
    # Orientation 1 == portrait; flag the change so page setup is written.
    self.orientation = 1
    self.page_setup_changed = True
def set_page_view(self, view: Literal[0, 1, 2] = 1) -> None:
    """
    Set the page view mode.
    Args:
        view: 0 = Normal view mode,
              1 = Page view mode (the default),
              2 = Page break view mode.
    Returns:
        Nothing.
    """
    self.page_view = view
def set_pagebreak_view(self) -> None:
    """
    Set the page view mode to "page break preview".
    Args:
        None.
    Returns:
        Nothing.
    """
    # Equivalent to set_page_view(2).
    self.page_view = 2
def set_paper(self, paper_size: Union[Literal[1, 9], int]) -> None:
    """
    Set the paper type. US Letter = 1, A4 = 9.
    Args:
        paper_size: Paper index.
    Returns:
        Nothing.
    """
    # Index 0 (printer default) is ignored, matching historic behavior.
    if paper_size:
        self.paper_size = paper_size
        self.page_setup_changed = True
def center_horizontally(self) -> None:
    """
    Center the printed page horizontally.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.hcenter = 1
    self.print_options_changed = True
def center_vertically(self) -> None:
    """
    Center the printed page vertically.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.vcenter = 1
    self.print_options_changed = True
def set_margins(
    self,
    left: float = 0.7,
    right: float = 0.7,
    top: float = 0.75,
    bottom: float = 0.75,
) -> None:
    """
    Set all the page margins in inches.
    The defaults match Excel's "Normal" margin preset.
    Args:
        left: Left margin.
        right: Right margin.
        top: Top margin.
        bottom: Bottom margin.
    Returns:
        Nothing.
    """
    self.margin_top = top
    self.margin_bottom = bottom
    self.margin_left = left
    self.margin_right = right
def set_header(
    self, header: str = "", options: Optional[Dict[str, Any]] = None, margin=None
) -> None:
    """
    Set the page header caption and optional margin.
    Args:
        header: Header string, possibly with "&[Picture]"/"&G" image
            placeholders.
        options: Header options, mainly for images.
        margin: Header margin. (Deprecated; use options["margin"].)
    Returns:
        Nothing.
    """
    header_orig = header
    header = header.replace("&[Picture]", "&G")
    if len(header) > 255:
        warn("Header string cannot be longer than Excel's limit of 255 characters")
        return
    if options is not None:
        # For backward compatibility allow options to be the margin.
        if not isinstance(options, dict):
            options = {"margin": options}
    else:
        options = {}
    # Copy the user defined options so they aren't modified.
    options = options.copy()
    # For backward compatibility.
    if margin is not None:
        options["margin"] = margin
    # Reset the list in case the function is called more than once.
    self.header_images = []
    if options.get("image_left"):
        options["image_data"] = options.get("image_data_left")
        image = self._image_from_source(options.get("image_left"), options)
        image._header_position = "LH"
        self.header_images.append(image)
    if options.get("image_center"):
        options["image_data"] = options.get("image_data_center")
        image = self._image_from_source(options.get("image_center"), options)
        image._header_position = "CH"
        self.header_images.append(image)
    if options.get("image_right"):
        options["image_data"] = options.get("image_data_right")
        image = self._image_from_source(options.get("image_right"), options)
        image._header_position = "RH"
        self.header_images.append(image)
    # Each image must have a matching "&G" placeholder in the string.
    placeholder_count = header.count("&G")
    image_count = len(self.header_images)
    if placeholder_count != image_count:
        # Fix: this message previously said "footer images" due to a
        # copy-paste from set_footer().
        warn(
            f"Number of header images '{image_count}' doesn't match placeholder "
            f"count '{placeholder_count}' in string: {header_orig}"
        )
        self.header_images = []
        return
    if "align_with_margins" in options:
        self.header_footer_aligns = options["align_with_margins"]
    if "scale_with_doc" in options:
        self.header_footer_scales = options["scale_with_doc"]
    self.header = header
    self.margin_header = options.get("margin", 0.3)
    self.header_footer_changed = True
    if image_count:
        self.has_header_vml = True
def set_footer(
    self, footer: str = "", options: Optional[Dict[str, Any]] = None, margin=None
) -> None:
    """
    Set the page footer caption and optional margin.
    Args:
        footer: Footer string, possibly with "&[Picture]"/"&G" image
            placeholders.
        options: Footer options, mainly for images.
        margin: Footer margin. (Deprecated; use options["margin"].)
    Returns:
        Nothing.
    """
    footer_orig = footer
    footer = footer.replace("&[Picture]", "&G")
    if len(footer) > 255:
        warn("Footer string cannot be longer than Excel's limit of 255 characters")
        return
    # For backward compatibility a non-dict options value is the margin.
    if options is None:
        options = {}
    elif not isinstance(options, dict):
        options = {"margin": options}
    # Copy the user defined options so they aren't modified.
    options = options.copy()
    # For backward compatibility.
    if margin is not None:
        options["margin"] = margin
    # Reset the list in case the function is called more than once.
    self.footer_images = []
    # Collect the left/center/right footer images, in that order.
    for position, source_key, data_key in (
        ("LF", "image_left", "image_data_left"),
        ("CF", "image_center", "image_data_center"),
        ("RF", "image_right", "image_data_right"),
    ):
        source = options.get(source_key)
        if source:
            options["image_data"] = options.get(data_key)
            image = self._image_from_source(source, options)
            image._header_position = position
            self.footer_images.append(image)
    # Each image must have a matching "&G" placeholder in the string.
    placeholder_count = footer.count("&G")
    image_count = len(self.footer_images)
    if placeholder_count != image_count:
        warn(
            f"Number of footer images '{image_count}' doesn't match placeholder "
            f"count '{placeholder_count}' in string: {footer_orig}"
        )
        self.footer_images = []
        return
    if "align_with_margins" in options:
        self.header_footer_aligns = options["align_with_margins"]
    if "scale_with_doc" in options:
        self.header_footer_scales = options["scale_with_doc"]
    self.footer = footer
    self.margin_footer = options.get("margin", 0.3)
    self.header_footer_changed = True
    if image_count:
        self.has_header_vml = True
def repeat_rows(self, first_row: int, last_row: Optional[int] = None) -> None:
    """
    Set the rows to repeat at the top of each printed page.
    Args:
        first_row: Start row for range.
        last_row: End row for range. Defaults to first_row.
    Returns:
        Nothing.
    """
    if last_row is None:
        last_row = first_row
    # Excel row ranges are 1 based, like "$1:$2".
    area = f"${first_row + 1}:${last_row + 1}"
    # Fully qualify the area with the sheet name: "Sheet1!$1:$2".
    self.repeat_row_range = quote_sheetname(self.name) + "!" + area
@convert_column_args
def repeat_columns(self, first_col: int, last_col: Optional[int] = None) -> None:
    """
    Set the columns to repeat at the left hand side of each printed page.
    Args:
        first_col: Start column for range.
        last_col: End column for range. Defaults to first_col.
    Returns:
        Nothing.
    """
    if last_col is None:
        last_col = first_col
    # Convert to absolute A1 notation, giving a range like "$C:$D".
    area = xl_col_to_name(first_col, 1) + ":" + xl_col_to_name(last_col, 1)
    # Fully qualify the area with the sheet name: "Sheet2!$C:$D".
    self.repeat_col_range = quote_sheetname(self.name) + "!" + area
def hide_gridlines(self, option: Literal[0, 1, 2] = 1) -> None:
    """
    Set the option to hide gridlines on the screen and the printed page.
    Args:
        option: 0 : Don't hide gridlines.
                1 : Hide printed gridlines only.
                2 : Hide screen and printed gridlines.
    Returns:
        Nothing.
    """
    # Printed gridlines are only on for option 0; screen gridlines stay
    # on for options 0 and 1.
    self.print_gridlines = 1 if option == 0 else 0
    self.screen_gridlines = 1 if option in (0, 1) else 0
    if option == 0:
        # Printing gridlines is a non-default option that must be
        # written out explicitly.
        self.print_options_changed = True
def print_row_col_headers(self) -> None:
    """
    Set the option to print the row and column headers on the printed
    page.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.print_headers = True
    self.print_options_changed = True
def hide_row_col_headers(self) -> None:
    """
    Set the option to hide the row and column headers on the worksheet.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.row_col_headers = True
@convert_range_args
def print_area(
    self, first_row: int, first_col: int, last_row: int, last_col: int
) -> Literal[0, -1]:
    """
    Set the print area in the current worksheet.
    Args:
        first_row: The first row of the cell range. (zero indexed).
        first_col: The first column of the cell range.
        last_row: The last row of the cell range. (zero indexed).
        last_col: The last column of the cell range.
    Returns:
        0: Success.
        -1: Row or column is out of worksheet bounds.
    """
    # The full worksheet is the same as no print area for Excel, so it
    # is ignored.
    covers_whole_sheet = (
        first_row == 0
        and first_col == 0
        and last_row == self.xls_rowmax - 1
        and last_col == self.xls_colmax - 1
    )
    if covers_whole_sheet:
        return -1
    # Build up the print area range like "Sheet1!$A$1:$C$13".
    self.print_area_range = self._convert_name_area(
        first_row, first_col, last_row, last_col
    )
    return 0
def print_across(self) -> None:
    """
    Set the order in which pages are printed: across then down.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.page_order = 1
    self.page_setup_changed = True
def fit_to_pages(self, width: int, height: int) -> None:
    """
    Fit the printed area to a specific number of pages both vertically
    and horizontally.
    Args:
        width: Number of pages horizontally.
        height: Number of pages vertically.
    Returns:
        Nothing.
    """
    self.fit_page = 1
    self.fit_width = width
    self.fit_height = height
    self.page_setup_changed = True
def set_start_page(self, start_page: int) -> None:
    """
    Set the start page number when printing.
    Args:
        start_page: Start page number.
    Returns:
        Nothing.
    """
    self.page_start = start_page
def set_print_scale(self, scale: int) -> None:
    """
    Set the scale factor for the printed page.
    Args:
        scale: Print scale. 10 <= scale <= 400.
    Returns:
        Nothing.
    """
    # Out-of-range values are rejected (unlike set_zoom() which clamps).
    if not 10 <= scale <= 400:
        warn(f"Print scale '{scale}' outside range: 10 <= scale <= 400")
        return
    # "Fit to page" and print scale are mutually exclusive.
    self.fit_page = 0
    self.print_scale = int(scale)
    self.page_setup_changed = True
def print_black_and_white(self) -> None:
    """
    Set the option to print the worksheet in black and white.
    Args:
        None.
    Returns:
        Nothing.
    """
    self.black_white = True
    self.page_setup_changed = True
def set_h_pagebreaks(self, breaks: List[int]) -> None:
    """
    Set the horizontal page breaks on a worksheet.
    Args:
        breaks: List of rows where the page breaks should be added.
    Returns:
        Nothing.
    """
    self.hbreaks = breaks
def set_v_pagebreaks(self, breaks: List[int]) -> None:
    """
    Set the vertical page breaks on a worksheet.

    Args:
        breaks: List of columns where the page breaks should be added.

    Returns:
        Nothing.

    """
    # Stored as-is here; de-duplicated/sorted later by _sort_pagebreaks().
    self.vbreaks = breaks
def set_vba_name(self, name: Optional[str] = None) -> None:
    """
    Set the VBA name for the worksheet. By default this is the
    same as the sheet name: i.e., Sheet1 etc.

    Args:
        name: The VBA name for the worksheet.

    Returns:
        Nothing.

    """
    if name is None:
        # Fall back to Excel's default naming scheme: Sheet1, Sheet2, ...
        self.vba_codename = f"Sheet{self.index + 1}"
    else:
        self.vba_codename = name
def ignore_errors(self, options: Optional[Dict[str, Any]] = None) -> Literal[0, -1]:
    """
    Ignore various Excel errors/warnings in a worksheet for user defined
    ranges.

    Args:
        options: A dict of ignore errors keys with cell range values.

    Returns:
        0: Success.
        -1: Incorrect parameter or option.

    """
    if options is None:
        return -1

    # Work on a copy so the caller's dict isn't modified.
    options = options.copy()

    # The set of ignorable error types supported by Excel.
    valid_parameters = {
        "number_stored_as_text",
        "eval_error",
        "formula_differs",
        "formula_range",
        "formula_unlocked",
        "empty_cell_reference",
        "list_data_validation",
        "calculated_column",
        "two_digit_text_year",
    }

    # Reject the input if any unknown key is present.
    unknown = [key for key in options if key not in valid_parameters]
    if unknown:
        warn(f"Unknown parameter '{unknown[0]}' in ignore_errors()")
        return -1

    self.ignored_errors = options
    return 0
###########################################################################
#
# Private API.
#
###########################################################################
def _initialize(self, init_data) -> None:
    # Copy the shared configuration passed down from the parent workbook's
    # add_worksheet() call into worksheet instance attributes.
    self.name = init_data["name"]
    self.index = init_data["index"]
    self.str_table = init_data["str_table"]
    self.worksheet_meta = init_data["worksheet_meta"]
    self.constant_memory = init_data["constant_memory"]
    self.tmpdir = init_data["tmpdir"]
    self.date_1904 = init_data["date_1904"]
    self.strings_to_numbers = init_data["strings_to_numbers"]
    self.strings_to_formulas = init_data["strings_to_formulas"]
    self.strings_to_urls = init_data["strings_to_urls"]
    self.nan_inf_to_errors = init_data["nan_inf_to_errors"]
    self.default_date_format = init_data["default_date_format"]
    self.default_url_format = init_data["default_url_format"]
    self.workbook_add_format = init_data["workbook_add_format"]
    self.excel2003_style = init_data["excel2003_style"]
    self.remove_timezone = init_data["remove_timezone"]
    self.max_url_length = init_data["max_url_length"]
    self.use_future_functions = init_data["use_future_functions"]
    self.embedded_images = init_data["embedded_images"]
    self.default_row_height = init_data["default_row_height"]
    self.default_col_width = init_data["default_col_width"]
    self.max_digit_width = init_data["max_digit_width"]
    self.cell_padding = init_data["cell_padding"]
    self.max_col_width = init_data["max_col_width"]

    # Remember the pre-override row height for later comparisons.
    self.original_row_height = self.default_row_height

    # Excel 2003 style uses different defaults for row height and margins.
    if self.excel2003_style:
        self.original_row_height = 17
        self.default_row_height = 17
        self.margin_left = 0.75
        self.margin_right = 0.75
        self.margin_top = 1
        self.margin_bottom = 1
        self.margin_header = 0.5
        self.margin_footer = 0.5
        self.header_footer_aligns = False

    # Open a temp filehandle to store row data in constant_memory mode.
    if self.constant_memory:
        # This is sub-optimal but we need to create a temp file
        # with utf8 encoding in Python < 3.
        (fd, filename) = tempfile.mkstemp(dir=self.tmpdir)
        os.close(fd)
        self.row_data_filename = filename
        # pylint: disable=consider-using-with
        self.row_data_fh = open(filename, mode="w+", encoding="utf-8")

        # Set as the worksheet filehandle until the file is assembled.
        self.fh = self.row_data_fh
def _assemble_xml_file(self) -> None:
    # Assemble and write the XML file. The elements below are written in
    # the order required by the XLSX worksheet schema; do not reorder.

    # Write the XML declaration.
    self._xml_declaration()

    # Write the root worksheet element.
    self._write_worksheet()

    # Write the worksheet properties.
    self._write_sheet_pr()

    # Write the worksheet dimensions.
    self._write_dimension()

    # Write the sheet view properties.
    self._write_sheet_views()

    # Write the sheet format properties.
    self._write_sheet_format_pr()

    # Write the sheet column info.
    self._write_cols()

    # Write the worksheet data such as rows columns and cells.
    if not self.constant_memory:
        self._write_sheet_data()
    else:
        self._write_optimized_sheet_data()

    # Write the sheetProtection element.
    self._write_sheet_protection()

    # Write the protectedRanges element.
    self._write_protected_ranges()

    # Write the phoneticPr element.
    if self.excel2003_style:
        self._write_phonetic_pr()

    # Write the autoFilter element.
    self._write_auto_filter()

    # Write the mergeCells element.
    self._write_merge_cells()

    # Write the conditional formats.
    self._write_conditional_formats()

    # Write the dataValidations element.
    self._write_data_validations()

    # Write the hyperlink element.
    self._write_hyperlinks()

    # Write the printOptions element.
    self._write_print_options()

    # Write the worksheet page_margins.
    self._write_page_margins()

    # Write the worksheet page setup.
    self._write_page_setup()

    # Write the headerFooter element.
    self._write_header_footer()

    # Write the rowBreaks element.
    self._write_row_breaks()

    # Write the colBreaks element.
    self._write_col_breaks()

    # Write the ignoredErrors element.
    self._write_ignored_errors()

    # Write the drawing element.
    self._write_drawings()

    # Write the legacyDrawing element.
    self._write_legacy_drawing()

    # Write the legacyDrawingHF element.
    self._write_legacy_drawing_hf()

    # Write the picture element, for the background.
    self._write_picture()

    # Write the tableParts element.
    self._write_table_parts()

    # Write the extLst elements.
    self._write_ext_list()

    # Close the worksheet tag.
    self._xml_end_tag("worksheet")

    # Close the file.
    self._xml_close()
def _check_dimensions(
self, row: int, col: int, ignore_row=False, ignore_col=False
) -> int:
# Check that row and col are valid and store the max and min
# values for use in other methods/elements. The ignore_row /
# ignore_col flags is used to indicate that we wish to perform
# the dimension check without storing the value. The ignore
# flags are use by set_row() and data_validate.
# Check that the row/col are within the worksheet bounds.
if row < 0 or col < 0:
return -1
if row >= self.xls_rowmax or col >= self.xls_colmax:
return -1
# In constant_memory mode we don't change dimensions for rows
# that are already written.
if not ignore_row and not ignore_col and self.constant_memory:
if row < self.previous_row:
return -2
if not ignore_row:
if self.dim_rowmin is None or row < self.dim_rowmin:
self.dim_rowmin = row
if self.dim_rowmax is None or row > self.dim_rowmax:
self.dim_rowmax = row
if not ignore_col:
if self.dim_colmin is None or col < self.dim_colmin:
self.dim_colmin = col
if self.dim_colmax is None or col > self.dim_colmax:
self.dim_colmax = col
return 0
def _convert_date_time(self, dt_obj):
    # Convert a datetime object to an Excel serial date and time, honoring
    # the workbook's 1904 epoch and timezone-removal settings.
    return _datetime_to_excel_datetime(dt_obj, self.date_1904, self.remove_timezone)
def _convert_name_area(self, row_num_1, col_num_1, row_num_2, col_num_2):
    """
    Convert zero-indexed rows and columns to the format required by
    worksheet named ranges, e.g. "Sheet1!$A$1:$C$13".
    """
    # Convert to absolute A1 notation components.
    col_1 = xl_col_to_name(col_num_1, 1)
    col_2 = xl_col_to_name(col_num_2, 1)
    row_1 = f"${row_num_1 + 1}"
    row_2 = f"${row_num_2 + 1}"

    # Handle the special cases that refer to whole columns or whole rows.
    if row_num_1 == 0 and row_num_2 == self.xls_rowmax - 1:
        start, end, row_col_only = col_1, col_2, True
    elif col_num_1 == 0 and col_num_2 == self.xls_colmax - 1:
        start, end, row_col_only = row_1, row_2, True
    else:
        start, end, row_col_only = col_1 + row_1, col_2 + row_2, False

    # A repeated range is only written once (if it isn't a special case).
    if start == end and not row_col_only:
        area = start
    else:
        area = f"{start}:{end}"

    # Prefix with the (possibly quoted) sheet name.
    return f"{quote_sheetname(self.name)}!{area}"
def _sort_pagebreaks(self, breaks):
# This is an internal method used to filter elements of a list of
# pagebreaks used in the _store_hbreak() and _store_vbreak() methods.
# It:
# 1. Removes duplicate entries from the list.
# 2. Sorts the list.
# 3. Removes 0 from the list if present.
if not breaks:
return []
breaks_set = set(breaks)
if 0 in breaks_set:
breaks_set.remove(0)
breaks_list = list(breaks_set)
breaks_list.sort()
# The Excel 2007 specification says that the maximum number of page
# breaks is 1026. However, in practice it is actually 1023.
max_num_breaks = 1023
if len(breaks_list) > max_num_breaks:
breaks_list = breaks_list[:max_num_breaks]
return breaks_list
def _extract_filter_tokens(self, expression):
# Extract the tokens from the filter expression. The tokens are mainly
# non-whitespace groups. The only tricky part is to extract string
# tokens that contain whitespace and/or quoted double quotes (Excel's
# escaped quotes).
#
# Examples: 'x < 2000'
# 'x > 2000 and x < 5000'
# 'x = "foo"'
# 'x = "foo bar"'
# 'x = "foo "" bar"'
#
if not expression:
return []
token_re = re.compile(r'"(?:[^"]|"")*"|\S+')
tokens = token_re.findall(expression)
new_tokens = []
# Remove single leading and trailing quotes and un-escape other quotes.
for token in tokens:
if token.startswith('"'):
token = token[1:]
if token.endswith('"'):
token = token[:-1]
token = token.replace('""', '"')
new_tokens.append(token)
return new_tokens
def _parse_filter_expression(self, expression, tokens):
# Converts the tokens of a possibly conditional expression into 1 or 2
# sub expressions for further parsing.
#
# Examples:
# ('x', '==', 2000) -> exp1
# ('x', '>', 2000, 'and', 'x', '<', 5000) -> exp1 and exp2
if len(tokens) == 7:
# The number of tokens will be either 3 (for 1 expression)
# or 7 (for 2 expressions).
conditional = tokens[3]
if re.match("(and|&&)", conditional):
conditional = 0
elif re.match(r"(or|\|\|)", conditional):
conditional = 1
else:
warn(
f"Token '{conditional}' is not a valid conditional "
f"in filter expression '{expression}'"
)
expression_1 = self._parse_filter_tokens(expression, tokens[0:3])
expression_2 = self._parse_filter_tokens(expression, tokens[4:7])
return expression_1 + [conditional] + expression_2
return self._parse_filter_tokens(expression, tokens)
def _parse_filter_tokens(self, expression, tokens):
# Parse the 3 tokens of a filter expression and return the operator
# and token. The use of numbers instead of operators is a legacy of
# Spreadsheet::WriteExcel.
operators = {
"==": 2,
"=": 2,
"=~": 2,
"eq": 2,
"!=": 5,
"!~": 5,
"ne": 5,
"<>": 5,
"<": 1,
"<=": 3,
">": 4,
">=": 6,
}
operator = operators.get(tokens[1], None)
token = tokens[2]
# Special handling of "Top" filter expressions.
if re.match("top|bottom", tokens[0].lower()):
value = int(tokens[1])
if value < 1 or value > 500:
warn(
f"The value '{token}' in expression '{expression}' "
f"must be in the range 1 to 500"
)
token = token.lower()
if token not in ("items", "%"):
warn(
f"The type '{token}' in expression '{expression}' "
f"must be either 'items' or '%%'"
)
if tokens[0].lower() == "top":
operator = 30
else:
operator = 32
if tokens[2] == "%":
operator += 1
token = str(value)
if not operator and tokens[0]:
warn(
f"Token '{token[0]}' is not a valid operator "
f"in filter expression '{expression}'."
)
# Special handling for Blanks/NonBlanks.
if re.match("blanks|nonblanks", token.lower()):
# Only allow Equals or NotEqual in this context.
if operator not in (2, 5):
warn(
f"The operator '{tokens[1]}' in expression '{expression}' "
f"is not valid in relation to Blanks/NonBlanks'."
)
token = token.lower()
# The operator should always be 2 (=) to flag a "simple" equality
# in the binary record. Therefore we convert <> to =.
if token == "blanks":
if operator == 5:
token = " "
else:
if operator == 5:
operator = 2
token = "blanks"
else:
operator = 5
token = " "
# if the string token contains an Excel match character then change the
# operator type to indicate a non "simple" equality.
if operator == 2 and re.search("[*?]", token):
operator = 22
return [operator, token]
def _encode_password(self, password) -> str:
# Hash a worksheet password. Based on the algorithm in
# ECMA-376-4:2016, Office Open XML File Formats — Transitional
# Migration Features, Additional attributes for workbookProtection
# element (Part 1, §18.2.29).
digest = 0x0000
for char in password[::-1]:
digest = ((digest >> 14) & 0x01) | ((digest << 1) & 0x7FFF)
digest ^= ord(char)
digest = ((digest >> 14) & 0x01) | ((digest << 1) & 0x7FFF)
digest ^= len(password)
digest ^= 0xCE4B
return f"{digest:X}"
def _image_from_source(self, source, options: Optional[Dict[str, Any]] = None):
    """
    Backward compatibility utility: normalize a filename, BytesIO stream
    or existing Image object into an Image instance.
    """
    if isinstance(source, Image):
        return source

    if options and options.get("image_data"):
        # Legacy API: source is the display name; the data is in options.
        image = Image(options["image_data"])
        image.image_name = source
        return image

    return Image(source)
def _prepare_image(
    self,
    image: Image,
    image_id: int,
    drawing_id: int,
) -> None:
    """
    Set up an image drawing within the worksheet drawing container.

    Args:
        image: The Image object to position and link.
        image_id: Globally unique image id, used for the media file name.
        drawing_id: Id of this worksheet's drawing container.

    Returns:
        Nothing.

    """
    # Get the effective image width and height in pixels.
    width = image._width * image._x_scale
    height = image._height * image._y_scale

    # Scale by non 96dpi resolutions.
    width *= 96.0 / image._x_dpi
    height *= 96.0 / image._y_dpi

    dimensions = self._position_object_emus(
        image._col,
        image._row,
        image._x_offset,
        image._y_offset,
        width,
        height,
        image._anchor,
    )

    # Convert from pixels to emus.
    width = int(0.5 + (width * 9525))
    height = int(0.5 + (height * 9525))

    # Create a Drawing obj to use with worksheet unless one already exists.
    if not self.drawing:
        drawing = Drawing()
        drawing.embedded = 1
        self.drawing = drawing

        self.external_drawing_links.append(
            ["/drawing", "../drawings/drawing" + str(drawing_id) + ".xml", None]
        )
    else:
        drawing = self.drawing

    drawing_object = DrawingInfo()
    drawing_object._drawing_type = DrawingTypes.IMAGE
    drawing_object._dimensions = dimensions
    drawing_object._description = image.image_name
    drawing_object._width = width
    drawing_object._height = height
    drawing_object._shape = None
    drawing_object._anchor = image._anchor
    drawing_object._rel_index = 0
    drawing_object._decorative = image._decorative

    # A user supplied description overrides the image file name.
    if image.description is not None:
        drawing_object._description = image.description

    # Set up the hyperlink rel for the image, if it has one.
    if image._url:
        url = image._url
        target = url._target()
        target_mode = url._target_mode()

        if not self.drawing_rels.get(url._link):
            self.drawing_links.append(["/hyperlink", target, target_mode])

        url._rel_index = self._get_drawing_rel_index(url._link)
        drawing_object._url = url

    # Images with the same digest share a single media file.
    if not self.drawing_rels.get(image._digest):
        self.drawing_links.append(
            [
                "/image",
                "../media/image" + str(image_id) + "." + image._image_extension,
            ]
        )

    drawing_object._rel_index = self._get_drawing_rel_index(image._digest)

    drawing._add_drawing_object(drawing_object)
def _prepare_shape(self, index, drawing_id) -> None:
    """
    Set up a shape (textbox) drawing within the worksheet drawing
    container.

    Args:
        index: Index of the shape in self.shapes.
        drawing_id: Id of this worksheet's drawing container.

    Returns:
        Nothing.

    """
    (
        row,
        col,
        x_offset,
        y_offset,
        x_scale,
        y_scale,
        text,
        anchor,
        options,
        description,
        decorative,
    ) = self.shapes[index]

    # Fall back to a default textbox size of 3 columns by 6 rows.
    width = options.get("width", self.default_col_width * 3)
    height = options.get("height", self.default_row_height * 6)

    width *= x_scale
    height *= y_scale

    dimensions = self._position_object_emus(
        col, row, x_offset, y_offset, width, height, anchor
    )

    # Convert from pixels to emus.
    width = int(0.5 + (width * 9525))
    height = int(0.5 + (height * 9525))

    # Create a Drawing obj to use with worksheet unless one already exists.
    if not self.drawing:
        drawing = Drawing()
        drawing.embedded = 1
        self.drawing = drawing

        self.external_drawing_links.append(
            ["/drawing", "../drawings/drawing" + str(drawing_id) + ".xml", None]
        )
    else:
        drawing = self.drawing

    shape = Shape("rect", "TextBox", options)
    shape.text = text

    drawing_object = DrawingInfo()
    drawing_object._drawing_type = DrawingTypes.SHAPE
    drawing_object._dimensions = dimensions
    drawing_object._width = width
    drawing_object._height = height
    drawing_object._description = description
    drawing_object._shape = shape
    drawing_object._anchor = anchor
    drawing_object._rel_index = 0
    drawing_object._decorative = decorative

    # Set up the hyperlink rel for the shape, if it has one.
    url = Url.from_options(options)
    if url:
        target = url._target()
        target_mode = url._target_mode()

        if not self.drawing_rels.get(url._link):
            self.drawing_links.append(["/hyperlink", target, target_mode])

        url._rel_index = self._get_drawing_rel_index(url._link)
        drawing_object._url = url

    drawing._add_drawing_object(drawing_object)
def _prepare_header_image(self, image_id, image) -> None:
# Set up an image without a drawing object for header/footer images.
# Strip the extension from the filename.
image.image_name = re.sub(r"\..*$", "", image.image_name)
if not self.vml_drawing_rels.get(image._digest):
self.vml_drawing_links.append(
[
"/image",
"../media/image" + str(image_id) + "." + image._image_extension,
]
)
image._ref_id = self._get_vml_drawing_rel_index(image._digest)
self.header_images_list.append(image)
def _prepare_background(self, image_id, image_extension) -> None:
# Set up an image without a drawing object for backgrounds.
self.external_background_links.append(
["/image", "../media/image" + str(image_id) + "." + image_extension]
)
def _prepare_chart(self, index, chart_id, drawing_id) -> None:
    """
    Set up a chart drawing within the worksheet drawing container.

    Args:
        index: Index of the chart in self.charts.
        chart_id: Globally unique chart id, used for the chart XML name.
        drawing_id: Id of this worksheet's drawing container.

    Returns:
        Nothing.

    """
    (
        row,
        col,
        chart,
        x_offset,
        y_offset,
        x_scale,
        y_scale,
        anchor,
        description,
        decorative,
    ) = self.charts[index]

    chart.id = chart_id - 1

    # Use user specified dimensions, if any.
    width = int(0.5 + (chart.width * x_scale))
    height = int(0.5 + (chart.height * y_scale))

    dimensions = self._position_object_emus(
        col, row, x_offset, y_offset, width, height, anchor
    )

    # Set the chart name for the embedded object if it has been specified.
    name = chart.chart_name

    # Create a Drawing obj to use with worksheet unless one already exists.
    if not self.drawing:
        drawing = Drawing()
        drawing.embedded = 1
        self.drawing = drawing

        self.external_drawing_links.append(
            ["/drawing", "../drawings/drawing" + str(drawing_id) + ".xml"]
        )
    else:
        drawing = self.drawing

    drawing_object = DrawingInfo()
    drawing_object._drawing_type = DrawingTypes.CHART
    drawing_object._dimensions = dimensions
    drawing_object._width = width
    drawing_object._height = height
    drawing_object._name = name
    drawing_object._shape = None
    drawing_object._anchor = anchor
    drawing_object._rel_index = self._get_drawing_rel_index()
    drawing_object._description = description
    drawing_object._decorative = decorative

    drawing._add_drawing_object(drawing_object)

    self.drawing_links.append(
        ["/chart", "../charts/chart" + str(chart_id) + ".xml"]
    )
def _position_object_emus(
self, col_start, row_start, x1, y1, width, height, anchor
):
# Calculate the vertices that define the position of a graphical
# object within the worksheet in EMUs.
#
# The vertices are expressed as English Metric Units (EMUs). There are
# 12,700 EMUs per point. Therefore, 12,700 * 3 /4 = 9,525 EMUs per
# pixel
(
col_start,
row_start,
x1,
y1,
col_end,
row_end,
x2,
y2,
x_abs,
y_abs,
) = self._position_object_pixels(
col_start, row_start, x1, y1, width, height, anchor
)
# Convert the pixel values to EMUs. See above.
x1 = int(0.5 + 9525 * x1)
y1 = int(0.5 + 9525 * y1)
x2 = int(0.5 + 9525 * x2)
y2 = int(0.5 + 9525 * y2)
x_abs = int(0.5 + 9525 * x_abs)
y_abs = int(0.5 + 9525 * y_abs)
return (col_start, row_start, x1, y1, col_end, row_end, x2, y2, x_abs, y_abs)
# Calculate the vertices that define the position of a graphical object
# within the worksheet in pixels.
#
# +------------+------------+
# | A | B |
# +-----+------------+------------+
# | |(x1,y1) | |
# | 1 |(A1)._______|______ |
# | | | | |
# | | | | |
# +-----+----| OBJECT |-----+
# | | | | |
# | 2 | |______________. |
# | | | (B2)|
# | | | (x2,y2)|
# +---- +------------+------------+
#
# Example of an object that covers some of the area from cell A1 to B2.
#
# Based on the width and height of the object we need to calculate 8 vars:
#
# col_start, row_start, col_end, row_end, x1, y1, x2, y2.
#
# We also calculate the absolute x and y position of the top left vertex of
# the object. This is required for images.
#
# The width and height of the cells that the object occupies can be
# variable and have to be taken into account.
#
# The values of col_start and row_start are passed in from the calling
# function. The values of col_end and row_end are calculated by
# subtracting the width and height of the object from the width and
# height of the underlying cells.
#
def _position_object_pixels(
    self, col_start, row_start, x1, y1, width, height, anchor
):
    """
    Calculate the vertices that define the position of a graphical
    object within the worksheet in pixels. See the diagram in the
    comment block above this method.

    Args:
        col_start: Col containing upper left corner of object.
        row_start: Row containing top left corner of object.
        x1: Distance to left side of object.
        y1: Distance to top of object.
        width: Width of object frame.
        height: Height of object frame.
        anchor: Object anchor type (4 means size is unaffected by
            hidden rows/cols).

    Returns:
        A list [col_start, row_start, x1, y1, col_end, row_end, x2, y2,
        x_abs, y_abs] where (col_end, row_end, x2, y2) locate the bottom
        right vertex and (x_abs, y_abs) is the absolute top left position.

    """
    x_abs = 0
    y_abs = 0

    # Adjust start column for negative offsets.
    # pylint: disable=chained-comparison
    while x1 < 0 and col_start > 0:
        x1 += self._size_col(col_start - 1)
        col_start -= 1

    # Adjust start row for negative offsets.
    while y1 < 0 and row_start > 0:
        y1 += self._size_row(row_start - 1)
        row_start -= 1

    # Ensure that the image isn't shifted off the page at top left.
    x1 = max(0, x1)
    y1 = max(0, y1)

    # Calculate the absolute x offset of the top-left vertex.
    if self.col_size_changed:
        for col_id in range(col_start):
            x_abs += self._size_col(col_id)
    else:
        # Optimization for when the column widths haven't changed.
        x_abs += self.default_col_width * col_start

    x_abs += x1

    # Calculate the absolute y offset of the top-left vertex.
    if self.row_size_changed:
        for row_id in range(row_start):
            y_abs += self._size_row(row_id)
    else:
        # Optimization for when the row heights haven't changed.
        y_abs += self.default_row_height * row_start

    y_abs += y1

    # Adjust start column for offsets that are greater than the col width.
    while x1 >= self._size_col(col_start, anchor):
        x1 -= self._size_col(col_start)
        col_start += 1

    # Adjust start row for offsets that are greater than the row height.
    while y1 >= self._size_row(row_start, anchor):
        y1 -= self._size_row(row_start)
        row_start += 1

    # Initialize end cell to the same as the start cell.
    col_end = col_start
    row_end = row_start

    # Don't offset the image in the cell if the row/col is hidden.
    if self._size_col(col_start, anchor) > 0:
        width = width + x1
    if self._size_row(row_start, anchor) > 0:
        height = height + y1

    # Subtract the underlying cell widths to find end cell of the object.
    while width >= self._size_col(col_end, anchor):
        width -= self._size_col(col_end, anchor)
        col_end += 1

    # Subtract the underlying cell heights to find end cell of the object.
    while height >= self._size_row(row_end, anchor):
        height -= self._size_row(row_end, anchor)
        row_end += 1

    # The end vertices are whatever is left from the width and height.
    x2 = width
    y2 = height

    return [col_start, row_start, x1, y1, col_end, row_end, x2, y2, x_abs, y_abs]
def _size_col(self, col: int, anchor=0):
# Look up the cell value to see if it has been changed.
if col in self.col_info:
width = self.col_info[col].width
hidden = self.col_info[col].hidden
if width is None:
width = self.default_col_width
if hidden and anchor != 4:
width = 0
return width
return self.default_col_width
def _size_row(self, row: int, anchor=0):
# Look up the cell value to see if it has been changed
if row in self.row_sizes:
height = self.row_sizes[row][0]
hidden = self.row_sizes[row][1]
if hidden and anchor != 4:
height = 0
return height
return self.default_row_height
def _pixels_to_height(self, pixels):
# Convert the height of a cell from pixels to character units.
return 0.75 * pixels
def _comment_vertices(self, comment: CommentType):
    """Calculate the position and size vertices of a cell comment object."""
    # Comments always use the default object anchor.
    vertices = self._position_object_pixels(
        comment.start_col,
        comment.start_row,
        comment.x_offset,
        comment.y_offset,
        comment.width,
        comment.height,
        0,
    )

    # The VML writer also needs the raw width and height appended.
    return vertices + [comment.width, comment.height]
def _button_vertices(self, button: ButtonType):
    """Calculate the position and size vertices of a button object."""
    # Buttons always use the default object anchor.
    vertices = self._position_object_pixels(
        button.col,
        button.row,
        button.x_offset,
        button.y_offset,
        button.width,
        button.height,
        0,
    )

    # The VML writer also needs the raw width and height appended.
    return vertices + [button.width, button.height]
def _prepare_vml_objects(
    self, vml_data_id, vml_shape_id, vml_drawing_id, comment_id
):
    """
    Set up external linkage and ids for the worksheet VML objects:
    cell comments and buttons.

    Returns:
        The number of comments in the worksheet.

    """
    comments = []
    # Sort the comments into row/column order for easier comparison
    # testing and set the external links for comments and buttons.
    row_nums = sorted(self.comments.keys())

    for row in row_nums:
        col_nums = sorted(self.comments[row].keys())

        for col in col_nums:
            comment = self.comments[row][col]
            comment.vertices = self._comment_vertices(comment)

            # Set comment visibility if required and not user defined.
            if comment.is_visible is None:
                comment.is_visible = self.comments_visible

            # Set comment author if not already user defined.
            if comment.author is None:
                comment.author = self.comments_author

            comments.append(comment)

    for button in self.buttons_list:
        button.vertices = self._button_vertices(button)

    self.external_vml_links.append(
        ["/vmlDrawing", "../drawings/vmlDrawing" + str(vml_drawing_id) + ".vml"]
    )

    if self.has_comments:
        self.comments_list = comments

        self.external_comment_links.append(
            ["/comments", "../comments" + str(comment_id) + ".xml"]
        )

    count = len(comments)
    start_data_id = vml_data_id

    # The VML o:idmap data id contains a comma separated range when there
    # is more than one 1024 block of comments, like this: data="1,2".
    for i in range(int(count / 1024)):
        data_id = start_data_id + i + 1
        vml_data_id = f"{vml_data_id},{data_id}"

    self.vml_data_id = vml_data_id
    self.vml_shape_id = vml_shape_id

    return count
def _prepare_header_vml_objects(self, vml_header_id, vml_drawing_id) -> None:
# Set up external linkage for VML header/footer images.
self.vml_header_id = vml_header_id
self.external_vml_links.append(
["/vmlDrawing", "../drawings/vmlDrawing" + str(vml_drawing_id) + ".vml"]
)
def _prepare_tables(self, table_id, seen) -> None:
# Set the table ids for the worksheet tables.
for table in self.tables:
table["id"] = table_id
if table.get("name") is None:
# Set a default name.
table["name"] = "Table" + str(table_id)
# Check for duplicate table names.
name = table["name"].lower()
if name in seen:
raise DuplicateTableName(
f"Duplicate name '{table['name']}' used in worksheet.add_table()."
)
seen[name] = True
# Store the link used for the rels file.
self.external_table_links.append(
["/table", "../tables/table" + str(table_id) + ".xml"]
)
table_id += 1
def _table_function_to_formula(self, function, col_name):
# Convert a table total function to a worksheet formula.
formula = ""
# Escape special characters, as required by Excel.
col_name = col_name.replace("'", "''")
col_name = col_name.replace("#", "'#")
col_name = col_name.replace("]", "']")
col_name = col_name.replace("[", "'[")
subtotals = {
"average": 101,
"countNums": 102,
"count": 103,
"max": 104,
"min": 105,
"stdDev": 107,
"sum": 109,
"var": 110,
}
if function in subtotals:
func_num = subtotals[function]
formula = f"SUBTOTAL({func_num},[{col_name}])"
else:
warn(f"Unsupported function '{function}' in add_table()")
return formula
def _set_spark_color(self, sparkline, options, user_color) -> None:
# Set the sparkline color.
if user_color not in options:
return
sparkline[user_color] = Color._from_value(options[user_color])
def _get_range_data(self, row_start, col_start, row_end, col_end):
    """
    Return a range of data from the worksheet table to be used in chart
    cached data. Strings are returned as SST ids and decoded in the
    workbook. Missing data is returned as None since Excel charts can
    have series with data missing.
    """
    if self.constant_memory:
        # Rows are flushed to disk as they are written, so no cache data
        # is available in this mode.
        return ()

    data = []

    # Iterate through the table data.
    for row_num in range(row_start, row_end + 1):
        # Store None if row doesn't exist.
        if row_num not in self.table:
            data.append(None)
            continue

        for col_num in range(col_start, col_end + 1):
            if col_num in self.table[row_num]:
                cell = self.table[row_num][col_num]

                cell_type = cell.__class__.__name__

                if cell_type in ("Number", "Datetime"):
                    # Return a number with Excel's precision.
                    data.append(f"{cell.number:.16g}")

                elif cell_type == "String":
                    # Return a string from it's shared string index.
                    index = cell.string
                    string = self.str_table._get_shared_string(index)

                    data.append(string)

                elif cell_type in ("Formula", "ArrayFormula"):
                    # Return the formula value.
                    value = cell.value

                    if value is None:
                        value = 0

                    data.append(value)

                elif cell_type == "Blank":
                    # Return a empty cell.
                    data.append("")
            else:
                # Store None if column doesn't exist.
                data.append(None)

    return data
def _csv_join(self, *items):
# Create a csv string for use with data validation formulas and lists.
# Convert non string types to string.
items = [str(item) if not isinstance(item, str) else item for item in items]
return ",".join(items)
def _escape_url(self, url):
# Don't escape URL if it looks already escaped.
if re.search("%[0-9a-fA-F]{2}", url):
return url
# Can't use url.quote() here because it doesn't match Excel.
url = url.replace("%", "%25")
url = url.replace('"', "%22")
url = url.replace(" ", "%20")
url = url.replace("<", "%3c")
url = url.replace(">", "%3e")
url = url.replace("[", "%5b")
url = url.replace("]", "%5d")
url = url.replace("^", "%5e")
url = url.replace("`", "%60")
url = url.replace("{", "%7b")
url = url.replace("}", "%7d")
return url
def _get_drawing_rel_index(self, target=None):
# Get the index used to address a drawing rel link.
if target is None:
self.drawing_rels_id += 1
return self.drawing_rels_id
if self.drawing_rels.get(target):
return self.drawing_rels[target]
self.drawing_rels_id += 1
self.drawing_rels[target] = self.drawing_rels_id
return self.drawing_rels_id
def _get_vml_drawing_rel_index(self, target=None):
# Get the index used to address a vml drawing rel link.
if self.vml_drawing_rels.get(target):
return self.vml_drawing_rels[target]
self.vml_drawing_rels_id += 1
self.vml_drawing_rels[target] = self.vml_drawing_rels_id
return self.vml_drawing_rels_id
###########################################################################
#
# The following font methods are mainly duplicated from the Styles class
# with appropriate changes for rich string styles.
#
###########################################################################
def _write_font(self, xf_format) -> None:
    """
    Write the <rPr> font element for a rich string run. This is mainly
    duplicated from the Styles class with changes for rich string styles.

    Args:
        xf_format: The Format object holding the font properties.

    Returns:
        Nothing.

    """
    xml_writer = self.rstring

    xml_writer._xml_start_tag("rPr")

    # Handle the main font properties.
    if xf_format.bold:
        xml_writer._xml_empty_tag("b")
    if xf_format.italic:
        xml_writer._xml_empty_tag("i")
    if xf_format.font_strikeout:
        xml_writer._xml_empty_tag("strike")
    if xf_format.font_outline:
        xml_writer._xml_empty_tag("outline")
    if xf_format.font_shadow:
        xml_writer._xml_empty_tag("shadow")

    # Handle the underline variants.
    if xf_format.underline:
        self._write_underline(xf_format.underline)

    # Handle super/subscript.
    if xf_format.font_script == 1:
        self._write_vert_align("superscript")
    if xf_format.font_script == 2:
        self._write_vert_align("subscript")

    # Write the font size
    xml_writer._xml_empty_tag("sz", [("val", xf_format.font_size)])

    # Handle colors, in order of precedence: theme, indexed, explicit.
    if xf_format.theme == -1:
        # Ignore for excel2003_style.
        pass
    elif xf_format.theme:
        self._write_rstring_color("color", [("theme", xf_format.theme)])
    elif xf_format.color_indexed:
        self._write_rstring_color("color", [("indexed", xf_format.color_indexed)])
    elif xf_format.font_color:
        color = xf_format.font_color
        if not color._is_automatic:
            self._write_rstring_color("color", color._attributes())
    else:
        self._write_rstring_color("color", [("theme", 1)])

    # Write some other font properties related to font families.
    xml_writer._xml_empty_tag("rFont", [("val", xf_format.font_name)])
    xml_writer._xml_empty_tag("family", [("val", xf_format.font_family)])

    if xf_format.font_name == "Calibri" and not xf_format.hyperlink:
        xml_writer._xml_empty_tag("scheme", [("val", xf_format.font_scheme)])

    xml_writer._xml_end_tag("rPr")
def _write_underline(self, underline) -> None:
# Write the underline font element.
attributes = []
# Handle the underline variants.
if underline == 2:
attributes = [("val", "double")]
elif underline == 33:
attributes = [("val", "singleAccounting")]
elif underline == 34:
attributes = [("val", "doubleAccounting")]
self.rstring._xml_empty_tag("u", attributes)
def _write_vert_align(self, val) -> None:
# Write the <vertAlign> font sub-element.
attributes = [("val", val)]
self.rstring._xml_empty_tag("vertAlign", attributes)
def _write_rstring_color(self, name, attributes) -> None:
# Write the <color> element.
self.rstring._xml_empty_tag(name, attributes)
def _opt_close(self) -> None:
# Close the row data filehandle in constant_memory mode.
if not self.row_data_fh_closed:
self.row_data_fh.close()
self.row_data_fh_closed = True
def _opt_reopen(self) -> None:
# Reopen the row data filehandle in constant_memory mode.
if self.row_data_fh_closed:
filename = self.row_data_filename
# pylint: disable=consider-using-with
self.row_data_fh = open(filename, mode="a+", encoding="utf-8")
self.row_data_fh_closed = False
self.fh = self.row_data_fh
def _set_icon_props(self, total_icons, user_props=None):
# Set the sub-properties for icons.
props = []
# Set the defaults.
for _ in range(total_icons):
props.append({"criteria": False, "value": 0, "type": "percent"})
# Set the default icon values based on the number of icons.
if total_icons == 3:
props[0]["value"] = 67
props[1]["value"] = 33
if total_icons == 4:
props[0]["value"] = 75
props[1]["value"] = 50
props[2]["value"] = 25
if total_icons == 5:
props[0]["value"] = 80
props[1]["value"] = 60
props[2]["value"] = 40
props[3]["value"] = 20
# Overwrite default properties with user defined properties.
if user_props:
# Ensure we don't set user properties for lowest icon.
max_data = len(user_props)
if max_data >= total_icons:
max_data = total_icons - 1
for i in range(max_data):
# Set the user defined 'value' property.
if user_props[i].get("value") is not None:
props[i]["value"] = user_props[i]["value"]
# Remove the formula '=' sign if it exists.
tmp = props[i]["value"]
if isinstance(tmp, str) and tmp.startswith("="):
props[i]["value"] = tmp.lstrip("=")
# Set the user defined 'type' property.
if user_props[i].get("type"):
valid_types = ("percent", "percentile", "number", "formula")
if user_props[i]["type"] not in valid_types:
warn(
f"Unknown icon property type '{user_props[i]['type']}' "
f"for sub-property 'type' in conditional_format()."
)
else:
props[i]["type"] = user_props[i]["type"]
if props[i]["type"] == "number":
props[i]["type"] = "num"
# Set the user defined 'criteria' property.
criteria = user_props[i].get("criteria")
if criteria and criteria == ">":
props[i]["criteria"] = True
return props
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_worksheet(self) -> None:
# Write the <worksheet> element. This is the root element.
schema = "http://schemas.openxmlformats.org/"
xmlns = schema + "spreadsheetml/2006/main"
xmlns_r = schema + "officeDocument/2006/relationships"
xmlns_mc = schema + "markup-compatibility/2006"
ms_schema = "http://schemas.microsoft.com/"
xmlns_x14ac = ms_schema + "office/spreadsheetml/2009/9/ac"
attributes = [("xmlns", xmlns), ("xmlns:r", xmlns_r)]
# Add some extra attributes for Excel 2010. Mainly for sparklines.
if self.excel_version == 2010:
attributes.append(("xmlns:mc", xmlns_mc))
attributes.append(("xmlns:x14ac", xmlns_x14ac))
attributes.append(("mc:Ignorable", "x14ac"))
self._xml_start_tag("worksheet", attributes)
    def _write_dimension(self) -> None:
        # Write the <dimension> element. This specifies the range of
        # cells in the worksheet as an A1:B2 style reference. As a special
        # case, empty spreadsheets use 'A1' as a range.
        if self.dim_rowmin is None and self.dim_colmin is None:
            # If the min dimensions are not defined then no dimensions
            # have been set and we use the default 'A1'.
            ref = "A1"
        elif self.dim_rowmin is None and self.dim_colmin is not None:
            # If the row dimensions aren't set but the column
            # dimensions are set then they have been changed via
            # set_column(). Use row 0 for the reference.
            if self.dim_colmin == self.dim_colmax:
                # The dimensions are a single cell and not a range.
                ref = xl_rowcol_to_cell(0, self.dim_colmin)
            else:
                # The dimensions are a cell range.
                cell_1 = xl_rowcol_to_cell(0, self.dim_colmin)
                cell_2 = xl_rowcol_to_cell(0, self.dim_colmax)
                ref = cell_1 + ":" + cell_2
        elif self.dim_rowmin == self.dim_rowmax and self.dim_colmin == self.dim_colmax:
            # The dimensions are a single cell and not a range.
            ref = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
        else:
            # The dimensions are a cell range.
            cell_1 = xl_rowcol_to_cell(self.dim_rowmin, self.dim_colmin)
            cell_2 = xl_rowcol_to_cell(self.dim_rowmax, self.dim_colmax)
            ref = cell_1 + ":" + cell_2
        self._xml_empty_tag("dimension", [("ref", ref)])
def _write_sheet_views(self) -> None:
# Write the <sheetViews> element.
self._xml_start_tag("sheetViews")
# Write the sheetView element.
self._write_sheet_view()
self._xml_end_tag("sheetViews")
    def _write_sheet_view(self) -> None:
        # Write the <sheetView> element. Most attributes are only written
        # when they differ from the Excel defaults.
        attributes = []
        # Hide screen gridlines if required.
        if not self.screen_gridlines:
            attributes.append(("showGridLines", 0))
        # Hide screen row/column headers.
        if self.row_col_headers:
            attributes.append(("showRowColHeaders", 0))
        # Hide zeroes in cells.
        if not self.show_zeros:
            attributes.append(("showZeros", 0))
        # Display worksheet right to left for Hebrew, Arabic and others.
        if self.is_right_to_left:
            attributes.append(("rightToLeft", 1))
        # Show that the sheet tab is selected.
        if self.selected:
            attributes.append(("tabSelected", 1))
        # Turn outlines off. Also required in the outlinePr element.
        if not self.outline_on:
            attributes.append(("showOutlineSymbols", 0))
        # Set the page view/layout mode if required.
        # 0 = normal, 1 = page layout, 2 = page break preview.
        if self.page_view == 1:
            attributes.append(("view", "pageLayout"))
        elif self.page_view == 2:
            attributes.append(("view", "pageBreakPreview"))
        # Set the first visible cell.
        if self.top_left_cell != "":
            attributes.append(("topLeftCell", self.top_left_cell))
        # Set the zoom level. The scale attribute written depends on the
        # current page view mode.
        if self.zoom != 100:
            attributes.append(("zoomScale", self.zoom))
            if self.page_view == 0 and self.zoom_scale_normal:
                attributes.append(("zoomScaleNormal", self.zoom))
            if self.page_view == 1:
                attributes.append(("zoomScalePageLayoutView", self.zoom))
            if self.page_view == 2:
                attributes.append(("zoomScaleSheetLayoutView", self.zoom))
        attributes.append(("workbookViewId", 0))
        if self.is_chartsheet and self.zoom_to_fit:
            attributes.append(("zoomToFit", 1))
        # Write sub-elements (panes/selections) inside the tag if present,
        # otherwise write a self-closing element.
        if self.panes or self.selections:
            self._xml_start_tag("sheetView", attributes)
            self._write_panes()
            self._write_selections()
            self._xml_end_tag("sheetView")
        else:
            self._xml_empty_tag("sheetView", attributes)
def _write_sheet_format_pr(self) -> None:
# Write the <sheetFormatPr> element.
height_in_chars = self.default_row_height * 0.75
row_level = self.outline_row_level
col_level = self.outline_col_level
attributes = [("defaultRowHeight", f"{height_in_chars:.16g}")]
if self.default_row_height != self.original_row_height:
attributes.append(("customHeight", 1))
if self.default_row_zeroed:
attributes.append(("zeroHeight", 1))
if row_level:
attributes.append(("outlineLevelRow", row_level))
if col_level:
attributes.append(("outlineLevelCol", col_level))
if self.excel_version == 2010:
attributes.append(("x14ac:dyDescent", "0.25"))
self._xml_empty_tag("sheetFormatPr", attributes)
    def _write_cols(self) -> None:
        # Write the <cols> element and <col> sub elements. Runs of
        # contiguous columns with identical properties are coalesced into a
        # single <col min=... max=...> range.
        # Exit unless some columns have been formatted.
        if not self.col_info:
            return
        self._xml_start_tag("cols")
        # Use the first element of the column information structures to set
        # the initial/previous properties. It is temporarily deleted from
        # col_info so the loop below starts at the second entry without
        # special-casing; it is restored after the loop.
        first_col = (sorted(self.col_info.keys()))[0]
        last_col = first_col
        prev_col_options = self.col_info[first_col]
        del self.col_info[first_col]
        deleted_col = first_col
        deleted_col_options = prev_col_options
        for col in sorted(self.col_info.keys()):
            col_options = self.col_info[col]
            # Check if the column number is contiguous with the previous
            # column and if the properties are the same.
            if col == last_col + 1 and col_options == prev_col_options:
                last_col = col
            else:
                # If not contiguous/equal then we write out the current range
                # of columns and start again.
                self._write_col_info(first_col, last_col, prev_col_options)
                first_col = col
                last_col = first_col
                prev_col_options = col_options
        # We will exit the previous loop with one unhandled column range.
        self._write_col_info(first_col, last_col, prev_col_options)
        # Put back the deleted first column information structure.
        self.col_info[deleted_col] = deleted_col_options
        self._xml_end_tag("cols")
def _write_col_info(self, col_min: int, col_max: int, col_info: ColumnInfo) -> None:
# Write the <col> element.
width = col_info.width
has_custom_width = True
xf_index = 0
# Get the cell_format index.
if col_info.column_format:
xf_index = col_info.column_format._get_xf_index()
# Set the Excel default column width.
if width is None:
if not col_info.hidden:
width = self.default_col_width
has_custom_width = False
else:
width = 0
elif width == self.default_col_width:
# Width is defined but same as default.
has_custom_width = False
# Convert column width from pixels to character width.
char_width = (width * 256 // self.max_digit_width) / 256.0
attributes = [
("min", col_min + 1),
("max", col_max + 1),
("width", f"{char_width:.16g}"),
]
if xf_index:
attributes.append(("style", xf_index))
if col_info.hidden:
attributes.append(("hidden", "1"))
if col_info.autofit:
attributes.append(("bestFit", "1"))
if has_custom_width:
attributes.append(("customWidth", "1"))
if col_info.level:
attributes.append(("outlineLevel", col_info.level))
if col_info.collapsed:
attributes.append(("collapsed", "1"))
self._xml_empty_tag("col", attributes)
def _write_sheet_data(self) -> None:
# Write the <sheetData> element.
if self.dim_rowmin is None:
# If the dimensions aren't defined there is no data to write.
self._xml_empty_tag("sheetData")
else:
self._xml_start_tag("sheetData")
self._write_rows()
self._xml_end_tag("sheetData")
    def _write_optimized_sheet_data(self) -> None:
        # Write the <sheetData> element when constant_memory is on. In this
        # case we read the data stored in the temp file and rewrite it to the
        # XML sheet file. The temp file is then removed.
        if self.dim_rowmin is None:
            # If the dimensions aren't defined then there is no data to write.
            self._xml_empty_tag("sheetData")
        else:
            self._xml_start_tag("sheetData")
            # Rewind the filehandle that was used for temp row data and copy
            # it across in fixed-size chunks to bound memory use.
            buff_size = 65536
            self.row_data_fh.seek(0)
            data = self.row_data_fh.read(buff_size)
            while data:
                self.fh.write(data)
                data = self.row_data_fh.read(buff_size)
            self.row_data_fh.close()
            os.unlink(self.row_data_filename)
            self._xml_end_tag("sheetData")
def _write_page_margins(self) -> None:
# Write the <pageMargins> element.
attributes = [
("left", self.margin_left),
("right", self.margin_right),
("top", self.margin_top),
("bottom", self.margin_bottom),
("header", self.margin_header),
("footer", self.margin_footer),
]
self._xml_empty_tag("pageMargins", attributes)
    def _write_page_setup(self) -> None:
        # Write the <pageSetup> element. Attributes are only written when
        # they differ from the Excel defaults.
        #
        # The following is an example taken from Excel.
        #
        # <pageSetup
        #     paperSize="9"
        #     scale="110"
        #     fitToWidth="2"
        #     fitToHeight="2"
        #     pageOrder="overThenDown"
        #     orientation="portrait"
        #     blackAndWhite="1"
        #     draft="1"
        #     horizontalDpi="200"
        #     verticalDpi="200"
        #     r:id="rId1"
        # />
        #
        attributes = []
        # Skip this element if no page setup has changed.
        if not self.page_setup_changed:
            return
        # Set paper size.
        if self.paper_size:
            attributes.append(("paperSize", self.paper_size))
        # Set the print_scale.
        if self.print_scale != 100:
            attributes.append(("scale", self.print_scale))
        # Set the "Fit to page" properties.
        if self.fit_page and self.fit_width != 1:
            attributes.append(("fitToWidth", self.fit_width))
        if self.fit_page and self.fit_height != 1:
            attributes.append(("fitToHeight", self.fit_height))
        # Set the page print direction.
        if self.page_order:
            attributes.append(("pageOrder", "overThenDown"))
        # Set start page for printing.
        if self.page_start > 1:
            attributes.append(("firstPageNumber", self.page_start))
        # Set page orientation.
        if self.orientation:
            attributes.append(("orientation", "portrait"))
        else:
            attributes.append(("orientation", "landscape"))
        # Set the print in black and white option.
        if self.black_white:
            attributes.append(("blackAndWhite", "1"))
        # Flag that the first page number above should be used.
        if self.page_start != 0:
            attributes.append(("useFirstPageNumber", "1"))
        # Set the DPI. Mainly only for testing. Note that chartsheets write
        # the two DPI attributes in the opposite order to worksheets.
        if self.is_chartsheet:
            if self.horizontal_dpi:
                attributes.append(("horizontalDpi", self.horizontal_dpi))
            if self.vertical_dpi:
                attributes.append(("verticalDpi", self.vertical_dpi))
        else:
            if self.vertical_dpi:
                attributes.append(("verticalDpi", self.vertical_dpi))
            if self.horizontal_dpi:
                attributes.append(("horizontalDpi", self.horizontal_dpi))
        self._xml_empty_tag("pageSetup", attributes)
def _write_print_options(self) -> None:
# Write the <printOptions> element.
attributes = []
if not self.print_options_changed:
return
# Set horizontal centering.
if self.hcenter:
attributes.append(("horizontalCentered", 1))
# Set vertical centering.
if self.vcenter:
attributes.append(("verticalCentered", 1))
# Enable row and column headers.
if self.print_headers:
attributes.append(("headings", 1))
# Set printed gridlines.
if self.print_gridlines:
attributes.append(("gridLines", 1))
self._xml_empty_tag("printOptions", attributes)
def _write_header_footer(self) -> None:
# Write the <headerFooter> element.
attributes = []
if not self.header_footer_scales:
attributes.append(("scaleWithDoc", 0))
if not self.header_footer_aligns:
attributes.append(("alignWithMargins", 0))
if self.header_footer_changed:
self._xml_start_tag("headerFooter", attributes)
if self.header:
self._write_odd_header()
if self.footer:
self._write_odd_footer()
self._xml_end_tag("headerFooter")
elif self.excel2003_style:
self._xml_empty_tag("headerFooter", attributes)
def _write_odd_header(self) -> None:
# Write the <headerFooter> element.
self._xml_data_element("oddHeader", self.header)
def _write_odd_footer(self) -> None:
# Write the <headerFooter> element.
self._xml_data_element("oddFooter", self.footer)
    def _write_rows(self) -> None:
        # Write out the worksheet data as a series of rows and cells.
        # Only rows that carry formatting, cell data or comments are
        # written; everything else is omitted from the XML.
        self._calculate_spans()
        for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
            if (
                row_num in self.row_info
                or row_num in self.comments
                or self.table[row_num]
            ):
                # Only process rows with formatting, cell data and/or comments.
                # Spans are pre-computed per block of 16 rows.
                span_index = int(row_num / 16)
                if span_index in self.row_spans:
                    span = self.row_spans[span_index]
                else:
                    span = None
                if self.table[row_num]:
                    # Write the cells if the row contains data.
                    if row_num not in self.row_info:
                        self._write_row(row_num, span)
                    else:
                        self._write_row(row_num, span, self.row_info[row_num])
                    for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                        if col_num in self.table[row_num]:
                            col_ref = self.table[row_num][col_num]
                            self._write_cell(row_num, col_num, col_ref)
                    self._xml_end_tag("row")
                elif row_num in self.comments:
                    # Row with comments in cells. Written as an empty row
                    # (same as the blank case below).
                    if row_num not in self.row_info:
                        self._write_empty_row(row_num, span, None)
                    else:
                        self._write_empty_row(row_num, span, self.row_info[row_num])
                else:
                    # Blank row with attributes only.
                    if row_num not in self.row_info:
                        self._write_empty_row(row_num, span, None)
                    else:
                        self._write_empty_row(row_num, span, self.row_info[row_num])
    def _write_single_row(self, current_row_num=0) -> None:
        # Write out the worksheet data as a single row with cells.
        # This method is used when constant_memory is on. A single
        # row is written and the data table is reset. That way only
        # one row of data is kept in memory at any one time. We don't
        # write span data in the optimized case since it is optional.
        # The row actually written is the *previous* row; `current_row_num`
        # becomes the new previous row for the next call.
        row_num = self.previous_row
        self.previous_row = current_row_num
        if row_num in self.row_info or row_num in self.comments or self.table[row_num]:
            # Only process rows with formatting, cell data and/or comments.
            # No span data in optimized mode.
            span = None
            if self.table[row_num]:
                # Write the cells if the row contains data.
                if row_num not in self.row_info:
                    self._write_row(row_num, span)
                else:
                    self._write_row(row_num, span, self.row_info[row_num])
                for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                    if col_num in self.table[row_num]:
                        col_ref = self.table[row_num][col_num]
                        self._write_cell(row_num, col_num, col_ref)
                self._xml_end_tag("row")
            else:
                # Row attributes or comments only.
                self._write_empty_row(row_num, span, self.row_info[row_num])
        # Reset table so only one row of data is held in memory.
        self.table.clear()
    def _calculate_spans(self) -> None:
        # Calculate the "spans" attribute of the <row> tag. This is an
        # XLSX optimization and isn't strictly required. However, it
        # makes comparing files easier. The span is the same for each
        # block of 16 rows and records the min/max used column (1-based)
        # in that block as a "min:max" string.
        spans = {}
        span_min = None
        span_max = None
        for row_num in range(self.dim_rowmin, self.dim_rowmax + 1):
            if row_num in self.table:
                # Calculate spans for cell data.
                for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                    if col_num in self.table[row_num]:
                        if span_min is None:
                            span_min = col_num
                            span_max = col_num
                        else:
                            span_min = min(span_min, col_num)
                            span_max = max(span_max, col_num)
            if row_num in self.comments:
                # Calculate spans for comments.
                for col_num in range(self.dim_colmin, self.dim_colmax + 1):
                    if row_num in self.comments and col_num in self.comments[row_num]:
                        if span_min is None:
                            span_min = col_num
                            span_max = col_num
                        else:
                            span_min = min(span_min, col_num)
                            span_max = max(span_max, col_num)
            # At the end of each block of 16 rows (or at the last row) store
            # the accumulated span and reset for the next block.
            if ((row_num + 1) % 16 == 0) or row_num == self.dim_rowmax:
                span_index = int(row_num / 16)
                if span_min is not None:
                    # Convert to 1-based column numbers for the XML.
                    span_min += 1
                    span_max += 1
                    spans[span_index] = f"{span_min}:{span_max}"
                    span_min = None
        self.row_spans = spans
    def _write_row(
        self,
        row: int,
        spans: Optional[str],
        row_info: Optional[RowInfo] = None,
        empty_row: bool = False,
    ) -> None:
        # Write the <row> element, either self-closing (empty_row=True) or
        # as an opening tag to be followed by cells and _xml_end_tag("row").
        xf_index = 0
        if row_info:
            height = row_info.height
            row_format = row_info.row_format
            hidden = row_info.hidden
            level = row_info.level
            collapsed = row_info.collapsed
        else:
            height = None
            row_format = None
            hidden = 0
            level = 0
            collapsed = 0
        if height is None:
            height = self.default_row_height
        attributes = [("r", row + 1)]
        # Get the cell_format index.
        if row_format:
            xf_index = row_format._get_xf_index()
        # Add row attributes where applicable.
        if spans:
            attributes.append(("spans", spans))
        if xf_index:
            attributes.append(("s", xf_index))
        if row_format:
            attributes.append(("customFormat", 1))
        # A height is written when it differs from the original or the
        # sheet default. (The same condition is evaluated again below so
        # "ht" and "customHeight" stay adjacent in attribute order.)
        if height != self.original_row_height or (
            height == self.original_row_height and height != self.default_row_height
        ):
            height_in_chars = height * 0.75
            attributes.append(("ht", f"{height_in_chars:.16g}"))
        if hidden:
            attributes.append(("hidden", 1))
        if height != self.original_row_height or (
            height == self.original_row_height and height != self.default_row_height
        ):
            attributes.append(("customHeight", 1))
        if level:
            attributes.append(("outlineLevel", level))
        if collapsed:
            attributes.append(("collapsed", 1))
        if self.excel_version == 2010:
            attributes.append(("x14ac:dyDescent", "0.25"))
        if empty_row:
            self._xml_empty_tag_unencoded("row", attributes)
        else:
            self._xml_start_tag_unencoded("row", attributes)
def _write_empty_row(
self, row: int, spans: Optional[str], row_info: Optional[RowInfo] = None
) -> None:
# Write and empty <row> element.
self._write_row(row, spans, row_info, empty_row=True)
    def _write_cell(self, row: int, col: int, cell) -> None:
        # Write the <c> (cell) element. The cell type is dispatched on the
        # class name of the `cell` namedtuple-like object.
        # Note. This is the innermost loop so efficiency is important.
        cell_range = xl_rowcol_to_cell_fast(row, col)
        attributes = [("r", cell_range)]
        # Style precedence: cell format, then row format, then column format.
        if cell.format:
            # Add the cell format index.
            xf_index = cell.format._get_xf_index()
            attributes.append(("s", xf_index))
        elif row in self.row_info and self.row_info[row].row_format:
            # Add the row format.
            row_format = self.row_info[row].row_format
            attributes.append(("s", row_format._get_xf_index()))
        elif col in self.col_info:
            # Add the column format.
            column_format = self.col_info[col].column_format
            if column_format is not None:
                attributes.append(("s", column_format._get_xf_index()))
        type_cell_name = cell.__class__.__name__
        # Write the various cell types.
        if type_cell_name in ("Number", "Datetime"):
            # Write a number.
            self._xml_number_element(cell.number, attributes)
        elif type_cell_name in ("String", "RichString"):
            # Write a string.
            string = cell.string
            if not self.constant_memory:
                # Write a shared string.
                self._xml_string_element(string, attributes)
            else:
                # Write an optimized in-line string.
                # Convert control character to a _xHHHH_ escape.
                string = self._escape_control_characters(string)
                # Write any rich strings without further tags.
                if string.startswith("<r>") and string.endswith("</r>"):
                    self._xml_rich_inline_string(string, attributes)
                else:
                    # Add attribute to preserve leading or trailing whitespace.
                    preserve = _preserve_whitespace(string)
                    self._xml_inline_string(string, preserve, attributes)
        elif type_cell_name == "Formula":
            # Write a formula. First check the formula value type.
            value = cell.value
            if isinstance(cell.value, bool):
                # Boolean results use t="b" with a 1/0 value.
                attributes.append(("t", "b"))
                if cell.value:
                    value = 1
                else:
                    value = 0
            elif isinstance(cell.value, str):
                error_codes = (
                    "#DIV/0!",
                    "#N/A",
                    "#NAME?",
                    "#NULL!",
                    "#NUM!",
                    "#REF!",
                    "#VALUE!",
                )
                if cell.value == "":
                    # Allow blank to force recalc in some third party apps.
                    pass
                elif cell.value in error_codes:
                    attributes.append(("t", "e"))
                else:
                    attributes.append(("t", "str"))
            self._xml_formula_element(cell.formula, value, attributes)
        elif type_cell_name == "ArrayFormula":
            # Write an array formula.
            if cell.atype == "dynamic":
                attributes.append(("cm", 1))
            # First check if the formula value is a string.
            try:
                float(cell.value)
            except ValueError:
                attributes.append(("t", "str"))
            # Write an array formula.
            self._xml_start_tag("c", attributes)
            self._write_cell_array_formula(cell.formula, cell.range)
            self._write_cell_value(cell.value)
            self._xml_end_tag("c")
        elif type_cell_name == "Blank":
            # Write an empty cell.
            self._xml_empty_tag("c", attributes)
        elif type_cell_name == "Boolean":
            # Write a boolean cell.
            attributes.append(("t", "b"))
            self._xml_start_tag("c", attributes)
            self._write_cell_value(cell.boolean)
            self._xml_end_tag("c")
        elif type_cell_name == "Error":
            # Write an error cell.
            attributes.append(("t", "e"))
            attributes.append(("vm", cell.value))
            self._xml_start_tag("c", attributes)
            self._write_cell_value(cell.error)
            self._xml_end_tag("c")
def _write_cell_value(self, value) -> None:
# Write the cell value <v> element.
if value is None:
value = ""
self._xml_data_element("v", value)
def _write_cell_array_formula(self, formula, cell_range) -> None:
# Write the cell array formula <f> element.
attributes = [("t", "array"), ("ref", cell_range)]
self._xml_data_element("f", formula, attributes)
def _write_sheet_pr(self) -> None:
# Write the <sheetPr> element for Sheet level properties.
attributes = []
if (
not self.fit_page
and not self.filter_on
and not self.tab_color
and not self.outline_changed
and not self.vba_codename
):
return
if self.vba_codename:
attributes.append(("codeName", self.vba_codename))
if self.filter_on:
attributes.append(("filterMode", 1))
if self.fit_page or self.tab_color or self.outline_changed:
self._xml_start_tag("sheetPr", attributes)
self._write_tab_color()
self._write_outline_pr()
self._write_page_set_up_pr()
self._xml_end_tag("sheetPr")
else:
self._xml_empty_tag("sheetPr", attributes)
def _write_page_set_up_pr(self) -> None:
# Write the <pageSetUpPr> element.
if not self.fit_page:
return
attributes = [("fitToPage", 1)]
self._xml_empty_tag("pageSetUpPr", attributes)
def _write_tab_color(self) -> None:
# Write the <tabColor> element.
color = self.tab_color
if not color:
return
self._write_color("tabColor", color._attributes())
def _write_outline_pr(self) -> None:
# Write the <outlinePr> element.
attributes = []
if not self.outline_changed:
return
if self.outline_style:
attributes.append(("applyStyles", 1))
if not self.outline_below:
attributes.append(("summaryBelow", 0))
if not self.outline_right:
attributes.append(("summaryRight", 0))
if not self.outline_on:
attributes.append(("showOutlineSymbols", 0))
self._xml_empty_tag("outlinePr", attributes)
def _write_row_breaks(self) -> None:
# Write the <rowBreaks> element.
page_breaks = self._sort_pagebreaks(self.hbreaks)
if not page_breaks:
return
count = len(page_breaks)
attributes = [
("count", count),
("manualBreakCount", count),
]
self._xml_start_tag("rowBreaks", attributes)
for row_num in page_breaks:
self._write_brk(row_num, 16383)
self._xml_end_tag("rowBreaks")
def _write_col_breaks(self) -> None:
# Write the <colBreaks> element.
page_breaks = self._sort_pagebreaks(self.vbreaks)
if not page_breaks:
return
count = len(page_breaks)
attributes = [
("count", count),
("manualBreakCount", count),
]
self._xml_start_tag("colBreaks", attributes)
for col_num in page_breaks:
self._write_brk(col_num, 1048575)
self._xml_end_tag("colBreaks")
def _write_brk(self, brk_id, brk_max) -> None:
# Write the <brk> element.
attributes = [("id", brk_id), ("max", brk_max), ("man", 1)]
self._xml_empty_tag("brk", attributes)
def _write_merge_cells(self) -> None:
# Write the <mergeCells> element.
merged_cells = self.merge
count = len(merged_cells)
if not count:
return
attributes = [("count", count)]
self._xml_start_tag("mergeCells", attributes)
for merged_range in merged_cells:
# Write the mergeCell element.
self._write_merge_cell(merged_range)
self._xml_end_tag("mergeCells")
def _write_merge_cell(self, merged_range) -> None:
# Write the <mergeCell> element.
(row_min, col_min, row_max, col_max) = merged_range
# Convert the merge dimensions to a cell range.
cell_1 = xl_rowcol_to_cell(row_min, col_min)
cell_2 = xl_rowcol_to_cell(row_max, col_max)
ref = cell_1 + ":" + cell_2
attributes = [("ref", ref)]
self._xml_empty_tag("mergeCell", attributes)
    def _write_hyperlinks(self) -> None:
        # Process any stored hyperlinks in row/col order and write the
        # <hyperlinks> element. The attributes are different for internal
        # and external links.
        # Sort the hyperlinks into row order.
        row_nums = sorted(self.hyperlinks.keys())
        # Exit if there are no hyperlinks to process.
        if not row_nums:
            return
        # Write the hyperlink elements.
        self._xml_start_tag("hyperlinks")
        # Iterate over the rows.
        for row_num in row_nums:
            # Sort the hyperlinks into column order.
            col_nums = sorted(self.hyperlinks[row_num].keys())
            # Iterate over the columns.
            for col_num in col_nums:
                # Get the link data for this cell.
                url = self.hyperlinks[row_num][col_num]
                # If the cell was overwritten by the user and isn't a string
                # then we have to add the url as the string to display.
                # NOTE(review): this indexes self.table[row_num][col_num]
                # directly — presumably self.table is a defaultdict so
                # missing keys don't raise; verify against its definition.
                if self.table and self.table[row_num] and self.table[row_num][col_num]:
                    cell = self.table[row_num][col_num]
                    if cell.__class__.__name__ != "String":
                        url._is_object_link = True
                if url._link_type in (UrlTypes.URL, UrlTypes.EXTERNAL):
                    # External link with rel file relationship. Each one
                    # consumes a new relationship id.
                    self.rel_count += 1
                    self._write_hyperlink_external(
                        row_num, col_num, self.rel_count, url
                    )
                    # Links for use by the packager.
                    self.external_hyper_links.append(
                        ["/hyperlink", url._target(), "External"]
                    )
                else:
                    # Internal link with rel file relationship.
                    self._write_hyperlink_internal(row_num, col_num, url)
        self._xml_end_tag("hyperlinks")
    def _write_hyperlink_external(
        self, row: int, col: int, id_num: int, url: Url
    ) -> None:
        # Write the <hyperlink> element for external links. The target URL
        # itself lives in the rels file; the element references it via the
        # "r:id" relationship id built from `id_num`.
        ref = xl_rowcol_to_cell(row, col)
        r_id = "rId" + str(id_num)
        attributes = [("ref", ref), ("r:id", r_id)]
        if url._anchor:
            attributes.append(("location", url._anchor))
        # Cells overwritten with non-string data need an explicit display
        # string (see _write_hyperlinks).
        if url._is_object_link:
            attributes.append(("display", url._text))
        if url._tip:
            attributes.append(("tooltip", url._tip))
        self._xml_empty_tag("hyperlink", attributes)
    def _write_hyperlink_internal(self, row: int, col: int, url: Url) -> None:
        # Write the <hyperlink> element for internal links. Unlike external
        # links these carry the target in a "location" attribute and always
        # include the display text.
        ref = xl_rowcol_to_cell(row, col)
        attributes = [("ref", ref), ("location", url._link)]
        if url._tip:
            attributes.append(("tooltip", url._tip))
        attributes.append(("display", url._text))
        self._xml_empty_tag("hyperlink", attributes)
def _write_auto_filter(self) -> None:
# Write the <autoFilter> element.
if not self.autofilter_ref:
return
attributes = [("ref", self.autofilter_ref)]
if self.filter_on:
# Autofilter defined active filters.
self._xml_start_tag("autoFilter", attributes)
self._write_autofilters()
self._xml_end_tag("autoFilter")
else:
# Autofilter defined without active filters.
self._xml_empty_tag("autoFilter", attributes)
def _write_autofilters(self) -> None:
# Function to iterate through the columns that form part of an
# autofilter range and write the appropriate filters.
(col1, col2) = self.filter_range
for col in range(col1, col2 + 1):
# Skip if column doesn't have an active filter.
if col not in self.filter_cols:
continue
# Retrieve the filter tokens and write the autofilter records.
tokens = self.filter_cols[col]
filter_type = self.filter_type[col]
# Filters are relative to first column in the autofilter.
self._write_filter_column(col - col1, filter_type, tokens)
def _write_filter_column(self, col_id, filter_type, filters) -> None:
# Write the <filterColumn> element.
attributes = [("colId", col_id)]
self._xml_start_tag("filterColumn", attributes)
if filter_type == 1:
# Type == 1 is the new XLSX style filter.
self._write_filters(filters)
else:
# Type == 0 is the classic "custom" filter.
self._write_custom_filters(filters)
self._xml_end_tag("filterColumn")
def _write_filters(self, filters) -> None:
# Write the <filters> element.
non_blanks = [filter for filter in filters if str(filter).lower() != "blanks"]
attributes = []
if len(filters) != len(non_blanks):
attributes = [("blank", 1)]
if len(filters) == 1 and len(non_blanks) == 0:
# Special case for blank cells only.
self._xml_empty_tag("filters", attributes)
else:
# General case.
self._xml_start_tag("filters", attributes)
for autofilter in sorted(non_blanks):
self._write_filter(autofilter)
self._xml_end_tag("filters")
def _write_filter(self, val) -> None:
# Write the <filter> element.
attributes = [("val", val)]
self._xml_empty_tag("filter", attributes)
def _write_custom_filters(self, tokens) -> None:
# Write the <customFilters> element.
if len(tokens) == 2:
# One filter expression only.
self._xml_start_tag("customFilters")
self._write_custom_filter(*tokens)
self._xml_end_tag("customFilters")
else:
# Two filter expressions.
attributes = []
# Check if the "join" operand is "and" or "or".
if tokens[2] == 0:
attributes = [("and", 1)]
else:
attributes = [("and", 0)]
# Write the two custom filters.
self._xml_start_tag("customFilters", attributes)
self._write_custom_filter(tokens[0], tokens[1])
self._write_custom_filter(tokens[3], tokens[4])
self._xml_end_tag("customFilters")
def _write_custom_filter(self, operator, val) -> None:
# Write the <customFilter> element.
attributes = []
operators = {
1: "lessThan",
2: "equal",
3: "lessThanOrEqual",
4: "greaterThan",
5: "notEqual",
6: "greaterThanOrEqual",
22: "equal",
}
# Convert the operator from a number to a descriptive string.
if operators[operator] is not None:
operator = operators[operator]
else:
warn(f"Unknown operator = {operator}")
# The 'equal' operator is the default attribute and isn't stored.
if operator != "equal":
attributes.append(("operator", operator))
attributes.append(("val", val))
self._xml_empty_tag("customFilter", attributes)
def _write_sheet_protection(self) -> None:
    """Write the <sheetProtection> element from self.protect_options.

    Each option toggles one protection attribute. NOTE(review): the
    attribute order appears deliberate (matches Excel's output order) —
    do not reorder the checks.
    """
    # Write the <sheetProtection> element.
    attributes = []
    if not self.protect_options:
        return
    options = self.protect_options
    if options["password"]:
        attributes.append(("password", options["password"]))
    if options["sheet"]:
        attributes.append(("sheet", 1))
    if options["content"]:
        attributes.append(("content", 1))
    # Note the inverted logic below: objects/scenarios/select_* default
    # to protected, so a falsy option writes the attribute.
    if not options["objects"]:
        attributes.append(("objects", 1))
    if not options["scenarios"]:
        attributes.append(("scenarios", 1))
    if options["format_cells"]:
        attributes.append(("formatCells", 0))
    if options["format_columns"]:
        attributes.append(("formatColumns", 0))
    if options["format_rows"]:
        attributes.append(("formatRows", 0))
    if options["insert_columns"]:
        attributes.append(("insertColumns", 0))
    if options["insert_rows"]:
        attributes.append(("insertRows", 0))
    if options["insert_hyperlinks"]:
        attributes.append(("insertHyperlinks", 0))
    if options["delete_columns"]:
        attributes.append(("deleteColumns", 0))
    if options["delete_rows"]:
        attributes.append(("deleteRows", 0))
    if not options["select_locked_cells"]:
        attributes.append(("selectLockedCells", 1))
    if options["sort"]:
        attributes.append(("sort", 0))
    if options["autofilter"]:
        attributes.append(("autoFilter", 0))
    if options["pivot_tables"]:
        attributes.append(("pivotTables", 0))
    if not options["select_unlocked_cells"]:
        attributes.append(("selectUnlockedCells", 1))
    self._xml_empty_tag("sheetProtection", attributes)
def _write_protected_ranges(self) -> None:
    """Write the <protectedRanges> element and its children."""
    if self.num_protected_ranges == 0:
        return

    self._xml_start_tag("protectedRanges")
    for protected_range in self.protected_ranges:
        self._write_protected_range(*protected_range)
    self._xml_end_tag("protectedRanges")
def _write_protected_range(self, cell_range, range_name, password) -> None:
    """Write a single <protectedRange> element."""
    attributes = []
    if password:
        attributes.append(("password", password))
    attributes.extend([("sqref", cell_range), ("name", range_name)])
    self._xml_empty_tag("protectedRange", attributes)
def _write_drawings(self) -> None:
    """Write the <drawing> reference when the sheet has a drawing."""
    if self.drawing:
        self.rel_count += 1
        self._write_drawing(self.rel_count)
def _write_drawing(self, drawing_id) -> None:
    """Write the <drawing> element with its relationship id."""
    self._xml_empty_tag("drawing", [("r:id", f"rId{drawing_id}")])
def _write_legacy_drawing(self) -> None:
    """Write the <legacyDrawing> element for VML objects."""
    if not self.has_vml:
        return

    # Increment the relationship id for any drawings or comments.
    self.rel_count += 1
    self._xml_empty_tag("legacyDrawing", [("r:id", f"rId{self.rel_count}")])
def _write_legacy_drawing_hf(self) -> None:
    """Write the <legacyDrawingHF> element for header/footer VML."""
    if not self.has_header_vml:
        return

    # Increment the relationship id for any drawings or comments.
    self.rel_count += 1
    self._xml_empty_tag("legacyDrawingHF", [("r:id", f"rId{self.rel_count}")])
def _write_picture(self) -> None:
    """Write the <picture> element for a worksheet background image."""
    if not self.background_image:
        return

    # Increment the relationship id.
    self.rel_count += 1
    self._xml_empty_tag("picture", [("r:id", f"rId{self.rel_count}")])
def _write_data_validations(self) -> None:
    """Write the <dataValidations> element and its children."""
    count = len(self.validations)
    if count == 0:
        return

    self._xml_start_tag("dataValidations", [("count", count)])
    for validation in self.validations:
        self._write_data_validation(validation)
    self._xml_end_tag("dataValidations")
def _write_data_validation(self, options) -> None:
    """Write a <dataValidation> element.

    Builds the sqref from the option's cell ranges (normalizing reversed
    row/col order), then writes the validation attributes and, for
    non-"none" types, the formula1/formula2 children.
    """
    # Write the <dataValidation> element.
    sqref = ""
    attributes = []
    # Set the cell range(s) for the data validation.
    for cells in options["cells"]:
        # Add a space between multiple cell ranges.
        if sqref != "":
            sqref += " "
        (row_first, col_first, row_last, col_last) = cells
        # Swap last row/col for first row/col as necessary
        if row_first > row_last:
            (row_first, row_last) = (row_last, row_first)
        if col_first > col_last:
            (col_first, col_last) = (col_last, col_first)
        sqref += xl_range(row_first, col_first, row_last, col_last)
    # An explicit multi_range string overrides the computed sqref.
    if options.get("multi_range"):
        sqref = options["multi_range"]
    if options["validate"] != "none":
        attributes.append(("type", options["validate"]))
        if options["criteria"] != "between":
            attributes.append(("operator", options["criteria"]))
    if "error_type" in options:
        if options["error_type"] == 1:
            attributes.append(("errorStyle", "warning"))
        if options["error_type"] == 2:
            attributes.append(("errorStyle", "information"))
    if options["ignore_blank"]:
        attributes.append(("allowBlank", 1))
    if not options["dropdown"]:
        attributes.append(("showDropDown", 1))
    if options["show_input"]:
        attributes.append(("showInputMessage", 1))
    if options["show_error"]:
        attributes.append(("showErrorMessage", 1))
    if "error_title" in options:
        attributes.append(("errorTitle", options["error_title"]))
    if "error_message" in options:
        attributes.append(("error", options["error_message"]))
    if "input_title" in options:
        attributes.append(("promptTitle", options["input_title"]))
    if "input_message" in options:
        attributes.append(("prompt", options["input_message"]))
    attributes.append(("sqref", sqref))
    if options["validate"] == "none":
        self._xml_empty_tag("dataValidation", attributes)
    else:
        self._xml_start_tag("dataValidation", attributes)
        # Write the formula1 element.
        self._write_formula_1(options["value"])
        # Write the formula2 element.
        if options["maximum"] is not None:
            self._write_formula_2(options["maximum"])
        self._xml_end_tag("dataValidation")
def _write_formula_1(self, formula) -> None:
    """Write the <formula1> element for a data validation value."""
    if isinstance(formula, list):
        # A list is written as a quoted, comma-separated string.
        formula = f'"{self._csv_join(*formula)}"'
    else:
        try:
            float(formula)
        except ValueError:
            # Not a number: strip any leading '=' sign(s).
            formula = formula.lstrip("=")

    self._xml_data_element("formula1", formula)
def _write_formula_2(self, formula) -> None:
    """Write the <formula2> element for a validation maximum value."""
    try:
        float(formula)
    except ValueError:
        # Not a number: strip any leading '=' sign(s).
        formula = formula.lstrip("=")

    self._xml_data_element("formula2", formula)
def _write_conditional_formats(self) -> None:
    """Write every conditional format range in sorted range order."""
    for cond_range in sorted(self.cond_formats):
        self._write_conditional_formatting(
            cond_range, self.cond_formats[cond_range]
        )
def _write_conditional_formatting(self, cond_range, params) -> None:
    """Write a <conditionalFormatting> element and its cfRule children."""
    self._xml_start_tag("conditionalFormatting", [("sqref", cond_range)])
    for param in params:
        self._write_cf_rule(param)
    self._xml_end_tag("conditionalFormatting")
def _write_cf_rule(self, params) -> None:
    """Write a <cfRule> element.

    Dispatches on params["type"]: some rule types write an empty tag,
    others open the tag and write formula/colorScale/dataBar/iconSet
    children before closing it.
    """
    # Write the <cfRule> element.
    attributes = [("type", params["type"])]
    if "format" in params and params["format"] is not None:
        attributes.append(("dxfId", params["format"]))
    attributes.append(("priority", params["priority"]))
    if params.get("stop_if_true"):
        attributes.append(("stopIfTrue", 1))
    if params["type"] == "cellIs":
        attributes.append(("operator", params["criteria"]))
        self._xml_start_tag("cfRule", attributes)
        # "between"-style criteria carry both bounds; others one value.
        if "minimum" in params and "maximum" in params:
            self._write_formula_element(params["minimum"])
            self._write_formula_element(params["maximum"])
        else:
            self._write_formula_element(params["value"])
        self._xml_end_tag("cfRule")
    elif params["type"] == "aboveAverage":
        if re.search("below", params["criteria"]):
            attributes.append(("aboveAverage", 0))
        if re.search("equal", params["criteria"]):
            attributes.append(("equalAverage", 1))
        if re.search("[123] std dev", params["criteria"]):
            match = re.search("([123]) std dev", params["criteria"])
            attributes.append(("stdDev", match.group(1)))
        self._xml_empty_tag("cfRule", attributes)
    elif params["type"] == "top10":
        if "criteria" in params and params["criteria"] == "%":
            attributes.append(("percent", 1))
        if "direction" in params:
            attributes.append(("bottom", 1))
        rank = params["value"] or 10
        attributes.append(("rank", rank))
        self._xml_empty_tag("cfRule", attributes)
    elif params["type"] == "duplicateValues":
        self._xml_empty_tag("cfRule", attributes)
    elif params["type"] == "uniqueValues":
        self._xml_empty_tag("cfRule", attributes)
    elif (
        params["type"] == "containsText"
        or params["type"] == "notContainsText"
        or params["type"] == "beginsWith"
        or params["type"] == "endsWith"
    ):
        attributes.append(("operator", params["criteria"]))
        attributes.append(("text", params["value"]))
        self._xml_start_tag("cfRule", attributes)
        self._write_formula_element(params["formula"])
        self._xml_end_tag("cfRule")
    elif params["type"] == "timePeriod":
        attributes.append(("timePeriod", params["criteria"]))
        self._xml_start_tag("cfRule", attributes)
        self._write_formula_element(params["formula"])
        self._xml_end_tag("cfRule")
    elif (
        params["type"] == "containsBlanks"
        or params["type"] == "notContainsBlanks"
        or params["type"] == "containsErrors"
        or params["type"] == "notContainsErrors"
    ):
        self._xml_start_tag("cfRule", attributes)
        self._write_formula_element(params["formula"])
        self._xml_end_tag("cfRule")
    elif params["type"] == "colorScale":
        self._xml_start_tag("cfRule", attributes)
        self._write_color_scale(params)
        self._xml_end_tag("cfRule")
    elif params["type"] == "dataBar":
        self._xml_start_tag("cfRule", attributes)
        self._write_data_bar(params)
        # Excel 2010+ data bars also get an extLst extension element.
        if params.get("is_data_bar_2010"):
            self._write_data_bar_ext(params)
        self._xml_end_tag("cfRule")
    elif params["type"] == "expression":
        self._xml_start_tag("cfRule", attributes)
        self._write_formula_element(params["criteria"])
        self._xml_end_tag("cfRule")
    elif params["type"] == "iconSet":
        self._xml_start_tag("cfRule", attributes)
        self._write_icon_set(params)
        self._xml_end_tag("cfRule")
def _write_formula_element(self, formula) -> None:
    """Write the <formula> element of a cfRule."""
    try:
        float(formula)
    except ValueError:
        # Not a number: strip any leading '=' sign(s).
        formula = formula.lstrip("=")

    self._xml_data_element("formula", formula)
def _write_color_scale(self, param) -> None:
    """Write the <colorScale> element: 2 or 3 cfvo stops, then the
    matching number of color children."""
    # Write the <colorScale> element.
    self._xml_start_tag("colorScale")
    self._write_cfvo(param["min_type"], param["min_value"])
    if param["mid_type"] is not None:
        self._write_cfvo(param["mid_type"], param["mid_value"])
    self._write_cfvo(param["max_type"], param["max_value"])
    self._write_color("color", param["min_color"]._attributes())
    if param["mid_color"] is not None:
        self._write_color("color", param["mid_color"]._attributes())
    self._write_color("color", param["max_color"]._attributes())
    self._xml_end_tag("colorScale")
def _write_data_bar(self, param) -> None:
    """Write the <dataBar> element with its cfvo and color children."""
    # Write the <dataBar> element.
    attributes = []
    # Min and max bar lengths are in the spec but not supported directly
    # by Excel.
    if "min_length" in param:
        attributes.append(("minLength", param["min_length"]))
    if "max_length" in param:
        attributes.append(("maxLength", param["max_length"]))
    if param.get("bar_only"):
        attributes.append(("showValue", 0))
    self._xml_start_tag("dataBar", attributes)
    self._write_cfvo(param["min_type"], param["min_value"])
    self._write_cfvo(param["max_type"], param["max_value"])
    self._write_color("color", param["bar_color"]._attributes())
    self._xml_end_tag("dataBar")
def _write_data_bar_ext(self, param) -> None:
    """Write the <extLst> data bar extension and record its pseudo GUID."""
    # Create a pseudo GUID for each unique Excel 2010 data bar.
    worksheet_count = self.index + 1
    data_bar_count = len(self.data_bars_2010) + 1
    guid = f"{{DA7ABA51-AAAA-BBBB-{worksheet_count:04X}-{data_bar_count:012X}}}"

    # Store the 2010 data bar parameters to write the extLst elements.
    param["guid"] = guid
    self.data_bars_2010.append(param)

    self._xml_start_tag("extLst")
    self._write_ext("{B025F937-C7B1-47D3-B67F-A62EFF666E3E}")
    self._xml_data_element("x14:id", guid)
    self._xml_end_tag("ext")
    self._xml_end_tag("extLst")
def _write_icon_set(self, param) -> None:
    """Write the <iconSet> element and its cfvo children (in reversed
    icon order, as required by the file format)."""
    # Write the <iconSet> element.
    attributes = []
    # Don't set attribute for default style.
    if param["icon_style"] != "3TrafficLights":
        attributes = [("iconSet", param["icon_style"])]
    if param.get("icons_only"):
        attributes.append(("showValue", 0))
    if param.get("reverse_icons"):
        attributes.append(("reverse", 1))
    self._xml_start_tag("iconSet", attributes)
    # Write the properties for different icon styles.
    for icon in reversed(param["icons"]):
        self._write_cfvo(icon["type"], icon["value"], icon["criteria"])
    self._xml_end_tag("iconSet")
def _write_cfvo(self, cf_type, val, criteria=None) -> None:
    """Write the <cfvo> (conditional format value object) element."""
    attributes = [("type", cf_type)]
    if val is not None:
        attributes += [("val", val)]
    if criteria:
        attributes += [("gte", 0)]
    self._xml_empty_tag("cfvo", attributes)
def _write_color(self, name, attributes) -> None:
    """Write a generic empty color element with the given tag name."""
    # Write the <color> element.
    self._xml_empty_tag(name, attributes)
def _write_selections(self) -> None:
    """Write one <selection> element per stored selection."""
    for pane, active_cell, sqref in self.selections:
        self._write_selection(pane, active_cell, sqref)
def _write_selection(self, pane, active_cell, sqref) -> None:
    """Write the <selection> element, omitting unset attributes."""
    attributes = []
    for key, value in (
        ("pane", pane),
        ("activeCell", active_cell),
        ("sqref", sqref),
    ):
        if value:
            attributes.append((key, value))
    self._xml_empty_tag("selection", attributes)
def _write_panes(self) -> None:
    """Write the frozen or split <pane> element, if panes are set."""
    if not self.panes:
        return
    # panes[4] == 2 indicates split panes; anything else is freeze.
    if self.panes[4] == 2:
        self._write_split_panes(*self.panes)
    else:
        self._write_freeze_panes(*self.panes)
def _write_freeze_panes(
    self, row: int, col: int, top_row, left_col, pane_type
) -> None:
    """Write the <pane> element for freeze panes.

    Moves any user selection into the new pane(s) and sets the active
    pane based on whether rows, columns, or both are frozen.
    """
    # Write the <pane> element for freeze panes.
    attributes = []
    y_split = row
    x_split = col
    top_left_cell = xl_rowcol_to_cell(top_row, left_col)
    active_pane = ""
    state = ""
    active_cell = ""
    sqref = ""
    # Move user cell selection to the panes.
    if self.selections:
        (_, active_cell, sqref) = self.selections[0]
        self.selections = []
    # Set the active pane.
    if row and col:
        active_pane = "bottomRight"
        row_cell = xl_rowcol_to_cell(row, 0)
        col_cell = xl_rowcol_to_cell(0, col)
        self.selections.append(["topRight", col_cell, col_cell])
        self.selections.append(["bottomLeft", row_cell, row_cell])
        self.selections.append(["bottomRight", active_cell, sqref])
    elif col:
        active_pane = "topRight"
        self.selections.append(["topRight", active_cell, sqref])
    else:
        active_pane = "bottomLeft"
        self.selections.append(["bottomLeft", active_cell, sqref])
    # Set the pane type: 0 = frozen, 1 = frozen-split, else split.
    if pane_type == 0:
        state = "frozen"
    elif pane_type == 1:
        state = "frozenSplit"
    else:
        state = "split"
    if x_split:
        attributes.append(("xSplit", x_split))
    if y_split:
        attributes.append(("ySplit", y_split))
    attributes.append(("topLeftCell", top_left_cell))
    attributes.append(("activePane", active_pane))
    attributes.append(("state", state))
    self._xml_empty_tag("pane", attributes)
def _write_split_panes(self, row: int, col: int, top_row, left_col, _) -> None:
    """Write the <pane> element for split panes.

    Converts the row/col split position to 1/20-twip units and moves
    any user selection into the resulting pane(s).
    """
    # Write the <pane> element for split panes.
    attributes = []
    has_selection = False
    active_pane = ""
    active_cell = ""
    sqref = ""
    y_split = row
    x_split = col
    # Move user cell selection to the panes.
    if self.selections:
        (_, active_cell, sqref) = self.selections[0]
        self.selections = []
        has_selection = True
    # Convert the row and col to 1/20 twip units with padding.
    if y_split:
        y_split = int(20 * y_split + 300)
    if x_split:
        x_split = self._calculate_x_split_width(x_split)
    # For non-explicit topLeft definitions, estimate the cell offset based
    # on the pixels dimensions. This is only a workaround and doesn't take
    # adjusted cell dimensions into account.
    if top_row == row and left_col == col:
        top_row = int(0.5 + (y_split - 300) / 20 / 15)
        left_col = int(0.5 + (x_split - 390) / 20 / 3 * 4 / 64)
    top_left_cell = xl_rowcol_to_cell(top_row, left_col)
    # If there is no selection set the active cell to the top left cell.
    if not has_selection:
        active_cell = top_left_cell
        sqref = top_left_cell
    # Set the Cell selections.
    if row and col:
        active_pane = "bottomRight"
        row_cell = xl_rowcol_to_cell(top_row, 0)
        col_cell = xl_rowcol_to_cell(0, left_col)
        self.selections.append(["topRight", col_cell, col_cell])
        self.selections.append(["bottomLeft", row_cell, row_cell])
        self.selections.append(["bottomRight", active_cell, sqref])
    elif col:
        active_pane = "topRight"
        self.selections.append(["topRight", active_cell, sqref])
    else:
        active_pane = "bottomLeft"
        self.selections.append(["bottomLeft", active_cell, sqref])
    # Format splits to the same precision as Excel.
    if x_split:
        attributes.append(("xSplit", f"{x_split:.16g}"))
    if y_split:
        attributes.append(("ySplit", f"{y_split:.16g}"))
    attributes.append(("topLeftCell", top_left_cell))
    if has_selection:
        attributes.append(("activePane", active_pane))
    self._xml_empty_tag("pane", attributes)
def _calculate_x_split_width(self, width):
# Convert column width from user units to pane split width.
max_digit_width = 7 # For Calabri 11.
padding = 5
# Convert to pixels.
if width < 1:
pixels = int(width * (max_digit_width + padding) + 0.5)
else:
pixels = int(width * max_digit_width + 0.5) + padding
# Convert to points.
points = pixels * 3 / 4
# Convert to twips (twentieths of a point).
twips = points * 20
# Add offset/padding.
width = twips + 390
return width
def _write_table_parts(self) -> None:
    """Write the <tableParts> element with one <tablePart> per table."""
    count = len(self.tables)
    # Return if worksheet doesn't contain any tables.
    if not count:
        return

    self._xml_start_tag("tableParts", [("count", count)])
    for _ in self.tables:
        self.rel_count += 1
        self._write_table_part(self.rel_count)
    self._xml_end_tag("tableParts")
def _write_table_part(self, r_id) -> None:
    """Write the <tablePart> element with its relationship id."""
    self._xml_empty_tag("tablePart", [("r:id", f"rId{r_id}")])
def _write_ext_list(self) -> None:
    """Write the <extLst> element for 2010 data bars and sparklines."""
    has_data_bars = bool(self.data_bars_2010)
    has_sparklines = bool(self.sparklines)
    if not (has_data_bars or has_sparklines):
        return

    self._xml_start_tag("extLst")
    if has_data_bars:
        self._write_ext_list_data_bars()
    if has_sparklines:
        self._write_ext_list_sparklines()
    self._xml_end_tag("extLst")
def _write_ext_list_data_bars(self) -> None:
    """Write the Excel 2010 conditional-format data bar extension."""
    self._write_ext("{78C0D931-6437-407d-A8EE-F0AAD7539E65}")
    self._xml_start_tag("x14:conditionalFormattings")

    # One x14:conditionalFormatting element per 2010 data bar.
    for data_bar in self.data_bars_2010:
        self._write_conditional_formatting_2010(data_bar)

    self._xml_end_tag("x14:conditionalFormattings")
    self._xml_end_tag("ext")
def _write_conditional_formatting_2010(self, data_bar) -> None:
    """Write the <x14:conditionalFormatting> element for a 2010 data bar,
    including its border, negative-fill and axis color children."""
    # Write the <x14:conditionalFormatting> element.
    xmlns_xm = "http://schemas.microsoft.com/office/excel/2006/main"
    attributes = [("xmlns:xm", xmlns_xm)]
    self._xml_start_tag("x14:conditionalFormatting", attributes)
    # Write the x14:cfRule element.
    self._write_x14_cf_rule(data_bar)
    # Write the x14:dataBar element.
    self._write_x14_data_bar(data_bar)
    # Write the x14 max and min data bars.
    self._write_x14_cfvo(data_bar["x14_min_type"], data_bar["min_value"])
    self._write_x14_cfvo(data_bar["x14_max_type"], data_bar["max_value"])
    if not data_bar["bar_no_border"]:
        # Write the x14:borderColor element.
        self._write_x14_border_color(data_bar["bar_border_color"])
    # Write the x14:negativeFillColor element.
    if not data_bar["bar_negative_color_same"]:
        self._write_x14_negative_fill_color(data_bar["bar_negative_color"])
    # Write the x14:negativeBorderColor element.
    if (
        not data_bar["bar_no_border"]
        and not data_bar["bar_negative_border_color_same"]
    ):
        self._write_x14_negative_border_color(data_bar["bar_negative_border_color"])
    # Write the x14:axisColor element.
    if data_bar["bar_axis_position"] != "none":
        self._write_x14_axis_color(data_bar["bar_axis_color"])
    self._xml_end_tag("x14:dataBar")
    self._xml_end_tag("x14:cfRule")
    # Write the xm:sqref element.
    self._xml_data_element("xm:sqref", data_bar["range"])
    self._xml_end_tag("x14:conditionalFormatting")
def _write_x14_cf_rule(self, data_bar) -> None:
    """Open the <x14:cfRule> element for a 2010 data bar."""
    attributes = [("type", "dataBar"), ("id", data_bar["guid"])]
    self._xml_start_tag("x14:cfRule", attributes)
def _write_x14_data_bar(self, data_bar) -> None:
    """Open the <x14:dataBar> element with its 2010-only attributes."""
    # Write the <x14:dataBar> element.
    min_length = 0
    max_length = 100
    attributes = [
        ("minLength", min_length),
        ("maxLength", max_length),
    ]
    if not data_bar["bar_no_border"]:
        attributes.append(("border", 1))
    if data_bar["bar_solid"]:
        attributes.append(("gradient", 0))
    if data_bar["bar_direction"] == "left":
        attributes.append(("direction", "leftToRight"))
    if data_bar["bar_direction"] == "right":
        attributes.append(("direction", "rightToLeft"))
    if data_bar["bar_negative_color_same"]:
        attributes.append(("negativeBarColorSameAsPositive", 1))
    if (
        not data_bar["bar_no_border"]
        and not data_bar["bar_negative_border_color_same"]
    ):
        attributes.append(("negativeBarBorderColorSameAsPositive", 0))
    if data_bar["bar_axis_position"] == "middle":
        attributes.append(("axisPosition", "middle"))
    if data_bar["bar_axis_position"] == "none":
        attributes.append(("axisPosition", "none"))
    self._xml_start_tag("x14:dataBar", attributes)
def _write_x14_cfvo(self, rule_type, value) -> None:
    """Write the <x14:cfvo> element; auto/min/max types carry no xm:f."""
    attributes = [("type", rule_type)]
    if rule_type not in ("min", "max", "autoMin", "autoMax"):
        self._xml_start_tag("x14:cfvo", attributes)
        self._xml_data_element("xm:f", value)
        self._xml_end_tag("x14:cfvo")
    else:
        self._xml_empty_tag("x14:cfvo", attributes)
def _write_x14_border_color(self, color) -> None:
    """Write the <x14:borderColor> element."""
    self._write_color("x14:borderColor", color._attributes())
def _write_x14_negative_fill_color(self, color) -> None:
    """Write the <x14:negativeFillColor> element."""
    self._xml_empty_tag("x14:negativeFillColor", color._attributes())
def _write_x14_negative_border_color(self, color) -> None:
    """Write the <x14:negativeBorderColor> element."""
    self._xml_empty_tag("x14:negativeBorderColor", color._attributes())
def _write_x14_axis_color(self, color) -> None:
    """Write the <x14:axisColor> element."""
    self._xml_empty_tag("x14:axisColor", color._attributes())
def _write_ext_list_sparklines(self) -> None:
    """Write the sparkline extension: one <x14:sparklineGroup> per
    stored sparkline, in reversed insertion order."""
    # Write the sparkline extension sub-elements.
    self._write_ext("{05C60535-1F16-4fd2-B633-F4F36F0B64E0}")
    # Write the x14:sparklineGroups element.
    self._write_sparkline_groups()
    # Write the sparkline elements.
    for sparkline in reversed(self.sparklines):
        # Write the x14:sparklineGroup element.
        self._write_sparkline_group(sparkline)
        # Write the x14:colorSeries element.
        self._write_color_series(sparkline["series_color"])
        # Write the x14:colorNegative element.
        self._write_color_negative(sparkline["negative_color"])
        # Write the x14:colorAxis element.
        self._write_color_axis()
        # Write the x14:colorMarkers element.
        self._write_color_markers(sparkline["markers_color"])
        # Write the x14:colorFirst element.
        self._write_color_first(sparkline["first_color"])
        # Write the x14:colorLast element.
        self._write_color_last(sparkline["last_color"])
        # Write the x14:colorHigh element.
        self._write_color_high(sparkline["high_color"])
        # Write the x14:colorLow element.
        self._write_color_low(sparkline["low_color"])
        if sparkline["date_axis"]:
            self._xml_data_element("xm:f", sparkline["date_axis"])
        self._write_sparklines(sparkline)
        self._xml_end_tag("x14:sparklineGroup")
    self._xml_end_tag("x14:sparklineGroups")
    self._xml_end_tag("ext")
def _write_sparklines(self, sparkline) -> None:
    """Write <x14:sparklines> with one <x14:sparkline> per location."""
    ranges = sparkline["ranges"]
    locations = sparkline["locations"]

    self._xml_start_tag("x14:sparklines")
    for index in range(sparkline["count"]):
        self._xml_start_tag("x14:sparkline")
        self._xml_data_element("xm:f", ranges[index])
        self._xml_data_element("xm:sqref", locations[index])
        self._xml_end_tag("x14:sparkline")
    self._xml_end_tag("x14:sparklines")
def _write_ext(self, uri) -> None:
    """Open an <ext> element with the x14 namespace and the given uri."""
    xmlns_x14 = "http://schemas.microsoft.com/office/spreadsheetml/2009/9/main"
    self._xml_start_tag("ext", [("xmlns:x14", xmlns_x14), ("uri", uri)])
def _write_sparkline_groups(self) -> None:
    """Open the <x14:sparklineGroups> element with the xm namespace."""
    attributes = [
        ("xmlns:xm", "http://schemas.microsoft.com/office/excel/2006/main")
    ]
    self._xml_start_tag("x14:sparklineGroups", attributes)
def _write_sparkline_group(self, options) -> None:
    """Open the <x14:sparklineGroup> element with its attributes in the
    order Excel writes them (see the example below)."""
    # Write the <x14:sparklineGroup> element.
    #
    # Example for order.
    #
    # <x14:sparklineGroup
    #     manualMax="0"
    #     manualMin="0"
    #     lineWeight="2.25"
    #     type="column"
    #     dateAxis="1"
    #     displayEmptyCellsAs="span"
    #     markers="1"
    #     high="1"
    #     low="1"
    #     first="1"
    #     last="1"
    #     negative="1"
    #     displayXAxis="1"
    #     displayHidden="1"
    #     minAxisType="custom"
    #     maxAxisType="custom"
    #     rightToLeft="1">
    #
    empty = options.get("empty")
    attributes = []
    # "group" axis limits are recorded as cust_max/min; explicit numbers
    # become manualMax/manualMin with a "custom" axis type.
    if options.get("max") is not None:
        if options["max"] == "group":
            options["cust_max"] = "group"
        else:
            attributes.append(("manualMax", options["max"]))
            options["cust_max"] = "custom"
    if options.get("min") is not None:
        if options["min"] == "group":
            options["cust_min"] = "group"
        else:
            attributes.append(("manualMin", options["min"]))
            options["cust_min"] = "custom"
    # Ignore the default type attribute (line).
    if options["type"] != "line":
        attributes.append(("type", options["type"]))
    if options.get("weight"):
        attributes.append(("lineWeight", options["weight"]))
    if options.get("date_axis"):
        attributes.append(("dateAxis", 1))
    if empty:
        attributes.append(("displayEmptyCellsAs", empty))
    if options.get("markers"):
        attributes.append(("markers", 1))
    if options.get("high"):
        attributes.append(("high", 1))
    if options.get("low"):
        attributes.append(("low", 1))
    if options.get("first"):
        attributes.append(("first", 1))
    if options.get("last"):
        attributes.append(("last", 1))
    if options.get("negative"):
        attributes.append(("negative", 1))
    if options.get("axis"):
        attributes.append(("displayXAxis", 1))
    if options.get("hidden"):
        attributes.append(("displayHidden", 1))
    if options.get("cust_min"):
        attributes.append(("minAxisType", options["cust_min"]))
    if options.get("cust_max"):
        attributes.append(("maxAxisType", options["cust_max"]))
    if options.get("reverse"):
        attributes.append(("rightToLeft", 1))
    self._xml_start_tag("x14:sparklineGroup", attributes)
def _write_spark_color(self, tag, color) -> None:
    """Write a sparkline color element, skipping unset colors."""
    if not color:
        return
    self._write_color(tag, color._attributes())
def _write_color_series(self, color) -> None:
    """Write the <x14:colorSeries> element."""
    self._write_spark_color("x14:colorSeries", color)
def _write_color_negative(self, color) -> None:
    """Write the <x14:colorNegative> element."""
    self._write_spark_color("x14:colorNegative", color)
def _write_color_axis(self) -> None:
    """Write the <x14:colorAxis> element (always black)."""
    self._write_spark_color("x14:colorAxis", Color("#000000"))
def _write_color_markers(self, color) -> None:
    """Write the <x14:colorMarkers> element."""
    self._write_spark_color("x14:colorMarkers", color)
def _write_color_first(self, color) -> None:
    """Write the <x14:colorFirst> element."""
    self._write_spark_color("x14:colorFirst", color)
def _write_color_last(self, color) -> None:
    """Write the <x14:colorLast> element."""
    self._write_spark_color("x14:colorLast", color)
def _write_color_high(self, color) -> None:
    """Write the <x14:colorHigh> element."""
    self._write_spark_color("x14:colorHigh", color)
def _write_color_low(self, color) -> None:
    """Write the <x14:colorLow> element."""
    self._write_spark_color("x14:colorLow", color)
def _write_phonetic_pr(self) -> None:
    """Write the <phoneticPr> element with its default attributes."""
    self._xml_empty_tag(
        "phoneticPr",
        [("fontId", "0"), ("type", "noConversion")],
    )
def _write_ignored_errors(self) -> None:
    """Write the <ignoredErrors> element for user-suppressed warnings."""
    if not self.ignored_errors:
        return

    # (user option key, XML error attribute) pairs in output order.
    error_types = [
        ("number_stored_as_text", "numberStoredAsText"),
        ("eval_error", "evalError"),
        ("formula_differs", "formula"),
        ("formula_range", "formulaRange"),
        ("formula_unlocked", "unlockedFormula"),
        ("empty_cell_reference", "emptyCellReference"),
        ("list_data_validation", "listDataValidation"),
        ("calculated_column", "calculatedColumn"),
        ("two_digit_text_year", "twoDigitTextYear"),
    ]

    self._xml_start_tag("ignoredErrors")
    for option, attribute in error_types:
        ignored_range = self.ignored_errors.get(option)
        if ignored_range:
            self._write_ignored_error(attribute, ignored_range)
    self._xml_end_tag("ignoredErrors")
def _write_ignored_error(self, error_type, ignored_range) -> None:
    """Write a single <ignoredError> element."""
    self._xml_empty_tag(
        "ignoredError", [("sqref", ignored_range), (error_type, 1)]
    )
| Worksheet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.