language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/1200-1299/1276.Number of Burgers with No Waste of Ingredients/Solution.py | {
"start": 0,
"end": 244
} | class ____:
def numOfBurgers(self, tomatoSlices: int, cheeseSlices: int) -> List[int]:
k = 4 * cheeseSlices - tomatoSlices
y = k // 2
x = cheeseSlices - y
return [] if k % 2 or y < 0 or x < 0 else [x, y]
| Solution |
python | jazzband__tablib | src/tablib/_vendor/dbfpy/fields.py | {
"start": 7232,
"end": 8668
} | class ____(DbfFieldDef):
"""Definition of the numeric field."""
typeCode = "N"
# XXX: now I'm not sure it was a good idea to make a class field
# `defaultValue` instead of a generic method as it was implemented
# previously -- it's ok with all types except number, cuz
# if self.decimalCount is 0, we should return 0 and 0.0 otherwise.
defaultValue = 0
def decodeValue(self, value):
"""Return a number decoded from ``value``.
If decimals is zero, value will be decoded as an integer;
or as a float otherwise.
Return:
Return value is a int (long) or float instance.
"""
value = value.strip(b' \0')
if b'.' in value:
# a float (has decimal separator)
return float(value)
elif value:
# must be an integer
return int(value)
else:
return 0
def encodeValue(self, value):
"""Return string containing encoded ``value``."""
_rv = ("%*.*f" % (self.length, self.decimalCount, value)) # noqa: UP031
if len(_rv) > self.length:
_ppos = _rv.find(".")
if 0 <= _ppos <= self.length:
_rv = _rv[:self.length]
else:
raise ValueError(
f"[{self.name}] Numeric overflow: {_rv} (field width: {self.length})"
)
return _rv
| DbfNumericFieldDef |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 39223,
"end": 45746
} | class ____(HubertPreTrainedModel):
def __init__(self, config, target_lang: Optional[str] = None):
r"""
target_lang (`str`, *optional*):
Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
adapter.<lang>.bin. Only relevant when using an instance of [`HubertForCTC`] with adapters. Uses 'eng' by
default.
"""
super().__init__(config)
self.hubert = HubertModel(config)
self.dropout = nn.Dropout(config.final_dropout)
self.target_lang = target_lang
if config.vocab_size is None:
raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that "
"does not define the vocabulary size of the language model head. Please "
"instantiate the model as follows: `HubertForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
"or define `vocab_size` of your model's configuration."
)
output_hidden_size = (
config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
)
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def tie_weights(self, **kwargs):
"""
This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
passing `target_lang=...` to `from_pretrained(...)`.
This method is **not** supposed to be called by the user and is prone to be changed in the future.
"""
# Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
# correctly load adapter layers for Hubert so that we do not have to introduce a new API to
# [`PreTrainedModel`]. While slightly hacky, Hubert never has to tie input and output embeddings, so that it is
# ok to repurpose this function here.
target_lang = self.target_lang
if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
logger.info("By default `target_lang` is set to 'eng'.")
elif target_lang is not None:
self.load_adapter(target_lang, force_load=True)
def freeze_feature_encoder(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
"""
self.hubert.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.hubert.parameters():
param.requires_grad = False
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[tuple, CausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None and labels.max() >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
outputs = self.hubert(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# retrieve loss input_lengths from attention_mask
attention_mask = (
attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
# ctc_loss doesn't support fp16
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(
log_probs,
flattened_targets,
input_lengths,
target_lengths,
blank=self.config.pad_token_id,
reduction=self.config.ctc_loss_reduction,
zero_infinity=self.config.ctc_zero_infinity,
)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@auto_docstring(
custom_intro="""
Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
SUPERB Keyword Spotting.
"""
)
| HubertForCTC |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 41252,
"end": 41835
} | class ____(FlowRunStateChangeAction):
"""Suspends a flow run associated with the trigger"""
type: Literal["suspend-flow-run"] = "suspend-flow-run"
async def new_state(self, triggered_action: "TriggeredAction") -> StateCreate:
state = Suspended(
timeout_seconds=3600,
message=f"Suspended by Automation {triggered_action.automation.id}",
)
return StateCreate(
type=state.type,
name=state.name,
message=state.message,
state_details=state.state_details,
)
| SuspendFlowRun |
python | readthedocs__readthedocs.org | readthedocs/subscriptions/apps.py | {
"start": 62,
"end": 1166
} | class ____(AppConfig):
"""App configuration."""
name = "readthedocs.subscriptions"
label = "subscriptions"
def ready(self):
import readthedocs.subscriptions.event_handlers # noqa
import readthedocs.subscriptions.signals # noqa
import readthedocs.subscriptions.tasks # noqa
self._add_custom_manager()
def _add_custom_manager(self):
"""
Add a custom manager to the djstripe Subscription model.
Patching the model directly isn't recommended,
since there may be an additional setup
done by django when adding a manager.
Using django's contribute_to_class is the recommended
way of adding a custom manager to a third party model.
The new manager will be accessible from ``Subscription.readthedocs``.
"""
from djstripe.models import Subscription
from readthedocs.subscriptions.querysets import StripeSubscriptionQueryset
manager = StripeSubscriptionQueryset.as_manager()
manager.contribute_to_class(Subscription, "readthedocs")
| SubscriptionsConfig |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 10143,
"end": 10377
} | class ____(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ("nodes",)
nodes: t.List["Expr"]
| Output |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0098_detectorgroup_detector_set_null.py | {
"start": 222,
"end": 1711
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("workflow_engine", "0097_add_unique_constraint_to_datasource"),
]
operations = [
migrations.AlterField(
model_name="detectorgroup",
name="detector",
field=sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="workflow_engine.detector",
),
),
]
| Migration |
python | ansible__ansible | test/lib/ansible_test/_internal/host_profiles.py | {
"start": 58613,
"end": 58849
} | class ____(HostProfile[NetworkInventoryConfig]):
"""Host profile for a network inventory."""
@property
def name(self) -> str:
"""The name of the host profile."""
return self.config.path
| NetworkInventoryProfile |
python | weaviate__weaviate-python-client | weaviate/gql/filter.py | {
"start": 14700,
"end": 15276
} | class ____(NearMedia):
"""NearVideo class used to filter weaviate objects."""
def __init__(
self,
content: dict,
):
"""Initialize a NearVideo class instance.
Args:
content: The content of the `nearVideo` clause.
Raises:
TypeError: If 'content' is not of type dict.
TypeError: If 'content["video"]' is not of type str.
ValueError: If 'content' has key "certainty"/"distance" but the value is not float.
"""
super().__init__(content, MediaType.VIDEO)
| NearVideo |
python | spyder-ide__spyder | spyder/plugins/remoteclient/api/modules/base.py | {
"start": 923,
"end": 1058
} | class ____(SpyderRemoteAPIError):
"""
Exception for errors related to a closed session.
"""
...
| SpyderRemoteSessionClosed |
python | openai__openai-python | src/openai/resources/chat/completions/messages.py | {
"start": 7335,
"end": 7570
} | class ____:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.list = _legacy_response.async_to_raw_response_wrapper(
messages.list,
)
| AsyncMessagesWithRawResponse |
python | getsentry__sentry | src/sentry/replays/lib/cache.py | {
"start": 2550,
"end": 3511
} | class ____(CacheProtocol[T, U]):
"""
Auto cache implementation. Caches the result of a function call on read.
If the intent is to use this cache in a threaded environment you will need to ensure the cache
passed to the init method is thread-safe. This cache does not guarantee thread-safety on its
own.
"""
def __init__(self, fn: Callable[[T], U], cache: CacheProtocol[T, U]) -> None:
self.cache = cache
self.fn = fn
def __contains__(self, key: T) -> bool:
return key in self.cache
def __len__(self):
return len(self.cache)
def __delitem__(self, key: T) -> None:
del self.cache[key]
def __setitem__(self, key: T, value: U) -> None:
self.cache[key] = value
def __getitem__(self, key: T) -> U:
try:
return self.cache[key]
except KeyError:
value = self.fn(key)
self[key] = value
return value
| AutoCache |
python | getsentry__sentry-python | sentry_sdk/integrations/loguru.py | {
"start": 4080,
"end": 6534
} | class ____(_LoguruBaseHandler, BreadcrumbHandler):
"""Modified version of :class:`sentry_sdk.integrations.logging.BreadcrumbHandler` to use loguru's level names."""
pass
def loguru_sentry_logs_handler(message):
# type: (Message) -> None
# This is intentionally a callable sink instead of a standard logging handler
# since otherwise we wouldn't get direct access to message.record
client = sentry_sdk.get_client()
if not client.is_active():
return
if not has_logs_enabled(client.options):
return
record = message.record
if (
LoguruIntegration.sentry_logs_level is None
or record["level"].no < LoguruIntegration.sentry_logs_level
):
return
otel_severity_number, otel_severity_text = _log_level_to_otel(
record["level"].no, SEVERITY_TO_OTEL_SEVERITY
)
attrs = {"sentry.origin": "auto.log.loguru"} # type: dict[str, Any]
project_root = client.options["project_root"]
if record.get("file"):
if project_root is not None and record["file"].path.startswith(project_root):
attrs["code.file.path"] = record["file"].path[len(project_root) + 1 :]
else:
attrs["code.file.path"] = record["file"].path
if record.get("line") is not None:
attrs["code.line.number"] = record["line"]
if record.get("function"):
attrs["code.function.name"] = record["function"]
if record.get("thread"):
attrs["thread.name"] = record["thread"].name
attrs["thread.id"] = record["thread"].id
if record.get("process"):
attrs["process.pid"] = record["process"].id
attrs["process.executable.name"] = record["process"].name
if record.get("name"):
attrs["logger.name"] = record["name"]
extra = record.get("extra")
if isinstance(extra, dict):
for key, value in extra.items():
if isinstance(value, (str, int, float, bool)):
attrs[f"sentry.message.parameter.{key}"] = value
else:
attrs[f"sentry.message.parameter.{key}"] = safe_repr(value)
client._capture_log(
{
"severity_text": otel_severity_text,
"severity_number": otel_severity_number,
"body": record["message"],
"attributes": attrs,
"time_unix_nano": int(record["time"].timestamp() * 1e9),
"trace_id": None,
}
)
| LoguruBreadcrumbHandler |
python | getsentry__sentry | src/sentry/backup/comparators.py | {
"start": 23234,
"end": 25172
} | class ____(JSONScrubbingComparator):
"""
A normal equality comparison, except that it allows the right-side value to be `None` or
missing.
"""
def compare(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
findings = []
fields = sorted(self.fields)
for f in fields:
if left["fields"].get(f) is None and right["fields"].get(f) is None:
continue
if right["fields"].get(f) is None:
continue
lv = left["fields"][f]
rv = right["fields"][f]
if lv != rv:
findings.append(
ComparatorFinding(
kind=self.get_kind(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"""the left value ("{lv}") of `{f}` was not equal to the right value ("{rv}")""",
)
)
return findings
def existence(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
"""Ensure that all tracked fields on either both models or neither."""
findings = []
for f in self.fields:
missing_on_left = f not in left["fields"] or left["fields"][f] is None
missing_on_right = f not in right["fields"] or right["fields"][f] is None
if missing_on_left and missing_on_right:
continue
if missing_on_left:
findings.append(
ComparatorFinding(
kind=self.get_kind_existence_check(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"the left `{f}` value was missing",
)
)
return findings
| EqualOrRemovedComparator |
python | pytorch__pytorch | test/dynamo/test_interop.py | {
"start": 146,
"end": 2073
} | class ____(torch._dynamo.test_case.TestCase):
def _common(self, fn):
inputs = [torch.randn(10), torch.randn(10)]
ref = fn(*inputs)
opt_fn = torch.compile(fn, backend="eager", fullgraph=True)
res = opt_fn(*inputs)
self.assertEqual(ref, res)
def test_fx_fn(self):
fx_fn = torch.fx.symbolic_trace(fn)
self._common(lambda a, b: fx_fn(a, b) + 1)
def test_script_fn(self):
script_fn = torch.jit.script(fn)
self._common(lambda a, b: script_fn(a, b) + 1)
def test_trace_fn(self):
trace_fn = torch.jit.trace(fn, [torch.zeros(10), torch.zeros(10)])
self._common(lambda a, b: trace_fn(a, b) + 1)
def test_staticmethod_script_fn(self):
class Foo:
@staticmethod
@torch.jit.script
def _g(a):
return a**2
def g(self, a, b):
return self._g(a) + b
foo = Foo()
self._common(lambda a, b: foo.g(a, b) + 1)
def test_vmap_in_graph(self):
from functools import wraps
from torch._dynamo import allow_in_graph
def traceable(f):
f = allow_in_graph(f)
@wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
return wrapper
cnts = torch._dynamo.testing.CompileCounter()
x = torch.randn(3, 5, 3)
def fn(x):
return torch.vmap(torch.Tensor.t)(x)
fn_opt = torch.compile(fn, backend=cnts, fullgraph=True)
fn_opt_traceable = torch.compile(traceable(fn), backend=cnts, fullgraph=True)
self.assertEqual(fn(x), fn_opt(x))
self.assertEqual(cnts.frame_count, 1)
self.assertEqual(fn_opt(x), fn_opt_traceable(x))
self.assertEqual(cnts.frame_count, 2)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| InteropTests |
python | sympy__sympy | doc/ext/docscrape_sphinx.py | {
"start": 8299,
"end": 8503
} | class ____(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
| SphinxFunctionDoc |
python | redis__redis-py | redis/commands/core.py | {
"start": 52289,
"end": 56490
} | class ____:
"""
Command builder for BITFIELD commands.
"""
def __init__(
self,
client: Union["redis.client.Redis", "redis.asyncio.client.Redis"],
key: str,
default_overflow: Optional[str] = None,
):
self.client = client
self.key = key
self._default_overflow = default_overflow
# for typing purposes, run the following in constructor and in reset()
self.operations: list[tuple[EncodableT, ...]] = []
self._last_overflow = "WRAP"
self.reset()
def reset(self):
"""
Reset the state of the instance to when it was constructed
"""
self.operations = []
self._last_overflow = "WRAP"
self.overflow(self._default_overflow or self._last_overflow)
def overflow(self, overflow: str):
"""
Update the overflow algorithm of successive INCRBY operations
:param overflow: Overflow algorithm, one of WRAP, SAT, FAIL. See the
Redis docs for descriptions of these algorithmsself.
:returns: a :py:class:`BitFieldOperation` instance.
"""
overflow = overflow.upper()
if overflow != self._last_overflow:
self._last_overflow = overflow
self.operations.append(("OVERFLOW", overflow))
return self
def incrby(
self,
fmt: str,
offset: BitfieldOffsetT,
increment: int,
overflow: Optional[str] = None,
):
"""
Increment a bitfield by a given amount.
:param fmt: format-string for the bitfield being updated, e.g. 'u8'
for an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:param int increment: value to increment the bitfield by.
:param str overflow: overflow algorithm. Defaults to WRAP, but other
acceptable values are SAT and FAIL. See the Redis docs for
descriptions of these algorithms.
:returns: a :py:class:`BitFieldOperation` instance.
"""
if overflow is not None:
self.overflow(overflow)
self.operations.append(("INCRBY", fmt, offset, increment))
return self
def get(self, fmt: str, offset: BitfieldOffsetT):
"""
Get the value of a given bitfield.
:param fmt: format-string for the bitfield being read, e.g. 'u8' for
an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:returns: a :py:class:`BitFieldOperation` instance.
"""
self.operations.append(("GET", fmt, offset))
return self
def set(self, fmt: str, offset: BitfieldOffsetT, value: int):
"""
Set the value of a given bitfield.
:param fmt: format-string for the bitfield being read, e.g. 'u8' for
an unsigned 8-bit integer.
:param offset: offset (in number of bits). If prefixed with a
'#', this is an offset multiplier, e.g. given the arguments
fmt='u8', offset='#2', the offset will be 16.
:param int value: value to set at the given position.
:returns: a :py:class:`BitFieldOperation` instance.
"""
self.operations.append(("SET", fmt, offset, value))
return self
@property
def command(self):
cmd = ["BITFIELD", self.key]
for ops in self.operations:
cmd.extend(ops)
return cmd
def execute(self) -> ResponseT:
"""
Execute the operation(s) in a single BITFIELD command. The return value
is a list of values corresponding to each operation. If the client
used to create this instance was a pipeline, the list of values
will be present within the pipeline's execute.
"""
command = self.command
self.reset()
return self.client.execute_command(*command)
| BitFieldOperation |
python | PyCQA__pylint | doc/data/messages/d/declare-non-slot/good.py | {
"start": 0,
"end": 83
} | class ____:
__slots__ = ("name", "surname")
name: str
surname: str
| Student |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/components.py | {
"start": 13195,
"end": 17048
} | class ____(HttpRequester):
"""
Engagements stream uses different endpoints:
- Engagements Recent if start_date/state is less than 30 days and API is able to return all records (<10k), or
- Engagements All which extracts all records, but supports filter on connector side
Recent Engagements API:
https://legacydocs.hubspot.com/docs/methods/engagements/get-recent-engagements
Important: This endpoint returns only last 10k most recently updated records in the last 30 days.
All Engagements API:
https://legacydocs.hubspot.com/docs/methods/engagements/get-all-engagements
Important:
1. The stream is declared to use one stream slice from start date(default/config/state) to time.now(). It doesn't have step.
Based on this we can use stream_slice["start_time"] and be sure that this is equal to value in initial state.
Stream Slice [start_time] is used to define _use_recent_api, concurrent processing of date windows is incompatible and therefore does not support using a step
2.The stream is declared to use 250 as page size param in pagination.
Recent Engagements API have 100 as max param but doesn't fail is bigger value was provided and returns to 100 as default.
3. The stream has is_client_side_incremental=true to filter Engagements All response.
"""
recent_api_total_records_limit = 10000
recent_api_last_days_limit = 29
recent_api_path = "/engagements/v1/engagements/recent/modified"
all_api_path = "/engagements/v1/engagements/paged"
_use_recent_api = None
def should_use_recent_api(self, stream_slice: StreamSlice) -> bool:
if self._use_recent_api is not None:
return self._use_recent_api
# Recent engagements API returns records updated in the last 30 days only. If start time is older All engagements API should be used
if int(stream_slice["start_time"]) >= int(
DatetimeParser().format((ab_datetime_now() - timedelta(days=self.recent_api_last_days_limit)), "%ms")
):
# Recent engagements API returns only 10k most recently updated records.
# API response indicates that there are more records so All engagements API should be used
_, response = self._http_client.send_request(
http_method=self.get_method().value,
url=self._join_url(self.get_url_base(), self.recent_api_path),
headers=self._request_headers({}, stream_slice, {}, {}),
params={"count": 250, "since": stream_slice["start_time"]},
request_kwargs={"stream": self.stream_response},
)
if response.json().get("total") <= self.recent_api_total_records_limit:
self._use_recent_api = True
else:
self._use_recent_api = False
return self._use_recent_api
def get_path(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> str:
if self.should_use_recent_api(stream_slice):
return self.recent_api_path
return self.all_api_path
def get_request_params(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> MutableMapping[str, Any]:
request_params = self._request_options_provider.get_request_params(
stream_state=stream_state,
stream_slice=stream_slice,
next_page_token=next_page_token,
)
if self.should_use_recent_api(stream_slice):
request_params.update({"since": stream_slice["start_time"]})
return request_params
| EngagementsHttpRequester |
python | openai__openai-python | src/openai/types/shared/response_format_json_object.py | {
"start": 201,
"end": 352
} | class ____(BaseModel):
type: Literal["json_object"]
"""The type of response format being defined. Always `json_object`."""
| ResponseFormatJSONObject |
python | huggingface__transformers | src/transformers/convert_slow_tokenizer.py | {
"start": 34032,
"end": 34813
} | class ____(SpmConverter):
def vocab(self, proto):
vocab = [
("<s>", 0.0),
("<pad>", 0.0),
("</s>", 0.0),
("<unk>", 0.0),
]
vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]]
vocab += [("<mask>", 0.0)]
return vocab
def unk_id(self, proto):
unk_id = 3
return unk_id
def post_processor(self):
return processors.TemplateProcessing(
single="<s> $A </s>",
pair="<s> $A </s> </s> $B </s>",
special_tokens=[
("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")),
("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")),
],
)
| XLMRobertaConverter |
python | kubernetes-client__python | kubernetes/client/models/v1_limit_response.py | {
"start": 383,
"end": 4744
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'queuing': 'V1QueuingConfiguration',
'type': 'str'
}
attribute_map = {
'queuing': 'queuing',
'type': 'type'
}
def __init__(self, queuing=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1LimitResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._queuing = None
self._type = None
self.discriminator = None
if queuing is not None:
self.queuing = queuing
self.type = type
@property
def queuing(self):
"""Gets the queuing of this V1LimitResponse. # noqa: E501
:return: The queuing of this V1LimitResponse. # noqa: E501
:rtype: V1QueuingConfiguration
"""
return self._queuing
@queuing.setter
def queuing(self, queuing):
"""Sets the queuing of this V1LimitResponse.
:param queuing: The queuing of this V1LimitResponse. # noqa: E501
:type: V1QueuingConfiguration
"""
self._queuing = queuing
@property
def type(self):
"""Gets the type of this V1LimitResponse. # noqa: E501
`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required. # noqa: E501
:return: The type of this V1LimitResponse. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1LimitResponse.
`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required. # noqa: E501
:param type: The type of this V1LimitResponse. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1LimitResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1LimitResponse):
return True
return self.to_dict() != other.to_dict()
| V1LimitResponse |
python | google__jax | build/tools/command.py | {
"start": 1237,
"end": 1857
} | class ____:
"""
Represents the result of executing a subprocess command.
"""
command: str
return_code: int = 2 # Defaults to not successful
logs: str = ""
start_time: datetime.datetime = dataclasses.field(
default_factory=datetime.datetime.now
)
end_time: Optional[datetime.datetime] = None
async def _process_log_stream(stream, result: CommandResult):
"""Logs the output of a subprocess stream."""
while True:
line_bytes = await stream.readline()
if not line_bytes:
break
line = line_bytes.decode().rstrip()
result.logs += line
logger.info("%s", line)
| CommandResult |
python | tiangolo__fastapi | docs_src/additional_responses/tutorial004.py | {
"start": 130,
"end": 701
} | class ____(BaseModel):
id: str
value: str
responses = {
404: {"description": "Item not found"},
302: {"description": "The item was moved"},
403: {"description": "Not enough privileges"},
}
app = FastAPI()
@app.get(
"/items/{item_id}",
response_model=Item,
responses={**responses, 200: {"content": {"image/png": {}}}},
)
async def read_item(item_id: str, img: Union[bool, None] = None):
if img:
return FileResponse("image.png", media_type="image/png")
else:
return {"id": "foo", "value": "there goes my hero"}
| Item |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/tree.py | {
"start": 341,
"end": 8087
} | class ____(JupyterMixin):
"""A renderable for a tree structure.
Args:
label (RenderableType): The renderable or str for the tree label.
style (StyleType, optional): Style of this tree. Defaults to "tree".
guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
expanded (bool, optional): Also display children. Defaults to True.
highlight (bool, optional): Highlight renderable (if str). Defaults to False.
"""
def __init__(
self,
label: RenderableType,
*,
style: StyleType = "tree",
guide_style: StyleType = "tree.line",
expanded: bool = True,
highlight: bool = False,
hide_root: bool = False,
) -> None:
self.label = label
self.style = style
self.guide_style = guide_style
self.children: List[Tree] = []
self.expanded = expanded
self.highlight = highlight
self.hide_root = hide_root
def add(
self,
label: RenderableType,
*,
style: Optional[StyleType] = None,
guide_style: Optional[StyleType] = None,
expanded: bool = True,
highlight: Optional[bool] = False,
) -> "Tree":
"""Add a child tree.
Args:
label (RenderableType): The renderable or str for the tree label.
style (StyleType, optional): Style of this tree. Defaults to "tree".
guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
expanded (bool, optional): Also display children. Defaults to True.
highlight (Optional[bool], optional): Highlight renderable (if str). Defaults to False.
Returns:
Tree: A new child Tree, which may be further modified.
"""
node = Tree(
label,
style=self.style if style is None else style,
guide_style=self.guide_style if guide_style is None else guide_style,
expanded=expanded,
highlight=self.highlight if highlight is None else highlight,
)
self.children.append(node)
return node
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
stack: List[Iterator[Tuple[bool, Tree]]] = []
pop = stack.pop
push = stack.append
new_line = Segment.line()
get_style = console.get_style
null_style = Style.null()
guide_style = get_style(self.guide_style, default="") or null_style
SPACE, CONTINUE, FORK, END = range(4)
ASCII_GUIDES = (" ", "| ", "+-- ", "`-- ")
TREE_GUIDES = [
(" ", "│ ", "├── ", "└── "),
(" ", "┃ ", "┣━━ ", "┗━━ "),
(" ", "║ ", "╠══ ", "╚══ "),
]
_Segment = Segment
def make_guide(index: int, style: Style) -> Segment:
"""Make a Segment for a level of the guide lines."""
if options.ascii_only:
line = ASCII_GUIDES[index]
else:
guide = 1 if style.bold else (2 if style.underline2 else 0)
line = TREE_GUIDES[0 if options.legacy_windows else guide][index]
return _Segment(line, style)
levels: List[Segment] = [make_guide(CONTINUE, guide_style)]
push(iter(loop_last([self])))
guide_style_stack = StyleStack(get_style(self.guide_style))
style_stack = StyleStack(get_style(self.style))
remove_guide_styles = Style(bold=False, underline2=False)
depth = 0
while stack:
stack_node = pop()
try:
last, node = next(stack_node)
except StopIteration:
levels.pop()
if levels:
guide_style = levels[-1].style or null_style
levels[-1] = make_guide(FORK, guide_style)
guide_style_stack.pop()
style_stack.pop()
continue
push(stack_node)
if last:
levels[-1] = make_guide(END, levels[-1].style or null_style)
guide_style = guide_style_stack.current + get_style(node.guide_style)
style = style_stack.current + get_style(node.style)
prefix = levels[(2 if self.hide_root else 1) :]
renderable_lines = console.render_lines(
Styled(node.label, style),
options.update(
width=options.max_width
- sum(level.cell_length for level in prefix),
highlight=self.highlight,
height=None,
),
pad=options.justify is not None,
)
if not (depth == 0 and self.hide_root):
for first, line in loop_first(renderable_lines):
if prefix:
yield from _Segment.apply_style(
prefix,
style.background_style,
post_style=remove_guide_styles,
)
yield from line
yield new_line
if first and prefix:
prefix[-1] = make_guide(
SPACE if last else CONTINUE, prefix[-1].style or null_style
)
if node.expanded and node.children:
levels[-1] = make_guide(
SPACE if last else CONTINUE, levels[-1].style or null_style
)
levels.append(
make_guide(END if len(node.children) == 1 else FORK, guide_style)
)
style_stack.push(get_style(node.style))
guide_style_stack.push(get_style(node.guide_style))
push(iter(loop_last(node.children)))
depth += 1
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
stack: List[Iterator[Tree]] = [iter([self])]
pop = stack.pop
push = stack.append
minimum = 0
maximum = 0
measure = Measurement.get
level = 0
while stack:
iter_tree = pop()
try:
tree = next(iter_tree)
except StopIteration:
level -= 1
continue
push(iter_tree)
min_measure, max_measure = measure(console, options, tree.label)
indent = level * 4
minimum = max(min_measure + indent, minimum)
maximum = max(max_measure + indent, maximum)
if tree.expanded and tree.children:
push(iter(tree.children))
level += 1
return Measurement(minimum, maximum)
if __name__ == "__main__": # pragma: no cover
from pip._vendor.rich.console import Group
from pip._vendor.rich.markdown import Markdown
from pip._vendor.rich.panel import Panel
from pip._vendor.rich.syntax import Syntax
from pip._vendor.rich.table import Table
table = Table(row_styles=["", "dim"])
table.add_column("Released", style="cyan", no_wrap=True)
table.add_column("Title", style="magenta")
table.add_column("Box Office", justify="right", style="green")
table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
code = """\
| Tree |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 357610,
"end": 358208
} | class ____:
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = _multiarray_tests.test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = _multiarray_tests.test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = _multiarray_tests.test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
| TestAsCArray |
python | explosion__spaCy | spacy/pipeline/attributeruler.py | {
"start": 1556,
"end": 13601
} | class ____(Pipe):
"""Set token-level attributes for tokens matched by Matcher patterns.
Additionally supports importing patterns from tag maps and morph rules.
DOCS: https://spacy.io/api/attributeruler
"""
def __init__(
self,
vocab: Vocab,
name: str = "attribute_ruler",
*,
validate: bool = False,
scorer: Optional[Callable] = attribute_ruler_score,
) -> None:
"""Create the AttributeRuler. After creation, you can add patterns
with the `.initialize()` or `.add_patterns()` methods, or load patterns
with `.from_bytes()` or `.from_disk()`. Loading patterns will remove
any patterns you've added previously.
vocab (Vocab): The vocab.
name (str): The pipe name. Defaults to "attribute_ruler".
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_token_attr for the attributes "tag", "pos", "morph" and
"lemma" and Scorer.score_token_attr_per_feat for the attribute
"morph".
RETURNS (AttributeRuler): The AttributeRuler component.
DOCS: https://spacy.io/api/attributeruler#init
"""
self.name = name
self.vocab = vocab
self.matcher = Matcher(self.vocab, validate=validate)
self.validate = validate
self.attrs: List[Dict] = []
self._attrs_unnormed: List[Dict] = [] # store for reference
self.indices: List[int] = []
self.scorer = scorer
def clear(self) -> None:
"""Reset all patterns."""
self.matcher = Matcher(self.vocab, validate=self.validate)
self.attrs = []
self._attrs_unnormed = []
self.indices = []
def initialize(
self,
get_examples: Optional[Callable[[], Iterable[Example]]],
*,
nlp: Optional[Language] = None,
patterns: Optional[Iterable[AttributeRulerPatternType]] = None,
tag_map: Optional[TagMapType] = None,
morph_rules: Optional[MorphRulesType] = None,
) -> None:
"""Initialize the attribute ruler by adding zero or more patterns.
Rules can be specified as a sequence of dicts using the `patterns`
keyword argument. You can also provide rules using the "tag map" or
"morph rules" formats supported by spaCy prior to v3.
"""
self.clear()
if patterns:
self.add_patterns(patterns)
if tag_map:
self.load_from_tag_map(tag_map)
if morph_rules:
self.load_from_morph_rules(morph_rules)
def __call__(self, doc: Doc) -> Doc:
"""Apply the AttributeRuler to a Doc and set all attribute exceptions.
doc (Doc): The document to process.
RETURNS (Doc): The processed Doc.
DOCS: https://spacy.io/api/attributeruler#call
"""
error_handler = self.get_error_handler()
try:
matches = self.match(doc)
self.set_annotations(doc, matches)
return doc
except Exception as e:
return error_handler(self.name, self, [doc], e)
def match(self, doc: Doc):
matches = self.matcher(doc, allow_missing=True, as_spans=False)
# Sort by the attribute ID, so that later rules have precedence
matches = [
(int(self.vocab.strings[m_id]), m_id, s, e) for m_id, s, e in matches # type: ignore
]
matches.sort()
return matches
def set_annotations(self, doc, matches):
"""Modify the document in place"""
for attr_id, match_id, start, end in matches:
span = Span(doc, start, end, label=match_id)
attrs = self.attrs[attr_id]
index = self.indices[attr_id]
try:
# The index can be negative, which makes it annoying to do
# the boundscheck. Let Span do it instead.
token = span[index] # noqa: F841
except IndexError:
# The original exception is just our conditional logic, so we
# raise from.
raise ValueError(
Errors.E1001.format(
patterns=self.matcher.get(span.label),
span=[t.text for t in span],
index=index,
)
) from None
set_token_attrs(span[index], attrs)
def load_from_tag_map(
self, tag_map: Dict[str, Dict[Union[int, str], Union[int, str]]]
) -> None:
"""Load attribute ruler patterns from a tag map.
tag_map (dict): The tag map that maps fine-grained tags to
coarse-grained tags and morphological features.
DOCS: https://spacy.io/api/attributeruler#load_from_morph_rules
"""
for tag, attrs in tag_map.items():
pattern = [{"TAG": tag}]
attrs, morph_attrs = _split_morph_attrs(attrs)
if "MORPH" not in attrs:
morph = self.vocab.morphology.add(morph_attrs)
attrs["MORPH"] = self.vocab.strings[morph]
else:
morph = self.vocab.morphology.add(attrs["MORPH"])
attrs["MORPH"] = self.vocab.strings[morph]
self.add([pattern], attrs) # type: ignore[list-item]
def load_from_morph_rules(
self, morph_rules: Dict[str, Dict[str, Dict[Union[int, str], Union[int, str]]]]
) -> None:
"""Load attribute ruler patterns from morph rules.
morph_rules (dict): The morph rules that map token text and
fine-grained tags to coarse-grained tags, lemmas and morphological
features.
DOCS: https://spacy.io/api/attributeruler#load_from_morph_rules
"""
for tag in morph_rules:
for word in morph_rules[tag]:
pattern = [{"ORTH": word, "TAG": tag}]
attrs = morph_rules[tag][word]
attrs, morph_attrs = _split_morph_attrs(attrs)
if "MORPH" in attrs:
morph = self.vocab.morphology.add(attrs["MORPH"])
attrs["MORPH"] = self.vocab.strings[morph]
elif morph_attrs:
morph = self.vocab.morphology.add(morph_attrs)
attrs["MORPH"] = self.vocab.strings[morph]
self.add([pattern], attrs) # type: ignore[list-item]
def add(
self, patterns: Iterable[MatcherPatternType], attrs: Dict, index: int = 0
) -> None:
"""Add Matcher patterns for tokens that should be modified with the
provided attributes. The token at the specified index within the
matched span will be assigned the attributes.
patterns (Iterable[List[Dict]]): A list of Matcher patterns.
attrs (Dict): The attributes to assign to the target token in the
matched span.
index (int): The index of the token in the matched span to modify. May
be negative to index from the end of the span. Defaults to 0.
DOCS: https://spacy.io/api/attributeruler#add
"""
# We need to make a string here, because otherwise the ID we pass back
# will be interpreted as the hash of a string, rather than an ordinal.
key = str(len(self.attrs))
self.matcher.add(self.vocab.strings.add(key), patterns) # type: ignore[arg-type]
self._attrs_unnormed.append(attrs)
attrs = normalize_token_attrs(self.vocab, attrs)
self.attrs.append(attrs)
self.indices.append(index)
def add_patterns(self, patterns: Iterable[AttributeRulerPatternType]) -> None:
"""Add patterns from a list of pattern dicts with the keys as the
arguments to AttributeRuler.add.
patterns (Iterable[dict]): A list of pattern dicts with the keys
as the arguments to AttributeRuler.add (patterns/attrs/index) to
add as patterns.
DOCS: https://spacy.io/api/attributeruler#add_patterns
"""
for p in patterns:
self.add(**p) # type: ignore[arg-type]
@property
def patterns(self) -> List[AttributeRulerPatternType]:
"""All the added patterns."""
all_patterns = []
for i in range(len(self.attrs)):
p = {}
p["patterns"] = self.matcher.get(str(i))[1]
p["attrs"] = self._attrs_unnormed[i] # type: ignore
p["index"] = self.indices[i] # type: ignore
all_patterns.append(p)
return all_patterns # type: ignore[return-value]
def to_bytes(self, exclude: Iterable[str] = SimpleFrozenList()) -> bytes:
"""Serialize the AttributeRuler to a bytestring.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (bytes): The serialized object.
DOCS: https://spacy.io/api/attributeruler#to_bytes
"""
serialize = {}
serialize["vocab"] = lambda: self.vocab.to_bytes(exclude=exclude)
serialize["patterns"] = lambda: srsly.msgpack_dumps(self.patterns)
return util.to_bytes(serialize, exclude)
def from_bytes(
self, bytes_data: bytes, exclude: Iterable[str] = SimpleFrozenList()
) -> "AttributeRuler":
"""Load the AttributeRuler from a bytestring.
bytes_data (bytes): The data to load.
exclude (Iterable[str]): String names of serialization fields to exclude.
returns (AttributeRuler): The loaded object.
DOCS: https://spacy.io/api/attributeruler#from_bytes
"""
def load_patterns(b):
self.add_patterns(srsly.msgpack_loads(b))
deserialize = {
"vocab": lambda b: self.vocab.from_bytes(b, exclude=exclude),
"patterns": load_patterns,
}
util.from_bytes(bytes_data, deserialize, exclude)
return self
def to_disk(
self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()
) -> None:
"""Serialize the AttributeRuler to disk.
path (Union[Path, str]): A path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
DOCS: https://spacy.io/api/attributeruler#to_disk
"""
serialize = {
"vocab": lambda p: self.vocab.to_disk(p, exclude=exclude),
"patterns": lambda p: srsly.write_msgpack(p, self.patterns),
}
util.to_disk(path, serialize, exclude)
def from_disk(
self, path: Union[Path, str], exclude: Iterable[str] = SimpleFrozenList()
) -> "AttributeRuler":
"""Load the AttributeRuler from disk.
path (Union[Path, str]): A path to a directory.
exclude (Iterable[str]): String names of serialization fields to exclude.
RETURNS (AttributeRuler): The loaded object.
DOCS: https://spacy.io/api/attributeruler#from_disk
"""
def load_patterns(p):
self.add_patterns(srsly.read_msgpack(p))
deserialize = {
"vocab": lambda p: self.vocab.from_disk(p, exclude=exclude),
"patterns": load_patterns,
}
util.from_disk(path, deserialize, exclude)
return self
def _split_morph_attrs(attrs: dict) -> Tuple[dict, dict]:
"""Split entries from a tag map or morph rules dict into to two dicts, one
with the token-level features (POS, LEMMA) and one with the remaining
features, which are presumed to be individual MORPH features."""
other_attrs = {}
morph_attrs = {}
for k, v in attrs.items():
if k in "_" or k in IDS.keys() or k in IDS.values():
other_attrs[k] = v
else:
morph_attrs[k] = v
return other_attrs, morph_attrs
# Setup backwards compatibility hook for factories
def __getattr__(name):
if name == "make_attribute_ruler":
module = importlib.import_module("spacy.pipeline.factories")
return module.make_attribute_ruler
raise AttributeError(f"module {__name__} has no attribute {name}")
| AttributeRuler |
python | huggingface__transformers | src/transformers/models/mgp_str/modeling_mgp_str.py | {
"start": 14549,
"end": 18416
} | class ____(MgpstrPreTrainedModel):
config: MgpstrConfig
main_input_name = "pixel_values"
def __init__(self, config: MgpstrConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.mgp_str = MgpstrModel(config)
self.char_a3_module = MgpstrA3Module(config)
self.bpe_a3_module = MgpstrA3Module(config)
self.wp_a3_module = MgpstrA3Module(config)
self.char_head = nn.Linear(config.hidden_size, config.num_character_labels)
self.bpe_head = nn.Linear(config.hidden_size, config.num_bpe_labels)
self.wp_head = nn.Linear(config.hidden_size, config.num_wordpiece_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.FloatTensor,
output_attentions: Optional[bool] = None,
output_a3_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.FloatTensor], MgpstrModelOutput]:
r"""
output_a3_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of a3 modules. See `a3_attentions` under returned tensors
for more detail.
Example:
```python
>>> from transformers import (
... MgpstrProcessor,
... MgpstrForSceneTextRecognition,
... )
>>> import requests
>>> from PIL import Image
>>> # load image from the IIIT-5k dataset
>>> url = "https://i.postimg.cc/ZKwLg2Gw/367-14.png"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
>>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
>>> model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
>>> # inference
>>> outputs = model(pixel_values)
>>> out_strs = processor.batch_decode(outputs.logits)
>>> out_strs["generated_text"]
'["ticket"]'
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
mgp_outputs = self.mgp_str(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = mgp_outputs[0]
char_a3_out, char_attention = self.char_a3_module(sequence_output)
bpe_a3_out, bpe_attention = self.bpe_a3_module(sequence_output)
wp_a3_out, wp_attention = self.wp_a3_module(sequence_output)
char_logits = self.char_head(char_a3_out)
bpe_logits = self.bpe_head(bpe_a3_out)
wp_logits = self.wp_head(wp_a3_out)
all_a3_attentions = (char_attention, bpe_attention, wp_attention) if output_a3_attentions else None
all_logits = (char_logits, bpe_logits, wp_logits)
if not return_dict:
outputs = (all_logits, all_a3_attentions) + mgp_outputs[1:]
return tuple(output for output in outputs if output is not None)
return MgpstrModelOutput(
logits=all_logits,
hidden_states=mgp_outputs.hidden_states,
attentions=mgp_outputs.attentions,
a3_attentions=all_a3_attentions,
)
__all__ = ["MgpstrModel", "MgpstrPreTrainedModel", "MgpstrForSceneTextRecognition"]
| MgpstrForSceneTextRecognition |
python | milvus-io__pymilvus | pymilvus/client/search_result.py | {
"start": 373,
"end": 9806
} | class ____(list):
ids: List[Union[str, int]]
distances: List[float]
lazy_field_data: List[schema_pb2.FieldData]
has_materialized: bool
dynamic_fields: List[str]
start: int
def __init__(
self,
start: int,
end: int,
all_pks: List[Union[str, int]],
all_scores: List[float],
fields_data: List[schema_pb2.FieldData],
output_fields: List[str],
pk_name: str,
):
self.ids = all_pks[start:end]
self.distances = all_scores[start:end]
self.dynamic_fields = set(output_fields) - {
field_data.field_name for field_data in fields_data
}
self.lazy_field_data = []
self.has_materialized = False
self.start = start
top_k_res = [
Hit({pk_name: all_pks[i], "distance": all_scores[i], "entity": {}}, pk_name=pk_name)
for i in range(start, end)
]
for field_data in fields_data:
data = get_field_data(field_data)
has_valid = len(field_data.valid_data) > 0
if field_data.type in [
DataType.BOOL,
DataType.INT8,
DataType.INT16,
DataType.INT32,
DataType.INT64,
DataType.FLOAT,
DataType.DOUBLE,
DataType.VARCHAR,
DataType.GEOMETRY,
DataType.TIMESTAMPTZ,
]:
if has_valid:
[
hit["entity"].__setitem__(
field_data.field_name,
data[i + start] if field_data.valid_data[i + start] else None,
)
for i, hit in enumerate(top_k_res)
]
else:
[
hit["entity"].__setitem__(field_data.field_name, data[i + start])
for i, hit in enumerate(top_k_res)
]
elif field_data.type == DataType.ARRAY:
element_type = field_data.scalars.array_data.element_type
for i, hit in enumerate(top_k_res):
array_data = field_data.scalars.array_data.data[i + start]
extracted_array_row_data = extract_array_row_data([array_data], element_type)
hit["entity"].__setitem__(field_data.field_name, extracted_array_row_data[0])
elif field_data.type in {
DataType.FLOAT_VECTOR,
DataType.BINARY_VECTOR,
DataType.BFLOAT16_VECTOR,
DataType.FLOAT16_VECTOR,
DataType.INT8_VECTOR,
DataType.SPARSE_FLOAT_VECTOR,
DataType.JSON,
DataType._ARRAY_OF_STRUCT,
DataType._ARRAY_OF_VECTOR,
}:
self.lazy_field_data.append(field_data)
else:
msg = f"Unsupported field type: {field_data.type}"
raise MilvusException(msg)
super().__init__(top_k_res)
def __str__(self) -> str:
"""Only print at most 10 query results"""
reminder = f" ... and {len(self) - 10} entities remaining" if len(self) > 10 else ""
return f"{self[:10]}{reminder}"
def __getitem__(self, key: int):
self.materialize()
return super().__getitem__(key)
def get_raw_item(self, idx: int):
"""Get the item at index without triggering materialization"""
return list.__getitem__(self, idx)
def __iter__(self):
self.materialize()
return super().__iter__()
def materialize(self):
if not self.has_materialized:
for field_data in self.lazy_field_data:
field_name = field_data.field_name
if field_data.type in [
DataType.FLOAT_VECTOR,
DataType.BINARY_VECTOR,
DataType.BFLOAT16_VECTOR,
DataType.FLOAT16_VECTOR,
DataType.INT8_VECTOR,
]:
data = get_field_data(field_data)
dim = field_data.vectors.dim
if field_data.type in [DataType.BINARY_VECTOR]:
dim = dim // 8
elif field_data.type in [DataType.BFLOAT16_VECTOR, DataType.FLOAT16_VECTOR]:
dim = dim * 2
idx = self.start * dim
for i in range(len(self)):
item = self.get_raw_item(i)
item["entity"][field_name] = data[idx : idx + dim]
idx += dim
elif field_data.type == DataType.SPARSE_FLOAT_VECTOR:
idx = self.start
for i in range(len(self)):
item = self.get_raw_item(i)
item["entity"][field_name] = entity_helper.sparse_proto_to_rows(
field_data.vectors.sparse_float_vector, idx, idx + 1
)[0]
idx += 1
elif field_data.type == DataType.JSON:
idx = self.start
for i in range(len(self)):
item = self.get_raw_item(i)
if field_data.valid_data and not field_data.valid_data[idx]:
item["entity"][field_name] = None
else:
json_data = field_data.scalars.json_data.data[idx]
try:
json_dict_list = (
orjson.loads(json_data) if json_data is not None else None
)
except Exception as e:
logger.error(
f"HybridHits::materialize::Failed to load JSON data: {e}, original data: {json_data}"
)
raise
if not field_data.is_dynamic:
item["entity"][field_data.field_name] = json_dict_list
elif not self.dynamic_fields:
item["entity"].update(json_dict_list)
else:
item["entity"].update(
{
k: v
for k, v in json_dict_list.items()
if k in self.dynamic_fields
}
)
idx += 1
elif field_data.type == DataType._ARRAY_OF_STRUCT:
# Process struct arrays - convert column format back to array of structs
idx = self.start
struct_arrays = get_field_data(field_data)
if struct_arrays and hasattr(struct_arrays, "fields"):
for i in range(len(self)):
item = self.get_raw_item(i)
item["entity"][field_name] = (
entity_helper.extract_struct_array_from_column_data(
struct_arrays, idx
)
)
idx += 1
else:
for i in range(len(self)):
item = self.get_raw_item(i)
item["entity"][field_name] = None
elif field_data.type == DataType._ARRAY_OF_VECTOR:
idx = self.start
if hasattr(field_data, "vectors") and hasattr(
field_data.vectors, "vector_array"
):
vector_array = field_data.vectors.vector_array
for i in range(len(self)):
item = self.get_raw_item(i)
if idx < len(vector_array.data):
vector_data = vector_array.data[idx]
dim = vector_data.dim
float_data = vector_data.float_vector.data
num_vectors = len(float_data) // dim
row_vectors = []
for vec_idx in range(num_vectors):
vec_start = vec_idx * dim
vec_end = vec_start + dim
row_vectors.append(list(float_data[vec_start:vec_end]))
item["entity"][field_name] = row_vectors
else:
item["entity"][field_name] = []
idx += 1
else:
for i in range(len(self)):
item = self.get_raw_item(i)
item["entity"][field_name] = []
else:
msg = f"Unsupported field type: {field_data.type}"
raise MilvusException(msg)
self.has_materialized = True
__repr__ = __str__
| HybridHits |
python | numba__numba | numba/core/typeinfer.py | {
"start": 8989,
"end": 10554
} | class ____(_BuildContainerConstraint):
def __init__(self, target, items, loc):
self.target = target
self.items = items
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of {container_type} at {loc}",
container_type=types.List, loc=self.loc):
typevars = typeinfer.typevars
tsets = [typevars[i.name].get() for i in self.items]
if not tsets:
typeinfer.add_type(self.target,
types.List(types.undefined),
loc=self.loc)
else:
for typs in itertools.product(*tsets):
unified = typeinfer.context.unify_types(*typs)
if unified is not None:
# pull out literals if available
islit = [isinstance(x, types.Literal) for x in typs]
iv = None
if all(islit):
iv = [x.literal_value for x in typs]
typeinfer.add_type(self.target,
types.List(unified,
initial_value=iv),
loc=self.loc)
else:
typeinfer.add_type(self.target,
types.LiteralList(typs),
loc=self.loc)
| BuildListConstraint |
python | zostera__django-bootstrap4 | example/app/views.py | {
"start": 2520,
"end": 2586
} | class ____(TemplateView):
template_name = "app/misc.html"
| MiscView |
python | pypa__warehouse | warehouse/subscriptions/models.py | {
"start": 3839,
"end": 4225
} | class ____(db.Model):
__tablename__ = "stripe_subscription_products"
__repr__ = make_repr("product_name")
product_id: Mapped[str | None] # generated by Payment Service Provider
product_name: Mapped[str]
description: Mapped[str]
is_active: Mapped[bool_true]
tax_code: Mapped[str | None] # https://stripe.com/docs/tax/tax-categories
| StripeSubscriptionProduct |
python | huggingface__transformers | tests/models/glpn/test_modeling_glpn.py | {
"start": 1321,
"end": 1677
} | class ____(ConfigTester):
def create_and_test_config_common_properties(self):
config = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(config, "hidden_sizes"))
self.parent.assertTrue(hasattr(config, "num_attention_heads"))
self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
| GLPNConfigTester |
python | django__django | tests/model_fields/test_imagefield.py | {
"start": 12321,
"end": 12567
} | class ____(ImageFieldTwoDimensionsTests):
"""
Tests behavior of an ImageField with one dimensions field.
"""
PersonModel = PersonWithHeight
@skipIf(Image is None, "Pillow is required to test ImageField")
| ImageFieldOneDimensionTests |
python | wandb__wandb | wandb/vendor/pygments/lexers/lisp.py | {
"start": 78343,
"end": 84464
} | class ____(RegexLexer):
"""
For `newLISP. <www.newlisp.org>`_ source code (version 10.3.0).
.. versionadded:: 1.5
"""
name = 'NewLisp'
aliases = ['newlisp']
filenames = ['*.lsp', '*.nl', '*.kif']
mimetypes = ['text/x-newlisp', 'application/x-newlisp']
flags = re.IGNORECASE | re.MULTILINE | re.UNICODE
# list of built-in functions for newLISP version 10.3
builtins = (
'^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++',
'<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10',
'$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7',
'$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs',
'acos', 'acosh', 'add', 'address', 'amb', 'and', 'append-file',
'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin',
'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec',
'base64-enc', 'bayes-query', 'bayes-train', 'begin',
'beta', 'betai', 'bind', 'binomial', 'bits', 'callback',
'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean',
'close', 'command-event', 'cond', 'cons', 'constant',
'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count',
'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry',
'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec',
'def-new', 'default', 'define-macro', 'define',
'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device',
'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while',
'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup',
'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event',
'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand',
'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter',
'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt',
'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln',
'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string',
'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc',
'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert',
'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error',
'last', 'legal?', 'length', 'let', 'letex', 'letn',
'list?', 'list', 'load', 'local', 'log', 'lookup',
'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat',
'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply',
'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error',
'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local',
'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping',
'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select',
'net-send-to', 'net-send-udp', 'net-send', 'net-service',
'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper',
'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack',
'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 'pop',
'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print',
'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event',
'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand',
'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file',
'read-key', 'read-line', 'read-utf8', 'reader-event',
'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex',
'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse',
'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self',
'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all',
'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent',
'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt',
'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?',
'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term',
'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case',
'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?',
'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until',
'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while',
'write', 'write-char', 'write-file', 'write-line',
'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?',
)
# valid names
valid_name = r'([\w!$%&*+.,/<=>?@^~|-])+|(\[.*?\])+'
tokens = {
'root': [
# shebang
(r'#!(.*?)$', Comment.Preproc),
# comments starting with semicolon
(r';.*$', Comment.Single),
# comments starting with #
(r'#.*$', Comment.Single),
# whitespace
(r'\s+', Text),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
# braces
(r'\{', String, "bracestring"),
# [text] ... [/text] delimited strings
(r'\[text\]*', String, "tagstring"),
# 'special' operators...
(r"('|:)", Operator),
# highlight the builtins
(words(builtins, suffix=r'\b'),
Keyword),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Variable),
# the remaining variables
(valid_name, String.Symbol),
# parentheses
(r'(\(|\))', Punctuation),
],
# braced strings...
'bracestring': [
(r'\{', String, "#push"),
(r'\}', String, "#pop"),
('[^{}]+', String),
],
# tagged [text]...[/text] delimited strings...
'tagstring': [
(r'(?s)(.*?)(\[/text\])', String, '#pop'),
],
}
| NewLispLexer |
python | numba__numba | numba/tests/cloudpickle_main_class.py | {
"start": 122,
"end": 155
} | class ____:
classvar = None
| Klass |
python | scipy__scipy | scipy/signal/_ltisys.py | {
"start": 14862,
"end": 22729
} | class ____(LinearTimeInvariant):
r"""Linear Time Invariant system class in transfer function form.
Represents the system as the continuous-time transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
discrete-time transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
`TransferFunction` systems inherit additional
functionality from the `lti`, respectively the `dlti` classes, depending on
which system representation is used.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, lti, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
represented as ``[1, 3, 5]``)
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: None
)
Construct the transfer function
:math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
0.1 seconds:
>>> signal.TransferFunction(num, den, dt=0.1)
TransferFunctionDiscrete(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_tf()
# Choose whether to inherit from `lti` or from `dlti`
if cls is TransferFunction:
if kwargs.get('dt') is None:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous,
*system,
**kwargs)
else:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete,
*system,
**kwargs)
# No special conversion needed
return super().__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super().__init__(**kwargs)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return (
f'{self.__class__.__name__}(\n'
f'{repr(self.num)},\n'
f'{repr(self.den)},\n'
f'dt: {repr(self.dt)}\n)'
)
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
The `StateSpace` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den),
**self._dt_dict)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den),
**self._dt_dict)
@staticmethod
def _z_to_zinv(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((np.zeros(diff), den))
elif diff < 0:
num = np.hstack((np.zeros(-diff), num))
return num, den
@staticmethod
def _zinv_to_z(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((den, np.zeros(diff)))
elif diff < 0:
num = np.hstack((num, np.zeros(-diff)))
return num, den
| TransferFunction |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2_test.py | {
"start": 97519,
"end": 117298
} | class ____(test.TestCase):
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegex(ValueError,
'feature_columns must not be empty'):
fc_old.input_layer(features={}, feature_columns=[])
def test_should_be_dense_column(self):
with self.assertRaisesRegex(ValueError, 'must be a _DenseColumn'):
fc_old.input_layer(
features={'a': [[0]]},
feature_columns=[
fc.categorical_column_with_hash_bucket('wire_cast', 4)
])
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegex(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
fc_old.input_layer(
features={'a': [[0]]}, feature_columns={'a': fc.numeric_column('a')})
def test_bare_column(self):
with ops.Graph().as_default():
features = features = {'a': [0.]}
net = fc_old.input_layer(features, fc.numeric_column('a'))
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[0.]], self.evaluate(net))
def test_column_generator(self):
with ops.Graph().as_default():
features = features = {'a': [0.], 'b': [1.]}
columns = (fc.numeric_column(key) for key in features)
net = fc_old.input_layer(features, columns)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[0., 1.]], self.evaluate(net))
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegex(
ValueError, 'Duplicate feature column name found for columns'):
fc_old.input_layer(
features={'a': [[0]]},
feature_columns=[fc.numeric_column('a'),
fc.numeric_column('a')])
def test_one_column(self):
price = fc.numeric_column('price')
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = fc_old.input_layer(features, [price])
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1.], [5.]], self.evaluate(net))
def test_multi_dimension(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = fc_old.input_layer(features, [price])
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_raises_if_shape_mismatch(self):
price = fc.numeric_column('price', shape=2)
with ops.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegex(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
fc_old.input_layer(features, [price])
def test_reshaping(self):
price = fc.numeric_column('price', shape=[1, 2])
with ops.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = fc_old.input_layer(features, [price])
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_multi_column(self):
price1 = fc.numeric_column('price1', shape=2)
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
net = fc_old.input_layer(features, [price1, price2])
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
def test_fills_cols_to_vars(self):
# Provide three _DenseColumn's to input_layer: a _NumericColumn, a
# _BucketizedColumn, and an _EmbeddingColumn. Only the _EmbeddingColumn
# creates a Variable.
price1 = fc.numeric_column('price1')
dense_feature = fc.numeric_column('dense_feature')
dense_feature_bucketized = fc.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
}
cols_to_vars = {}
all_cols = [price1, dense_feature_bucketized, some_embedding_column]
fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertCountEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
self.assertIsInstance(cols_to_vars[some_embedding_column][0],
variables_lib.Variable)
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10])
def test_fills_cols_to_vars_shared_embedding(self):
# Provide 5 DenseColumn's to input_layer: a NumericColumn, a
# BucketizedColumn, an EmbeddingColumn, two SharedEmbeddingColumns. The
# EmbeddingColumn creates a Variable and the two SharedEmbeddingColumns
# shared one variable.
# SharedEmbeddingColumns are graph-only
with ops.Graph().as_default():
price1 = fc.numeric_column('price1')
dense_feature = fc.numeric_column('dense_feature')
dense_feature_bucketized = fc.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
categorical_column_a = fc.categorical_column_with_identity(
key='aaa', num_buckets=3)
categorical_column_b = fc.categorical_column_with_identity(
key='bbb', num_buckets=3)
shared_embedding_a, shared_embedding_b = fc.shared_embedding_columns(
[categorical_column_a, categorical_column_b], dimension=2)
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
'aaa':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(0, 1, 0),
dense_shape=(2, 2)),
'bbb':
sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (1, 1)),
values=(1, 2, 1),
dense_shape=(2, 2)),
}
cols_to_vars = {}
all_cols = [
price1, dense_feature_bucketized, some_embedding_column,
shared_embedding_a, shared_embedding_b
]
fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertCountEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(1, len(cols_to_vars[some_embedding_column]))
self.assertEqual(1, len(cols_to_vars[shared_embedding_a]))
# This is a bug in the current implementation and should be fixed in the
# new one.
self.assertEqual(0, len(cols_to_vars[shared_embedding_b]))
self.assertIsInstance(cols_to_vars[some_embedding_column][0],
variables_lib.Variable)
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [5, 10])
self.assertIsInstance(cols_to_vars[shared_embedding_a][0],
variables_lib.Variable)
self.assertAllEqual(cols_to_vars[shared_embedding_a][0].shape, [3, 2])
def test_fills_cols_to_vars_partitioned_variables(self):
price1 = fc.numeric_column('price1')
dense_feature = fc.numeric_column('dense_feature')
dense_feature_bucketized = fc.bucketized_column(
dense_feature, boundaries=[0.])
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'price1': [[3.], [4.]],
'dense_feature': [[-1.], [4.]],
'sparse_feature': [['a'], ['x']],
}
cols_to_vars = {}
all_cols = [price1, dense_feature_bucketized, some_embedding_column]
with variable_scope.variable_scope(
'input_from_feature_columns',
partitioner=partitioned_variables.fixed_size_partitioner(3, axis=0)):
fc_old.input_layer(features, all_cols, cols_to_vars=cols_to_vars)
self.assertCountEqual(list(cols_to_vars.keys()), all_cols)
self.assertEqual(0, len(cols_to_vars[price1]))
self.assertEqual(0, len(cols_to_vars[dense_feature_bucketized]))
self.assertEqual(3, len(cols_to_vars[some_embedding_column]))
self.assertEqual(
'input_from_feature_columns/input_layer/sparse_feature_embedding/'
'embedding_weights/part_0:0',
cols_to_vars[some_embedding_column][0].name)
self.assertAllEqual(cols_to_vars[some_embedding_column][0].shape, [2, 10])
self.assertAllEqual(cols_to_vars[some_embedding_column][1].shape, [2, 10])
self.assertAllEqual(cols_to_vars[some_embedding_column][2].shape, [1, 10])
def test_column_order(self):
price_a = fc.numeric_column('price_a')
price_b = fc.numeric_column('price_b')
with ops.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = fc_old.input_layer(features, [price_a, price_b])
net2 = fc_old.input_layer(features, [price_b, price_a])
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(lookup_ops.tables_initializer())
self.assertAllClose([[1., 3.]], self.evaluate(net1))
self.assertAllClose([[1., 3.]], self.evaluate(net2))
def test_fails_for_categorical_column(self):
animal = fc.categorical_column_with_identity('animal', num_buckets=4)
with ops.Graph().as_default():
features = {
'animal':
sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegex(Exception, 'must be a _DenseColumn'):
fc_old.input_layer(features, [animal])
def test_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': [[1.], [5.], [7.]], # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
with self.assertRaisesRegex(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.input_layer(features, [price1, price2])
def test_subset_of_static_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
price3 = fc.numeric_column('price3')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]], # batchsize = 2
'price3': [[3.], [4.], [5.]] # batchsize = 3
}
with self.assertRaisesRegex(
ValueError,
r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.input_layer(features, [price1, price2, price3])
def test_runtime_batch_size_mismatch(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 3
'price2': [[3.], [4.]] # batchsize = 2
}
net = fc_old.input_layer(features, [price1, price2])
with _initialized_session() as sess:
with self.assertRaisesRegex(errors.OpError,
'Dimension 0 in both shapes must be equal'):
sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
def test_runtime_batch_size_matches(self):
price1 = fc.numeric_column('price1')
price2 = fc.numeric_column('price2')
with ops.Graph().as_default():
features = {
'price1': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
'price2': array_ops.placeholder(dtype=dtypes.int64), # batchsize = 2
}
net = fc_old.input_layer(features, [price1, price2])
with _initialized_session() as sess:
sess.run(
net,
feed_dict={
features['price1']: [[1.], [5.]],
features['price2']: [[1.], [5.]],
})
def test_multiple_layers_with_same_embedding_column(self):
some_sparse_column = fc.categorical_column_with_hash_bucket(
'sparse_feature', hash_bucket_size=5)
some_embedding_column = fc.embedding_column(
some_sparse_column, dimension=10)
with ops.Graph().as_default():
features = {
'sparse_feature': [['a'], ['x']],
}
all_cols = [some_embedding_column]
fc_old.input_layer(features, all_cols)
fc_old.input_layer(features, all_cols)
# Make sure that 2 variables get created in this case.
self.assertEqual(2, len(
ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)))
expected_var_names = [
'input_layer/sparse_feature_embedding/embedding_weights:0',
'input_layer_1/sparse_feature_embedding/embedding_weights:0'
]
self.assertCountEqual(
expected_var_names,
[v.name for v in ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)])
def test_with_1d_sparse_tensor(self):
embedding_values = (
(1., 2., 3., 4., 5.), # id 0
(6., 7., 8., 9., 10.), # id 1
(11., 12., 13., 14., 15.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in input_layer.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
# embedded_body_style has 5 dims in input_layer.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=5, initializer=_initializer)
# Provides 1-dim tensor and dense tensor.
features = {
'price':
constant_op.constant([
11.,
12.,
]),
'body-style':
sparse_tensor.SparseTensor(
indices=((0,), (1,)),
values=('sedan', 'hardtop'),
dense_shape=(2,)),
# This is dense tensor for the categorical_column.
'country':
constant_op.constant(['CA', 'US']),
}
self.assertEqual(1, features['price'].shape.ndims)
self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
self.assertEqual(1, features['country'].shape.ndims)
if context.executing_eagerly():
# The variables will only be initialized in TF2
net = fc_old.input_layer(features,
[price, one_hot_body_style, embedded_country])
self.assertEqual(1 + 3 + 5, net.shape[1])
# Each row is formed by concatenating `embedded_body_style`,
# `one_hot_body_style`, and `price` in order.
self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
[1., 0., 0., 1., 2., 3., 4., 5., 12.]],
self.evaluate(net))
def test_with_1d_unknown_shape_sparse_tensor(self):
# This test needs to construct graph placeholders
# w/ unknown shapes, so we enter a graph
with ops.Graph().as_default():
embedding_values = (
(1., 2.), # id 0
(6., 7.), # id 1
(11., 12.) # id 2
)
def _initializer(shape, dtype, partition_info=None):
del shape, dtype, partition_info
return embedding_values
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
# one_hot_body_style has 3 dims in input_layer.
body_style = fc.categorical_column_with_vocabulary_list(
'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
one_hot_body_style = fc.indicator_column(body_style)
# embedded_body_style has 5 dims in input_layer.
country = fc.categorical_column_with_vocabulary_list(
'country', vocabulary_list=['US', 'JP', 'CA'])
embedded_country = fc.embedding_column(
country, dimension=2, initializer=_initializer)
# Provides 1-dim tensor and dense tensor.
features = {
'price': array_ops.placeholder(dtypes.float32),
'body-style': array_ops.sparse_placeholder(dtypes.string),
# This is dense tensor for the categorical_column.
'country': array_ops.placeholder(dtypes.string),
}
self.assertIsNone(features['price'].shape.ndims)
self.assertIsNone(features['body-style'].get_shape().ndims)
self.assertIsNone(features['country'].shape.ndims)
price_data = np.array([11., 12.])
body_style_data = sparse_tensor.SparseTensorValue(
indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
country_data = np.array([['US'], ['CA']])
net = fc_old.input_layer(features,
[price, one_hot_body_style, embedded_country])
self.assertEqual(1 + 3 + 2, net.shape[1])
with _initialized_session() as sess:
# Each row is formed by concatenating `embedded_body_style`,
# `one_hot_body_style`, and `price` in order.
self.assertAllEqual(
[[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
sess.run(
net,
feed_dict={
features['price']: price_data,
features['body-style']: body_style_data,
features['country']: country_data
}))
def test_with_rank_0_feature(self):
# price has 1 dimension in input_layer
price = fc.numeric_column('price')
features = {
'price': constant_op.constant(0),
}
self.assertEqual(0, features['price'].shape.ndims)
# Static rank 0 should fail
with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
fc_old.input_layer(features, [price])
# This test needs to construct graph placeholders
# w/ dynamic rank 0, so we enter a graph
with ops.Graph().as_default():
# Dynamic rank 0 should fail
features = {
'price': array_ops.placeholder(dtypes.float32),
}
net = fc_old.input_layer(features, [price])
self.assertEqual(1, net.shape[1])
with _initialized_session() as sess:
with self.assertRaisesOpError('Feature .* cannot have rank 0'):
sess.run(net, feed_dict={features['price']: np.array(1)})
| FunctionalInputLayerTest |
python | langchain-ai__langchain | libs/core/tests/unit_tests/fake/callbacks.py | {
"start": 1242,
"end": 3091
} | class ____(BaseFakeCallbackHandler):
"""Base fake callback handler mixin for testing."""
def on_llm_start_common(self) -> None:
self.llm_starts += 1
self.starts += 1
def on_llm_end_common(self) -> None:
self.llm_ends += 1
self.ends += 1
def on_llm_error_common(self, *args: Any, **kwargs: Any) -> None:
self.errors += 1
self.errors_args.append({"args": args, "kwargs": kwargs})
def on_llm_new_token_common(self) -> None:
self.llm_streams += 1
def on_retry_common(self) -> None:
self.retries += 1
def on_chain_start_common(self) -> None:
self.chain_starts += 1
self.starts += 1
def on_chain_end_common(self) -> None:
self.chain_ends += 1
self.ends += 1
def on_chain_error_common(self) -> None:
self.errors += 1
def on_tool_start_common(self) -> None:
self.tool_starts += 1
self.starts += 1
def on_tool_end_common(self) -> None:
self.tool_ends += 1
self.ends += 1
def on_tool_error_common(self) -> None:
self.errors += 1
def on_agent_action_common(self) -> None:
self.agent_actions += 1
self.starts += 1
def on_agent_finish_common(self) -> None:
self.agent_ends += 1
self.ends += 1
def on_chat_model_start_common(self) -> None:
self.chat_model_starts += 1
self.starts += 1
def on_text_common(self) -> None:
self.text += 1
def on_retriever_start_common(self) -> None:
self.starts += 1
self.retriever_starts += 1
def on_retriever_end_common(self) -> None:
self.ends += 1
self.retriever_ends += 1
def on_retriever_error_common(self) -> None:
self.errors += 1
self.retriever_errors += 1
| BaseFakeCallbackHandlerMixin |
python | readthedocs__readthedocs.org | readthedocs/analytics/migrations/0002_track_status_code.py | {
"start": 182,
"end": 1678
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0041_track_task_id"),
("projects", "0087_use_booleanfield_null"),
("analytics", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="pageview",
name="full_path",
field=models.CharField(
blank=True,
help_text="Full path including the version and language parts.",
max_length=4096,
null=True,
),
),
migrations.AddField(
model_name="pageview",
name="status",
field=models.PositiveIntegerField(default=200, help_text="HTTP status code"),
),
migrations.AlterField(
model_name="pageview",
name="path",
field=models.CharField(help_text="Path relative to the version.", max_length=4096),
),
migrations.AlterField(
model_name="pageview",
name="version",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="page_views",
to="builds.version",
verbose_name="Version",
),
),
migrations.AlterUniqueTogether(
name="pageview",
unique_together={("project", "version", "path", "date", "status")},
),
]
| Migration |
python | numpy__numpy | numpy/distutils/fcompiler/pg.py | {
"start": 211,
"end": 1810
} | class ____(FCompiler):
compiler_type = 'pg'
description = 'Portland Group Fortran Compiler'
version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'
if platform == 'darwin':
executables = {
'version_cmd': ["<F77>", "-V"],
'compiler_f77': ["pgfortran", "-dynamiclib"],
'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
'compiler_f90': ["pgfortran", "-dynamiclib"],
'linker_so': ["libtool"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
pic_flags = ['']
else:
executables = {
'version_cmd': ["<F77>", "-V"],
'compiler_f77': ["pgfortran"],
'compiler_fix': ["pgfortran", "-Mfixed"],
'compiler_f90': ["pgfortran"],
'linker_so': ["<F90>"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
pic_flags = ['-fpic']
module_dir_switch = '-module '
module_include_switch = '-I'
def get_flags(self):
opt = ['-Minform=inform', '-Mnosecond_underscore']
return self.pic_flags + opt
def get_flags_opt(self):
return ['-fast']
def get_flags_debug(self):
return ['-g']
if platform == 'darwin':
def get_flags_linker_so(self):
return ["-dynamic", '-undefined', 'dynamic_lookup']
else:
def get_flags_linker_so(self):
return ["-shared", '-fpic']
def runtime_library_dir_option(self, dir):
return '-R%s' % dir
import functools
| PGroupFCompiler |
python | altair-viz__altair | altair/datasets/_cache.py | {
"start": 6060,
"end": 9351
} | class ____(CompressedCache["_Dataset", "_FlSchema"]):
"""
`json`_, `gzip`_ -based, lazy schema lookup.
- Primarily benefits ``pandas``, which needs some help identifying **temporal** columns.
- Utilizes `data package`_ schema types.
- All methods return falsy containers instead of exceptions
.. _json:
https://docs.python.org/3/library/json.html
.. _gzip:
https://docs.python.org/3/library/gzip.html
.. _data package:
https://github.com/vega/vega-datasets/pull/631
"""
fp = _METADATA_DIR / "schemas.json.gz"
def __init__(
self,
*,
tp: type[MutableMapping[_Dataset, _FlSchema]] = dict["_Dataset", "_FlSchema"],
implementation: nw.Implementation = nw.Implementation.UNKNOWN,
) -> None:
self._mapping: MutableMapping[_Dataset, _FlSchema] = tp()
self._implementation: nw.Implementation = implementation
def read(self) -> Any:
import json
with self as f:
return json.load(f)
def __getitem__(self, key: _Dataset, /) -> _FlSchema:
return self.get(key, {})
def by_dtype(self, name: _Dataset, *dtypes: type[DType]) -> list[str]:
"""
Return column names specfied in ``name``'s schema.
Parameters
----------
name
Dataset name.
*dtypes
Optionally, only return columns matching the given data type(s).
"""
if (match := self[name]) and dtypes:
include = {_DTYPE_TO_FIELD[tp] for tp in dtypes}
return [col for col, tp_str in match.items() if tp_str in include]
else:
return list(match)
def is_active(self) -> bool:
return self._implementation in {
nw.Implementation.PANDAS,
nw.Implementation.PYARROW,
nw.Implementation.MODIN,
nw.Implementation.PYARROW,
}
def schema(self, name: _Dataset, /) -> nw.Schema:
it = ((col, _FIELD_TO_DTYPE[tp_str]()) for col, tp_str in self[name].items())
return nw.Schema(it)
def schema_kwds(self, meta: Metadata, /) -> dict[str, Any]:
name: Any = meta["dataset_name"]
if self.is_active() and (self[name]):
suffix = meta["suffix"]
if self._implementation.is_pandas_like():
if cols := self.by_dtype(name, nw.Date, nw.Datetime):
if suffix == ".json":
return {"convert_dates": cols}
elif suffix in {".csv", ".tsv"}:
return {"parse_dates": cols}
else:
schema = self.schema(name).to_arrow()
if suffix in {".csv", ".tsv"}:
from pyarrow.csv import ConvertOptions
# For pyarrow CSV reading, use the schema as intended
# This will fail for non-ISO date formats, but that's the correct behavior
# Users can handle this by using a different backend or converting dates manually
return {"convert_options": ConvertOptions(column_types=schema)} # pyright: ignore[reportCallIssue]
elif suffix == ".parquet":
return {"schema": schema}
return {}
| SchemaCache |
python | astropy__astropy | astropy/__init__.py | {
"start": 1363,
"end": 2827
} | class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy`.
"""
unicode_output = _config.ConfigItem(
False,
"When True, use Unicode characters when outputting values, and "
"displaying widgets at the console.",
)
use_color = _config.ConfigItem(
sys.platform != "win32",
"When True, use ANSI color escape sequences when writing to the console.",
aliases=["astropy.utils.console.USE_COLOR", "astropy.logger.USE_COLOR"],
)
max_lines = _config.ConfigItem(
None,
description=(
"Maximum number of lines in the display of pretty-printed "
"objects. If not provided, try to determine automatically from the "
"terminal size. Negative numbers mean no limit."
),
cfgtype="integer(default=None)",
aliases=["astropy.table.pprint.max_lines"],
)
max_width = _config.ConfigItem(
None,
description=(
"Maximum number of characters per line in the display of "
"pretty-printed objects. If not provided, try to determine "
"automatically from the terminal size. Negative numbers mean no "
"limit."
),
cfgtype="integer(default=None)",
aliases=["astropy.table.pprint.max_width"],
)
conf = Conf()
# Define a base ScienceState for configuring constants and units
from .utils.state import ScienceState
| Conf |
python | lepture__authlib | authlib/oauth1/rfc5849/models.py | {
"start": 0,
"end": 1022
} | class ____:
def get_default_redirect_uri(self):
"""A method to get client default redirect_uri. For instance, the
database table for client has a column called ``default_redirect_uri``::
def get_default_redirect_uri(self):
return self.default_redirect_uri
:return: A URL string
"""
raise NotImplementedError()
def get_client_secret(self):
"""A method to return the client_secret of this client. For instance,
the database table has a column called ``client_secret``::
def get_client_secret(self):
return self.client_secret
"""
raise NotImplementedError()
def get_rsa_public_key(self):
"""A method to get the RSA public key for RSA-SHA1 signature method.
For instance, the value is saved on column ``rsa_public_key``::
def get_rsa_public_key(self):
return self.rsa_public_key
"""
raise NotImplementedError()
| ClientMixin |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 221974,
"end": 222932
} | class ____:
def test_non_bytes(self):
with pytest.raises(TypeError):
x509.OCSPNonce(38) # type:ignore[arg-type]
def test_eq(self):
nonce1 = x509.OCSPNonce(b"0" * 5)
nonce2 = x509.OCSPNonce(b"0" * 5)
assert nonce1 == nonce2
def test_ne(self):
nonce1 = x509.OCSPNonce(b"0" * 5)
nonce2 = x509.OCSPNonce(b"0" * 6)
assert nonce1 != nonce2
assert nonce1 != object()
def test_repr(self):
nonce1 = x509.OCSPNonce(b"nonce")
assert repr(nonce1) == "<OCSPNonce(nonce=b'nonce')>"
def test_hash(self):
nonce1 = x509.OCSPNonce(b"0" * 5)
nonce2 = x509.OCSPNonce(b"0" * 5)
nonce3 = x509.OCSPNonce(b"1" * 5)
assert hash(nonce1) == hash(nonce2)
assert hash(nonce1) != hash(nonce3)
def test_public_bytes(self):
ext = x509.OCSPNonce(b"0" * 5)
assert ext.public_bytes() == b"\x04\x0500000"
| TestOCSPNonce |
python | huggingface__transformers | tests/models/regnet/test_modeling_regnet.py | {
"start": 7761,
"end": 8932
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
def test_inference_image_classification_head(self):
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
expected_shape = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape, expected_shape)
expectations = Expectations(
{
(None, None): [-0.4180, -1.5051, -3.4836],
("cuda", 8): [-0.4180, -1.5051, -3.4836],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=2e-4, atol=2e-4)
| RegNetModelIntegrationTest |
python | pypa__pip | src/pip/_vendor/packaging/_tokenizer.py | {
"start": 2165,
"end": 5310
class ____:
    """Context-sensitive token parsing.

    Provides methods to examine the input stream to check whether the next token
    matches.
    """

    def __init__(
        self,
        source: str,
        *,
        rules: dict[str, str | re.Pattern[str]],
    ) -> None:
        self.source = source
        # Pre-compile every rule once so checks are a single regex match.
        compiled: dict[str, re.Pattern[str]] = {}
        for rule_name, rule_pattern in rules.items():
            compiled[rule_name] = re.compile(rule_pattern)
        self.rules = compiled
        self.next_token: Token | None = None
        self.position = 0

    def consume(self, name: str) -> None:
        """Move beyond provided token name, if at current position."""
        if not self.check(name):
            return
        self.read()

    def check(self, name: str, *, peek: bool = False) -> bool:
        """Check whether the next token has the provided name.

        By default, if the check succeeds, the token *must* be read before
        another check. If `peek` is set to `True`, the token is not loaded and
        would need to be checked again.
        """
        assert self.next_token is None, (
            f"Cannot check for {name!r}, already have {self.next_token!r}"
        )
        assert name in self.rules, f"Unknown token name: {name!r}"

        matched = self.rules[name].match(self.source, self.position)
        if matched is None:
            return False
        if peek:
            return True
        self.next_token = Token(name, matched[0], self.position)
        return True

    def expect(self, name: str, *, expected: str) -> Token:
        """Expect a certain token name next, failing with a syntax error otherwise.

        The token is *not* read.
        """
        if self.check(name):
            return self.read()
        raise self.raise_syntax_error(f"Expected {expected}")

    def read(self) -> Token:
        """Consume the next token and return it."""
        current = self.next_token
        assert current is not None

        self.position += len(current.text)
        self.next_token = None
        return current

    def raise_syntax_error(
        self,
        message: str,
        *,
        span_start: int | None = None,
        span_end: int | None = None,
    ) -> NoReturn:
        """Raise ParserSyntaxError at the given position."""
        start = self.position if span_start is None else span_start
        end = self.position if span_end is None else span_end
        raise ParserSyntaxError(
            message,
            source=self.source,
            span=(start, end),
        )

    @contextlib.contextmanager
    def enclosing_tokens(
        self, open_token: str, close_token: str, *, around: str
    ) -> Iterator[None]:
        """Yield inside an optional open/close token pair, enforcing the closer
        only when the opener was actually present."""
        open_position: int | None = None
        if self.check(open_token):
            open_position = self.position
            self.read()

        yield

        if open_position is None:
            return

        if not self.check(close_token):
            self.raise_syntax_error(
                f"Expected matching {close_token} for {open_token}, after {around}",
                span_start=open_position,
            )

        self.read()
| Tokenizer |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 368325,
"end": 368979
class ____(sgqlc.types.relay.Connection):
    """Relay-style connection over GitHub Marketplace listings: paginated
    `edges`/`nodes` plus `page_info` and a `total_count`."""

    __schema__ = graphql_schema
    __field_names__ = ("edges", "nodes", "page_info", "total_count")
    # Cursor-bearing edge wrappers for pagination.
    edges = sgqlc.types.Field(
        sgqlc.types.list_of("MarketplaceListingEdge"), graphql_name="edges"
    )
    # The listing objects themselves, without cursors.
    nodes = sgqlc.types.Field(
        sgqlc.types.list_of("MarketplaceListing"), graphql_name="nodes"
    )
    page_info = sgqlc.types.Field(
        sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
    )
    total_count = sgqlc.types.Field(
        sgqlc.types.non_null(Int), graphql_name="totalCount"
    )
| MarketplaceListingConnection |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/asb.py | {
"start": 1759,
"end": 4007
class ____(BaseHook):
    """
    BaseAzureServiceBusHook class to create session and create connection using connection string.

    :param azure_service_bus_conn_id: Reference to the
        :ref:`Azure Service Bus connection<howto/connection:azure_service_bus>`.
    """

    conn_name_attr = "azure_service_bus_conn_id"
    default_conn_name = "azure_service_bus_default"
    conn_type = "azure_service_bus"
    hook_name = "Azure Service Bus"

    @classmethod
    @add_managed_identity_connection_widgets
    def get_connection_form_widgets(cls) -> dict[str, Any]:
        """Return connection widgets to add to connection form."""
        from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
        from flask_babel import lazy_gettext
        from wtforms import PasswordField, StringField

        return {
            "fully_qualified_namespace": StringField(
                lazy_gettext("Fully Qualified Namespace"), widget=BS3TextFieldWidget()
            ),
            "credential": PasswordField(lazy_gettext("Credential"), widget=BS3TextFieldWidget()),
        }

    @classmethod
    def get_ui_field_behaviour(cls) -> dict[str, Any]:
        """Return custom field behaviour."""
        # NOTE(review): "authenticaltion" typo below is in a user-facing placeholder
        # string; left untouched here, worth fixing upstream.
        return {
            "hidden_fields": ["port", "host", "extra", "login", "password"],
            "relabeling": {"schema": "Connection String"},
            "placeholders": {
                "fully_qualified_namespace": (
                    "<Resource group>.servicebus.windows.net (for Azure AD authenticaltion)"
                ),
                "credential": "credential",
                "schema": "Endpoint=sb://<Resource group>.servicebus.windows.net/;SharedAccessKeyName=<AccessKeyName>;SharedAccessKey=<SharedAccessKey>",
            },
        }

    def __init__(self, azure_service_bus_conn_id: str = default_conn_name) -> None:
        super().__init__()
        self.conn_id = azure_service_bus_conn_id

    def get_conn(self):
        """Create the concrete client; implemented by subclasses."""
        raise NotImplementedError

    def _get_field(self, extras: dict, field_name: str) -> str:
        """Read a namespaced extra field from the connection's extras."""
        return get_field(
            conn_id=self.conn_id,
            conn_type=self.conn_type,
            extras=extras,
            field_name=field_name,
        )
| BaseAzureServiceBusHook |
python | google__jax | jax/_src/clusters/cloud_tpu_cluster.py | {
"start": 2528,
"end": 6643
class ____(clusters.ClusterEnv):
    name: str = "tpu"

    """Abstract cluster supports both single and multislice TPU environments.

    If MEGASCALE_COORDINATOR_ADDRESS is not set, we assume single slice topology.
    Concrete extensions of this class must implement methods for generating a list
    of within-slice workers and a within-slice process ID.
    `get_coordinator_address` must return the address of the host with
    process ID 0 (as returned by `get_process_id`), since the coordinator service
    is started on the host with process ID = 0.
    """

    @classmethod
    def is_env_present(cls) -> bool:
        """Override this method to return True if the environment is present."""
        return False

    @classmethod
    def get_coordinator_address(cls, timeout_secs: int | None, override_coordinator_port: str | None) -> str:
        """Resolve the coordinator host (process 0), wait for it to be reachable,
        and return it as 'host:port'."""
        # For both GCE via QueuedResources and GKE via JobSet, the
        # Megascale coordinator address is set as the host with process id = 0,
        # so can be used as the jax distributed system coordinator.
        coordinator_address = get_tpu_env_value('MEGASCALE_COORDINATOR_ADDRESS')

        if not coordinator_address:
            # For both GCE (QueuedResources and TPUVM create) and GKE via Job API,
            # the workers lists are sorted by process ID so the first one can
            # be used as the jax distributed system coordinator.
            coordinator_address = cls._get_worker_list_in_slice()[0]

        # Drop any port that came with the address; we append our own below.
        coordinator_address = coordinator_address.split(':')[0]
        logger.debug("TPU Cluster using coordinator address: %s", coordinator_address)
        cls.wait_for_coordinator(coordinator_address, timeout_secs)

        port = override_coordinator_port or coordinator_port
        return f'{coordinator_address}:{port}'

    @classmethod
    def wait_for_coordinator(cls, coordinator_address, timeout_secs):
        """Poll DNS until the coordinator hostname resolves or timeout_secs elapses."""
        # The coordinator may not be up before the other hosts try to
        # communicate with it. We check for its existence with retries.
        coordinator_found = False
        max_time = time.time() + timeout_secs
        coordinator_retry_secs = 5
        while not coordinator_found and time.time() < max_time:
            try:
                ip_address = socket.gethostbyname(coordinator_address)
                coordinator_found = True
                logger.debug("Found coordinator with address %s", coordinator_address)
            except socket.gaierror:
                logger.debug(
                    "Failed to recognize coordinator address %s"
                    " retrying...", coordinator_address
                )
                time.sleep(coordinator_retry_secs)
        if not coordinator_found:
            raise RuntimeError(f"Failed to recognize coordinator address {coordinator_address}")

    @classmethod
    def get_process_count(cls) -> int:
        """Total process count = per-slice worker count x number of slices."""
        processes_per_slice = len(cls._get_worker_list_in_slice())
        num_slices = cls._get_num_slices()
        total_process_count = processes_per_slice * num_slices
        logger.debug("Total process count of %s = %s processes per slice and %s slices", total_process_count, processes_per_slice, num_slices)
        return total_process_count

    @classmethod
    def get_process_id(cls) -> int:
        """Global process ID = within-slice ID + slice ID x per-slice worker count."""
        process_id_in_slice = cls._get_process_id_in_slice()
        slice_id = cls._get_slice_id()
        processes_per_slice = len(cls._get_worker_list_in_slice())
        process_id = process_id_in_slice + slice_id * processes_per_slice
        logger.debug("Process ID of %s generated by within-slice id %s and slice id %s", process_id, process_id_in_slice, slice_id)
        return process_id

    @staticmethod
    def _get_num_slices() -> int:
        """Slice count from MEGASCALE_NUM_SLICES; 1 when unset (single slice)."""
        num_slices = get_tpu_env_value('MEGASCALE_NUM_SLICES')
        if not num_slices:
            return 1
        return int(num_slices)  # type: ignore

    @staticmethod
    def _get_slice_id() -> int:
        """This host's slice index from MEGASCALE_SLICE_ID; 0 when unset."""
        slice_id = get_tpu_env_value('MEGASCALE_SLICE_ID')
        if not slice_id:
            return 0
        return int(slice_id)  # type: ignore

    @staticmethod
    def _get_process_id_in_slice() -> int:
        """Returns a process ID that is unique within slice."""
        raise NotImplementedError()

    @staticmethod
    def _get_worker_list_in_slice() -> list[str]:
        """Returns a list of worker endpoints/hostnames within slice."""
        raise NotImplementedError()
| BaseTpuCluster |
python | pytorch__pytorch | torch/ao/nn/sparse/quantized/linear.py | {
"start": 262,
"end": 2904
class ____(torch.nn.Module):
    """Holds sparse-quantized linear packed parameters (weight, bias, and the
    block-sparsity sizes), packed/unpacked via the torch.ops.sparse qlinear ops."""

    # Serialization format version, checked in _load_from_state_dict.
    _version = 1

    def __init__(self, row_block_size=1, col_block_size=4, dtype=torch.qint8):
        super().__init__()

        if dtype != torch.qint8:
            raise NotImplementedError("Linear prepacking only supports QINT8")
        self.dtype = dtype
        # Placeholder 1x1 quantized weight; real values arrive via set_weight_bias.
        wq = torch._empty_affine_quantized(
            [1, 1], scale=1.0, zero_point=0, dtype=torch.qint8
        )
        self.set_weight_bias(wq, None, row_block_size, col_block_size)

    def _get_name(self):
        return "SparseQuantizedLinearPackedParams"

    @torch.jit.export
    def set_weight_bias(
        self,
        weight: torch.Tensor,
        bias: torch.Tensor | None,
        row_block_size: int | None,
        col_block_size: int | None,
    ) -> None:
        """Re-pack the given weight/bias with the given block-sparsity sizes."""
        assert row_block_size is not None and col_block_size is not None
        self._packed_params = torch.ops.sparse.qlinear_prepack(
            weight, bias, row_block_size, col_block_size
        )

    @torch.jit.export
    def _weight_bias(self):
        """Unpack to (weight, bias, row_block_size, col_block_size)."""
        (weight, bias, block_sizes) = torch.ops.sparse.qlinear_unpack(
            self._packed_params
        )
        return (weight, bias, block_sizes[0], block_sizes[1])

    def forward(self, x):
        # Identity: this module only carries parameters.
        return x

    def _save_to_state_dict(self, destination, prefix, keep_vars):
        super()._save_to_state_dict(destination, prefix, keep_vars)
        # Serialize the unpacked representation, not the opaque packed handle.
        destination[prefix + "dtype"] = self.dtype
        destination[prefix + "_packed_params"] = self._weight_bias()

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        # NOTE(review): version may be None for legacy metadata, which would make
        # this comparison raise TypeError on Python 3 -- confirm intended.
        version = local_metadata.get("version", None)
        assert version <= self._version

        self.dtype = state_dict.pop(prefix + "dtype")
        weight, bias, row_block_size, col_block_size = state_dict.pop(
            prefix + "_packed_params"
        )
        self.set_weight_bias(weight, bias, row_block_size, col_block_size)

        # Our keys were popped above, so strict is forced to False for the rest.
        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            False,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    @torch.jit.export
    def __getstate__(self):
        return self._packed_params, self.training, self.dtype

    @torch.jit.export
    def __setstate__(self, state):
        (self._packed_params, self.training, self.dtype) = state

    def __repr__(self):
        return self._weight_bias().__repr__()


# TODO (zaf): Inherit from `quantized.Linear` (T83294430)
| LinearPackedParams |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc.py | {
"start": 16465,
"end": 17127
class ____(Benchmark):
    """Benchmark np.add.reduce argument parsing across positional/keyword call styles."""

    # In order to benchmark the speed of argument parsing, all but the
    # out arguments are chosen such that they have minimal effect on the
    # calculation.
    a = np.arange(2.)
    out = np.array(0.)
    param_names = ['arg_kwarg']
    # Each pack spells the same reduction with a different mix of
    # positional and keyword arguments.
    params = [[
        ArgPack(a,),
        ArgPack(a, 0),
        ArgPack(a, axis=0),
        ArgPack(a, 0, None),
        ArgPack(a, axis=0, dtype=None),
        ArgPack(a, 0, None, out),
        ArgPack(a, axis=0, dtype=None, out=out),
        ArgPack(a, out=out)
    ]]

    def time_add_reduce_arg_parsing(self, arg_pack):
        np.add.reduce(*arg_pack.args, **arg_pack.kwargs)
| ArgParsingReduce |
python | plotly__plotly.py | plotly/graph_objs/choroplethmap/selected/_marker.py | {
"start": 233,
"end": 2222
class ____(_BaseTraceHierarchyType):
    """Marker settings applied to the selected points of a choroplethmap trace."""

    # class properties
    _parent_path_str = "choroplethmap.selected"
    _path_str = "choroplethmap.selected.marker"
    _valid_props = {"opacity"}

    # opacity
    @property
    def opacity(self):
        """
        Sets the marker opacity of selected points.

        The 'opacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    # Self properties description
    @property
    def _prop_descriptions(self):
        return """\
        opacity
            Sets the marker opacity of selected points.
        """

    def __init__(self, arg=None, opacity=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.choroplethmap.
            selected.Marker`
        opacity
            Sets the marker opacity of selected points.

        Returns
        -------
        Marker
        """
        super().__init__("marker")

        # Internal construction path: attach to the provided parent and skip
        # the rest of initialization/validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg: accept None, a sibling Marker instance, or a plain dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmap.selected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmap.selected.Marker`""")

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        self._set_property("opacity", arg, opacity)
        self._process_kwargs(**dict(arg, **kwargs))
        self._skip_invalid = False
| Marker |
python | TheAlgorithms__Python | graphs/breadth_first_search.py | {
"start": 109,
"end": 2390
class ____:
    """Directed graph stored as an adjacency list, with breadth-first search.

    Only vertices that appear as a source of an edge get a key in
    ``self.vertices``; pure sink vertices are represented implicitly.
    """

    def __init__(self) -> None:
        self.vertices: dict[int, list[int]] = {}

    def print_graph(self) -> None:
        """
        prints adjacency list representation of graaph
        >>> g = Graph()
        >>> g.print_graph()
        >>> g.add_edge(0, 1)
        >>> g.print_graph()
        0  :  1
        """
        for i in self.vertices:
            print(i, " : ", " -> ".join([str(j) for j in self.vertices[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """
        adding the edge between two vertices
        >>> g = Graph()
        >>> g.print_graph()
        >>> g.add_edge(0, 1)
        >>> g.print_graph()
        0  :  1
        """
        # setdefault creates the adjacency list on first use of from_vertex.
        self.vertices.setdefault(from_vertex, []).append(to_vertex)

    def bfs(self, start_vertex: int) -> set[int]:
        """
        >>> g = Graph()
        >>> g.add_edge(0, 1)
        >>> g.add_edge(0, 1)
        >>> g.add_edge(0, 2)
        >>> g.add_edge(1, 2)
        >>> g.add_edge(2, 0)
        >>> g.add_edge(2, 3)
        >>> g.add_edge(3, 3)
        >>> sorted(g.bfs(2))
        [0, 1, 2, 3]
        """
        # initialize set for storing already visited vertices
        visited = set()

        # create a first in first out queue to store all the vertices for BFS
        queue: Queue = Queue()

        # mark the source node as visited and enqueue it
        visited.add(start_vertex)
        queue.put(start_vertex)

        while not queue.empty():
            vertex = queue.get()

            # Fix: use .get so a visited vertex with no outgoing edges (a pure
            # sink) yields an empty neighbor list instead of raising KeyError.
            for adjacent_vertex in self.vertices.get(vertex, []):
                if adjacent_vertex not in visited:
                    queue.put(adjacent_vertex)
                    visited.add(adjacent_vertex)
        return visited
# Demo / self-check: run the doctests, then exercise the sample graph by hand.
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)

    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    # 0 : 1 -> 2
    # 1 : 2
    # 2 : 0 -> 3
    # 3 : 3

    assert sorted(g.bfs(2)) == [0, 1, 2, 3]
| Graph |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/del1.py | {
"start": 390,
"end": 499
class ____:
    # Type-checker conformance sample: deleting a class-scope name before it is
    # ever bound. The error below is INTENTIONAL and must not be "fixed".
    # This should generate an error because z1 is unbound.
    del z1

    # Bind then delete -- this pair is legal and must not be flagged.
    z1 = 1
    del z1
| ClassA |
python | huggingface__transformers | tests/models/resnet/test_modeling_resnet.py | {
"start": 1412,
"end": 5504
class ____:
    """Builds tiny ResNet configs/inputs and runs shape checks for the base
    model, the classification head, and the backbone variant in unit tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
    ):
        # `parent` is the unittest.TestCase; assertions are delegated to it.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes.
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a ResNetConfig from the tester's hyperparameters."""
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Base model output must be (B, C_last, H/32, W/32)."""
        model = ResNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Classification head must emit (B, num_labels) logits."""
        config.num_labels = self.num_labels
        model = ResNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Backbone feature maps/channels must follow out_features, and fall back
        to only the last stage when out_features is None."""
        model = ResNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ResNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the shared ModelTesterMixin-style entry point."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
| ResNetModelTester |
python | ray-project__ray | ci/raydepsets/workspace.py | {
"start": 3303,
"end": 4696
class ____:
    """Locates the build workspace directory and loads/merges *.depsets.yaml configs."""

    def __init__(self, dir: str = None):
        # Explicit argument wins; otherwise fall back to the Bazel-provided env var.
        if dir is not None:
            self.dir = dir
        else:
            self.dir = os.getenv("BUILD_WORKSPACE_DIRECTORY", None)
        if self.dir is None:
            raise RuntimeError("BUILD_WORKSPACE_DIRECTORY is not set")

    def load_configs(self, config_path: str) -> Config:
        """Load every depset config next to config_path and merge them into one."""
        return self.merge_configs(self.get_all_configs(config_path))

    def get_all_configs(self, config_path: str) -> List[Config]:
        return [self.load_config(candidate) for candidate in self.get_configs_dir(config_path)]

    def get_configs_dir(self, configs_path: str) -> List[str]:
        """Absolute paths of all *.depsets.yaml files in configs_path's directory."""
        parent_dir = os.path.dirname(os.path.join(self.dir, configs_path))
        entries = os.listdir(os.path.join(self.dir, parent_dir))
        return [
            os.path.join(self.dir, parent_dir, entry)
            for entry in entries
            if entry.endswith(".depsets.yaml")
        ]

    def load_config(self, config_path: str) -> Config:
        """Parse one YAML file into a Config named after its basename."""
        with open(os.path.join(self.dir, config_path), "r") as f:
            raw = yaml.safe_load(f)
        return Config.from_dict(raw, os.path.basename(config_path))

    def merge_configs(self, configs: List[Config]) -> Config:
        """Concatenate the depsets of all configs into a single Config."""
        combined = []
        for cfg in configs:
            combined.extend(cfg.depsets)
        return Config(depsets=combined)
| Workspace |
python | pytorch__pytorch | test/jit/test_script_profile.py | {
"start": 1113,
"end": 2892
class ____(JitTestCase):
    """Tests for torch.jit._ScriptProfile enable/disable and stats dumping."""

    def test_basic(self):
        """Profiling a scripted module run yields non-empty stats."""
        seq = torch.jit.script(Sequence())
        p = torch.jit._ScriptProfile()
        p.enable()
        seq(torch.rand((10, 100)))
        p.disable()
        self.assertNotEqual(p.dump_string(), "")

    def test_script(self):
        """Profiling also captures work done inside a @torch.jit.script function."""
        seq = Sequence()
        p = torch.jit._ScriptProfile()
        p.enable()

        @torch.jit.script
        def fn():
            _ = seq(torch.rand((10, 100)))

        fn()
        p.disable()
        self.assertNotEqual(p.dump_string(), "")

    def test_multi(self):
        """Concurrently enabled profiles accumulate independently."""
        seq = torch.jit.script(Sequence())
        profiles = [torch.jit._ScriptProfile() for _ in range(5)]
        for p in profiles:
            p.enable()

        last = None
        while len(profiles) > 0:
            seq(torch.rand((10, 10)))
            p = profiles.pop()
            p.disable()
            stats = p.dump_string()
            self.assertNotEqual(stats, "")
            if last:
                # Each later-disabled profile observed one more run than the previous.
                self.assertNotEqual(stats, last)
            last = stats

    def test_section(self):
        """Stats stay frozen while disabled and change again after re-enabling."""
        seq = Sequence()

        @torch.jit.script
        def fn(max: int):
            _ = seq(torch.rand((10, max)))

        p = torch.jit._ScriptProfile()
        p.enable()
        fn(100)
        p.disable()
        s0 = p.dump_string()

        fn(10)
        p.disable()
        s1 = p.dump_string()

        p.enable()
        fn(10)
        p.disable()
        s2 = p.dump_string()

        self.assertEqual(s0, s1)
        self.assertNotEqual(s1, s2)

    def test_empty(self):
        """A profile with no profiled work dumps an empty string."""
        p = torch.jit._ScriptProfile()
        p.enable()
        p.disable()
        self.assertEqual(p.dump_string(), "")
# Direct execution is disallowed; these tests must run via test/test_jit.py.
if __name__ == "__main__":
    raise_on_run_directly("test/test_jit.py")
| TestScriptProfile |
python | openai__openai-python | src/openai/types/chat/chat_completion_custom_tool_param.py | {
"start": 1066,
"end": 1412
class ____(TypedDict, total=False):
    """Chat-completions custom tool payload: a required name plus optional
    description and input format."""

    name: Required[str]
    """The name of the custom tool, used to identify it in tool calls."""

    description: str
    """Optional description of the custom tool, used to provide more context."""

    format: CustomFormat
    """The input format for the custom tool. Default is unconstrained text."""
| Custom |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-ragie/destination_ragie/writer.py | {
"start": 548,
"end": 17158
} | class ____:
METADATA_AIRBYTE_STREAM_FIELD = "airbyte_stream"
METADATA_CONTENT_HASH_FIELD = "airbyte_content_hash"
# Update RESERVED_METADATA_KEYS based on the provided API docs for POST /documents
RESERVED_METADATA_KEYS = {
"document_id",
"document_type",
"document_source",
"document_name",
"document_uploaded_at",
"start_time",
"end_time", # Keys from docs
# Include our internal keys
METADATA_AIRBYTE_STREAM_FIELD,
METADATA_CONTENT_HASH_FIELD,
}
def __init__(
self,
client: RagieClient,
config: RagieConfig,
catalog: ConfiguredAirbyteCatalog,
):
self.client = client
self.config = config
self.catalog = catalog
self.streams: Dict[str, ConfiguredAirbyteStream] = {
self._stream_tuple_to_id(s.stream.namespace, s.stream.name): s for s in catalog.streams
}
self.static_metadata = self.config.metadata_static_dict or {}
self.seen_hashes: Dict[str, Set[str]] = {}
self.hashes_preloaded: Set[str] = set()
logger.info("RagieWriter initialized.")
# Log relevant config settings (excluding secrets)
logger.debug(f"Configured Partition: {self.config.partition}")
logger.debug(f"Configured Metadata Fields: {self.config.metadata_fields}")
logger.debug(f"Configured Doc Name Field: {self.config.document_name_field}")
logger.debug(f"Configured External ID Field: {self.config.external_id_field}")
logger.debug(f"Configured Processing Mode: {self.config.processing_mode}")
logger.debug(f"Static Metadata Keys: {list(self.static_metadata.keys())}")
def _stream_tuple_to_id(self, namespace: Optional[str], name: str) -> str:
return f"{namespace}_{name}" if namespace else name
# --- Overwrite Logic (delete_streams_to_overwrite) - No changes needed ---
def delete_streams_to_overwrite(self) -> None:
streams_to_overwrite = [(sid, sc) for sid, sc in self.streams.items() if sc.destination_sync_mode == DestinationSyncMode.overwrite]
if not streams_to_overwrite:
return
stream_names = [sid for sid, _ in streams_to_overwrite]
logger.info(f"OVERWRITE mode for streams: {stream_names}. Deleting existing data...")
all_internal_ids_to_delete: Set[str] = set()
for stream_id, stream_config in streams_to_overwrite:
logger.info(f"Finding existing documents for stream '{stream_id}'...")
filter_conditions = {self.METADATA_AIRBYTE_STREAM_FIELD: stream_id}
try:
internal_ids = self.client.find_ids_by_metadata(filter_conditions)
if internal_ids:
logger.info(f"Found {len(internal_ids)} document IDs for stream '{stream_id}'.")
all_internal_ids_to_delete.update(internal_ids)
else:
logger.info(f"No existing documents found for stream '{stream_id}'.")
except Exception as e:
logger.error(f"Failed to find documents for overwrite stream '{stream_id}': {e}", exc_info=True)
raise AirbyteTracedException(
message=f"Failed to query existing documents for overwrite stream '{stream_id}'.",
internal_message=str(e),
failure_type=FailureType.system_error,
) from e
if all_internal_ids_to_delete:
logger.info(f"Attempting deletion of {len(all_internal_ids_to_delete)} documents for streams: {stream_names}")
try:
self.client.delete_documents_by_id(list(all_internal_ids_to_delete))
logger.info(f"Successfully processed deletion requests for overwrite streams.")
except Exception as e:
logger.error(f"Failed during document deletion for streams {stream_names}: {e}", exc_info=True)
raise AirbyteTracedException(
message=f"Failed to delete documents during overwrite for streams {stream_names}.",
internal_message=str(e),
failure_type=FailureType.system_error,
) from e
else:
logger.info("No documents found to delete across overwrite streams.")
# --- Helper Methods (_get_value_from_path, _calculate_content_hash, _preload_hashes_if_needed) - No changes needed ---
def _get_value_from_path(self, data: Dict[str, Any], path_str: Optional[str]) -> Any:
if not path_str or not isinstance(data, dict):
return None
path = path_str.split(".")
current = data
for i, key in enumerate(path):
if isinstance(current, dict):
if key in current:
current = current[key]
else:
return None
elif isinstance(current, list):
if key.isdigit():
try:
index = int(key)
if 0 <= index < len(current):
current = current[index]
else:
return None
except (ValueError, IndexError):
return None
else:
logger.debug(f"Attempted list access with non-integer key '{key}' in path '{path_str}'.")
return None
else:
return None
return current
def _calculate_content_hash(
self, metadata: Dict[str, Any], content: Optional[Dict[str, Any]] = None, file_info: Optional[Dict[str, Any]] = None
) -> str:
hasher = hashlib.sha256()
content_part = ""
metadata_part = ""
if file_info:
stable_file_info = {
"path": file_info.get("file_relative_path"),
"modified": file_info.get("modified"),
"size": file_info.get("bytes"),
}
stable_file_info = {k: v for k, v in stable_file_info.items() if v is not None}
content_part = json.dumps(stable_file_info, sort_keys=True, ensure_ascii=False)
elif content:
content_part = json.dumps(content, sort_keys=True, ensure_ascii=False)
hashable_metadata = {
k: v for k, v in metadata.items() if k not in [self.METADATA_AIRBYTE_STREAM_FIELD, self.METADATA_CONTENT_HASH_FIELD]
}
metadata_part = json.dumps(hashable_metadata, sort_keys=True, ensure_ascii=False)
combined_str = content_part + "::" + metadata_part
hasher.update(combined_str.encode("utf-8"))
hash_result = hasher.hexdigest()
# logger.debug(f"Calculated content hash: {hash_result} (File: {bool(file_info)}, Metadata Keys: {list(hashable_metadata.keys())})")
return hash_result
def _preload_hashes_if_needed(self, stream_id: str) -> None:
if stream_id in self.hashes_preloaded:
return
logger.info(f"Preloading hashes for stream '{stream_id}'...")
try:
filter_conditions = {self.METADATA_AIRBYTE_STREAM_FIELD: stream_id}
metadata_hash_field_path = f"metadata.{self.METADATA_CONTENT_HASH_FIELD}"
existing_docs = self.client.find_docs_by_metadata(filter_conditions, fields=["id", "metadata"])
hashes = set()
found_hashes = 0
docs_without_hash = 0
for doc in existing_docs:
doc_metadata = doc.get("metadata", {})
content_hash = doc_metadata.get(self.METADATA_CONTENT_HASH_FIELD)
if content_hash:
hashes.add(content_hash)
found_hashes += 1
else:
docs_without_hash += 1
self.seen_hashes[stream_id] = hashes
self.hashes_preloaded.add(stream_id)
log_msg = f"Finished preloading for '{stream_id}'. Found {len(hashes)} existing hashes."
if docs_without_hash > 0:
log_msg += f" ({docs_without_hash} docs missing hash)."
logger.info(log_msg)
except Exception as e:
logger.error(f"Failed to preload hashes for stream '{stream_id}': {e}", exc_info=True)
self.hashes_preloaded.add(stream_id)
self.seen_hashes[stream_id] = set()
logger.warning(f"Deduplication for '{stream_id}' may be incomplete due to hash preload failure.")
def _prepare_metadata(self, record_data: Dict[str, Any], stream_id: str) -> Dict[str, Any]:
"""Extracts, combines, and cleans metadata. Ensures values are suitable types."""
combined_metadata = copy.deepcopy(self.static_metadata)
if self.config.metadata_fields:
for field_path_str in self.config.metadata_fields:
value = self._get_value_from_path(record_data, field_path_str)
if value is not None:
key = field_path_str.replace(".", "_")
# Ragie metadata values: string, number, boolean, list of strings.
if isinstance(value, (str, bool)):
combined_metadata[key] = value
elif isinstance(value, (int, float)):
# Ensure it's finite (not NaN or Infinity)
if isinstance(value, float) and not all(map(float.isfinite, [value])):
logger.warning(f"Skipping non-finite float metadata field '{key}' (path: {field_path_str}). Value: {value}")
continue
combined_metadata[key] = value
elif isinstance(value, list) and all(isinstance(item, str) for item in value):
combined_metadata[key] = value
else:
# Try converting other types to string as fallback
try:
str_value = str(value)
combined_metadata[key] = str_value
logger.debug(f"Converted metadata field '{key}' (type: {type(value)}) to string.")
except Exception as str_err:
logger.warning(
f"Could not convert metadata field '{key}' from path '{field_path_str}' to string (type: {type(value)}). Error: {str_err}. Skipping."
)
final_metadata = {}
for key, value in combined_metadata.items():
new_key = key
# Clean key: remove leading/trailing spaces, handle reserved/internal names
clean_key = key.strip()
if not clean_key:
logger.warning(f"Skipping metadata field with empty key (original: '{key}').")
continue
new_key = clean_key
if new_key in self.RESERVED_METADATA_KEYS or new_key.startswith("_"):
temp_key = new_key.lstrip("_")
new_key = f"{temp_key}_" if temp_key else "_" # Handle case of key being only underscores
if new_key != key:
logger.debug(f"Adjusted reserved/internal metadata key '{key}' to '{new_key}'")
# replace common problematic chars like '.', '$', space
problematic_chars = [".", "$", " "]
if any(char in new_key for char in problematic_chars):
original_key = new_key
for char in problematic_chars:
new_key = new_key.replace(char, "_")
logger.warning(f"Adjusted metadata key '{original_key}' to '{new_key}' due to problematic characters.")
# Final check if cleaned key is empty or reserved again
if not new_key:
logger.warning(f"Skipping metadata field - key became empty after cleaning (original: '{key}').")
continue
if new_key in self.RESERVED_METADATA_KEYS:
new_key = f"{new_key}_"
logger.debug(f"Post-cleaning key '{key}' resulted in reserved key, appended underscore -> '{new_key}'")
final_metadata[new_key] = value
final_metadata[self.METADATA_AIRBYTE_STREAM_FIELD] = stream_id
# Check metadata size limits (optional but good practice)
if len(final_metadata) > 1000: # Approximation, actual limit counts list items individually
logger.warning(f"Metadata for record exceeds ~1000 key-value pairs ({len(final_metadata)}). Ragie might truncate or reject.")
return final_metadata
def queue_write_operation(self, record: AirbyteRecordMessage) -> None:
    """Process one Airbyte record and queue it for indexing in Ragie.

    Builds a JSON-only payload for the Ragie client: extracts content
    (optionally restricted to configured fields), prepares metadata,
    resolves a document name and external id, computes a content hash,
    performs append-dedup filtering when the stream's sync mode requires
    it, and finally hands the payload to ``self.client``.

    Args:
        record: Incoming Airbyte record message; ``record.data`` must be
            a dict or the record is skipped with a warning.

    Raises:
        Exception: Any error raised by the client indexing call is
            logged and re-raised so the destination write loop can
            handle the failure.
    """
    stream_id = self._stream_tuple_to_id(record.namespace, record.stream)
    stream_config = self.streams.get(stream_id)
    if not stream_config:
        logger.warning(f"Stream config not found for '{stream_id}', skipping.")
        return
    if not isinstance(record.data, dict):
        logger.warning(f"Record data is not dict in stream '{stream_id}', skipping.")
        return
    record_data = record.data

    # Payload dictionary passed to the client.
    payload: Dict[str, Any] = {}
    content_to_send: Optional[Dict[str, Any]] = None

    # --- 1. Extract Content Based on Config ---
    if self.config.content_fields:
        # Extract only the configured keys, supporting dot notation for
        # nested fields. Missing keys are logged but do not abort.
        content_to_send = {}
        for key in self.config.content_fields:
            value = self._get_value_from_path(record_data, key)
            if value is not None:
                content_to_send[key] = value
            else:
                logger.warning(f"Key '{key}' not found in record data for stream '{stream_id}'.")
    else:
        # No explicit field list configured: send the entire record data.
        content_to_send = record_data
    if not content_to_send:
        logger.warning(f"Content data is empty in stream '{stream_id}'. Skipping.")
        return
    payload["data"] = content_to_send  # Key for JSON data

    # --- 2. Prepare Metadata ---
    final_metadata = self._prepare_metadata(record_data, stream_id)

    # --- 3. Determine Name ---
    doc_name = None
    if self.config.document_name_field:
        value = self._get_value_from_path(record_data, self.config.document_name_field)
        if value is not None:
            doc_name = str(value)
    if not doc_name:  # Fallback: generate a unique, stream-scoped name.
        doc_name = f"airbyte_{stream_id}_{uuid.uuid4()}"
    payload["name"] = doc_name  # Include name in payload for client

    # --- 4. Determine External ID ---
    external_id = None
    if self.config.external_id_field:
        value = self._get_value_from_path(record_data, self.config.external_id_field)
        if value is not None:
            external_id = str(value)
    # May be None when no external-id field is configured or present.
    payload["external_id"] = external_id

    # --- 5. Calculate Content Hash ---
    # Hash is computed over a deep copy of the metadata so that the hash
    # field itself never participates in the hashed input.
    temp_metadata_for_hashing = copy.deepcopy(final_metadata)
    content_hash = self._calculate_content_hash(metadata=temp_metadata_for_hashing, content=content_to_send)
    final_metadata[self.METADATA_CONTENT_HASH_FIELD] = content_hash
    payload["metadata"] = final_metadata  # Store final metadata dict in payload

    # --- 6. Deduplication Check (append_dedup sync mode only) ---
    if stream_config.destination_sync_mode == DestinationSyncMode.append_dedup:
        self._preload_hashes_if_needed(stream_id)
        if content_hash in self.seen_hashes.get(stream_id, set()):
            logger.info(f"Skipping duplicate record in stream '{stream_id}' (Hash: {content_hash}, Name: '{doc_name}').")
            return
        self.seen_hashes.setdefault(stream_id, set()).add(content_hash)

    # --- 7. Add Other Parameters ---
    payload["mode"] = self.config.processing_mode
    payload["partition"] = self.config.partition

    # --- 8. Send to Client ---
    try:
        logger.debug(f"Queueing JSON payload for '{doc_name}' (Stream: {stream_id}, Hash: {content_hash})")
        self.client.index_documents([payload])
    except Exception as e:
        logger.error(f"Error during client indexing call: {e}", exc_info=True)
        # Bare `raise` preserves the active exception and traceback for
        # the destination write loop (was `raise e`).
        raise
| RagieWriter |
python | spack__spack | lib/spack/spack/binary_distribution.py | {
"start": 109879,
"end": 109991
} | class ____(GenerateIndexError):
"""Raised when unable to list keys when generating key index"""
| CannotListKeys |
python | sphinx-doc__sphinx | tests/test_ext_napoleon/test_ext_napoleon_docstring.py | {
"start": 1421,
"end": 2096
} | class ____:
def test_attributes_docstring(self):
config = Config()
actual = NumpyDocstring(
cleandoc(NamedtupleSubclass.__doc__),
config=config,
app=None,
what='class',
name='NamedtupleSubclass',
obj=NamedtupleSubclass,
)
expected = """\
Sample namedtuple subclass
.. attribute:: attr1
Quick description of attr1
:type: Arbitrary type
.. attribute:: attr2
Quick description of attr2
:type: Another arbitrary type
.. attribute:: attr3
Adds a newline after the type
:type: Type
"""
assert str(actual) == expected
| TestNamedtupleSubclass |
python | pypa__warehouse | warehouse/packaging/interfaces.py | {
"start": 1885,
"end": 2414
} | class ____(Interface):
def check_project_name(name):
"""
Check if a project name is valid and available for use.
"""
def create_project(
name,
creator,
request,
*,
creator_is_owner=True,
organization_id: UUID | None = None,
):
"""
Creates a new project, recording a user as its creator.
If `creator_is_owner`, a `Role` is also added to the project
marking `creator` as a project owner.
"""
| IProjectService |
python | pypa__setuptools | setuptools/_distutils/tests/test_sdist.py | {
"start": 1487,
"end": 15062
} | class ____(support.TempdirManager):
def get_cmd(self, metadata=None):
"""Returns a cmd"""
if metadata is None:
metadata = {
'name': 'ns.fake--pkg',
'version': '1.0',
'url': 'xxx',
'author': 'xxx',
'author_email': 'xxx',
}
dist = Distribution(metadata)
dist.script_name = 'setup.py'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.dist_dir = 'dist'
return dist, cmd
@pytest.mark.usefixtures('needs_zlib')
def test_prune_file_list(self):
# this test creates a project with some VCS dirs and an NFS rename
# file, then launches sdist to check they get pruned on all systems
# creating VCS directories with some files in them
os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
self.write_file((self.tmp_dir, 'somecode', '.svn', 'ok.py'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.hg'))
self.write_file((self.tmp_dir, 'somecode', '.hg', 'ok'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.git'))
self.write_file((self.tmp_dir, 'somecode', '.git', 'ok'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx')
# now building a sdist
dist, cmd = self.get_cmd()
# zip is available universally
# (tar might not be installed under win32)
cmd.formats = ['zip']
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
assert files == ['ns_fake_pkg-1.0.zip']
zip_file = zipfile.ZipFile(join(dist_folder, 'ns_fake_pkg-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything has been pruned correctly
expected = [
'',
'PKG-INFO',
'README',
'setup.py',
'somecode/',
'somecode/__init__.py',
]
assert sorted(content) == ['ns_fake_pkg-1.0/' + x for x in expected]
@pytest.mark.usefixtures('needs_zlib')
@pytest.mark.skipif("not shutil.which('tar')")
@pytest.mark.skipif("not shutil.which('gzip')")
def test_make_distribution(self):
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar then a tar
cmd.formats = ['gztar', 'tar']
cmd.ensure_finalized()
cmd.run()
# making sure we have two files
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
result.sort()
assert result == ['ns_fake_pkg-1.0.tar', 'ns_fake_pkg-1.0.tar.gz']
os.remove(join(dist_folder, 'ns_fake_pkg-1.0.tar'))
os.remove(join(dist_folder, 'ns_fake_pkg-1.0.tar.gz'))
# now trying a tar then a gztar
cmd.formats = ['tar', 'gztar']
cmd.ensure_finalized()
cmd.run()
result = os.listdir(dist_folder)
result.sort()
assert result == ['ns_fake_pkg-1.0.tar', 'ns_fake_pkg-1.0.tar.gz']
@pytest.mark.usefixtures('needs_zlib')
def test_add_defaults(self):
# https://bugs.python.org/issue2279
# add_default should also include
# data_files and package_data
dist, cmd = self.get_cmd()
# filling data_files by pointing files
# in package_data
dist.package_data = {'': ['*.cfg', '*.dat'], 'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#')
# adding some data in data_files
data_dir = join(self.tmp_dir, 'data')
os.mkdir(data_dir)
self.write_file((data_dir, 'data.dt'), '#')
some_dir = join(self.tmp_dir, 'some')
os.mkdir(some_dir)
# make sure VCS directories are pruned (#14004)
hg_dir = join(self.tmp_dir, '.hg')
os.mkdir(hg_dir)
self.write_file((hg_dir, 'last-message.txt'), '#')
# a buggy regex used to prevent this from working on windows (#6884)
self.write_file((self.tmp_dir, 'buildout.cfg'), '#')
self.write_file((self.tmp_dir, 'inroot.txt'), '#')
self.write_file((some_dir, 'file.txt'), '#')
self.write_file((some_dir, 'other_file.txt'), '#')
dist.data_files = [
('data', ['data/data.dt', 'buildout.cfg', 'inroot.txt', 'notexisting']),
'some/file.txt',
'some/other_file.txt',
]
# adding a script
script_dir = join(self.tmp_dir, 'scripts')
os.mkdir(script_dir)
self.write_file((script_dir, 'script.py'), '#')
dist.scripts = [join('scripts', 'script.py')]
cmd.formats = ['zip']
cmd.use_defaults = True
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
assert files == ['ns_fake_pkg-1.0.zip']
zip_file = zipfile.ZipFile(join(dist_folder, 'ns_fake_pkg-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything was added
expected = [
'',
'PKG-INFO',
'README',
'buildout.cfg',
'data/',
'data/data.dt',
'inroot.txt',
'scripts/',
'scripts/script.py',
'setup.py',
'some/',
'some/file.txt',
'some/other_file.txt',
'somecode/',
'somecode/__init__.py',
'somecode/doc.dat',
'somecode/doc.txt',
]
assert sorted(content) == ['ns_fake_pkg-1.0/' + x for x in expected]
# checking the MANIFEST
manifest = pathlib.Path(self.tmp_dir, 'MANIFEST').read_text(encoding='utf-8')
assert manifest == MANIFEST % {'sep': os.sep}
@staticmethod
def warnings(messages, prefix='warning: '):
return [msg for msg in messages if msg.startswith(prefix)]
@pytest.mark.usefixtures('needs_zlib')
def test_metadata_check_option(self, caplog):
# testing the `medata-check` option
dist, cmd = self.get_cmd(metadata={})
# this should raise some warnings !
# with the `check` subcommand
cmd.ensure_finalized()
cmd.run()
assert len(self.warnings(caplog.messages, 'warning: check: ')) == 1
# trying with a complete set of metadata
caplog.clear()
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.metadata_check = False
cmd.run()
assert len(self.warnings(caplog.messages, 'warning: check: ')) == 0
def test_show_formats(self, capsys):
show_formats()
# the output should be a header line + one line per format
num_formats = len(ARCHIVE_FORMATS.keys())
output = [
line
for line in capsys.readouterr().out.split('\n')
if line.strip().startswith('--formats=')
]
assert len(output) == num_formats
def test_finalize_options(self):
dist, cmd = self.get_cmd()
cmd.finalize_options()
# default options set by finalize
assert cmd.manifest == 'MANIFEST'
assert cmd.template == 'MANIFEST.in'
assert cmd.dist_dir == 'dist'
# formats has to be a string splitable on (' ', ',') or
# a stringlist
cmd.formats = 1
with pytest.raises(DistutilsOptionError):
cmd.finalize_options()
cmd.formats = ['zip']
cmd.finalize_options()
# formats has to be known
cmd.formats = 'supazipa'
with pytest.raises(DistutilsOptionError):
cmd.finalize_options()
# the following tests make sure there is a nice error message instead
# of a traceback when parsing an invalid manifest template
def _check_template(self, content, caplog):
dist, cmd = self.get_cmd()
os.chdir(self.tmp_dir)
self.write_file('MANIFEST.in', content)
cmd.ensure_finalized()
cmd.filelist = FileList()
cmd.read_template()
assert len(self.warnings(caplog.messages)) == 1
def test_invalid_template_unknown_command(self, caplog):
self._check_template('taunt knights *', caplog)
def test_invalid_template_wrong_arguments(self, caplog):
# this manifest command takes one argument
self._check_template('prune', caplog)
@pytest.mark.skipif("platform.system() != 'Windows'")
def test_invalid_template_wrong_path(self, caplog):
# on Windows, trailing slashes are not allowed
# this used to crash instead of raising a warning: #8286
self._check_template('include examples/', caplog)
@pytest.mark.usefixtures('needs_zlib')
def test_get_file_list(self):
# make sure MANIFEST is recalculated
dist, cmd = self.get_cmd()
# filling data_files by pointing files in package_data
dist.package_data = {'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
assert ilen(clean_lines(cmd.manifest)) == 5
# adding a file
self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#')
# make sure build_py is reinitialized, like a fresh run
build_py = dist.get_command_obj('build_py')
build_py.finalized = False
build_py.ensure_finalized()
cmd.run()
manifest2 = list(clean_lines(cmd.manifest))
# do we have the new file in MANIFEST ?
assert len(manifest2) == 6
assert 'doc2.txt' in manifest2[-1]
@pytest.mark.usefixtures('needs_zlib')
def test_manifest_marker(self):
# check that autogenerated MANIFESTs have a marker
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.run()
assert (
next(clean_lines(cmd.manifest))
== '# file GENERATED by distutils, do NOT edit'
)
@pytest.mark.usefixtures('needs_zlib')
def test_manifest_comments(self):
# make sure comments don't cause exceptions or wrong includes
contents = dedent(
"""\
# bad.py
#bad.py
good.py
"""
)
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), contents)
self.write_file((self.tmp_dir, 'good.py'), '# pick me!')
self.write_file((self.tmp_dir, 'bad.py'), "# don't pick me!")
self.write_file((self.tmp_dir, '#bad.py'), "# don't pick me!")
cmd.run()
assert cmd.filelist.files == ['good.py']
@pytest.mark.usefixtures('needs_zlib')
def test_manual_manifest(self):
# check that a MANIFEST without a marker is left alone
dist, cmd = self.get_cmd()
cmd.formats = ['gztar']
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), 'README.manual')
self.write_file(
(self.tmp_dir, 'README.manual'),
'This project maintains its MANIFEST file itself.',
)
cmd.run()
assert cmd.filelist.files == ['README.manual']
assert list(clean_lines(cmd.manifest)) == ['README.manual']
archive_name = join(self.tmp_dir, 'dist', 'ns_fake_pkg-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
filenames = [tarinfo.name for tarinfo in archive]
finally:
archive.close()
assert sorted(filenames) == [
'ns_fake_pkg-1.0',
'ns_fake_pkg-1.0/PKG-INFO',
'ns_fake_pkg-1.0/README.manual',
]
@pytest.mark.usefixtures('needs_zlib')
@require_unix_id
@require_uid_0
@pytest.mark.skipif("not shutil.which('tar')")
@pytest.mark.skipif("not shutil.which('gzip')")
def test_make_distribution_owner_group(self):
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar and specifying the owner+group
cmd.formats = ['gztar']
cmd.owner = pwd.getpwuid(0)[0]
cmd.group = grp.getgrgid(0)[0]
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'ns_fake_pkg-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
assert member.uid == 0
assert member.gid == 0
finally:
archive.close()
# building a sdist again
dist, cmd = self.get_cmd()
# creating a gztar
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'ns_fake_pkg-1.0.tar.gz')
archive = tarfile.open(archive_name)
# note that we are not testing the group ownership here
# because, depending on the platforms and the container
# rights (see #7408)
try:
for member in archive.getmembers():
assert member.uid == os.getuid()
finally:
archive.close()
| TestSDist |
python | numpy__numpy | numpy/linalg/lapack_lite/fortran.py | {
"start": 637,
"end": 1139
} | class ____:
"""LineIterator(iterable)
Return rstrip()'d lines from iterable, while keeping a count of the
line number in the .lineno attribute.
"""
def __init__(self, iterable):
object.__init__(self)
self.iterable = iter(iterable)
self.lineno = 0
def __iter__(self):
return self
def __next__(self):
self.lineno += 1
line = next(self.iterable)
line = line.rstrip()
return line
next = __next__
| LineIterator |
python | gevent__gevent | src/greentest/3.14/test_urllib.py | {
"start": 30392,
"end": 31789
} | class ____(unittest.TestCase, FakeHTTPMixin):
"""Test urllib.urlretrieve() using fake http connections"""
def test_short_content_raises_ContentTooShortError(self):
self.addCleanup(urllib.request.urlcleanup)
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
def _reporthook(par1, par2, par3):
pass
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL,
reporthook=_reporthook)
finally:
self.unfakehttp()
def test_short_content_raises_ContentTooShortError_without_reporthook(self):
self.addCleanup(urllib.request.urlcleanup)
self.fakehttp(b'''HTTP/1.1 200 OK
Date: Wed, 02 Jan 2008 03:03:54 GMT
Server: Apache/1.3.33 (Debian GNU/Linux) mod_ssl/2.8.22 OpenSSL/0.9.7e
Connection: close
Content-Length: 100
Content-Type: text/html; charset=iso-8859-1
FF
''')
with self.assertRaises(urllib.error.ContentTooShortError):
try:
urllib.request.urlretrieve(support.TEST_HTTP_URL)
finally:
self.unfakehttp()
| urlretrieve_HttpTests |
python | python-openxml__python-docx | src/docx/oxml/xmlchemy.py | {
"start": 3144,
"end": 3677
} | class ____(type):
"""Metaclass for BaseOxmlElement."""
def __init__(cls, clsname: str, bases: tuple[type, ...], namespace: dict[str, Any]):
dispatchable = (
OneAndOnlyOne,
OneOrMore,
OptionalAttribute,
RequiredAttribute,
ZeroOrMore,
ZeroOrOne,
ZeroOrOneChoice,
)
for key, value in namespace.items():
if isinstance(value, dispatchable):
value.populate_class_members(cls, key)
| MetaOxmlElement |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_any.py | {
"start": 29369,
"end": 29490
} | class ____(ipaddress.IPv4Interface):
def __str__(self):
return super().__str__() + '_subclassed'
| SubInterfaceV4 |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_insights/src/connectors_insights/pylint_plugins/cdk_deprecation_checkers.py | {
"start": 1157,
"end": 2630
} | class ____(DeprecatedMixin, BaseChecker):
"""Check for deprecated classes and modules.
The DeprecatedMixin class is here:
https://github.com/pylint-dev/pylint/blob/a5a77f6e891f6e143439d19b5e7f0a29eb5ea1cd/pylint/checkers/deprecated.py#L31
"""
name = "deprecated"
msgs = {
**DeprecatedMixin.DEPRECATED_METHOD_MESSAGE,
**DeprecatedMixin.DEPRECATED_ARGUMENT_MESSAGE,
**DeprecatedMixin.DEPRECATED_CLASS_MESSAGE,
**DeprecatedMixin.DEPRECATED_MODULE_MESSAGE,
}
def deprecated_modules(self) -> set[str]:
"""Callback method called by DeprecatedMixin for every module found in the code.
Returns:
collections.abc.Container of deprecated module names.
"""
return DEPRECATED_MODULES
def deprecated_classes(self, module: str) -> set[str]:
"""Callback method called by DeprecatedMixin for every class found in the code.
Returns:
collections.abc.Container of deprecated class names.
"""
_deprecated_classes = set()
for deprecated_class in DEPRECATED_CLASSES:
if deprecated_class.module is None or deprecated_class.module == module:
_deprecated_classes.add(deprecated_class.name)
return _deprecated_classes
def register(linter: PyLinter) -> None:
linter.register_checker(DeprecationChecker(linter))
linter.register_checker(ForbiddenMethodNameChecker(linter))
| DeprecationChecker |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 14275,
"end": 15373
} | class ____(Drag):
''' *toolbar icon*: |pan_icon|
The pan tool allows the user to pan a Plot by left-dragging a mouse, or on
touch devices by dragging a finger or stylus, across the plot region.
The pan tool also activates the border regions of a Plot for "single axis"
panning. For instance, dragging in the vertical border or axis will effect
a pan in the vertical direction only, with horizontal dimension kept fixed.
.. |pan_icon| image:: /_images/icons/pan.svg
:height: 24px
:alt: Icon of four arrows meeting in a plus shape representing the pan tool in the toolbar.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
dimensions = Enum(Dimensions, default="both", help="""
Which dimensions the pan tool is constrained to act in. By default
the pan tool will pan in any dimension, but can be configured to only
pan horizontally across the width of the plot, or vertically across the
height of the plot.
""")
| PanTool |
python | django__django | tests/model_fields/models.py | {
"start": 18011,
"end": 18397
} | class ____(models.Model):
name = models.CharField(max_length=10)
lower_name = models.GeneratedField(
expression=Lower("name"),
db_persist=False,
output_field=models.CharField(db_collation=test_collation, max_length=11),
)
class Meta:
required_db_features = {"supports_virtual_generated_columns"}
| GeneratedModelOutputFieldDbCollationVirtual |
python | kamyu104__LeetCode-Solutions | Python/valid-sudoku.py | {
"start": 31,
"end": 785
} | class ____(object):
def isValidSudoku(self, board):
"""
:type board: List[List[str]]
:rtype: bool
"""
for i in xrange(9):
if not self.isValidList([board[i][j] for j in xrange(9)]) or \
not self.isValidList([board[j][i] for j in xrange(9)]):
return False
for i in xrange(3):
for j in xrange(3):
if not self.isValidList([board[m][n] for n in xrange(3 * j, 3 * j + 3) \
for m in xrange(3 * i, 3 * i + 3)]):
return False
return True
def isValidList(self, xs):
xs = filter(lambda x: x != '.', xs)
return len(set(xs)) == len(xs)
| Solution |
python | PyCQA__pylint | tests/functional/u/unexpected_special_method_signature.py | {
"start": 211,
"end": 1020
} | class ____:
def __enter__(self, other): # [unexpected-special-method-signature]
pass
def __del__(self, other): # [unexpected-special-method-signature]
pass
def __format__(self, other, other2): # [unexpected-special-method-signature]
pass
def __setattr__(self): # [unexpected-special-method-signature]
pass
def __round__(self, invalid, args): # [unexpected-special-method-signature]
pass
def __deepcopy__(self, memo, other): # [unexpected-special-method-signature]
pass
def __iter__(): # [no-method-argument]
pass
@staticmethod
def __getattr__(self, nanana): # [unexpected-special-method-signature]
pass
def __subclasses__(self, blabla): # [unexpected-special-method-signature]
pass
| Invalid |
python | ray-project__ray | release/k8s_tests/locustfile.py | {
"start": 200,
"end": 564
} | class ____(HTTPAdapter):
def __init__(self, timeout, *args, **kwargs):
self.timeout = timeout
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
| TimeoutHTTPAdapter |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/util.py | {
"start": 1349,
"end": 5349
} | class ____:
"""Apply Python context manager behavior to transaction objects.
Performs validation to ensure the subject of the transaction is not
used if the transaction were ended prematurely.
"""
__slots__ = ("_outer_trans_ctx", "_trans_subject", "__weakref__")
_trans_subject: Optional[_TConsSubject]
def _transaction_is_active(self) -> bool:
raise NotImplementedError()
def _transaction_is_closed(self) -> bool:
raise NotImplementedError()
def _rollback_can_be_called(self) -> bool:
"""indicates the object is in a state that is known to be acceptable
for rollback() to be called.
This does not necessarily mean rollback() will succeed or not raise
an error, just that there is currently no state detected that indicates
rollback() would fail or emit warnings.
It also does not mean that there's a transaction in progress, as
it is usually safe to call rollback() even if no transaction is
present.
.. versionadded:: 1.4.28
"""
raise NotImplementedError()
def _get_subject(self) -> _TConsSubject:
raise NotImplementedError()
def commit(self) -> None:
raise NotImplementedError()
def rollback(self) -> None:
raise NotImplementedError()
def close(self) -> None:
raise NotImplementedError()
@classmethod
def _trans_ctx_check(cls, subject: _TConsSubject) -> None:
trans_context = subject._trans_context_manager
if trans_context:
if not trans_context._transaction_is_active():
raise exc.InvalidRequestError(
"Can't operate on closed transaction inside context "
"manager. Please complete the context manager "
"before emitting further commands."
)
def __enter__(self) -> Self:
subject = self._get_subject()
# none for outer transaction, may be non-None for nested
# savepoint, legacy nesting cases
trans_context = subject._trans_context_manager
self._outer_trans_ctx = trans_context
self._trans_subject = subject
subject._trans_context_manager = self
return self
def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
subject = getattr(self, "_trans_subject", None)
# simplistically we could assume that
# "subject._trans_context_manager is self". However, any calling
# code that is manipulating __exit__ directly would break this
# assumption. alembic context manager
# is an example of partial use that just calls __exit__ and
# not __enter__ at the moment. it's safe to assume this is being done
# in the wild also
out_of_band_exit = (
subject is None or subject._trans_context_manager is not self
)
if type_ is None and self._transaction_is_active():
try:
self.commit()
except:
with util.safe_reraise():
if self._rollback_can_be_called():
self.rollback()
finally:
if not out_of_band_exit:
assert subject is not None
subject._trans_context_manager = self._outer_trans_ctx
self._trans_subject = self._outer_trans_ctx = None
else:
try:
if not self._transaction_is_active():
if not self._transaction_is_closed():
self.close()
else:
if self._rollback_can_be_called():
self.rollback()
finally:
if not out_of_band_exit:
assert subject is not None
subject._trans_context_manager = self._outer_trans_ctx
self._trans_subject = self._outer_trans_ctx = None
| TransactionalContext |
python | keras-team__keras | keras/src/applications/convnext.py | {
"start": 5307,
"end": 6450
} | class ____(Layer):
"""Stochastic Depth module.
It performs batch-wise dropping rather than sample-wise. In libraries like
`timm`, it's similar to `DropPath` layers that drops residual paths
sample-wise.
References:
- https://github.com/rwightman/pytorch-image-models
Args:
drop_path_rate (float): Probability of dropping paths. Should be within
[0, 1].
Returns:
Tensor either with the residual path dropped or kept.
"""
def __init__(self, drop_path_rate, **kwargs):
super().__init__(**kwargs)
self.drop_path_rate = drop_path_rate
def call(self, x, training=None):
if training:
keep_prob = 1 - self.drop_path_rate
shape = (ops.shape(x)[0],) + (1,) * (len(ops.shape(x)) - 1)
random_tensor = keep_prob + random.uniform(shape, 0, 1)
random_tensor = ops.floor(random_tensor)
return (x / keep_prob) * random_tensor
return x
def get_config(self):
config = super().get_config()
config.update({"drop_path_rate": self.drop_path_rate})
return config
| StochasticDepth |
python | sympy__sympy | sympy/plotting/series.py | {
"start": 79365,
"end": 81513
} | class ____(BaseSeries):
"""Represents generic numerical data.
Notes
=====
This class serves the purpose of back-compatibility with the "markers,
annotations, fill, rectangles" keyword arguments that represent
user-provided numerical data. In particular, it solves the problem of
combining together two or more plot-objects with the ``extend`` or
``append`` methods: user-provided numerical data is also taken into
consideration because it is stored in this series class.
Also note that the current implementation is far from optimal, as each
keyword argument is stored into an attribute in the ``Plot`` class, which
requires a hard-coded if-statement in the ``MatplotlibBackend`` class.
The implementation suggests that it is ok to add attributes and
if-statements to provide more and more functionalities for user-provided
numerical data (e.g. adding horizontal lines, or vertical lines, or bar
plots, etc). However, in doing so one would reinvent the wheel: plotting
libraries (like Matplotlib) already implements the necessary API.
Instead of adding more keyword arguments and attributes, users interested
in adding custom numerical data to a plot should retrieve the figure
created by this plotting module. For example, this code:
.. plot::
:context: close-figs
:include-source: True
from sympy import Symbol, plot, cos
x = Symbol("x")
p = plot(cos(x), markers=[{"args": [[0, 1, 2], [0, 1, -1], "*"]}])
Becomes:
.. plot::
:context: close-figs
:include-source: True
p = plot(cos(x), backend="matplotlib")
fig, ax = p._backend.fig, p._backend.ax
ax.plot([0, 1, 2], [0, 1, -1], "*")
fig
Which is far better in terms of readability. Also, it gives access to the
full plotting library capabilities, without the need to reinvent the wheel.
"""
is_generic = True
def __init__(self, tp, *args, **kwargs):
self.type = tp
self.args = args
self.rendering_kw = kwargs
def get_data(self):
return self.args
| GenericDataSeries |
python | pytorch__pytorch | test/quantization/core/test_workflow_module.py | {
"start": 28787,
"end": 36410
} | class ____(QuantizationTestCase):
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from(
(torch.per_tensor_affine, torch.per_tensor_symmetric))
)
def test_observer_scriptable(self, qdtype, qscheme):
ob_list = [
HistogramObserver(dtype=qdtype, qscheme=qscheme),
default_histogram_observer()
]
for obs in ob_list:
scripted = torch.jit.script(obs)
x = torch.rand(3, 4)
obs(x)
scripted(x)
self.assertTrue(torch.equal(obs.histogram, scripted.histogram))
buf = io.BytesIO()
torch.jit.save(scripted, buf)
buf.seek(0)
loaded = torch.jit.load(buf)
self.assertTrue(torch.equal(obs.histogram, scripted.histogram))
@given(qdtype=st.sampled_from((torch.qint8, torch.quint8)),
qscheme=st.sampled_from((torch.per_tensor_affine, torch.per_tensor_symmetric)),
reduce_range=st.booleans())
@settings(max_examples=10)
def test_histogram_observer(self, qdtype, qscheme, reduce_range):
myobs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
# Calculate qparams should work for empty observers
qparams = myobs.calculate_qparams()
x = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
y = torch.tensor([5.0, 6.0, 7.0, 8.0])
out_x = myobs(x)
self.assertTrue(out_x.requires_grad)
myobs(y)
self.assertEqual(myobs.min_val, 2.0)
self.assertEqual(myobs.max_val, 8.0)
self.assertEqual(myobs.histogram, [2., 3., 3.])
qparams = myobs.calculate_qparams()
if reduce_range:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.0470588 * 255 / 127
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0235294 * 255 / 127
ref_zero_point = -64 if qdtype is torch.qint8 else 0
else:
if qscheme == torch.per_tensor_symmetric:
ref_scale = 0.0470588
ref_zero_point = 0 if qdtype is torch.qint8 else 128
else:
ref_scale = 0.0235294
ref_zero_point = -128 if qdtype is torch.qint8 else 0
self.assertEqual(qparams[1].item(), ref_zero_point)
self.assertEqual(qparams[0].item(), ref_scale, atol=1e-5, rtol=0)
# Test for serializability
state_dict = myobs.state_dict()
b = io.BytesIO()
torch.save(state_dict, b)
b.seek(0)
loaded_dict = torch.load(b)
for key in state_dict:
self.assertEqual(state_dict[key], loaded_dict[key])
loaded_obs = HistogramObserver(bins=3, dtype=qdtype, qscheme=qscheme, reduce_range=reduce_range)
loaded_obs.load_state_dict(loaded_dict)
loaded_qparams = loaded_obs.calculate_qparams()
self.assertEqual(myobs.min_val, loaded_obs.min_val)
self.assertEqual(myobs.max_val, loaded_obs.max_val)
self.assertEqual(myobs.histogram, loaded_obs.histogram)
self.assertEqual(myobs.bins, loaded_obs.bins)
self.assertEqual(myobs.calculate_qparams(), loaded_obs.calculate_qparams())
def test_histogram_observer_one_sided(self):
myobs = HistogramObserver(bins=8, dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
x = torch.tensor([0.0, 0.3, 1.2, 1.7])
y = torch.tensor([0.1, 1.3, 2.0, 2.7])
myobs(x)
myobs(y)
self.assertEqual(myobs.min_val, 0)
qparams = myobs.calculate_qparams()
self.assertEqual(qparams[1].item(), 0)
def test_histogram_observer_same_inputs(self):
myobs = HistogramObserver(bins=3, dtype=torch.qint8, qscheme=torch.per_tensor_symmetric,
reduce_range=False)
w = torch.ones(4, requires_grad=True)
x = torch.zeros(4, requires_grad=True)
y = torch.tensor([2.0, 3.0, 4.0, 5.0], requires_grad=True)
z = torch.tensor([5.0, 6.0, 7.0, 8.0])
myobs(w)
myobs(x)
myobs(x)
myobs(y)
myobs(z)
qparams = myobs.calculate_qparams()
self.assertEqual(myobs.min_val, 0.0)
self.assertEqual(myobs.max_val, 8.0)
self.assertEqual(myobs.histogram, [13.25, 3.75, 3.])
@skipIfTorchDynamo("too slow")
@given(N=st.sampled_from([10, 1000]),
bins=st.sampled_from([256, 512, 1024, 2048]),
dtype=st.sampled_from([torch.qint8, torch.quint8]),
qscheme=st.sampled_from([torch.per_tensor_affine, torch.per_tensor_symmetric]),
reduce_range=st.booleans())
def test_histogram_observer_against_reference(self, N, bins, dtype, qscheme, reduce_range):
ref_obs = _ReferenceHistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
my_obs = HistogramObserver(bins=bins, dtype=dtype, qscheme=qscheme, reduce_range=reduce_range)
for _ in range(10):
X = torch.randn(N)
my_obs(X)
ref_obs(X)
self.assertEqual(my_obs.histogram, ref_obs.histogram)
self.assertEqual(my_obs.min_val, ref_obs.min_val)
self.assertEqual(my_obs.max_val, ref_obs.max_val)
ref_qparams = ref_obs.calculate_qparams()
my_qparams = my_obs.calculate_qparams()
for i in range(0, bins, 200):
for j in range(i + 5, bins, 200):
ref_qe = ref_obs._compute_quantization_error(i, j)
qe = my_obs._compute_quantization_error(i, j)
self.assertEqual(ref_qe, qe)
self.assertEqual(ref_qparams, my_qparams)
def test_histogram_observer_extreme_inputs(self):
"""
Ensures that the HistogramObserver is able to work correctly in
a rare case: extreme small max values
"""
obs = HistogramObserver()
test_input = torch.tensor(
[0.0, 0.0, 4.58e-41, 4.58e-41]
)
# Make sure it runs, two passes are required based on the behavior of forward func
# The first pass initializes min_val&max_val, and second pass calls _adjust_min_max
obs(test_input)
obs(test_input)
def test_histogram_observer_correct_numel(self):
for i in range(1, 10):
obs = HistogramObserver()
obs(torch.randn(i, i))
self.assertEqual(obs.histogram.sum().item(), i**2)
def test_histogram_observer_single_inputs(self):
# Make sure that if we pass single valued tensors to the observer, the code runs
observer = HistogramObserver(bins=10)
a = torch.FloatTensor([1])
b = torch.FloatTensor([3])
c = torch.FloatTensor([2])
d = torch.FloatTensor([4])
observer(a)
observer(b)
observer(c)
observer(d)
self.assertEqual(observer.min_val, 1)
self.assertEqual(observer.max_val, 4)
self.assertEqual(torch.sum(observer.histogram), 4)
def test_histogram_observer_update_within_range_succeeds(self):
# test if an update within the existing range actually updates
myobs = HistogramObserver(bins=10)
x = torch.tensor([0.0, 3.0, 4.0, 9.0])
y = torch.tensor([2.0, 3.0, 7.0, 8.0])
myobs(x)
myobs(y)
self.assertEqual(myobs.min_val, 0.0)
self.assertEqual(myobs.max_val, 9.0)
self.assertEqual(myobs.histogram, [1., 0., 1., 2., 1., 0., 0., 1., 1., 1.])
| TestHistogramObserver |
python | dask__distributed | distributed/dashboard/components/nvml.py | {
"start": 591,
"end": 5744
} | class ____(DashboardComponent):
"""How many tasks are on each worker"""
@log_errors
def __init__(self, scheduler, width=600, **kwargs):
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"memory": [1, 2],
"memory-half": [0.5, 1],
"memory_text": ["1B", "2B"],
"utilization": [1, 2],
"utilization-half": [0.5, 1],
"worker": ["a", "b"],
"gpu-index": [0, 0],
"y": [1, 2],
"escaped_worker": ["a", "b"],
}
)
memory = figure(
title="GPU Memory",
tools="",
width=int(width / 2),
name="gpu_memory_histogram",
**kwargs,
)
rect = memory.rect(
source=self.source,
x="memory-half",
y="y",
width="memory",
height=1,
color="#76B900",
)
rect.nonselection_glyph = None
utilization = figure(
title="GPU Utilization",
tools="",
width=int(width / 2),
name="gpu_utilization_histogram",
**kwargs,
)
rect = utilization.rect(
source=self.source,
x="utilization-half",
y="y",
width="utilization",
height=1,
color="#76B900",
)
rect.nonselection_glyph = None
memory.axis[0].ticker = BasicTicker(**TICKS_1024)
memory.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
memory.xaxis.major_label_orientation = -math.pi / 12
memory.x_range.start = 0
for fig in [memory, utilization]:
fig.xaxis.minor_tick_line_alpha = 0
fig.yaxis.visible = False
fig.ygrid.visible = False
tap = TapTool(callback=OpenURL(url="./info/worker/@escaped_worker.html"))
fig.add_tools(tap)
fig.toolbar_location = None
fig.yaxis.visible = False
hover = HoverTool()
hover.tooltips = "@worker : @utilization %"
hover.point_policy = "follow_mouse"
utilization.add_tools(hover)
hover = HoverTool()
hover.tooltips = "@worker : @memory_text"
hover.point_policy = "follow_mouse"
memory.add_tools(hover)
self.memory_figure = memory
self.utilization_figure = utilization
self.utilization_figure.y_range = memory.y_range
self.utilization_figure.x_range.start = 0
self.utilization_figure.x_range.end = 100
@without_property_validation
@log_errors
def update(self):
workers = list(self.scheduler.workers.values())
utilization = []
memory = []
gpu_index = []
y = []
memory_total = 0
memory_max = 0
worker = []
for idx, ws in enumerate(workers):
try:
mem_used = ws.metrics["gpu_memory_used"]
mem_total = ws.metrics["gpu-memory-total"]
u = ws.metrics["gpu_utilization"]
except KeyError:
continue
memory_max = max(memory_max, mem_total)
memory_total += mem_total
utilization.append(int(u) if u else 0)
memory.append(mem_used)
worker.append(ws.address)
gpu_index.append(idx)
y.append(idx)
memory_text = [format_bytes(m) for m in memory]
result = {
"memory": memory,
"memory-half": [m / 2 for m in memory],
"memory_text": memory_text,
"utilization": utilization,
"utilization-half": [u / 2 for u in utilization],
"worker": worker,
"gpu-index": gpu_index,
"y": y,
"escaped_worker": [url_escape(w) for w in worker],
}
self.memory_figure.title.text = (
f"GPU Memory: {format_bytes(sum(memory))} / {format_bytes(memory_total)}"
)
self.memory_figure.x_range.end = memory_max
update(self.source, result)
@log_errors
def gpu_memory_doc(scheduler, extra, doc):
gpu_load = GPUCurrentLoad(scheduler, sizing_mode="stretch_both")
gpu_load.update()
add_periodic_callback(doc, gpu_load, 100)
doc.add_root(gpu_load.memory_figure)
doc.theme = BOKEH_THEME
@log_errors
def gpu_utilization_doc(scheduler, extra, doc):
gpu_load = GPUCurrentLoad(scheduler, sizing_mode="stretch_both")
gpu_load.update()
add_periodic_callback(doc, gpu_load, 100)
doc.add_root(gpu_load.utilization_figure)
doc.theme = BOKEH_THEME
@log_errors
def gpu_doc(scheduler, extra, doc):
gpu_load = GPUCurrentLoad(scheduler, sizing_mode="stretch_both")
gpu_load.update()
add_periodic_callback(doc, gpu_load, 100)
doc.add_root(gpu_load.memory_figure)
doc.add_root(gpu_load.utilization_figure)
doc.title = "Dask: GPU"
doc.theme = BOKEH_THEME
doc.template = env.get_template("gpu.html")
doc.template_variables.update(extra)
| GPUCurrentLoad |
python | huggingface__transformers | src/transformers/models/glm4v_moe/convert_glm4v_moe_mgt_weights_to_hf.py | {
"start": 776,
"end": 33298
} | class ____(pickle.Unpickler):
def find_class(self, mod_name, name):
class DummyClass:
def __init__(self, *args, **kwargs):
pass
if mod_name.startswith("megatron") or mod_name.startswith("glm") or mod_name.startswith("__main__"):
return DummyClass
return super().find_class(mod_name, name)
pickle.Unpickler = UnpicklerWrapper
def dict_access_multi(a_dict, keys):
if len(keys) == 0:
return a_dict
return dict_access_multi(a_dict[keys[0]], keys[1:])
def merge_qkv(
sd_list,
original_tp,
num_attention_heads,
multi_query_group_num,
attention_dim,
interleaved_qkv,
):
group_size = (num_attention_heads // multi_query_group_num + 2) * attention_dim
q, k, v = [], [], []
for sd in sd_list:
if interleaved_qkv:
shape = sd.shape
q_, k_, v_ = sd.view((multi_query_group_num // original_tp, group_size) + (shape[1:])).split(
[
(num_attention_heads // multi_query_group_num * attention_dim),
attention_dim,
attention_dim,
],
dim=1,
)
q_ = q_.reshape((-1,) + (shape[1:]))
k_ = k_.reshape((-1,) + (shape[1:]))
v_ = v_.reshape((-1,) + (shape[1:]))
else:
q_, k_, v_ = sd.split(
[
num_attention_heads * attention_dim // original_tp,
multi_query_group_num * attention_dim // original_tp,
multi_query_group_num * attention_dim // original_tp,
],
dim=0,
)
q.append(q_.clone())
k.append(k_.clone())
v.append(v_.clone())
q = torch.cat(q, dim=0)
k = torch.cat(k, dim=0)
v = torch.cat(v, dim=0)
return q, k, v
def merge_glu(sd_list):
return torch.cat(
[sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list]
+ [sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list],
dim=0,
)
def merge_glu_vit(sd_list, original_tp=None):
if not isinstance(sd_list, list):
sd_list = [sd_list]
gate_proj = torch.cat([sd.chunk(dim=0, chunks=2)[0].clone() for sd in sd_list], dim=0)
up_proj = torch.cat([sd.chunk(dim=0, chunks=2)[1].clone() for sd in sd_list], dim=0)
return gate_proj, up_proj
def split_glu(sd, cnt, idx):
return torch.cat(
(
sd.chunk(dim=0, chunks=2)[0].chunk(cnt, dim=0)[idx].clone(),
sd.chunk(dim=0, chunks=2)[1].chunk(cnt, dim=0)[idx].clone(),
),
dim=0,
)
def find_expert_weight(input_dict, layer_num, fc1=True):
if fc1:
pattern = re.compile(rf"^decoder\.layers\.{layer_num}\.mlp\.experts\.linear_fc1\.weight(\d+)$")
else:
pattern = re.compile(rf"^decoder\.layers\.{layer_num}\.mlp\.experts\.linear_fc2\.weight(\d+)$")
matched = []
for key in input_dict:
match = pattern.match(key)
if match:
weight_num = int(match.group(1))
matched.append((weight_num, key))
matched.sort(key=lambda x: x[0])
weights = [None for _ in range(len(matched) * len(input_dict[matched[0][1]]))]
for idx, key in matched:
for i, weight in enumerate(input_dict[key]):
weights[i * len(matched) + idx] = weight
return weights
def merge_tensors(
tp_sd,
keys,
original_tp,
target_tp,
current_tp,
slice_dim=None,
merge_fn=None,
):
cnt = original_tp // target_tp
offset = cnt * current_tp
sd_list = [dict_access_multi(tp_sd[i + offset], keys) for i in range(cnt)]
if slice_dim is not None:
return torch.cat(sd_list, dim=slice_dim)
assert merge_fn is not None
return merge_fn(sd_list)
def save_sharded_model(state_dict, output_path, max_shard_size_gb=5, num_layers=46, vision_num_layers=24):
os.makedirs(output_path, exist_ok=True)
layered_dict = {}
for layer_idx in range(num_layers):
layer_key = f"layer_{layer_idx}"
layered_dict[layer_key] = {}
for key, value in state_dict.items():
if f"model.language_model.layers.{layer_idx}." in key:
if isinstance(value, list):
assert len(value) == 1, f"{key} {value}"
value = value[0]
layered_dict[layer_key][key] = value
for layer_idx in range(vision_num_layers):
layer_key = f"visual_layer_{layer_idx}"
layered_dict[layer_key] = {}
for key, value in state_dict.items():
if f"model.visual.blocks.{layer_idx}." in key:
layered_dict[layer_key][key] = value
layered_dict["others"] = {}
for key, value in state_dict.items():
if not any(f"model.language_model.layers.{i}." in key for i in range(num_layers)) and not any(
f"model.visual.blocks.{i}." in key for i in range(vision_num_layers)
):
layered_dict["others"][key] = value
# Determine layer ordering
layer_order = []
for i in range(num_layers):
layer_order.append(f"layer_{i}")
for i in range(vision_num_layers):
layer_order.append(f"visual_layer_{i}")
layer_order.append("others")
# Calculate sizes and create shards by layer
param_sizes = {}
shards = []
current_shard = {}
current_shard_size = 0
max_shard_size_bytes = max_shard_size_gb * 1024 * 1024 * 1024
for layer_key in layer_order:
layer_weights = layered_dict[layer_key]
layer_size = sum(param.numel() * param.element_size() for param in layer_weights.values())
if current_shard_size + layer_size > max_shard_size_bytes and current_shard:
shards.append(current_shard)
current_shard = {}
current_shard_size = 0
for param_name, param in layer_weights.items():
current_shard[param_name] = param
current_shard_size += param.numel() * param.element_size()
param_sizes[param_name] = param.numel() * param.element_size()
if current_shard:
shards.append(current_shard)
index_dict = {"metadata": {"total_size": sum(param_sizes.values())}, "weight_map": {}}
for i, shard in enumerate(shards):
shard_filename = f"model-{i + 1:05d}-of-{len(shards):05d}.safetensors"
shard_path = os.path.join(output_path, shard_filename)
for param_name in shard:
index_dict["weight_map"][param_name] = shard_filename
save_file(shard, shard_path, metadata={"format": "pt"})
print(f"Saved shard {i + 1}/{len(shards)}: {shard_filename}")
print(f" Shard size: {sum(p.numel() * p.element_size() for p in shard.values()) / (1024**3):.2f} GB")
print(f" Keys in shard: {len(shard)}")
index_path = os.path.join(output_path, "model.safetensors.index.json")
with open(index_path, "w") as f:
json.dump(index_dict, f, indent=2)
return len(shards)
def merge_tp_weights(model_path, output_path, vllm_config_path=None):
origin_tp, origin_ep, origin_pp = -1, -1, -1
check_ep_or_pp_later = False
for item in Path(model_path).iterdir():
if item.is_dir():
match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?", item.name)
if match:
groups = match.groups()
tp = int(groups[0])
origin_tp = max(origin_tp, tp + 1)
# maybe TP-EP or TP-PP, need check later
if groups[1] is not None and groups[2] is None:
pp = int(groups[1])
origin_pp = max(origin_pp, pp + 1)
origin_ep = 1
check_ep_or_pp_later = True
elif groups[1] is not None and groups[2] is not None:
pp = int(groups[1])
ep = int(groups[2])
origin_pp = max(origin_pp, pp + 1)
origin_ep = max(origin_ep, ep + 1)
else:
origin_ep = 1
origin_pp = 1
tensor_names_by_file = {}
mgt_sd = {}
for item in Path(model_path).iterdir():
if item.is_dir():
match = re.match(r"mp_rank_(\d{2})(?:_(\d{3}))?(?:_(\d{3}))?$", item.name)
if match:
groups = match.groups()
tp = int(groups[0])
pp = int(groups[1]) if groups[1] is not None else 0
ep = int(groups[2]) if groups[2] is not None else 0
file_path = item / "model_optim_rng.pt"
assert file_path.exists(), f"model_optim_rng.pt not found in {item}"
file_sd = torch.load(file_path, map_location="cpu", weights_only=False)
for k in list(file_sd.keys()):
if "_extra_state" in k or "dummy_parameter" in k:
file_sd.pop(k)
mgt_sd[(tp, pp, ep)] = file_sd
tensor_names = set()
if "model" in file_sd:
for key in file_sd["model"].keys():
tensor_names.add(key)
tensor_names_by_file[(tp, pp, ep)] = tensor_names
change_pp_to_ep = False
if check_ep_or_pp_later:
prefix_distribution = {}
for (tp, pp, ep), prefixes in tensor_names_by_file.items():
for prefix in prefixes:
if prefix not in prefix_distribution:
prefix_distribution[prefix] = set()
prefix_distribution[prefix].add((tp, pp, ep))
for prefix, locations in prefix_distribution.items():
if len(locations) > 1:
pp_values = {loc[1] for loc in locations}
if len(pp_values) > 1:
print(f"find '{prefix}' in multi ranks {pp_values} the parallelism should be TP-EP")
origin_ep = origin_pp
origin_pp = 1
change_pp_to_ep = True
break
else:
print(f"find '{prefix}' only in one ep, parallelism should be TP-PP")
break
print(f"Detected tensor parallel degree TP={origin_tp} EP={origin_ep} PP={origin_pp}")
if origin_tp <= 1 and origin_ep <= 1 and origin_pp <= 1:
print("Model is already at TP=1 EP=1 PP=1, no need to merge")
return
assert max(origin_tp, origin_ep) * origin_pp == len(tensor_names_by_file), "maybe some problem in origin weight"
organized_sd = {}
for (tp, pp, ep), file_sd in mgt_sd.items():
if change_pp_to_ep:
pp, ep = ep, pp
organized_sd.setdefault(pp, {})
organized_sd[pp][(ep, tp)] = file_sd
find_vpp = "model0" in file_sd
# support VPP, if each pp rank has n vpp blocks, we will treat the original model
# was parallel as pp n * origin_pp
if find_vpp:
organized_sd_vpp = {}
for i in range(origin_pp):
for (ep, tp), file_sd in organized_sd[i].items():
model_keys = sorted(
[key for key in file_sd.keys() if key.startswith("model") and key[5:].isdigit()],
key=lambda x: int(x[5:]),
)
vp_blocks = len(model_keys)
for idx, key in enumerate(model_keys):
assert key in file_sd, f"model {key} not found"
organized_sd_vpp.setdefault(idx * origin_pp + i, {})
organized_sd_vpp[idx * origin_pp + i][(ep, tp)] = {"model": file_sd[key]}
origin_pp = origin_pp * vp_blocks
organized_sd = organized_sd_vpp
ignore_list = ["_extra_state", "dummy_parameter"]
layer_share_list = [
"norm",
"conv3d",
"downsample",
"router",
"mlp.linear_fc2.bias",
"self_attention.linear_proj.bias",
"position_embeddings",
]
full_weights = {}
vit_layer_offset = 0
llm_layer_offset = 0
llm_layer_pattern = re.compile(r"^(decoder\.layers\.)(\d+)(\..*)$")
vit_layer_pattern = re.compile(r"^(vision_model\.transformer\.layers\.)(\d+)(\..*)$")
for pp in sorted(organized_sd.keys()):
pp_dict = organized_sd[pp]
next_llm_layer_offset = llm_layer_offset
next_vit_layer_offset = vit_layer_offset
ep_map = {}
tp_map = {}
tp_seen = set()
for (ep, tp), item in pp_dict.items():
if tp not in tp_seen:
tp_seen.add(tp)
tp_map[tp] = item
ep_map[ep] = item
for tp in sorted(tp_map.keys()):
sd = tp_map[tp]
for full_name, tensor in sd["model"].items():
if any(x in full_name for x in ignore_list):
continue
llm_name_match = llm_layer_pattern.match(full_name)
if llm_name_match:
# Use a closure to avoid global variable issues
def offset_layer(x, offset=llm_layer_offset):
nonlocal next_llm_layer_offset
_real_layer = int(x.group(2)) + offset
next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1)
return f"{x.group(1)}{_real_layer}{x.group(3)}"
full_name = llm_layer_pattern.sub(offset_layer, full_name)
vit_name_match = vit_layer_pattern.match(full_name)
if vit_name_match:
# Use a closure to avoid global variable issues
def offset_layer(x, offset=vit_layer_offset):
nonlocal next_vit_layer_offset
_real_layer = int(x.group(2)) + offset
next_vit_layer_offset = max(next_vit_layer_offset, _real_layer + 1)
return f"{x.group(1)}{_real_layer}{x.group(3)}"
full_name = vit_layer_pattern.sub(offset_layer, full_name)
if layer_share_list and any(x in full_name for x in layer_share_list):
if full_name not in full_weights:
full_weights[full_name] = tensor
else:
assert torch.equal(tensor, full_weights[full_name]), (
f"detect diff param in tp named: {full_name}"
)
elif not re.search(r"\.experts\.", full_name):
full_weights.setdefault(full_name, [None for _ in range(origin_tp)])
full_weights[full_name][tp] = tensor
for ep in sorted(ep_map.keys()):
sd = ep_map[ep]
for full_name, tensor in sd["model"].items():
if any(x in full_name for x in ignore_list):
continue
name_match = llm_layer_pattern.match(full_name)
if name_match:
# Use a closure to avoid global variable issues
def offset_layer(x, offset=llm_layer_offset):
nonlocal next_llm_layer_offset
_real_layer = int(x.group(2)) + offset
next_llm_layer_offset = max(next_llm_layer_offset, _real_layer + 1)
return f"{x.group(1)}{_real_layer}{x.group(3)}"
full_name = llm_layer_pattern.sub(offset_layer, full_name)
if re.search(r"\.experts\.", full_name):
full_weights.setdefault(full_name, [None for _ in range(origin_ep)])
full_weights[full_name][ep] = tensor
llm_layer_offset = next_llm_layer_offset
vit_layer_offset = next_vit_layer_offset
for k in sorted(full_weights.keys()):
item = full_weights[k]
if isinstance(item, list):
print(f"{k} {len(item)} {item[0].shape} {item[0].dtype}", flush=True)
else:
print(f"{k} {item.shape} {item.dtype}", flush=True)
print(f"Loading vLLM configuration file: {vllm_config_path}")
with open(vllm_config_path, "r") as f:
model_config = json.load(f)
print(model_config)
text_config = model_config.get("text_config", {})
vision_config = model_config.get("vision_config", {})
num_layers = text_config.get("num_hidden_layers", 46)
llm_num_heads = text_config.get("num_attention_heads", 96)
num_kv_heads = text_config.get("num_key_value_heads", 8)
llm_attn_query_size = text_config.get("llm_attn_query_size", 12288)
head_dim = text_config.get("attention_dim", llm_attn_query_size // llm_num_heads)
vision_num_layers = vision_config.get("depth", 24)
vit_n_head = vision_config.get("num_heads", 12)
print(
f"Model parameters: num_layers={num_layers}, vision_num_layers={vision_num_layers}, "
f"num_heads={llm_num_heads}, multi_query_group_num={num_kv_heads}, llm_attn_query_size={llm_attn_query_size}"
)
print("Merging tensor parallel weights...")
interleaved_qkv = True
num_attention_heads = llm_num_heads
multi_query_group_num = num_kv_heads
attention_dim = head_dim
complete_state_dict = {}
# LLM
layer_i = 0
while f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights:
if f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.input_layernorm.weight"] = full_weights[
f"decoder.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight"
]
if f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = (
full_weights[f"decoder.layers.{layer_i}.pre_mlp_layernorm.weight"]
)
elif f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight" in full_weights:
complete_state_dict[f"model.language_model.layers.{layer_i}.post_attention_layernorm.weight"] = (
full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"]
)
q, k, v = merge_qkv(
sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.weight"],
original_tp=origin_tp,
num_attention_heads=num_attention_heads,
multi_query_group_num=multi_query_group_num,
attention_dim=attention_dim,
interleaved_qkv=interleaved_qkv,
)
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.weight"] = q.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.weight"] = k.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.weight"] = v.clone()
if f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias" in full_weights:
q_bias, k_bias, v_bias = merge_qkv(
sd_list=full_weights[f"decoder.layers.{layer_i}.self_attention.linear_qkv.bias"],
original_tp=origin_tp,
num_attention_heads=num_attention_heads,
multi_query_group_num=multi_query_group_num,
attention_dim=attention_dim,
interleaved_qkv=interleaved_qkv,
)
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.q_proj.bias"] = q_bias.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.k_proj.bias"] = k_bias.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.v_proj.bias"] = v_bias.clone()
o_proj = torch.cat(full_weights[f"decoder.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1)
complete_state_dict[f"model.language_model.layers.{layer_i}.self_attn.o_proj.weight"] = o_proj.clone()
if f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc1.weight" in full_weights:
routed_expert_fc1_weights = find_expert_weight(full_weights, layer_i, fc1=True)
for idx, weight in enumerate(routed_expert_fc1_weights):
gate_proj_weight, up_proj_weight = merge_glu_vit([weight])
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.gate_proj.weight"] = (
gate_proj_weight.clone()
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.up_proj.weight"] = (
up_proj_weight.clone()
)
routed_expert_fc2_weights = find_expert_weight(full_weights, layer_i, fc1=False)
for idx, weight in enumerate(routed_expert_fc2_weights):
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.experts.{idx}.down_proj.weight"] = (
weight.clone()
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate.e_score_correction_bias"] = (
full_weights[f"decoder.layers.{layer_i}.mlp.router.expert_bias"]
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate.weight"] = full_weights[
f"decoder.layers.{layer_i}.mlp.router.weight"
]
gate_proj_weight, up_proj_weight = merge_glu_vit(
full_weights[f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc1.weight"]
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.gate_proj.weight"] = (
gate_proj_weight.clone()
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.up_proj.weight"] = (
up_proj_weight.clone()
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.shared_experts.down_proj.weight"] = (
full_weights[f"decoder.layers.{layer_i}.mlp.shared_experts.linear_fc2.weight"]
)
else:
# MLP - Use gate_up_proj
gate_proj_weight, up_proj_weight = merge_glu_vit(
full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc1.weight"]
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.gate_proj.weight"] = (
gate_proj_weight.clone()
)
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone()
complete_state_dict[f"model.language_model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
full_weights[f"decoder.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1
)
layer_i += 1
# Embedd Model, LM Head, and Norm
embed_tokens = torch.cat(full_weights["embedding.word_embeddings.weight"], dim=0)
complete_state_dict["model.language_model.embed_tokens.weight"] = embed_tokens.clone()
lm_head = torch.cat(full_weights["output_layer.weight"], dim=0)
complete_state_dict["lm_head.weight"] = lm_head.clone()
complete_state_dict["model.language_model.norm.weight"] = full_weights["decoder.final_layernorm.weight"].clone()
# VLM
for layer_i in range(vision_num_layers):
complete_state_dict[f"model.visual.blocks.{layer_i}.norm1.weight"] = full_weights[
f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.layer_norm_weight"
]
complete_state_dict[f"model.visual.blocks.{layer_i}.norm2.weight"] = full_weights[
f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.layer_norm_weight"
]
q, k, v = merge_qkv(
sd_list=full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_qkv.weight"],
original_tp=origin_tp,
num_attention_heads=vit_n_head,
multi_query_group_num=vit_n_head,
attention_dim=attention_dim,
interleaved_qkv=interleaved_qkv,
)
complete_state_dict[f"model.visual.blocks.{layer_i}.attn.qkv.weight"] = torch.cat((q, k, v), dim=0)
proj_weight = torch.cat(
full_weights[f"vision_model.transformer.layers.{layer_i}.self_attention.linear_proj.weight"], dim=1
)
complete_state_dict[f"model.visual.blocks.{layer_i}.attn.proj.weight"] = proj_weight.clone()
gate_proj_weight, up_proj_weight = merge_glu_vit(
full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc1.weight"]
)
complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.gate_proj.weight"] = gate_proj_weight.clone()
complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.up_proj.weight"] = up_proj_weight.clone()
down_proj_weight = torch.cat(
full_weights[f"vision_model.transformer.layers.{layer_i}.mlp.linear_fc2.weight"], dim=1
)
complete_state_dict[f"model.visual.blocks.{layer_i}.mlp.down_proj.weight"] = down_proj_weight.clone()
complete_state_dict["model.visual.downsample.weight"] = (
full_weights["vision_model.downsample.weight"].clone().contiguous()
)
complete_state_dict["model.visual.downsample.bias"] = (
full_weights["vision_model.downsample.bias"].clone().contiguous()
)
# Merger
gate_proj, up_proj = merge_glu_vit(full_weights["vision_projection.encoder.linear_fc1.weight"])
down_proj = torch.cat(full_weights["vision_projection.encoder.linear_fc2.weight"], dim=1)
proj = torch.cat(full_weights["vision_projection.linear_fc_extra.weight"], dim=0)
complete_state_dict["model.visual.merger.gate_proj.weight"] = gate_proj.clone().contiguous()
complete_state_dict["model.visual.merger.up_proj.weight"] = up_proj.clone().contiguous()
complete_state_dict["model.visual.merger.down_proj.weight"] = down_proj.clone().contiguous()
complete_state_dict["model.visual.merger.proj.weight"] = proj.clone().contiguous()
if "vision_projection.layer_norm.weight" in full_weights:
complete_state_dict["model.visual.merger.post_projection_norm.weight"] = full_weights[
"vision_projection.layer_norm.weight"
]
if "vision_projection.layer_norm.bias" in full_weights:
complete_state_dict["model.visual.merger.post_projection_norm.bias"] = full_weights[
"vision_projection.layer_norm.bias"
]
complete_state_dict["model.visual.embeddings.position_embedding.weight"] = (
full_weights["vision_model.position_embeddings.weight"].clone().contiguous()
)
complete_state_dict["model.visual.patch_embed.proj.weight"] = (
full_weights["vision_model.conv3d.weight"].clone().contiguous()
)
complete_state_dict["model.visual.patch_embed.proj.bias"] = (
full_weights["vision_model.conv3d.bias"].clone().contiguous()
)
# Check for additional vision model norm layers mentioned in the expected output
if "vision_model.post_conv_layernorm.weight" in full_weights:
complete_state_dict["model.visual.post_conv_layernorm.weight"] = (
full_weights["vision_model.post_conv_layernorm.weight"].clone().contiguous()
)
if "vision_model.post_layernorm.weight" in full_weights:
complete_state_dict["model.visual.post_layernorm.weight"] = (
full_weights["vision_model.post_layernorm.weight"].clone().contiguous()
)
print(f"Total keys in state dict: {len(complete_state_dict)}")
print("bias use Float32")
save_sharded_model(
complete_state_dict,
output_path=output_path,
max_shard_size_gb=5,
num_layers=num_layers,
vision_num_layers=vision_num_layers,
)
hf_config = {
"architectures": ["Glm4vMoeForConditionalGeneration"],
"model_type": "glm4v_moe",
"image_start_token_id": model_config.get("image_start_token_id", 151339),
"image_end_token_id": model_config.get("image_end_token_id", 151340),
"video_start_token_id": model_config.get("video_start_token_id", 151341),
"video_end_token_id": model_config.get("video_end_token_id", 151342),
"transformers_version": "4.57.0.dev0",
}
txt_config = {
"model_type": "glm4v_moe_text",
"attention_bias": model_config.get("add_qkv_bias", True),
"use_qk_norm": model_config.get("use_qk_norm", False),
"attention_dropout": 0.0,
"pad_token_id": model_config.get("pad_token_id", 151329),
"eos_token_id": model_config.get("eos_token_id", [151329, 151336, 151338]),
"image_token_id": model_config.get("image_token_id", 151363),
"video_token_id": model_config.get("video_token_id", 151364),
"hidden_act": text_config.get("hidden_act", "silu"),
"hidden_size": text_config.get("hidden_size", 4096),
"initializer_range": 0.02,
"intermediate_size": text_config.get("intermediate_size", 10944),
"max_position_embeddings": text_config.get("seq_length", 131072),
"num_attention_heads": text_config.get("num_attention_heads", 96),
"num_hidden_layers": text_config.get("num_layers", 46),
"num_key_value_heads": text_config.get("multi_query_group_num", 2),
"rms_norm_eps": text_config.get("layernorm_epsilon", 1e-05),
"dtype": text_config.get("torch_dtype", "bfloat16"),
"use_cache": text_config.get("use_cache", True),
"vocab_size": text_config.get("vocab_size", 151424),
"partial_rotary_factor": 0.5,
"tie_word_embeddings": False,
"moe_intermediate_size": text_config.get("moe_intermediate_size", 1408),
"n_group": text_config.get("n_group", 1),
"n_routed_experts": text_config.get("n_routed_experts", 128),
"n_shared_experts": text_config.get("n_shared_experts", 1),
"norm_topk_prob": text_config.get("norm_topk_prob", True),
"num_experts_per_tok": text_config.get("num_experts_per_tok", 8),
"rope_parameters": {
"rope_type": "default",
"rope_theta": 10000.0,
"mrope_section": [8, 12, 12],
"partial_rotary_factor": 0.5,
},
}
hf_config["text_config"] = txt_config
if "vision_config" in model_config:
vision_config = {
"model_type": "glm4v_moe_vision",
"hidden_size": model_config["vision_config"].get("hidden_size", 1536),
"depth": model_config["vision_config"].get("num_layers", 24),
"num_heads": model_config["vision_config"].get("num_attention_heads", 12),
"attention_bias": model_config["vision_config"].get("attention_bias", False),
"intermediate_size": model_config.get("ffn_hidden_size", 13696),
"hidden_act": model_config["vision_config"].get("hidden_act", "silu"),
"hidden_dropout_prob": model_config["vision_config"].get("hidden_dropout_prob", 0.0),
"initializer_range": 0.02,
"image_size": model_config["vision_config"].get("image_size", 336),
"patch_size": model_config["vision_config"].get("patch_size", 14),
"out_hidden_size": model_config.get("hidden_size", 4096),
"rms_norm_eps": model_config["vision_config"].get("layernorm_epsilon", 1e-05),
"spatial_merge_size": model_config["vision_config"].get("downsample_ratio", 2),
"temporal_patch_size": model_config["vision_config"].get("t_patch", 2),
}
hf_config["vision_config"] = vision_config
config_path = os.path.join(output_path, "config.json")
with open(config_path, "w") as f:
json.dump(hf_config, f, indent=2)
print(f"Conversion complete! Model saved to {output_path}")
def parse_args():
parser = argparse.ArgumentParser(description="Convert Megatron model to HuggingFace format")
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to Megatron model directory",
)
parser.add_argument("--output_path", type=str, required=True, help="Output path for HuggingFace model directory")
parser.add_argument(
"--config_path", type=str, help="Path to vLLM configuration file for creating HuggingFace config"
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
merge_tp_weights(args.model_path, args.output_path, args.config_path)
| UnpicklerWrapper |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 5171,
"end": 5289
} | class ____(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
| NestingError |
python | pytorch__pytorch | test/jit/test_dtype_analysis.py | {
"start": 10132,
"end": 13188
} | class ____(TestDtypeBase):
def assert_output_dtype_equal(self, expected_res, prop_graph):
actual_dtype = self.node_output_dtypes(prop_graph)
if len(actual_dtype) == 1:
# For len=1, there is no tuple packing for expected_res.
self.assert_tensor_dtype_equal(expected_res, actual_dtype[0])
else:
self.assertEqual(len(expected_res), len(actual_dtype))
for expected, actual in zip(expected_res, actual_dtype):
self.assert_tensor_dtype_equal(expected, actual)
def assert_tensor_dtype_equal(self, tensor_output, graph_dtype):
if not isinstance(tensor_output, torch.Tensor):
return
self.assertEqual(tensor_output.dtype, graph_dtype)
def custom_rules_test_base(self, device, dtype, op, allow_eager_fail=False):
try:
samples = op.sample_inputs(device, dtype, requires_grad=False)
sample_input = first_sample(self, samples)
input_args = [sample_input.input, *sample_input.args]
expected_res = op(*input_args, **sample_input.kwargs)
except Exception as e:
if allow_eager_fail:
return
else:
raise e
func = op.get_op()
traced_fn = create_traced_fn(self, func)
# Have to run the traced function to actually generate the trace
traced_fn(sample_input.input, *sample_input.args, **sample_input.kwargs)
# Run the Dtype Analysis
graph = traced_fn.graph # Note this is a cached graph
input_tensors = [t for t in input_args if isinstance(t, torch.Tensor)]
input_tensors += [
v for v in sample_input.kwargs.values() if isinstance(v, torch.Tensor)
]
self.prop_dtype_on_graph(graph, input_tensors)
self.assert_output_dtype_equal(expected_res, graph)
@ops([op for op in op_db if op.aten_name in custom_rules_works_list])
def test_custom_rules(self, device, dtype, op):
self.custom_rules_test_base(device, dtype, op)
@ops([op for op in op_db if op.aten_name in custom_rules_works_list])
def test_custom_rules_ints(self, device, dtype, op):
# This is done because opinfos currently only runs on floats.
# Return fn, inputs_fn for all
if dtype == torch.float32:
dtype = torch.int32
else:
dtype = torch.int64
# Because ints are not always implemented, we need to allow for eager to fail
self.custom_rules_test_base(device, dtype, op, allow_eager_fail=True)
@expectedFailure
@ops([op for op in op_db if op.aten_name in custom_rules_expected_failure_list])
def test_custom_rules_expected_failure(self, device, dtype, op):
self.custom_rules_test_base(device, dtype, op)
TestDtypeCustomRulesCPU = None
# This creates TestDtypeCustomRulesCPU
instantiate_device_type_tests(TestDtypeCustomRules, globals(), only_for=("cpu",))
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestDtypeCustomRules |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_shape_base.py | {
"start": 20268,
"end": 29866
} | class ____(TestCase):
@pytest.fixture(params=["block", "force_concatenate", "force_slicing"])
def block(self, request):
# blocking small arrays and large arrays go through different paths.
# the algorithm is triggered depending on the number of element
# copies required.
# We define a test fixture that forces most tests to go through
# both code paths.
# Ultimately, this should be removed if a single algorithm is found
# to be faster for both small and large arrays.
def _block_force_concatenate(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_concatenate(arrays, list_ndim, result_ndim)
def _block_force_slicing(arrays):
arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
return _block_slicing(arrays, list_ndim, result_ndim)
if request.param == "force_concatenate":
return _block_force_concatenate
elif request.param == "force_slicing":
return _block_force_slicing
elif request.param == "block":
return block
else:
raise ValueError("Unknown blocking request. There is a typo in the tests.")
def test_returns_copy(self, block):
a = np.eye(3)
b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
def test_block_total_size_estimate(self, block):
_, _, _, total_size = _block_setup([1])
assert total_size == 1
_, _, _, total_size = _block_setup([[1]])
assert total_size == 1
_, _, _, total_size = _block_setup([[1, 1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1], [1]])
assert total_size == 2
_, _, _, total_size = _block_setup([[1, 2], [3, 4]])
assert total_size == 4
def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2], [1, 1, 2, 2]])
result = block([a_2d, b_2d])
assert_equal(desired, result)
def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1], [1, 1], [2, 2], [2, 2]])
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
def test_block_with_1d_arrays_row_wise(self, block):
# # # 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([1, 2, 3, 2, 3, 4])
result = block([a, b])
assert_equal(expected, result)
def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4], [1, 2, 3, 2, 3, 4]])
result = block([[a, b], [a, b]])
assert_equal(expected, result)
def test_block_with_1d_arrays_column_wise(self, block):
# # # 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
expected = np.array([[1, 2, 3], [2, 3, 4]])
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
expected = np.array([[1, 1], [1, 1], [2, 2]])
assert_equal(expected, result)
def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
three_2d = np.array([[3, 3, 3, 3, 3, 3]])
four_1d = np.array([4, 4, 4, 4, 4, 4])
five_0d = np.array(5)
six_1d = np.array([6, 6, 6, 6, 6])
zero_2d = np.zeros((2, 6))
expected = np.array(
[
[1, 1, 1, 2, 2, 2],
[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
)
result = block(
[[one_2d, two_2d], [three_2d], [four_1d], [five_0d, six_1d], [zero_2d]]
)
assert_equal(result, expected)
def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
four = np.array([4, 4, 4])
five = np.array(5)
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
result = block([[block([[one], [three], [four]]), two], [five, six], [zero]])
expected = np.array(
[
[1, 1, 1, 2, 2, 2],
[3, 3, 3, 2, 2, 2],
[4, 4, 4, 2, 2, 2],
[5, 6, 6, 6, 6, 6],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
]
)
assert_equal(result, expected)
def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
a010 = np.ones((2, 3, 2), int) * 3
a001 = np.ones((2, 2, 3), int) * 4
a011 = np.ones((2, 3, 3), int) * 5
a101 = np.ones((3, 2, 3), int) * 6
a110 = np.ones((3, 3, 2), int) * 7
a111 = np.ones((3, 3, 3), int) * 8
result = block(
[
[
[a000, a001],
[a010, a011],
],
[
[a100, a101],
[a110, a111],
],
]
)
expected = array(
[
[
[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
],
[
[1, 1, 4, 4, 4],
[1, 1, 4, 4, 4],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
[3, 3, 5, 5, 5],
],
[
[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
],
[
[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
],
[
[2, 2, 6, 6, 6],
[2, 2, 6, 6, 6],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
[7, 7, 8, 8, 8],
],
]
)
assert_array_equal(result, expected)
def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
assert_raises(ValueError, block, [a, b])
assert_raises(ValueError, block, [b, a])
to_block = [
[np.ones((2, 3)), np.ones((2, 2))],
[np.ones((2, 2)), np.ones((2, 2))],
]
assert_raises(ValueError, block, to_block)
def test_no_lists(self, block):
assert_equal(block(1), np.array(1))
assert_equal(block(np.eye(3)), np.eye(3))
def test_invalid_nesting(self, block):
msg = "depths are mismatched"
assert_raises_regex(ValueError, msg, block, [1, [2]])
assert_raises_regex(ValueError, msg, block, [1, []])
assert_raises_regex(ValueError, msg, block, [[1], 2])
assert_raises_regex(ValueError, msg, block, [[], 2])
assert_raises_regex(
ValueError,
msg,
block,
[[[1], [2]], [[3, 4]], [5]], # missing brackets
)
def test_empty_lists(self, block):
assert_raises_regex(ValueError, "empty", block, [])
assert_raises_regex(ValueError, "empty", block, [[]])
assert_raises_regex(ValueError, "empty", block, [[1], []])
def test_tuple(self, block):
assert_raises_regex(TypeError, "tuple", block, ([1, 2], [3, 4]))
assert_raises_regex(TypeError, "tuple", block, [(1, 2), (3, 4)])
def test_different_ndims(self, block):
a = 1.0
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
result = block([a, b, c])
expected = np.array([[[1.0, 2.0, 2.0, 3.0, 3.0, 3.0]]])
assert_equal(result, expected)
def test_different_ndims_depths(self, block):
a = 1.0
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
result = block([[a, b], [c]])
expected = np.array([[[1.0, 2.0, 2.0], [3.0, 3.0, 3.0], [3.0, 3.0, 3.0]]])
assert_equal(result, expected)
def test_block_memory_order(self, block):
# 3D
arr_c = np.zeros((3,) * 3, order="C")
arr_f = np.zeros((3,) * 3, order="F")
b_c = [[[arr_c, arr_c], [arr_c, arr_c]], [[arr_c, arr_c], [arr_c, arr_c]]]
b_f = [[[arr_f, arr_f], [arr_f, arr_f]], [[arr_f, arr_f], [arr_f, arr_f]]]
assert block(b_c).flags["C_CONTIGUOUS"]
assert block(b_f).flags["F_CONTIGUOUS"]
arr_c = np.zeros((3, 3), order="C")
arr_f = np.zeros((3, 3), order="F")
# 2D
b_c = [[arr_c, arr_c], [arr_c, arr_c]]
b_f = [[arr_f, arr_f], [arr_f, arr_f]]
assert block(b_c).flags["C_CONTIGUOUS"]
assert block(b_f).flags["F_CONTIGUOUS"]
if __name__ == "__main__":
run_tests()
| TestBlock |
python | google__jax | jax/_src/internal_test_util/test_harnesses.py | {
"start": 3402,
"end": 3524
} | class ____(NamedTuple):
"""Descriptor for a static argument.
See description of `Harness`.
"""
value: Any
| StaticArg |
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 25658,
"end": 28783
} | class ____(nn.Module):
def __init__(self, config: EfficientLoFTRConfig):
super().__init__()
self.fine_kernel_size = config.fine_kernel_size
fine_fusion_dims = config.fine_fusion_dims
self.out_conv = nn.Conv2d(
fine_fusion_dims[0], fine_fusion_dims[0], kernel_size=1, stride=1, padding=0, bias=False
)
self.out_conv_layers = nn.ModuleList()
for i in range(1, len(fine_fusion_dims)):
out_conv = EfficientLoFTROutConvBlock(config, fine_fusion_dims[i], fine_fusion_dims[i - 1])
self.out_conv_layers.append(out_conv)
def forward_pyramid(
self,
hidden_states: torch.Tensor,
residual_states: list[torch.Tensor],
) -> torch.Tensor:
hidden_states = self.out_conv(hidden_states)
hidden_states = nn.functional.interpolate(
hidden_states, scale_factor=2.0, mode="bilinear", align_corners=False
)
for i, layer in enumerate(self.out_conv_layers):
hidden_states = layer(hidden_states, residual_states[i])
return hidden_states
def forward(
self,
coarse_features: torch.Tensor,
residual_features: list[torch.Tensor] | tuple[torch.Tensor],
) -> tuple[torch.Tensor, torch.Tensor]:
"""
For each image pair, compute the fine features of pixels.
In both images, compute a patch of fine features center cropped around each coarse pixel.
In the first image, the feature patch is kernel_size large and long.
In the second image, it is (kernel_size + 2) large and long.
"""
batch_size, _, embed_dim, coarse_height, coarse_width = coarse_features.shape
coarse_features = coarse_features.reshape(-1, embed_dim, coarse_height, coarse_width)
residual_features = list(reversed(residual_features))
# 1. Fine feature extraction
fine_features = self.forward_pyramid(coarse_features, residual_features)
_, fine_embed_dim, fine_height, fine_width = fine_features.shape
fine_features = fine_features.reshape(batch_size, 2, fine_embed_dim, fine_height, fine_width)
fine_features_0 = fine_features[:, 0]
fine_features_1 = fine_features[:, 1]
# 2. Unfold all local windows in crops
stride = int(fine_height // coarse_height)
fine_features_0 = nn.functional.unfold(
fine_features_0, kernel_size=self.fine_kernel_size, stride=stride, padding=0
)
_, _, seq_len = fine_features_0.shape
fine_features_0 = fine_features_0.reshape(batch_size, -1, self.fine_kernel_size**2, seq_len)
fine_features_0 = fine_features_0.permute(0, 3, 2, 1)
fine_features_1 = nn.functional.unfold(
fine_features_1, kernel_size=self.fine_kernel_size + 2, stride=stride, padding=1
)
fine_features_1 = fine_features_1.reshape(batch_size, -1, (self.fine_kernel_size + 2) ** 2, seq_len)
fine_features_1 = fine_features_1.permute(0, 3, 2, 1)
return fine_features_0, fine_features_1
@auto_docstring
| EfficientLoFTRFineFusionLayer |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_join.py | {
"start": 11772,
"end": 18110
} | class ____:
def test_join(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
a = frame.loc[frame.index[:5], ["A"]]
b = frame.loc[frame.index[2:], ["B", "C"]]
joined = a.join(b, how="outer").reindex(frame.index)
expected = frame.copy().values.copy()
expected[np.isnan(joined.values)] = np.nan
expected = DataFrame(expected, index=frame.index, columns=frame.columns)
assert not np.isnan(joined.values).all()
tm.assert_frame_equal(joined, expected)
def test_join_segfault(self):
# GH#1532
df1 = DataFrame({"a": [1, 1], "b": [1, 2], "x": [1, 2]})
df2 = DataFrame({"a": [2, 2], "b": [1, 2], "y": [1, 2]})
df1 = df1.set_index(["a", "b"])
df2 = df2.set_index(["a", "b"])
# it works!
for how in ["left", "right", "outer"]:
df1.join(df2, how=how)
def test_join_str_datetime(self):
str_dates = ["20120209", "20120222"]
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
A = DataFrame(str_dates, index=range(2), columns=["aa"])
C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)
tst = A.join(C, on="aa")
assert len(tst.columns) == 3
def test_join_multiindex_leftright(self):
# GH 10741
df1 = DataFrame(
[
["a", "x", 0.471780],
["a", "y", 0.774908],
["a", "z", 0.563634],
["b", "x", -0.353756],
["b", "y", 0.368062],
["b", "z", -1.721840],
["c", "x", 1],
["c", "y", 2],
["c", "z", 3],
],
columns=["first", "second", "value1"],
).set_index(["first", "second"])
df2 = DataFrame([["a", 10], ["b", 20]], columns=["first", "value2"]).set_index(
["first"]
)
exp = DataFrame(
[
[0.471780, 10],
[0.774908, 10],
[0.563634, 10],
[-0.353756, 20],
[0.368062, 20],
[-1.721840, 20],
[1.000000, np.nan],
[2.000000, np.nan],
[3.000000, np.nan],
],
index=df1.index,
columns=["value1", "value2"],
)
# these must be the same results (but columns are flipped)
tm.assert_frame_equal(df1.join(df2, how="left"), exp)
tm.assert_frame_equal(df2.join(df1, how="right"), exp[["value2", "value1"]])
exp_idx = MultiIndex.from_product(
[["a", "b"], ["x", "y", "z"]], names=["first", "second"]
)
exp = DataFrame(
[
[0.471780, 10],
[0.774908, 10],
[0.563634, 10],
[-0.353756, 20],
[0.368062, 20],
[-1.721840, 20],
],
index=exp_idx,
columns=["value1", "value2"],
)
tm.assert_frame_equal(df1.join(df2, how="right"), exp)
tm.assert_frame_equal(df2.join(df1, how="left"), exp[["value2", "value1"]])
def test_join_multiindex_dates(self):
# GH 33692
date = pd.Timestamp(2000, 1, 1).date()
df1_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
df1 = DataFrame({"col1": [0]}, index=df1_index)
df2_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
df2 = DataFrame({"col2": [0]}, index=df2_index)
df3_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
df3 = DataFrame({"col3": [0]}, index=df3_index)
result = df1.join([df2, df3])
expected_index = MultiIndex.from_tuples([(0, date)], names=["index_0", "date"])
expected = DataFrame(
{"col1": [0], "col2": [0], "col3": [0]}, index=expected_index
)
tm.assert_equal(result, expected)
def test_merge_join_different_levels_raises(self):
# GH#9455
# GH 40993: For raising, enforced in 2.0
# first dataframe
df1 = DataFrame(columns=["a", "b"], data=[[1, 11], [0, 22]])
# second dataframe
columns = MultiIndex.from_tuples([("a", ""), ("c", "c1")])
df2 = DataFrame(columns=columns, data=[[1, 33], [0, 44]])
# merge
with pytest.raises(
MergeError, match="Not allowed to merge between different levels"
):
pd.merge(df1, df2, on="a")
# join, see discussion in GH#12219
with pytest.raises(
MergeError, match="Not allowed to merge between different levels"
):
df1.join(df2, on="a")
def test_frame_join_tzaware(self):
tz = zoneinfo.ZoneInfo("US/Central")
test1 = DataFrame(
np.zeros((6, 3)),
index=date_range("2012-11-15 00:00:00", periods=6, freq="100ms", tz=tz),
)
test2 = DataFrame(
np.zeros((3, 3)),
index=date_range("2012-11-15 00:00:00", periods=3, freq="250ms", tz=tz),
columns=range(3, 6),
)
result = test1.join(test2, how="outer")
expected = test1.index.union(test2.index)
tm.assert_index_equal(result.index, expected)
assert result.index.tz.key == "US/Central"
def test_frame_join_categorical_index(self):
# GH 61675
cat_data = pd.Categorical(
[3, 4],
categories=pd.Series([2, 3, 4, 5], dtype="Int64"),
ordered=True,
)
values1 = "a b".split()
values2 = "foo bar".split()
df1 = DataFrame({"hr": cat_data, "values1": values1}).set_index("hr")
df2 = DataFrame({"hr": cat_data, "values2": values2}).set_index("hr")
df1.columns = pd.CategoricalIndex([4], dtype=cat_data.dtype, name="other_hr")
df2.columns = pd.CategoricalIndex([3], dtype=cat_data.dtype, name="other_hr")
df_joined = df1.join(df2)
expected = DataFrame(
{"hr": cat_data, "values1": values1, "values2": values2}
).set_index("hr")
expected.columns = pd.CategoricalIndex(
[4, 3], dtype=cat_data.dtype, name="other_hr"
)
tm.assert_frame_equal(df_joined, expected)
| TestDataFrameJoin |
python | jamielennox__requests-mock | tests/test_request.py | {
"start": 618,
"end": 4842
} | class ____(base.TestCase):
def setUp(self):
super(RequestTests, self).setUp()
self.mocker = requests_mock.Mocker()
self.addCleanup(self.mocker.stop)
self.mocker.start()
def do_request(self, **kwargs):
method = kwargs.pop('method', 'GET')
url = kwargs.pop('url', 'http://test.example.com/path')
status_code = kwargs.pop('status_code', 200)
data = uuid.uuid4().hex
m = self.mocker.register_uri(method,
url,
text=data,
status_code=status_code)
resp = requests.request(method, url, **kwargs)
self.assertEqual(status_code, resp.status_code)
self.assertEqual(data, resp.text)
self.assertTrue(m.called_once)
return m.last_request
def test_base_params(self):
req = self.do_request(method='GET', status_code=200)
self.assertIs(None, req.allow_redirects)
self.assertIs(None, req.timeout)
self.assertIs(True, req.verify)
self.assertIs(None, req.cert)
self.assertIs(False, req.stream)
# actually it's an OrderedDict, but equality works fine
# Skipping this check - it's problematic based on people's environments
# and in CI systems where there are proxies set up at the environment
# level. gh #127
# self.assertEqual({}, req.proxies)
def test_allow_redirects(self):
req = self.do_request(allow_redirects=False, status_code=300)
self.assertFalse(req.allow_redirects)
def test_timeout(self):
timeout = 300
req = self.do_request(timeout=timeout)
self.assertEqual(timeout, req.timeout)
def test_verify_false(self):
verify = False
req = self.do_request(verify=verify)
self.assertIs(verify, req.verify)
def test_verify_path(self):
verify = '/path/to/cacerts.pem'
req = self.do_request(verify=verify)
self.assertEqual(verify, req.verify)
def test_stream(self):
req = self.do_request()
self.assertIs(False, req.stream)
req = self.do_request(stream=False)
self.assertIs(False, req.stream)
req = self.do_request(stream=True)
self.assertIs(True, req.stream)
def test_certs(self):
cert = ('/path/to/cert.pem', 'path/to/key.pem')
req = self.do_request(cert=cert)
self.assertEqual(cert, req.cert)
self.assertTrue(req.verify)
def test_proxies(self):
proxies = {'http': 'foo.bar:3128',
'http://host.name': 'foo.bar:4012'}
req = self.do_request(proxies=proxies)
self.assertEqual(proxies, req.proxies)
self.assertIsNot(proxies, req.proxies)
def test_hostname_port_http(self):
req = self.do_request(url='http://host.example.com:81/path')
self.assertEqual('host.example.com:81', req.netloc)
self.assertEqual('host.example.com', req.hostname)
self.assertEqual(81, req.port)
def test_hostname_port_https(self):
req = self.do_request(url='https://host.example.com:8080/path')
self.assertEqual('host.example.com:8080', req.netloc)
self.assertEqual('host.example.com', req.hostname)
self.assertEqual(8080, req.port)
def test_hostname_default_port_http(self):
req = self.do_request(url='http://host.example.com/path')
self.assertEqual('host.example.com', req.netloc)
self.assertEqual('host.example.com', req.hostname)
self.assertEqual(80, req.port)
def test_hostname_default_port_https(self):
req = self.do_request(url='https://host.example.com/path')
self.assertEqual('host.example.com', req.netloc)
self.assertEqual('host.example.com', req.hostname)
self.assertEqual(443, req.port)
def test_to_string(self):
req = self.do_request(url='https://host.example.com/path')
self.assertEqual('GET https://host.example.com/path', str(req))
def test_empty_query_string(self):
req = self.do_request(url='https://host.example.com/path?key')
self.assertEqual([''], req.qs['key'])
| RequestTests |
python | django__django | django/db/models/query.py | {
"start": 2925,
"end": 5821
} | class ____(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
fetch_mode = queryset._fetch_mode
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
)
select, klass_info, annotation_col_map = (
compiler.select,
compiler.klass_info,
compiler.annotation_col_map,
)
model_cls = klass_info["model"]
select_fields = klass_info["select_fields"]
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [
f[0].target.attname for f in select[model_fields_start:model_fields_end]
]
related_populators = get_related_populators(klass_info, select, db, fetch_mode)
known_related_objects = [
(
field,
related_objs,
operator.attrgetter(
*[
(
field.attname
if from_field == "self"
else queryset.model._meta.get_field(from_field).attname
)
for from_field in field.from_fields
]
),
)
for field, related_objs in queryset._known_related_objects.items()
]
peers = []
for row in compiler.results_iter(results):
obj = model_cls.from_db(
db,
init_list,
row[model_fields_start:model_fields_end],
fetch_mode=fetch_mode,
)
if fetch_mode.track_peers:
peers.append(weak_ref(obj))
obj._state.peers = peers
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
| ModelIterable |
python | plotly__plotly.py | plotly/graph_objs/layout/smith/realaxis/_tickfont.py | {
"start": 235,
"end": 9930
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.smith.realaxis"
_path_str = "layout.smith.realaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.layout.smith.r
ealaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.smith.realaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.smith.realaxis.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/custom_param_types.py | {
"start": 1528,
"end": 2514
} | class ____(click.Choice):
"""
Nicer formatted choice class for click. We have a lot of parameters sometimes, and formatting
them without spaces causes ugly artifacts as the words are broken. This one adds spaces so
that when the long list of choices does not wrap on words.
"""
name = "BetterChoice"
def __init__(self, *args):
super().__init__(*args)
self.all_choices: Sequence[str] = self.choices
def get_metavar(self, param, ctx=None) -> str:
choices_str = " | ".join(self.all_choices)
# Use curly braces to indicate a required argument.
if param.required and param.param_type_name == "argument":
return f"{{{choices_str}}}"
if param.param_type_name == "argument" and param.nargs == -1:
# avoid double [[ for multiple args
return f"{choices_str}"
# Use square braces to indicate an option or optional argument.
return f"[{choices_str}]"
| BetterChoice |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/reshape_op_test.py | {
"start": 1232,
"end": 8533
} | class ____(test.TestCase):
def _testReshape(self, x, y, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
np_ans = x.reshape(y)
tf_ans = array_ops.reshape(x, y)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
# Repeat with an int64 shape tensor.
y64 = constant_op.constant(y, dtype=dtypes.int64)
tf_ans = array_ops.reshape(x, y64)
out = self.evaluate(tf_ans)
self.assertEqual(tf_ans.get_shape(), out.shape)
self.assertShapeEqual(np_ans, tf_ans)
def _testZeroDimReshape(self, x, shape, expected, use_gpu=False):
with self.cached_session(use_gpu=use_gpu):
y = array_ops.reshape(x, shape)
out = self.evaluate(y)
self.assertEqual(expected, out.shape)
# Repeat with an int64 shape tensor.
shape64 = constant_op.constant(shape, dtype=dtypes.int64)
y = array_ops.reshape(x, shape64)
out = self.evaluate(y)
self.assertEqual(expected, out.shape)
def _testBothReshape(self, x, y):
self._testReshape(x, y, False)
self._testReshape(x, y, True)
def testBoolBasic(self):
x = np.arange(1., 7.).reshape([1, 6]) > 3
self._testBothReshape(x, [2, 3])
def testFloatBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float32)
self._testBothReshape(x, [2, 3])
def testFloat16Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float16)
self._testBothReshape(x, [2, 3])
def testBfloat16Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(dtypes.bfloat16.as_numpy_dtype)
self._testBothReshape(x, [2, 3])
def testDoubleBasic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.float64)
self._testBothReshape(x, [2, 3])
def testInt32Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.int32)
self._testBothReshape(x, [2, 3])
def testComplex64Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex64)
self._testBothReshape(x, [2, 3])
def testComplex128Basic(self):
x = np.arange(1., 7.).reshape([1, 6]).astype(np.complex128)
self._testBothReshape(x, [2, 3])
def testFloatReshapeThreeDimensions(self):
x = np.arange(1., 28.).reshape([1, 27]).astype(np.float32)
self._testBothReshape(x, [3, 3, 3])
def testFloatUnspecifiedDimOnly(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1])
def testFloatUnspecifiedDimBegin(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [-1, 2])
def testFloatUnspecifiedDimEnd(self):
x = np.arange(1., 7.).reshape([6]).astype(np.float32)
self._testBothReshape(x, [3, -1])
def testZeroDimBasic(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [0, 2, 3])
def testZeroDimReshapeR1(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [-1])
def testZeroDimReshapeR3(self):
x = np.zeros([0, 6]).astype(np.float32)
self._testBothReshape(x, [-1, 2, 3])
# TODO(vrv): Add tests for failure conditions once python test_util
# reports errors.
def testFloatReshapeGradThreeDimensions(self):
x = np.arange(1., 25.).reshape([2, 3, 4]).astype(np.float32)
input_tensor = constant_op.constant(x)
def reshape(x):
return array_ops.reshape(x, [1, 8, 3])
with self.cached_session():
err = gradient_checker_v2.max_error(
*gradient_checker_v2.compute_gradient(reshape, [input_tensor]))
self.assertLess(err, 1e-3)
def testFloatEmpty(self):
x = np.empty((0, 0, 0, 0), dtype=np.float32)
self._testBothReshape(x, [1, 2, 3, 0])
self._testBothReshape(x, [1, 0, 0, 4])
self._testBothReshape(x, [0, 0, 0, 0])
self._testBothReshape(x, [1, 2, 0])
self._testBothReshape(x, [0, 0, 0])
self._testBothReshape(x, [1, -1, 5])
def testZeroDimWithUnspecifiedDim(self):
for use_gpu in (True, False):
self._testZeroDimReshape(x=np.zeros([0, 6]).astype(np.float32),
shape=[0, -1, 3],
expected=(0, 2, 3),
use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testErrors(self):
y = constant_op.constant(0.0, shape=[23, 29, 31])
with self.assertRaisesRegex(ValueError, "must be evenly divisible by 17"):
array_ops.reshape(y, [17, -1])
z = constant_op.constant(0.0, shape=[32, 128])
with self.assertRaisesRegex(ValueError,
"Cannot reshape a tensor with 4096 elements"):
array_ops.reshape(z, [4095])
def testPartialShapes(self):
# Testing unknown shapes in graph building.
with ops.Graph().as_default():
x = array_ops.placeholder(dtypes.float32)
# Unknown input shape, partial new shape.
y = array_ops.reshape(x, [1, 1, -1, 1])
self.assertEqual([1, 1, None, 1], y.get_shape().as_list())
# Unknown input shape, unknown new shape.
y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32))
self.assertEqual(None, y.get_shape().ndims)
# Unknown input shape, known rank for new shape.
y = array_ops.reshape(x, array_ops.placeholder(dtypes.int32, shape=(3,)))
self.assertEqual([None, None, None], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.stack()`.
y = array_ops.reshape(x, [array_ops.placeholder(dtypes.int32), 37])
self.assertEqual([None, 37], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.concat()`.
y = array_ops.reshape(
x,
array_ops.concat(
[array_ops.placeholder(
dtypes.int32, shape=(2,)), [37, 42]], 0))
self.assertEqual([None, None, 37, 42], y.get_shape().as_list())
# Unknown input shape, partial new shape using `tf.shape()`.
y = array_ops.reshape(
x,
array_ops.shape(
array_ops.placeholder(
dtypes.float32, shape=[None, 37, None])))
self.assertEqual([None, 37, None], y.get_shape().as_list())
def testTensorShape(self):
x = array_ops.zeros([1, 100])
y = array_ops.reshape(
x, [tensor_shape.Dimension(100),
tensor_shape.Dimension(1)])
self.assertEqual([100, 1], y.get_shape().as_list())
y = array_ops.reshape(x, tensor_shape.TensorShape([100, 1]))
self.assertEqual([100, 1], y.get_shape().as_list())
def testInt64Shape(self):
with ops.device("/device:CPU:0"):
x = array_ops.zeros([50000, 50000], dtype=dtypes.bool)
# Provide dimension larger than int32
y = array_ops.reshape(x, [50000**2])
self.assertEqual([50000**2], y.get_shape().as_list())
# Even if first dimension is within int32, ensure we correctly go to int64
y = array_ops.reshape(x, [1, 50000**2])
self.assertEqual([1, 50000**2], y.get_shape().as_list())
@test_util.run_v2_only
def testTooLargeShape(self):
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"too many elements"):
x = array_ops.reshape([1], np.array([21943, 45817, 30516, 61760, 38987]))
self.evaluate(x)
if __name__ == "__main__":
test.main()
| ReshapeTest |
python | huggingface__transformers | src/transformers/tokenization_utils_base.py | {
"start": 5020,
"end": 5322
} | class ____(NamedTuple):
"""
Character span in the original string.
Args:
start (`int`): Index of the first character in the original string.
end (`int`): Index of the character following the last character in the original string.
"""
start: int
end: int
| CharSpan |
python | huggingface__transformers | tests/pipelines/test_pipelines_text2text_generation.py | {
"start": 969,
"end": 4776
} | class ____(unittest.TestCase):
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def get_test_pipeline(
self,
model,
tokenizer=None,
image_processor=None,
feature_extractor=None,
processor=None,
dtype="float32",
):
generator = Text2TextGenerationPipeline(
model=model,
tokenizer=tokenizer,
feature_extractor=feature_extractor,
image_processor=image_processor,
processor=processor,
dtype=dtype,
max_new_tokens=20,
)
return generator, ["Something to write", "Something else"]
def run_pipeline_test(self, generator, _):
outputs = generator("Something there")
self.assertEqual(outputs, [{"generated_text": ANY(str)}])
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
outputs = generator(
["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
)
self.assertEqual(
outputs,
[
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
[{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
],
)
with self.assertRaises(TypeError):
generator(4)
@require_torch
def test_small_model_pt(self):
generator = pipeline(
"text2text-generation",
model="patrickvonplaten/t5-tiny-random",
num_beams=1,
max_new_tokens=9,
)
# do_sample=False necessary for reproducibility
outputs = generator("Something there", do_sample=False)
self.assertEqual(outputs, [{"generated_text": ""}])
num_return_sequences = 3
outputs = generator(
"Something there",
num_return_sequences=num_return_sequences,
num_beams=num_return_sequences,
)
target_outputs = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(outputs, target_outputs)
outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
self.assertEqual(
outputs,
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
)
generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
generator.tokenizer.pad_token = "<pad>"
outputs = generator(
["This is a test", "This is a second test"],
do_sample=True,
num_return_sequences=2,
batch_size=2,
return_tensors=True,
)
self.assertEqual(
outputs,
[
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
],
)
| Text2TextGenerationPipelineTests |
python | fluentpython__example-code-2e | 05-data-classes/dataclass/club.py | {
"start": 53,
"end": 133
} | class ____:
name: str
guests: list = field(default_factory=list)
| ClubMember |
python | pytorch__pytorch | torch/_inductor/memory.py | {
"start": 801,
"end": 1016
} | class ____:
size_alloc: int = 0
size_free: int = 0
succ_nodes: OrderedSet[BaseSchedulerNode] = dataclasses.field(
default_factory=OrderedSet
)
@dataclasses.dataclass
| MemoryPlanningInfoForBuffer |
python | mkdocs__mkdocs | mkdocs/tests/config/config_options_tests.py | {
"start": 67464,
"end": 68068
} | class ____:
def __init__(self, name, cls):
self.name = name
self.cls = cls
def load(self):
return self.cls
@mock.patch(
'mkdocs.plugins.entry_points',
mock.Mock(
return_value=[
FakeEntryPoint('sample', FakePlugin),
FakeEntryPoint('sample2', FakePlugin2),
FakeEntryPoint('sample-e', EnabledPlugin),
FakeEntryPoint('readthedocs/sub_plugin', ThemePlugin),
FakeEntryPoint('overridden', FakePlugin2),
FakeEntryPoint('readthedocs/overridden', ThemePlugin2),
]
),
)
| FakeEntryPoint |
python | pytorch__pytorch | torch/_inductor/compile_fx_subproc.py | {
"start": 724,
"end": 3171
} | class ____(_OutOfProcessFxCompile):
@override
def _send_to_child_async(
self, input: _WireProtocolPickledInput
) -> Future[_WireProtocolPickledOutput]:
# TODO: Do we need to copy across some kind of logging IDs? (ChromiumEventLogger)
pool = self.process_pool()
# TODO: This is probably the wrong thing to do long-term - but for now
# let's share the cache so we can identify tests broken by this later.
env_vars = ["TORCHINDUCTOR_CACHE_DIR", "TRITON_CACHE_DIR"]
extra_env = {v: os.environ[v] for v in env_vars if v in os.environ}
return pool.submit(
_SubprocessFxCompile._run_in_child_subprocess, input, extra_env
)
@staticmethod
@functools.cache
def process_pool() -> AnyPool:
pool = SubprocPool(
# TODO: Consider raising this limit if we start using async w/
# subprocess and want to compile multiple graphs in parallel.
1,
kind=SubprocKind.SPAWN,
)
atexit.register(pool.shutdown)
return pool
@classmethod
def _run_in_child_subprocess(
cls,
pickled_input: _WireProtocolPickledInput,
extra_env: Optional[Mapping[str, str]],
) -> _WireProtocolPickledOutput:
# TODO: In subprocess mode we need to clear the inductor caches.
# The problem:
# 1. We compile in worker A which fills stuff in tmpdir
# 2. parent clears inductor caches which deletes tmpdirs and tells
# cpp_prefix_path() to clear its LRU cache
# 3. We compile a second time in subproc A - but since we never told
# cpp_prefix_path() in worker A to clear its LRU it thinks the
# tmpdir still exists and fails to compile.
#
# TODO: We probably should be using a separate tmpdir in the worker
# anyway... but we should probably still respect clear_caches()
# in the parent... maybe?
#
# TODO: We could be less aggressive by keeping a clock which gets
# incremented when we clear the cache, send the clock to the worker and
# only clear caches if the clock changed since last time.
#
clear_caches()
torch._inductor.metrics.reset()
# TODO: turn off config.fx_graph_async_compile
result = cls._run_in_child(pickled_input, extra_env)
return result
| _SubprocessFxCompile |
python | python-markdown__markdown | markdown/postprocessors.py | {
"start": 1612,
"end": 2219
} | class ____(util.Processor):
"""
Postprocessors are run after the ElementTree it converted back into text.
Each Postprocessor implements a `run` method that takes a pointer to a
text string, modifies it as necessary and returns a text string.
Postprocessors must extend `Postprocessor`.
"""
def run(self, text: str) -> str:
"""
Subclasses of `Postprocessor` should implement a `run` method, which
takes the html document as a single text string and returns a
(possibly modified) string.
"""
pass # pragma: no cover
| Postprocessor |
python | astropy__astropy | astropy/io/ascii/sextractor.py | {
"start": 4634,
"end": 6254
} | class ____(core.BaseReader):
"""SExtractor format table.
SExtractor is a package for faint-galaxy photometry (Bertin & Arnouts
1996, A&A Supp. 317, 393.)
See: https://sextractor.readthedocs.io/en/latest/
Example::
# 1 NUMBER
# 2 ALPHA_J2000
# 3 DELTA_J2000
# 4 FLUX_RADIUS
# 7 MAG_AUTO [mag]
# 8 X2_IMAGE Variance along x [pixel**2]
# 9 X_MAMA Barycenter position along MAMA x axis [m**(-6)]
# 10 MU_MAX Peak surface brightness above background [mag * arcsec**(-2)]
1 32.23222 10.1211 0.8 1.2 1.4 18.1 1000.0 0.00304 -3.498
2 38.12321 -88.1321 2.2 2.4 3.1 17.0 1500.0 0.00908 1.401
Note the skipped numbers since flux_radius has 3 columns. The three
FLUX_RADIUS columns will be named FLUX_RADIUS, FLUX_RADIUS_1, FLUX_RADIUS_2
Also note that a post-ID description (e.g. "Variance along x") is optional
and that units may be specified at the end of a line in brackets.
"""
_format_name = "sextractor"
_io_registry_can_write = False
_description = "SExtractor format table"
header_class = SExtractorHeader
data_class = SExtractorData
inputter_class = core.ContinuationLinesInputter
def read(self, table):
"""
Read input data (file-like object, filename, list of strings, or
single string) into a Table and return the result.
"""
out = super().read(table)
# remove the comments
if "comments" in out.meta:
del out.meta["comments"]
return out
def write(self, table):
raise NotImplementedError
| SExtractor |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 2675,
"end": 3069
} | class ____(TestCase):
def assertTreeEqual(self, tree1, tree2):
if not isinstance(tree1, str):
tree1 = get_tree_details(tree1)
tree1 = tree_details(tree1)
if not isinstance(tree2, str):
tree2 = get_tree_details(tree2)
tree2 = tree_details(tree2)
return self.assertEqual(tree1, tree2, f"\n{tree1!r}\n != \n{tree2!r}")
| TreeTestCase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.