language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | vyperlang__vyper | vyper/semantics/environment.py | {
"start": 272,
"end": 414
} | class ____(VyperType):
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(id(self))
| _EnvType |
python | cython__cython | Cython/Debugger/Tests/test_libcython_in_gdb.py | {
"start": 10101,
"end": 10826
} | class ____(DebugTestCase):
def test_backtrace(self):
libcython.parameters.colorize_code.value = False
self.break_and_run('os.path.join("foo", "bar")')
def match_backtrace_output(result):
assert re.search(r'\#\d+ *0x.* in spam\(\) at .*codefile\.pyx:22',
result), result
assert 'os.path.join("foo", "bar")' in result, result
result = gdb.execute('cy bt', to_string=True)
match_backtrace_output(result)
result = gdb.execute('cy bt -a', to_string=True)
match_backtrace_output(result)
# Apparently not everyone has main()
# assert re.search(r'\#0 *0x.* in main\(\)', result), result
| TestBacktrace |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_getitem.py | {
"start": 6872,
"end": 11531
} | class ____:
def test_getitem_partial_str_slice_with_datetimeindex(self):
# GH#34860
arr = date_range("1/1/2008", "1/1/2009")
ser = arr.to_series()
result = ser["2008"]
rng = date_range(start="2008-01-01", end="2008-12-31")
expected = Series(rng, index=rng)
tm.assert_series_equal(result, expected)
def test_getitem_slice_strings_with_datetimeindex(self):
idx = DatetimeIndex(
["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"]
)
ts = Series(np.random.default_rng(2).standard_normal(len(idx)), index=idx)
result = ts["1/2/2000":]
expected = ts[1:]
tm.assert_series_equal(result, expected)
result = ts["1/2/2000":"1/3/2000"]
expected = ts[1:4]
tm.assert_series_equal(result, expected)
def test_getitem_partial_str_slice_with_timedeltaindex(self):
rng = timedelta_range("1 day 10:11:12", freq="h", periods=500)
ser = Series(np.arange(len(rng)), index=rng)
result = ser["5 day":"6 day"]
expected = ser.iloc[86:134]
tm.assert_series_equal(result, expected)
result = ser["5 day":]
expected = ser.iloc[86:]
tm.assert_series_equal(result, expected)
result = ser[:"6 day"]
expected = ser.iloc[:134]
tm.assert_series_equal(result, expected)
def test_getitem_partial_str_slice_high_reso_with_timedeltaindex(self):
# higher reso
rng = timedelta_range("1 day 10:11:12", freq="us", periods=2000)
ser = Series(np.arange(len(rng)), index=rng)
result = ser["1 day 10:11:12":]
expected = ser.iloc[0:]
tm.assert_series_equal(result, expected)
result = ser["1 day 10:11:12.001":]
expected = ser.iloc[1000:]
tm.assert_series_equal(result, expected)
result = ser["1 days, 10:11:12.001001"]
assert result == ser.iloc[1001]
def test_getitem_slice_2d(self, datetime_series):
# GH#30588 multi-dimensional indexing deprecated
with pytest.raises(ValueError, match="Multi-dimensional indexing"):
datetime_series[:, np.newaxis]
def test_getitem_median_slice_bug(self):
index = date_range("20090415", "20090519", freq="2B")
ser = Series(np.random.default_rng(2).standard_normal(13), index=index)
indexer = [slice(6, 7, None)]
msg = "Indexing with a single-item list"
with pytest.raises(ValueError, match=msg):
# GH#31299
ser[indexer]
# but we're OK with a single-element tuple
result = ser[(indexer[0],)]
expected = ser[indexer[0]]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"slc, positions",
[
[slice(date(2018, 1, 1), None), [0, 1, 2]],
[slice(date(2019, 1, 2), None), [2]],
[slice(date(2020, 1, 1), None), []],
[slice(None, date(2020, 1, 1)), [0, 1, 2]],
[slice(None, date(2019, 1, 1)), [0]],
],
)
def test_getitem_slice_date(self, slc, positions):
# https://github.com/pandas-dev/pandas/issues/31501
ser = Series(
[0, 1, 2],
DatetimeIndex(["2019-01-01", "2019-01-01T06:00:00", "2019-01-02"]),
)
msg = "Slicing with a datetime.date object is deprecated"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
result = ser[slc]
expected = ser.take(positions)
tm.assert_series_equal(result, expected)
def test_getitem_slice_float_raises(self, datetime_series):
msg = (
"cannot do slice indexing on DatetimeIndex with these indexers "
r"\[{key}\] of type float"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0]
def test_getitem_slice_bug(self):
ser = Series(range(10), index=list(range(10)))
result = ser[-12:]
tm.assert_series_equal(result, ser)
result = ser[-7:]
tm.assert_series_equal(result, ser[3:])
result = ser[:-12]
tm.assert_series_equal(result, ser[:0])
def test_getitem_slice_integers(self):
ser = Series(
np.random.default_rng(2).standard_normal(8),
index=[2, 4, 6, 8, 10, 12, 14, 16],
)
result = ser[:4]
expected = Series(ser.values[:4], index=[2, 4, 6, 8])
tm.assert_series_equal(result, expected)
| TestSeriesGetitemSlices |
python | numpy__numpy | numpy/f2py/symbolic.py | {
"start": 2608,
"end": 2793
} | class ____(Enum):
"""
Used in Op.APPLY expression to specify the function part.
"""
POS = 1
NEG = 2
ADD = 3
SUB = 4
MUL = 5
DIV = 6
POW = 7
| ArithOp |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC202_google.py | {
"start": 854,
"end": 1233
} | class ____(metaclass=abc.abcmeta):
@abc.abstractmethod
def f(self):
"""Lorem ipsum
Returns:
dict: The values
"""
return
# DOC202 -- never explicitly returns anything, just short-circuits
def foo(s: str, condition: bool):
"""Fooey things.
Returns:
None
"""
if not condition:
return
print(s)
| A |
python | huggingface__transformers | src/transformers/models/convnextv2/configuration_convnextv2.py | {
"start": 912,
"end": 5564
} | class ____(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ConvNextV2Model`]. It is used to instantiate an
ConvNeXTV2 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the ConvNeXTV2
[facebook/convnextv2-tiny-1k-224](https://huggingface.co/facebook/convnextv2-tiny-1k-224) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
patch_size (`int`, *optional*, defaults to 4):
Patch size to use in the patch embedding layer.
num_stages (`int`, *optional*, defaults to 4):
The number of stages in the model.
hidden_sizes (`list[int]`, *optional*, defaults to `[96, 192, 384, 768]`):
Dimensionality (hidden size) at each stage.
depths (`list[int]`, *optional*, defaults to `[3, 3, 9, 3]`):
Depth (number of blocks) for each stage.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in each block. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
drop_path_rate (`float`, *optional*, defaults to 0.0):
The drop rate for stochastic depth.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import ConvNeXTV2Config, ConvNextV2Model
>>> # Initializing a ConvNeXTV2 convnextv2-tiny-1k-224 style configuration
>>> configuration = ConvNeXTV2Config()
>>> # Initializing a model (with random weights) from the convnextv2-tiny-1k-224 style configuration
>>> model = ConvNextV2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "convnextv2"
def __init__(
self,
num_channels=3,
patch_size=4,
num_stages=4,
hidden_sizes=None,
depths=None,
hidden_act="gelu",
initializer_range=0.02,
layer_norm_eps=1e-12,
drop_path_rate=0.0,
image_size=224,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.patch_size = patch_size
self.num_stages = num_stages
self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
self.depths = [3, 3, 9, 3] if depths is None else depths
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.drop_path_rate = drop_path_rate
self.image_size = image_size
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
__all__ = ["ConvNextV2Config"]
| ConvNextV2Config |
python | getsentry__sentry | src/sentry/plugins/base/view.py | {
"start": 166,
"end": 1026
} | class ____:
"""
A mix-in which provides a render method which returns a special object to
enable embedding of content within base-views.
"""
def redirect(self, url: str) -> HttpResponseRedirect:
"""
Returns a redirect response type.
"""
return HttpResponseRedirect(url)
def render(self, template: str, context: dict[str, Any] | None = None) -> DeferredResponse:
"""
Given a template name, and an optional context (dictionary), returns a
ready-to-render response.
Default context includes the plugin instance.
>>> self.render('template.html', {'hello': 'world'})
"""
if context is None:
context = {}
context["plugin"] = self
return DeferredResponse(template, context)
__all__ = ["PluggableViewMixin"]
| PluggableViewMixin |
python | walkccc__LeetCode | solutions/1750. Minimum Length of String After Deleting Similar Ends/1750.py | {
"start": 0,
"end": 257
} | class ____:
def minimumLength(self, s: str) -> int:
i = 0
j = len(s) - 1
while i < j and s[i] == s[j]:
c = s[i]
while i <= j and s[i] == c:
i += 1
while i <= j and s[j] == c:
j -= 1
return j - i + 1
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataproc.py | {
"start": 122443,
"end": 123919
} | class ____:
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook):
op = DataprocCreateWorkflowTemplateOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_REGION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
template=WORKFLOW_TEMPLATE,
)
op.execute(context=MagicMock())
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.create_workflow_template.assert_called_once_with(
region=GCP_REGION,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
template=WORKFLOW_TEMPLATE,
)
def test_missing_region_parameter(self):
with pytest.raises((TypeError, AirflowException), match="missing keyword argument 'region'"):
DataprocCreateWorkflowTemplateOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
project_id=GCP_PROJECT,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
template=WORKFLOW_TEMPLATE,
)
| TestDataprocCreateWorkflowTemplateOperator |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 25148,
"end": 36275
} | class ____(Request):
"""
Create a new model not associated with a task
:param uri: URI for the model
:type uri: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Case insensitive.
Should be identical to the framework of the task which created the model.
:type framework: str
:param design: Json[d] object representing the model design. Should be
identical to the network design of the task which created the model
:type design: dict
:param labels: Json object
:type labels: dict
:param ready: Indication if the model is final and can be used by other tasks.
Default is false.
:type ready: bool
:param public: Create a public model Default is false.
:type public: bool
:param project: Project to which to model belongs
:type project: str
:param parent: Parent model
:type parent: str
:param task: Associated task ID
:type task: str
"""
_service = "models"
_action = "create"
_version = "2.13"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": "string",
},
"type": {
"description": "The type of the metadata item",
"type": "string",
},
"value": {
"description": "The value stored in the metadata item",
"type": "string",
},
},
"type": "object",
}
},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"design": {
"additionalProperties": True,
"description": "Json[d] object representing the model design. Should be identical to the network design of the task which created the model",
"type": "object",
},
"framework": {
"description": "Framework on which the model is based. Case insensitive. Should be identical to the framework of the task which created the model.",
"type": "string",
},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object",
"type": "object",
},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"parent": {"description": "Parent model", "type": "string"},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"public": {
"default": False,
"description": "Create a public model Default is false.",
"type": "boolean",
},
"ready": {
"default": False,
"description": "Indication if the model is final and can be used by other tasks. Default is false.",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"uri": {"description": "URI for the model", "type": "string"},
"metadata": {
"type": "array",
"items": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
},
},
"required": ["uri", "name"],
"type": "object",
}
def __init__(
self,
uri: str,
name: str,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
ready: Optional[bool] = False,
public: Optional[bool] = False,
project: Optional[str] = None,
parent: Optional[str] = None,
task: Optional[str] = None,
metadata: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.uri = uri
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.ready = ready
self.public = public
self.project = project
self.parent = parent
self.task = task
self.metadata = metadata
@schema_property("uri")
def uri(self) -> str:
return self._property_uri
@uri.setter
def uri(self, value: str) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("public")
def public(self) -> Optional[bool]:
return self._property_public
@public.setter
def public(self, value: Optional[bool]) -> None:
if value is None:
self._property_public = None
return
self.assert_isinstance(value, "public", (bool,))
self._property_public = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("metadata")
def metadata(self) -> Optional[List[Any]]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetadataItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metadata", MetadataItem, is_array=True)
self._property_metadata = value
| CreateRequest |
python | pennersr__django-allauth | allauth/socialaccount/providers/soundcloud/provider.py | {
"start": 227,
"end": 450
} | class ____(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("permalink_url")
def get_avatar_url(self):
return self.account.extra_data.get("avatar_url")
| SoundCloudAccount |
python | doocs__leetcode | solution/0300-0399/0352.Data Stream as Disjoint Intervals/Solution.py | {
"start": 0,
"end": 1085
} | class ____:
def __init__(self):
self.mp = SortedDict()
def addNum(self, val: int) -> None:
n = len(self.mp)
ridx = self.mp.bisect_right(val)
lidx = n if ridx == 0 else ridx - 1
keys = self.mp.keys()
values = self.mp.values()
if (
lidx != n
and ridx != n
and values[lidx][1] + 1 == val
and values[ridx][0] - 1 == val
):
self.mp[keys[lidx]][1] = self.mp[keys[ridx]][1]
self.mp.pop(keys[ridx])
elif lidx != n and val <= values[lidx][1] + 1:
self.mp[keys[lidx]][1] = max(val, self.mp[keys[lidx]][1])
elif ridx != n and val >= values[ridx][0] - 1:
self.mp[keys[ridx]][0] = min(val, self.mp[keys[ridx]][0])
else:
self.mp[val] = [val, val]
def getIntervals(self) -> List[List[int]]:
return list(self.mp.values())
# # Your SummaryRanges object will be instantiated and called as such:
# # obj = SummaryRanges()
# # obj.addNum(val)
# # param_2 = obj.getIntervals()
| SummaryRanges |
python | PrefectHQ__prefect | tests/cli/deployment/test_deployment_cli.py | {
"start": 42249,
"end": 44214
} | class ____:
def test_delete_single_deployment(self, flojo_deployment: DeploymentResponse):
invoke_and_assert(
[
"deployment",
"delete",
f"rence-griffith/{flojo_deployment.name}",
],
expected_code=0,
)
@pytest.fixture
async def setup_many_deployments(
self,
prefect_client: PrefectClient,
flojo_deployment: DeploymentResponse,
):
for i in range(3):
await prefect_client.create_deployment(
flow_id=flojo_deployment.flow_id,
name=f"test-deployment-{i}",
)
@pytest.mark.usefixtures("setup_many_deployments")
async def test_delete_all_deployments(self, prefect_client: PrefectClient):
deployments = await prefect_client.read_deployments()
assert len(deployments) > 0
await run_sync_in_worker_thread(
invoke_and_assert,
["deployment", "delete", "--all"],
expected_code=0,
)
deployments = await prefect_client.read_deployments()
assert len(deployments) == 0
@pytest.mark.usefixtures("setup_many_deployments", "interactive_console")
def test_delete_all_deployments_needs_confirmation_with_interactive_console(
self,
):
invoke_and_assert(
["deployment", "delete", "--all"],
expected_code=0,
user_input="y",
expected_output_contains=[
"Are you sure you want to delete",
"Deleted",
"deployments",
],
)
def test_delete_all_deployments_fails_if_name_or_id_provided(self):
invoke_and_assert(
["deployment", "delete", "--all", "test-deployment"],
expected_code=1,
expected_output_contains="Cannot provide a deployment name or id when deleting all deployments.",
)
| TestDeploymentDelete |
python | Textualize__textual | src/textual/_animator.py | {
"start": 1017,
"end": 1416
} | class ____(Protocol):
"""Protocol for objects that can have their intrinsic values animated.
For example, the transition between two colors can be animated
because the class [`Color`][textual.color.Color.blend] satisfies this protocol.
"""
def blend(
self: ReturnType, destination: ReturnType, factor: float
) -> ReturnType: # pragma: no cover
...
| Animatable |
python | spyder-ide__spyder | spyder/plugins/workingdirectory/container.py | {
"start": 1290,
"end": 1454
} | class ____:
PathComboBox = 'path_combo'
# ---- Widgets
# ----------------------------------------------------------------------------
| WorkingDirectoryToolbarItems |
python | django__django | tests/swappable_models/models.py | {
"start": 207,
"end": 378
} | class ____(models.Model):
title = models.CharField(max_length=100)
publication_date = models.DateField()
byline = models.CharField(max_length=100)
| AlternateArticle |
python | doocs__leetcode | solution/0400-0499/0427.Construct Quad Tree/Solution.py | {
"start": 329,
"end": 1213
} | class ____:
def construct(self, grid: List[List[int]]) -> 'Node':
def dfs(a, b, c, d):
zero = one = 0
for i in range(a, c + 1):
for j in range(b, d + 1):
if grid[i][j] == 0:
zero = 1
else:
one = 1
isLeaf = zero + one == 1
val = isLeaf and one
if isLeaf:
return Node(grid[a][b], True)
topLeft = dfs(a, b, (a + c) // 2, (b + d) // 2)
topRight = dfs(a, (b + d) // 2 + 1, (a + c) // 2, d)
bottomLeft = dfs((a + c) // 2 + 1, b, c, (b + d) // 2)
bottomRight = dfs((a + c) // 2 + 1, (b + d) // 2 + 1, c, d)
return Node(val, isLeaf, topLeft, topRight, bottomLeft, bottomRight)
return dfs(0, 0, len(grid) - 1, len(grid[0]) - 1)
| Solution |
python | astropy__astropy | astropy/modeling/functional_models.py | {
"start": 77186,
"end": 79553
} | class ____(Fittable1DModel):
"""
One dimensional Box model.
Parameters
----------
amplitude : float
Amplitude A
x_0 : float
Position of the center of the box function
width : float
Width of the box
See Also
--------
Box2D, TrapezoidDisk2D
Notes
-----
Model formula:
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A & : x_0 - w/2 \\leq x \\leq x_0 + w/2 \\\\
0 & : \\text{else}
\\end{array}
\\right.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling.models import Box1D
plt.figure()
s1 = Box1D()
r = np.arange(-5, 5, .01)
for factor in range(1, 4):
s1.amplitude = factor
s1.width = factor
plt.plot(r, s1(r), color=str(0.25 * factor), lw=2)
plt.axis([-5, 5, -1, 4])
plt.show()
"""
amplitude = Parameter(default=1, description="Amplitude A", mag=True)
x_0 = Parameter(default=0, description="Position of center of box function")
width = Parameter(default=1, description="Width of the box")
@staticmethod
def evaluate(x, amplitude, x_0, width):
"""One dimensional Box model function."""
inside = np.logical_and(x >= x_0 - width / 2.0, x <= x_0 + width / 2.0)
return np.select([inside], [amplitude], 0)
@property
def bounding_box(self):
"""
Tuple defining the default ``bounding_box`` limits.
``(x_low, x_high))``
"""
dx = self.width / 2
return (self.x_0 - dx, self.x_0 + dx)
@property
def input_units(self):
if self.x_0.input_unit is None:
return None
return {self.inputs[0]: self.x_0.input_unit}
@property
def return_units(self):
if self.amplitude.unit is None:
return None
return {self.outputs[0]: self.amplitude.unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_0": inputs_unit[self.inputs[0]],
"width": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
| Box1D |
python | kamyu104__LeetCode-Solutions | Python/non-negative-integers-without-consecutive-ones.py | {
"start": 29,
"end": 614
} | class ____(object):
def findIntegers(self, num):
"""
:type num: int
:rtype: int
"""
dp = [0] * 32
dp[0], dp[1] = 1, 2
for i in xrange(2, len(dp)):
dp[i] = dp[i-1] + dp[i-2]
result, prev_bit = 0, 0
for i in reversed(xrange(31)):
if (num & (1 << i)) != 0:
result += dp[i]
if prev_bit == 1:
result -= 1
break
prev_bit = 1
else:
prev_bit = 0
return result + 1
| Solution |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/serializers/alertrule_workflow_serializer.py | {
"start": 189,
"end": 341
} | class ____(TypedDict):
ruleId: str | None
alertRuleId: str | None
workflowId: str
@register(AlertRuleWorkflow)
| ActionHandlerSerializerResponse |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/constitutional_ai/base.py | {
"start": 1024,
"end": 12455
} | class ____(Chain):
r'''Chain for applying constitutional principles.
!!! note
This class is deprecated. See below for a replacement implementation using
LangGraph. The benefits of this implementation are:
- Uses LLM tool calling features instead of parsing string responses;
- Support for both token-by-token and step-by-step streaming;
- Support for checkpointing and memory of chat history;
- Easier to modify or extend (e.g., with additional tools, structured responses, etc.)
Install LangGraph with:
```bash
pip install -U langgraph
```
```python
from typing import List, Optional, Tuple
from langchain_classic.chains.constitutional_ai.prompts import (
CRITIQUE_PROMPT,
REVISION_PROMPT,
)
from langchain_classic.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langgraph.graph import END, START, StateGraph
from typing_extensions import Annotated, TypedDict
model = ChatOpenAI(model="gpt-4o-mini")
class Critique(TypedDict):
"""Generate a critique, if needed."""
critique_needed: Annotated[bool, ..., "Whether or not a critique is needed."]
critique: Annotated[str, ..., "If needed, the critique."]
critique_prompt = ChatPromptTemplate.from_template(
"Critique this response according to the critique request. "
"If no critique is needed, specify that.\n\n"
"Query: {query}\n\n"
"Response: {response}\n\n"
"Critique request: {critique_request}"
)
revision_prompt = ChatPromptTemplate.from_template(
"Revise this response according to the critique and reivsion request.\n\n"
"Query: {query}\n\n"
"Response: {response}\n\n"
"Critique request: {critique_request}\n\n"
"Critique: {critique}\n\n"
"If the critique does not identify anything worth changing, ignore the "
"revision request and return 'No revisions needed'. If the critique "
"does identify something worth changing, revise the response based on "
"the revision request.\n\n"
"Revision Request: {revision_request}"
)
chain = model | StrOutputParser()
critique_chain = critique_prompt | model.with_structured_output(Critique)
revision_chain = revision_prompt | model | StrOutputParser()
class State(TypedDict):
query: str
constitutional_principles: List[ConstitutionalPrinciple]
initial_response: str
critiques_and_revisions: List[Tuple[str, str]]
response: str
async def generate_response(state: State):
"""Generate initial response."""
response = await chain.ainvoke(state["query"])
return {"response": response, "initial_response": response}
async def critique_and_revise(state: State):
"""Critique and revise response according to principles."""
critiques_and_revisions = []
response = state["initial_response"]
for principle in state["constitutional_principles"]:
critique = await critique_chain.ainvoke(
{
"query": state["query"],
"response": response,
"critique_request": principle.critique_request,
}
)
if critique["critique_needed"]:
revision = await revision_chain.ainvoke(
{
"query": state["query"],
"response": response,
"critique_request": principle.critique_request,
"critique": critique["critique"],
"revision_request": principle.revision_request,
}
)
response = revision
critiques_and_revisions.append((critique["critique"], revision))
else:
critiques_and_revisions.append((critique["critique"], ""))
return {
"critiques_and_revisions": critiques_and_revisions,
"response": response,
}
graph = StateGraph(State)
graph.add_node("generate_response", generate_response)
graph.add_node("critique_and_revise", critique_and_revise)
graph.add_edge(START, "generate_response")
graph.add_edge("generate_response", "critique_and_revise")
graph.add_edge("critique_and_revise", END)
app = graph.compile()
```
```python
constitutional_principles=[
ConstitutionalPrinciple(
critique_request="Tell if this answer is good.",
revision_request="Give a better answer.",
)
]
query = "What is the meaning of life? Answer in 10 words or fewer."
async for step in app.astream(
{"query": query, "constitutional_principles": constitutional_principles},
stream_mode="values",
):
subset = ["initial_response", "critiques_and_revisions", "response"]
print({k: v for k, v in step.items() if k in subset})
```
Example:
```python
from langchain_openai import OpenAI
from langchain_classic.chains import LLMChain, ConstitutionalChain
from langchain_classic.chains.constitutional_ai.models \
import ConstitutionalPrinciple
llmodelm = OpenAI()
qa_prompt = PromptTemplate(
template="Q: {question} A:",
input_variables=["question"],
)
qa_chain = LLMChain(llm=model, prompt=qa_prompt)
constitutional_chain = ConstitutionalChain.from_llm(
llm=model,
chain=qa_chain,
constitutional_principles=[
ConstitutionalPrinciple(
critique_request="Tell if this answer is good.",
revision_request="Give a better answer.",
)
],
)
constitutional_chain.run(question="What is the meaning of life?")
```
''' # noqa: E501
chain: LLMChain
constitutional_principles: list[ConstitutionalPrinciple]
critique_chain: LLMChain
revision_chain: LLMChain
return_intermediate_steps: bool = False
@classmethod
def get_principles(
cls,
names: list[str] | None = None,
) -> list[ConstitutionalPrinciple]:
"""Get constitutional principles by name.
Args:
names: List of names of constitutional principles to retrieve.
If `None` (Default), all principles are returned.
"""
if names is None:
return list(PRINCIPLES.values())
return [PRINCIPLES[name] for name in names]
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
chain: LLMChain,
critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT,
revision_prompt: BasePromptTemplate = REVISION_PROMPT,
**kwargs: Any,
) -> "ConstitutionalChain":
"""Create a chain from an LLM."""
critique_chain = LLMChain(llm=llm, prompt=critique_prompt)
revision_chain = LLMChain(llm=llm, prompt=revision_prompt)
return cls(
chain=chain,
critique_chain=critique_chain,
revision_chain=revision_chain,
**kwargs,
)
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.chain.input_keys
@property
def output_keys(self) -> list[str]:
"""Output keys."""
if self.return_intermediate_steps:
return ["output", "critiques_and_revisions", "initial_output"]
return ["output"]
def _call(
self,
inputs: dict[str, Any],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
response = self.chain.run(
**inputs,
callbacks=_run_manager.get_child("original"),
)
initial_response = response
input_prompt = self.chain.prompt.format(**inputs)
_run_manager.on_text(
text="Initial response: " + response + "\n\n",
verbose=self.verbose,
color="yellow",
)
critiques_and_revisions = []
for constitutional_principle in self.constitutional_principles:
# Do critique
raw_critique = self.critique_chain.run(
input_prompt=input_prompt,
output_from_model=response,
critique_request=constitutional_principle.critique_request,
callbacks=_run_manager.get_child("critique"),
)
critique = self._parse_critique(
output_string=raw_critique,
).strip()
# if the critique contains "No critique needed", then we're done
# in this case, initial_output is the same as output,
# but we'll keep it for consistency
if "no critique needed" in critique.lower():
critiques_and_revisions.append((critique, ""))
continue
# Do revision
revision = self.revision_chain.run(
input_prompt=input_prompt,
output_from_model=response,
critique_request=constitutional_principle.critique_request,
critique=critique,
revision_request=constitutional_principle.revision_request,
callbacks=_run_manager.get_child("revision"),
).strip()
response = revision
critiques_and_revisions.append((critique, revision))
_run_manager.on_text(
text=f"Applying {constitutional_principle.name}..." + "\n\n",
verbose=self.verbose,
color="green",
)
_run_manager.on_text(
text="Critique: " + critique + "\n\n",
verbose=self.verbose,
color="blue",
)
_run_manager.on_text(
text="Updated response: " + revision + "\n\n",
verbose=self.verbose,
color="yellow",
)
final_output: dict[str, Any] = {"output": response}
if self.return_intermediate_steps:
final_output["initial_output"] = initial_response
final_output["critiques_and_revisions"] = critiques_and_revisions
return final_output
@staticmethod
def _parse_critique(output_string: str) -> str:
if "Revision request:" not in output_string:
return output_string
output_string = output_string.split("Revision request:")[0]
if "\n\n" in output_string:
output_string = output_string.split("\n\n")[0]
return output_string
| ConstitutionalChain |
python | getsentry__sentry | src/sentry/search/events/builder/profile_functions.py | {
"start": 1079,
"end": 3004
} | class ____:
def resolve_column_name(self: ProfileFunctionsQueryBuilderProtocol, col: str) -> str:
# giving resolved a type here convinces mypy that the type is str
resolved: str = self.config.resolve_column(col)
return resolved
def get_field_type(self: ProfileFunctionsQueryBuilderProtocol, field: str) -> str | None:
# giving resolved a type here convinces mypy that the type is str
resolved: str | None = self.config.resolve_column_type(field)
return resolved
def process_profiling_function_columns(self, row: SnubaRow):
# We need to check both the aliased and non aliased names
# as not all use cases enable `transform_alias_to_input_format`
# and the events-stats endpoint does not actually apply it.
if "all_examples()" in row:
key = "all_examples()"
elif "all_examples" in row:
key = "all_examples"
else:
key = None
if key is not None:
parsed_examples = []
for example in row[key]:
profile_id, thread_id, start, end = example
# This is shaped like the `ExampleMetaData` in vroom
if not start and not end:
parsed_examples.append(
{
"profile_id": profile_id,
}
)
else:
parsed_examples.append(
{
"profiler_id": profile_id,
"thread_id": thread_id,
"start": datetime.fromisoformat(start).replace(tzinfo=UTC).timestamp(),
"end": datetime.fromisoformat(end).replace(tzinfo=UTC).timestamp(),
}
)
row[key] = parsed_examples
| ProfileFunctionsQueryBuilderMixin |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/data.py | {
"start": 12190,
"end": 12536
} | class ____(SpanProperty):
def __init__(self, spans: "Spans") -> None:
super().__init__(spans)
self.result: set[int] = set()
def finish(self) -> frozenset[int]:
return frozenset(self.result)
def stop_span(self, i: int, *, discarded: bool) -> None:
if discarded:
self.result.add(i)
| _discarded |
python | astropy__astropy | astropy/io/ascii/tdat.py | {
"start": 26881,
"end": 31336
} | class ____(core.BaseReader):
"""TDAT format
See: https://heasarc.gsfc.nasa.gov/docs/software/dbdocs/tdat.html
Example::
<HEADER>
# # and // are comments
table_name = example_table
table_description = "Example table"
#
# Table Parameters
#
field[id] = integer [meta.id] (key) // Unique ID
field[ra] = float:.4f_degree [pos.eq.ra] (index) // Right Ascension
field[name] = char12 [meta.id] // Name
#
# Virtual Parameters
#
table_author = Example et al.
#
# Data Format Specification
#
line[1] = id name ra
<DATA>
1|TargetOne|1.0|
2|TargetTwo|2.0|
<END>
The comments and keywords defined in the header, excepting common header
section titles and blank comments, are available via the output table
``meta`` attribute::
>>> from astropy.io import ascii
>>> lines = ascii.tdat.make_example_data()
>>> data = ascii.read(lines, format='tdat')
>>> print(data.meta['comments'])
['# and // are comments']
>>> for name, keyword in data.meta['keywords'].items():
... print(name, keyword)
table_name example_table
table_description Example table
table_author Example et al.
When writing to the TDAT format, the header will be auto-populated by
information in the Table, prioritizing information given in the Table.meta:
**comments** : list or string, (optional)
Table information which provide context. This information is
included in the header preceding all other lines and commented
out with #
**keywords** : dict, (optional, recommended)
Header keywords which will appear in the file as "name=value" lines.
Of particular importance are table_name, table_description,
and table_document_url.
If there is no Table.meta, this writer will attempt to automatically
generate the appropriate header information based on the table and
column properties and the recommendations for the TDAT format by HEASARC.
Column ``units`` are written using the CDS format.
Example::
>>> from astropy.table import Table
>>> import sys
>>> t = Table(names=('reference_id', 'RA', 'Name'),
... data=[[1, 2, 3], [1.0, 2.0, 3.0], ['c', 'd', 'e']])
>>> t.meta['table_name'] = "astropy_table"
>>> t.write(sys.stdout, format="ascii.tdat")
<HEADER>
table_name = astropy_table
#
# Table Parameters
#
field[reference_id] = int4
field[RA] = float8
field[Name] = char1
#
# Data Format Specification
#
line[1] = reference_id RA Name
#
<DATA>
1|1.0|c|
2|2.0|d|
3|3.0|e|
<END>
Including relevant metadata for the table and columns separately
is possible with a mixture of attribute assignment and additions to the
metadata::
>>> from astropy.table import Table
>>> from io import StringIO
>>> t = Table(names=('reference_id', 'RA', 'Name'),
... data=[[1, 2, 3], [1.0, 2.0, 3.0], ['c', 'd', 'e']])
>>> t.meta["table_name"] = "example_table"
>>> t.meta["table_description"] = "An example table for the tdat writer."
>>> t.add_index('reference_id')
>>> t.columns['reference_id'].meta['comment'] = "For internal reference only"
>>> t.add_index('RA')
>>> t.columns['RA'].unit = "degree"
>>> t.columns['RA'].format = ".4f"
>>> t.columns['RA'].meta['ucd'] = "pos.eq.ra"
>>> t.columns['Name'].description = "The name of the source (if available)"
>>> t.write(sys.stdout, format="ascii.tdat")
<HEADER>
table_name = example_table
table_description = An example table for the tdat writer.
#
# Table Parameters
#
field[reference_id] = int4 (key) // // For internal reference only
field[RA] = float8:.4f_deg [pos.eq.ra] (index)
field[Name] = char1 // The name of the source (if available)
#
# Data Format Specification
#
line[1] = reference_id RA Name
#
<DATA>
1|1.0000|c|
2|2.0000|d|
3|3.0000|e|
<END>
"""
_format_name = "tdat"
_description = "HEASARC tdat format"
_io_registry_can_write = True
_io_registry_suffix = ".tdat"
header_class = TdatHeader
data_class = TdatData
outputter_class = TdatOutputter
| Tdat |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed4.py | {
"start": 525,
"end": 780
} | class ____(TypedDict, extra_items=int | None):
name: str
year: int | None
details2: MovieDetails2 = {"name": "Kill Bill Vol. 1", "year": 2003}
# This should generate an error because "year" is not required.
movie2: Movie1 = details2
| MovieDetails2 |
python | paramiko__paramiko | tests/test_util.py | {
"start": 1549,
"end": 4939
} | class ____(unittest.TestCase):
def test_imports(self):
"""
Verify that all the classes can be imported from paramiko.
"""
for name in (
"Agent",
"AgentKey",
"AuthenticationException",
"AuthFailure",
"AuthHandler",
"AuthResult",
"AuthSource",
"AuthStrategy",
"AutoAddPolicy",
"BadAuthenticationType",
"BufferedFile",
"Channel",
"ChannelException",
"ConfigParseError",
"CouldNotCanonicalize",
"ECDSAKey",
"Ed25519Key",
"HostKeys",
"InMemoryPrivateKey",
"Message",
"MissingHostKeyPolicy",
"NoneAuth",
"OnDiskPrivateKey",
"Password",
"PasswordRequiredException",
"PrivateKey",
"RSAKey",
"RejectPolicy",
"SFTP",
"SFTPAttributes",
"SFTPClient",
"SFTPError",
"SFTPFile",
"SFTPHandle",
"SFTPServer",
"SFTPServerInterface",
"SSHClient",
"SSHConfig",
"SSHConfigDict",
"SSHException",
"SecurityOptions",
"ServerInterface",
"SourceResult",
"SubsystemHandler",
"Transport",
"WarningPolicy",
"util",
):
assert name in dir(paramiko)
def test_version_available_in_main_module(self):
assert paramiko.__version__ == metadata.version("paramiko")
def test_generate_key_bytes(self):
key_bytes = paramiko.util.generate_key_bytes(
sha1, b"ABCDEFGH", "This is my secret passphrase.", 64
)
hexy = "".join([f"{byte:02x}" for byte in key_bytes])
hexpected = "9110e2f6793b69363e58173e9436b13a5a4b339005741d5c680e505f57d871347b4239f14fb5c46e857d5e100424873ba849ac699cea98d729e57b3e84378e8b" # noqa
assert hexy == hexpected
def test_host_keys(self):
with open("hostfile.temp", "w") as f:
f.write(test_hosts_file)
try:
hostdict = paramiko.util.load_host_keys("hostfile.temp")
assert 2 == len(hostdict)
assert 1 == len(list(hostdict.values())[0])
assert 1 == len(list(hostdict.values())[1])
fp = hexlify(
hostdict["secure.example.com"]["ssh-rsa"].get_fingerprint()
).upper()
assert b"E6684DB30E109B67B70FF1DC5C7F1363" == fp
finally:
os.unlink("hostfile.temp")
def test_clamp_value(self):
assert 32768 == paramiko.util.clamp_value(32767, 32768, 32769)
assert 32767 == paramiko.util.clamp_value(32767, 32765, 32769)
assert 32769 == paramiko.util.clamp_value(32767, 32770, 32769)
def test_safe_string(self):
vanilla = b"vanilla"
has_bytes = b"has \7\3 bytes"
safe_vanilla = safe_string(vanilla)
safe_has_bytes = safe_string(has_bytes)
expected_bytes = b"has %07%03 bytes"
err = "{!r} != {!r}"
msg = err.format(safe_vanilla, vanilla)
assert safe_vanilla == vanilla, msg
msg = err.format(safe_has_bytes, expected_bytes)
assert safe_has_bytes == expected_bytes, msg
| UtilTest |
python | django__django | django/contrib/contenttypes/migrations/0001_initial.py | {
"start": 85,
"end": 1434
} | class ____(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="ContentType",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("name", models.CharField(max_length=100)),
("app_label", models.CharField(max_length=100)),
(
"model",
models.CharField(
max_length=100, verbose_name="python model class name"
),
),
],
options={
"ordering": ("name",),
"db_table": "django_content_type",
"verbose_name": "content type",
"verbose_name_plural": "content types",
},
bases=(models.Model,),
managers=[
("objects", django.contrib.contenttypes.models.ContentTypeManager()),
],
),
migrations.AlterUniqueTogether(
name="contenttype",
unique_together={("app_label", "model")},
),
]
| Migration |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/closure.py | {
"start": 1867,
"end": 6143
} | class ____:
pass
def closure():
obj = Object()
def source():
obj.x = _test_source()
def sink():
_test_sink(obj.x)
return source, sink
def closure_flow():
# TODO(T168869049): False Negative
source, sink = closure()
source()
sink()
def closure_no_flow():
source, sink = closure()
sink()
source()
def nonlocal_closure_write_reduction():
x = ""
z = ""
def source():
nonlocal x
x = _test_source()
return _test_source()
z = source()
_test_sink(x)
_test_sink(z)
def nonlocal_closure_reduction():
obj1 = ""
def source():
nonlocal obj1
obj1 = _test_source()
obj2 = _test_source()
return obj2
def tito(obj2):
return obj2, obj1
obj2 = source()
obj2, obj3 = tito(obj2)
# TODO(T170813777): Wrong model for define before variable initialization
# So in this case, moved the sink after obj3 declaration
def sink(obj2):
_test_sink(obj1)
_test_sink(obj2)
_test_sink(obj3)
sink(obj2)
def nonlocal_closure_multiple_writes():
x = 1
def conditional_write(condition):
nonlocal x
if condition:
x = _test_source()
else:
x = 0
# Note: The truthiness of the conditional isn't actually used
conditional_write(True)
_test_sink(x)
def overwrite():
nonlocal x
x = _test_source()
x = 0
x = 1
overwrite()
_test_sink(x)
def nonlocal_closure_flow():
obj = ""
def source():
nonlocal obj
obj = _test_source()
def sink():
_test_sink(obj)
source()
sink()
def nonlocal_closure_no_flow():
obj = ""
def source():
nonlocal obj
obj = _test_source()
def sink():
_test_sink(obj)
sink()
source()
def nonlocal_closure_inner_flow():
obj = ""
def flow():
nonlocal obj
obj = _test_source()
_test_sink(obj)
flow()
def nonlocal_closure_obscure():
obj = ""
def source():
nonlocal obj
obj = _test_source()
def sink():
_test_sink(obj)
return source, sink
def nonlocal_obscure_flow():
# TODO(T168868830): FN due to not knowing the returned functions match
# the models of functions defined in nonlocal_closure_obscure
source, sink = nonlocal_closure_obscure()
source()
sink()
def nonlocal_obscure_no_flow():
source, sink = nonlocal_closure_obscure()
sink()
source()
def nonlocal_closure_nested_flow():
outer = ""
def source1():
inner = ""
def source2():
def source3():
nonlocal inner
inner = _test_source()
source3()
_test_sink(inner)
source2()
_test_sink(inner)
nonlocal outer
outer = inner
source1()
_test_sink(outer)
def nonlocal_closure_wrapper_flow():
obj = ""
def source():
nonlocal obj
obj = _test_source()
def wrapper():
source()
wrapper()
_test_sink(obj) # TODO(T169118550): FN
def _test_source2(): ...
def nonlocal_closure_conditional_write():
obj = _test_source()
def conditional_write(cond):
nonlocal obj
if cond:
obj = _test_source2()
def clear():
nonlocal obj
obj = 0
conditional_write(True)
_test_sink(obj)
obj = 0
obj = _test_source()
clear()
# TODO(T169657906): [FP] Overwrite taint on nonlocal writes
_test_sink(obj)
def nonlocal_closure_tito():
x = _test_source()
def inner():
return x
result = inner()
_test_sink(result)
def tito_propagation():
wrapper_for_tito_propagation(_test_source())
def wrapper_for_tito_propagation(x):
def inner():
return x
_test_sink(inner())
def tito_propagation_hof():
wrapper_for_tito_propagation_hof(_test_source())
def wrapper_for_tito_propagation_hof(x):
def inner():
return x
tito_hof(inner)
def tito_hof(f):
_test_sink(f())
def parameter_order_swap_tito(x, y, z):
def inner():
return y, z, x
_test_sink(inner()[1])
T = TypeVar('T')
| Object |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 117907,
"end": 118073
} | class ____:
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5 * np.pi)
| TestRadians |
python | sphinx-doc__sphinx | sphinx/domains/index.py | {
"start": 2041,
"end": 3108
} | class ____(SphinxDirective):
"""Directive to add entries to the index."""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec: ClassVar[OptionSpec] = {
'name': directives.unchanged,
}
def run(self) -> list[Node]:
arguments = self.arguments[0].split('\n')
if 'name' in self.options:
targetname = self.options['name']
targetnode = nodes.target('', '', names=[targetname])
else:
targetid = 'index-%s' % self.env.new_serialno('index')
targetnode = nodes.target('', '', ids=[targetid])
self.state.document.note_explicit_target(targetnode)
indexnode = addnodes.index()
indexnode['entries'] = []
indexnode['inline'] = False
self.set_source_info(indexnode)
for entry in arguments:
indexnode['entries'].extend(
process_index_entry(entry, targetnode['ids'][0])
)
return [indexnode, targetnode]
| IndexDirective |
python | Lightning-AI__lightning | src/lightning/pytorch/demos/transformer.py | {
"start": 5517,
"end": 6655
} | class ____:
def __init__(self) -> None:
self.word2idx: dict[str, int] = {}
self.idx2word: list[str] = []
def add_word(self, word: str) -> int:
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self) -> int:
return len(self.idx2word)
def tokenize(path: Path) -> tuple[Tensor, Dictionary]:
dictionary = Dictionary()
assert os.path.exists(path)
# Add words to the dictionary
with open(path, encoding="utf8") as f:
for line in f:
words = line.split() + ["<eos>"]
for word in words:
dictionary.add_word(word)
# Tokenize file content
with open(path, encoding="utf8") as f:
idss: list[Tensor] = []
for line in f:
words = line.split() + ["<eos>"]
ids: list[int] = []
for word in words:
ids.append(dictionary.word2idx[word])
idss.append(torch.tensor(ids).type(torch.int64))
return torch.cat(idss), dictionary
| Dictionary |
python | redis__redis-py | redis/commands/bf/__init__.py | {
"start": 4600,
"end": 5724
} | class ____(CFCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
CF_RESERVE: bool_ok,
# CF_ADD: spaceHolder,
# CF_ADDNX: spaceHolder,
# CF_INSERT: spaceHolder,
# CF_INSERTNX: spaceHolder,
# CF_EXISTS: spaceHolder,
# CF_DEL: spaceHolder,
# CF_COUNT: spaceHolder,
# CF_SCANDUMP: spaceHolder,
# CF_LOADCHUNK: spaceHolder,
}
_RESP2_MODULE_CALLBACKS = {
CF_INFO: CFInfo,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = CFCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
| CFBloom |
python | apache__airflow | providers/apache/hive/tests/unit/apache/hive/operators/test_hive.py | {
"start": 1149,
"end": 2198
} | class ____(TestHiveEnvironment):
def test_hive_airflow_default_config_queue(self):
op = HiveOperator(
task_id="test_default_config_queue",
hql=self.hql,
mapred_queue_priority="HIGH",
mapred_job_name="airflow.test_default_config_queue",
dag=self.dag,
)
# just check that the correct default value in test_default.cfg is used
test_config_hive_mapred_queue = conf.get("hive", "default_hive_mapred_queue")
assert op.hook.mapred_queue == test_config_hive_mapred_queue
def test_hive_airflow_default_config_queue_override(self):
specific_mapred_queue = "default"
op = HiveOperator(
task_id="test_default_config_queue",
hql=self.hql,
mapred_queue=specific_mapred_queue,
mapred_queue_priority="HIGH",
mapred_job_name="airflow.test_default_config_queue",
dag=self.dag,
)
assert op.hook.mapred_queue == specific_mapred_queue
| HiveOperatorConfigTest |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/with-build-defs-pythonic.py | {
"start": 104,
"end": 978
} | class ____(dg.Component, dg.Resolvable):
"""Models a shell script as a Dagster asset."""
def __init__(self, script_path: str, asset_specs: Sequence[dg.ResolvedAssetSpec]):
self.script_path = script_path
self.asset_specs = asset_specs
# highlight-start
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
resolved_script_path = Path(context.path, self.script_path).absolute()
@dg.multi_asset(name=Path(self.script_path).stem, specs=self.asset_specs)
def _asset(context: dg.AssetExecutionContext):
self.execute(resolved_script_path, context)
return dg.Definitions(assets=[_asset])
# highlight-end
def execute(self, resolved_script_path: Path, context: dg.AssetExecutionContext):
return subprocess.run(["sh", str(resolved_script_path)], check=True)
| ShellCommand |
python | kamyu104__LeetCode-Solutions | Python/walking-robot-simulation-ii.py | {
"start": 29,
"end": 1226
} | class ____(object):
def __init__(self, width, height):
"""
:type width: int
:type height: int
"""
self.__w = width
self.__h = height
self.__curr = 0
def move(self, num):
"""
:type num: int
:rtype: None
"""
self.__curr += num
def getPos(self):
"""
:rtype: List[int]
"""
n = self.__curr % (2*((self.__w-1)+(self.__h-1)))
if n < self.__w:
return [n, 0]
n -= self.__w-1
if n < self.__h:
return [self.__w-1, n]
n -= self.__h-1
if n < self.__w:
return [(self.__w-1)-n, self.__h-1]
n -= self.__w-1
return [0, (self.__h-1)-n]
def getDir(self):
"""
:rtype: str
"""
n = self.__curr % (2*((self.__w-1)+(self.__h-1)))
if n < self.__w:
return "South" if n == 0 and self.__curr else "East"
n -= self.__w-1
if n < self.__h:
return "North"
n -= self.__h-1
if n < self.__w:
return "West"
n -= self.__w-1
return "South"
# Time: O(1)
# Space: O(1)
| Robot |
python | allegroai__clearml | examples/reporting/hyper_parameters.py | {
"start": 2148,
"end": 3815
} | class ____(TaskParameters):
iterations = param(
type=int,
desc="Number of iterations to run",
range=(0, 100000),
)
target_accuracy = percent_param(
desc="The target accuracy of the model",
)
my_task_parameters = MyTaskParameters(iterations=1000, target_accuracy=0.95)
my_task_parameters = task.connect(my_task_parameters, name='from TaskParameters-like object')
# -----------------------------------------------
# Report configuration objects via dictionary
# -----------------------------------------------
complex_nested_dict_configuration = {
'list_of_dicts': [{'a': 1, 'b': 2}, {'c': 3, 'd': 4}, {'e': 5, 'f': 6}],
'nested_dicts': {'nested': {'key': 'value', 'extra': 'value'}, 'number': 42},
'dict': {'simple': 'value', 'number': 2},
'list': [1, 2, 3],
'int': 3,
'float': 2.2,
'string': 'additional string',
}
complex_nested_dict_configuration = task.connect_configuration(
complex_nested_dict_configuration, name='configuration dictionary')
print(complex_nested_dict_configuration)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--argparser_int_value', help='integer value', type=int, default=1)
parser.add_argument('--argparser_disabled', action='store_true', default=False, help='disables something')
parser.add_argument('--argparser_str_value', help='string value', default='a string')
args = parser.parse_args()
print('Running under Python {0[0]}.{0[1]}.{0[2]}'.format(sys.version_info), file=sys.stderr)
task_params = task.get_parameters()
print("Task parameters are: {}".format(task_params))
| MyTaskParameters |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-jaguar/llama_index/vector_stores/jaguar/base.py | {
"start": 1036,
"end": 16562
} | class ____(BasePydanticVectorStore):
"""
Jaguar vector store.
See http://www.jaguardb.com
See http://github.com/fserv/jaguar-sdk
Examples:
`pip install llama-index-vector-stores-jaguar`
```python
from llama_index.vector_stores.jaguar import JaguarVectorStore
vectorstore = JaguarVectorStore(
pod = 'vdb',
store = 'mystore',
vector_index = 'v',
vector_type = 'cosine_fraction_float',
vector_dimension = 1536,
url='http://192.168.8.88:8080/fwww/',
)
```
"""
stores_text: bool = True
_pod: str = PrivateAttr()
_store: str = PrivateAttr()
_vector_index: str = PrivateAttr()
_vector_type: str = PrivateAttr()
_vector_dimension: int = PrivateAttr()
_jag: JaguarHttpClient = PrivateAttr()
_token: str = PrivateAttr()
def __init__(
self,
pod: str,
store: str,
vector_index: str,
vector_type: str,
vector_dimension: int,
url: str,
):
"""
Constructor of JaguarVectorStore.
Args:
pod: str: name of the pod (database)
store: str: name of vector store in the pod
vector_index: str: name of vector index of the store
vector_type: str: type of the vector index
vector_dimension: int: dimension of the vector index
url: str: URL end point of jaguar http server
"""
super().__init__(stores_text=True)
self._pod = self._sanitize_input(pod)
self._store = self._sanitize_input(store)
self._vector_index = self._sanitize_input(vector_index)
self._vector_type = self._sanitize_input(vector_type)
self._vector_dimension = vector_dimension
self._jag = JaguarHttpClient(url)
self._token = ""
def __del__(self) -> None:
pass
@classmethod
def class_name(cls) -> str:
return "JaguarVectorStore"
@property
def client(self) -> Any:
"""Get client."""
return self._jag
def _sanitize_input(self, value: str) -> str:
"""Sanitize input to prevent SQL injection."""
forbidden_chars = ['"', ";", "--", "/*", "*/"]
sanitized = value.replace("'", "\\'")
for char in forbidden_chars:
sanitized = sanitized.replace(char, "")
return sanitized
def add(
self,
nodes: Sequence[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of nodes with embeddings
"""
use_node_metadata = add_kwargs.get("use_node_metadata", False)
ids = []
for node in nodes:
text = node.get_text()
embedding = node.get_embedding()
if use_node_metadata is True:
metadata = node.metadata
else:
metadata = None
zid = self.add_text(text, embedding, metadata, **add_kwargs)
ids.append(zid)
return ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
podstore = self._pod + "." + self._store
q = (
"delete from "
+ podstore
+ " where zid='"
+ self._sanitize_input(ref_doc_id)
+ "'"
)
self.run(q)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""
Query index for top k most similar nodes.
Args:
query: VectorStoreQuery object
kwargs: may contain 'where', 'metadata_fields', 'args', 'fetch_k'
"""
embedding = query.query_embedding
k = query.similarity_top_k
(nodes, ids, simscores) = self.similarity_search_with_score(
embedding, k=k, form="node", **kwargs
)
return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=simscores)
def load_documents(
self, embedding: List[float], k: int, **kwargs: Any
) -> List[Document]:
"""
Query index to load top k most similar documents.
Args:
embedding: a list of floats
k: topK number
kwargs: may contain 'where', 'metadata_fields', 'args', 'fetch_k'
"""
return cast(
List[Document],
self.similarity_search_with_score(embedding, k=k, form="doc", **kwargs),
)
def create(
self,
metadata_fields: str,
text_size: int,
) -> None:
"""
Create the vector store on the backend database.
Args:
metadata_fields (str): exrta metadata columns and types
Returns:
True if successful; False if not successful
"""
podstore = self._pod + "." + self._store
"""
v:text column is required.
"""
q = "create store "
q += podstore
q += f" ({self._vector_index} vector({self._vector_dimension},"
q += f" '{self._vector_type}'),"
q += f" v:text char({text_size}),"
q += self._sanitize_input(metadata_fields) + ")"
self.run(q)
def add_text(
self,
text: str,
embedding: List[float],
metadata: Optional[dict] = None,
**kwargs: Any,
) -> str:
"""
Add texts through the embeddings and add to the vectorstore.
Args:
texts: text string to add to the jaguar vector store.
embedding: embedding vector of the text, list of floats
metadata: {'file_path': '../data/paul_graham/paul_graham_essay.txt',
'file_name': 'paul_graham_essay.txt',
'file_type': 'text/plain',
'file_size': 75042,
'creation_date': '2023-12-24',
'last_modified_date': '2023-12-24',
'last_accessed_date': '2023-12-28'}
kwargs: vector_index=name_of_vector_index
file_column=name_of_file_column
metadata={...}
Returns:
id from adding the text into the vectorstore
"""
text = self._sanitize_input(text)
vcol = self._vector_index
filecol = kwargs.get("file_column", "")
text_tag = kwargs.get("text_tag", "")
if text_tag != "":
text = text_tag + " " + text
podstorevcol = self._pod + "." + self._store + "." + vcol
q = "textcol " + podstorevcol
js = self.run(q)
if js == "":
return ""
textcol = js["data"]
zid = ""
if metadata is None:
### no metadata and no files to upload
str_vec = [str(x) for x in embedding]
values_comma = self._sanitize_input(",".join(str_vec))
podstore = self._pod + "." + self._store
q = "insert into " + podstore + " ("
q += vcol + "," + textcol + ") values ('" + values_comma
q += "','" + text + "')"
js = self.run(q, False)
zid = js["zid"]
else:
str_vec = [str(x) for x in embedding]
nvec, vvec, filepath = self._parseMeta(metadata, filecol)
if filecol != "":
rc = self._jag.postFile(self._token, filepath, 1)
if not rc:
return ""
names_comma = ",".join(nvec)
names_comma += "," + vcol
names_comma = self._sanitize_input(names_comma)
## col1,col2,col3,vecl
if vvec is not None and len(vvec) > 0:
values_comma = "'" + "','".join(vvec) + "'"
else:
values_comma = "'" + "','".join(vvec) + "'"
### 'va1','val2','val3'
values_comma += ",'" + ",".join(str_vec) + "'"
values_comma = self._sanitize_input(values_comma)
### 'v1,v2,v3'
podstore = self._pod + "." + self._store
q = "insert into " + podstore + " ("
q += names_comma + "," + textcol + ") values (" + values_comma
q += ",'" + text + "')"
if filecol != "":
js = self.run(q, True)
else:
js = self.run(q, False)
zid = js["zid"]
return zid
def similarity_search_with_score(
self,
embedding: Optional[List[float]],
k: int = 3,
form: str = "node",
**kwargs: Any,
) -> Union[Tuple[List[TextNode], List[str], List[float]], List[Document]]:
"""
Return nodes most similar to query embedding, along with ids and scores.
Args:
embedding: embedding of text to look up.
k: Number of nodes to return. Defaults to 3.
form: if "node", return Tuple[List[TextNode], List[str], List[float]]
if "doc", return List[Document]
kwargs: may have where, metadata_fields, args, fetch_k
Returns:
Tuple(list of nodes, list of ids, list of similaity scores)
"""
where = kwargs.get("where")
metadata_fields = kwargs.get("metadata_fields")
args = kwargs.get("args")
fetch_k = kwargs.get("fetch_k", -1)
vcol = self._vector_index
vtype = self._vector_type
if embedding is None:
return ([], [], [])
str_embeddings = [str(f) for f in embedding]
qv_comma = self._sanitize_input(",".join(str_embeddings))
podstore = self._pod + "." + self._store
q = (
"select similarity("
+ vcol
+ ",'"
+ qv_comma
+ "','topk="
+ str(k)
+ ",fetch_k="
+ str(fetch_k)
+ ",type="
+ vtype
)
q += ",with_score=yes,with_text=yes"
if args is not None:
q += "," + args
if metadata_fields is not None:
x = "&".join(metadata_fields)
q += ",metadata=" + x
q += "') from " + podstore
if where is not None:
q += " where " + self._sanitize_input(where)
jarr = self.run(q)
if jarr is None:
return ([], [], [])
nodes = []
ids = []
simscores = []
docs = []
for js in jarr:
score = js["score"]
text = js["text"]
zid = js["zid"]
md = {}
md["zid"] = zid
if metadata_fields is not None:
for m in metadata_fields:
mv = js[m]
md[m] = mv
if form == "node":
node = TextNode(
id_=zid,
text=text,
metadata=md,
)
nodes.append(node)
ids.append(zid)
simscores.append(float(score))
else:
doc = Document(
id_=zid,
text=text,
metadata=md,
)
docs.append(doc)
if form == "node":
return (nodes, ids, simscores)
else:
return docs
def is_anomalous(
self,
node: BaseNode,
**kwargs: Any,
) -> bool:
"""
Detect if given text is anomalous from the dataset.
Args:
query: Text to detect if it is anomaly
Returns:
True or False
"""
vcol = self._vector_index
vtype = self._vector_type
str_embeddings = [str(f) for f in node.get_embedding()]
qv_comma = ",".join(str_embeddings)
podstore = self._pod + "." + self._store
q = "select anomalous(" + vcol + ", '" + qv_comma + "', 'type=" + vtype + "')"
q += " from " + podstore
js = self.run(q)
if isinstance(js, list) and len(js) == 0:
return False
jd = json.loads(js[0])
return jd["anomalous"] == "YES"
def run(self, query: str, withFile: bool = False) -> dict:
"""
Run any query statement in jaguardb.
Args:
query (str): query statement to jaguardb
Returns:
None for invalid token, or
json result string
"""
if self._token == "":
logger.error(f"E0005 error run({query})")
return {}
resp = self._jag.post(query, self._token, withFile)
txt = resp.text
try:
return json.loads(txt)
except Exception:
return {}
def count(self) -> int:
"""
Count records of a store in jaguardb.
Args: no args
Returns: (int) number of records in pod store
"""
podstore = self._pod + "." + self._store
q = "select count() from " + podstore
js = self.run(q)
if isinstance(js, list) and len(js) == 0:
return 0
jd = json.loads(js[0])
return int(jd["data"])
def clear(self) -> None:
"""
Delete all records in jaguardb.
Args: No args
Returns: None
"""
podstore = self._pod + "." + self._store
q = "truncate store " + podstore
self.run(q)
def drop(self) -> None:
"""
Drop or remove a store in jaguardb.
Args: no args
Returns: None
"""
podstore = self._pod + "." + self._store
q = "drop store " + podstore
self.run(q)
def prt(self, msg: str) -> None:
nows = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open("/tmp/debugjaguar.log", "a") as file:
print(f"{nows} msg={msg}", file=file, flush=True)
def login(
self,
jaguar_api_key: Optional[str] = "",
) -> bool:
"""
Login to jaguar server with a jaguar_api_key or let self._jag find a key.
Args:
optional jaguar_api_key (str): API key of user to jaguardb server
Returns:
True if successful; False if not successful
"""
if jaguar_api_key == "":
jaguar_api_key = self._jag.getApiKey()
self._jaguar_api_key = jaguar_api_key
self._token = self._jag.login(jaguar_api_key)
if self._token == "":
logger.error("E0001 error init(): invalid jaguar_api_key")
return False
return True
def logout(self) -> None:
"""
Logout to cleanup resources.
Args: no args
Returns: None
"""
self._jag.logout(self._token)
def _parseMeta(self, nvmap: dict, filecol: str) -> Tuple[List[str], List[str], str]:
filepath = ""
if filecol == "":
nvec = list(nvmap.keys())
vvec = list(nvmap.values())
else:
nvec = []
vvec = []
if filecol in nvmap:
nvec.append(filecol)
vvec.append(nvmap[filecol])
filepath = nvmap[filecol]
for k, v in nvmap.items():
if k != filecol:
nvec.append(k)
vvec.append(v)
return nvec, vvec, filepath
| JaguarVectorStore |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_path_converters.py | {
"start": 2393,
"end": 4009
} | class ____:
def test_simple_module(self):
root = Path("/project")
file_path = root / "mypackage" / "module.py"
result = generic_path_converter(file_path, root)
assert result == "mypackage.module"
def test_nested_module(self):
root = Path("/project")
file_path = root / "mypackage" / "subpackage" / "module.py"
result = generic_path_converter(file_path, root)
assert result == "mypackage.subpackage.module"
def test_init_module(self):
root = Path("/project")
file_path = root / "mypackage" / "__init__.py"
result = generic_path_converter(file_path, root)
assert result == "mypackage"
def test_root_level_module(self):
root = Path("/project")
file_path = root / "module.py"
result = generic_path_converter(file_path, root)
assert result == "module"
def test_root_level_init(self):
root = Path("/project")
file_path = root / "__init__.py"
result = generic_path_converter(file_path, root)
assert result is None
def test_file_outside_root(self):
root = Path("/project")
file_path = Path("/other") / "module.py"
result = generic_path_converter(file_path, root)
assert result is None
def test_non_python_file(self):
root = Path("/project")
file_path = root / "mypackage" / "data.txt"
# Should still work, just without .py extension handling
result = generic_path_converter(file_path, root)
assert result == "mypackage.data.txt"
| TestGenericPathConverter |
python | jazzband__django-simple-history | simple_history/management/commands/populate_history.py | {
"start": 206,
"end": 6197
} | class ____(BaseCommand):
args = "<app.model app.model ...>"
help = (
"Populates the corresponding HistoricalRecords field with "
"the current state of all instances in a model"
)
COMMAND_HINT = "Please specify a model or use the --auto option"
MODEL_NOT_FOUND = "Unable to find model"
MODEL_NOT_HISTORICAL = "No history model found"
NO_REGISTERED_MODELS = "No registered models were found\n"
START_SAVING_FOR_MODEL = "Saving historical records for {model}\n"
DONE_SAVING_FOR_MODEL = "Finished saving historical records for {model}\n"
EXISTING_HISTORY_FOUND = "Existing history found, skipping model"
INVALID_MODEL_ARG = "An invalid model was specified"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument("models", nargs="*", type=str)
parser.add_argument(
"--auto",
action="store_true",
dest="auto",
default=False,
help="Automatically search for models with the HistoricalRecords field "
"type",
)
parser.add_argument(
"--batchsize",
action="store",
dest="batchsize",
default=200,
type=int,
help="Set a custom batch size when bulk inserting historical records.",
)
def handle(self, *args, **options):
self.verbosity = options["verbosity"]
to_process = set()
model_strings = options.get("models", []) or args
if model_strings:
for model_pair in self._handle_model_list(*model_strings):
to_process.add(model_pair)
elif options["auto"]:
to_process = self._auto_models()
else:
if self.verbosity >= 1:
self.stdout.write(self.COMMAND_HINT)
self._process(to_process, batch_size=options["batchsize"])
def _auto_models(self):
to_process = set()
for model in models.registered_models.values():
try: # avoid issues with multi-table inheritance
history_model = utils.get_history_model_for_model(model)
except NotHistoricalModelError:
continue
to_process.add((model, history_model))
if not to_process:
if self.verbosity >= 1:
self.stdout.write(self.NO_REGISTERED_MODELS)
return to_process
def _handle_model_list(self, *args):
failing = False
for natural_key in args:
try:
model, history = self._model_from_natural_key(natural_key)
except ValueError as e:
failing = True
self.stderr.write(f"{e}\n")
else:
if not failing:
yield (model, history)
if failing:
raise CommandError(self.INVALID_MODEL_ARG)
def _model_from_natural_key(self, natural_key):
try:
app_label, model = natural_key.split(".", 1)
except ValueError:
model = None
else:
try:
model = get_model(app_label, model)
except LookupError:
model = None
if not model:
msg = self.MODEL_NOT_FOUND + f" < {natural_key} >\n"
raise ValueError(msg)
try:
history_model = utils.get_history_model_for_model(model)
except NotHistoricalModelError:
msg = self.MODEL_NOT_HISTORICAL + f" < {natural_key} >\n"
raise ValueError(msg)
return model, history_model
def _bulk_history_create(self, model, batch_size):
"""Save a copy of all instances to the historical model.
:param model: Model you want to bulk create
:param batch_size: number of models to create at once.
:return:
"""
instances = []
history = utils.get_history_manager_for_model(model)
if self.verbosity >= 1:
self.stdout.write(
"Starting bulk creating history models for {} instances {}-{}".format(
model, 0, batch_size
)
)
iterator_kwargs = {"chunk_size": batch_size}
for index, instance in enumerate(
model._default_manager.iterator(**iterator_kwargs)
):
# Can't Just pass batch_size to bulk_create as this can lead to
# Out of Memory Errors as we load too many models into memory after
# creating them. So we only keep batch_size worth of models in
# historical_instances and clear them after we hit batch_size
if index % batch_size == 0:
history.bulk_history_create(instances, batch_size=batch_size)
instances = []
if self.verbosity >= 1:
self.stdout.write(
"Finished bulk creating history models for {} "
"instances {}-{}, starting next {}".format(
model, index - batch_size, index, batch_size
)
)
instances.append(instance)
# create any we didn't get in the last loop
if instances:
history.bulk_history_create(instances, batch_size=batch_size)
def _process(self, to_process, batch_size):
for model, history_model in to_process:
if history_model.objects.exists():
self.stderr.write(
"{msg} {model}\n".format(
msg=self.EXISTING_HISTORY_FOUND, model=model
)
)
continue
if self.verbosity >= 1:
self.stdout.write(self.START_SAVING_FOR_MODEL.format(model=model))
self._bulk_history_create(model, batch_size)
if self.verbosity >= 1:
self.stdout.write(self.DONE_SAVING_FOR_MODEL.format(model=model))
| Command |
python | pydata__xarray | xarray/groupers.py | {
"start": 5244,
"end": 6186
} | class ____(Grouper):
"""
Abstract base class for Grouper objects that allow specializing resampling-type GroupBy instructions.
Currently only used for TimeResampler, but could be used for SpaceResampler in the future.
"""
def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]:
"""
Compute chunk sizes for this resampler.
This method should be implemented by subclasses to provide appropriate
chunking behavior for their specific resampling strategy.
Parameters
----------
variable : Variable
The variable being chunked.
dim : Hashable
The name of the dimension being chunked.
Returns
-------
tuple[int, ...]
A tuple of chunk sizes for the dimension.
"""
raise NotImplementedError("Subclasses must implement compute_chunks method")
@dataclass
| Resampler |
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 43271,
"end": 43748
} | class ____(PreTrainedModel):
config: Emu3Config
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_no_split_modules = [
"Emu3DecoderLayer",
]
_skip_keys_device_placement = ["past_key_values", "causal_mask"]
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
| Emu3PreTrainedModel |
python | encode__django-rest-framework | tests/test_requests_client.py | {
"start": 1223,
"end": 1552
} | class ____(APIView):
def get(self, request):
headers = {
key[5:].replace('_', '-'): value
for key, value in request.META.items()
if key.startswith('HTTP_')
}
return Response({
'method': request.method,
'headers': headers
})
| HeadersView |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 29076,
"end": 32570
} | class ____(IterableDataset):
def __init__(self, size, error_event):
self.error_event = error_event
self.size = size
self.remaining = size
def __len__(self):
return self.size
def __iter__(self):
return self
def __next__(self):
worker_info = torch.utils.data.get_worker_info()
if (
self.error_event is not None
and self.error_event.is_set()
and worker_info.id == worker_info.num_workers - 1
):
# only error in the last worker
raise RuntimeError("Worker error")
self.remaining -= 1
if self.remaining < 0:
raise StopIteration
return torch.tensor(-1000)
# See TestDataLoader.test_proper_exit for usage
def _test_proper_exit(
is_iterable_dataset,
use_workers,
pin_memory,
exit_method,
hold_iter_reference,
loader_setup_event,
tester_setup_event,
persistent_workers,
):
num_workers = 2 if use_workers else 0
if exit_method == "worker_error" or exit_method == "worker_kill":
assert use_workers is True
if exit_method == "worker_error":
worker_error_event = mp.Event()
else:
worker_error_event = None
if is_iterable_dataset:
ds = TestProperExitIterableDataset(7, worker_error_event)
else:
ds = TestProperExitDataset(12, worker_error_event)
loader = DataLoader(
ds,
batch_size=1,
shuffle=False,
num_workers=num_workers,
pin_memory=pin_memory,
worker_init_fn=set_faulthander_if_available,
persistent_workers=persistent_workers,
)
error_it = 2
if use_workers:
# 2 is the magical per-worker prefetch number...
# FIXME: change this after the number becomes configurable.
if is_iterable_dataset:
assert len(ds) * num_workers > (error_it + 2 + 1)
else:
assert len(loader) > (error_it + 2 + 1) * num_workers
else:
if is_iterable_dataset:
assert len(ds) > error_it + 1
else:
assert len(loader) > error_it + 1
it = iter(loader)
if use_workers:
workers = it._workers
def kill_pid(pid):
psutil_p = psutil.Process(pid)
psutil_p.kill()
psutil_p.wait(JOIN_TIMEOUT)
assert not psutil_p.is_running()
for i, _ in enumerate(it):
if i == 0:
if not hold_iter_reference:
del it
del loader
loader_setup_event.set()
tester_setup_event.wait()
# ensure that the workers are still alive
if use_workers:
for w in workers:
assert w.is_alive()
if worker_error_event is not None:
worker_error_event.set()
if i == error_it:
if exit_method == "loader_error":
raise RuntimeError("Loader error")
elif exit_method == "loader_kill":
kill_pid(os.getpid())
elif exit_method == "worker_kill":
kill_pid(workers[-1].pid) # kill last worker
if not hold_iter_reference:
# Tries to trigger the __del__ clean-up rather than the automatic
# exiting of daemonic children. Technically it should be automatically
# triggered, but I don't want to rely on the implementation detail of
# Python gc.
gc.collect()
| TestProperExitIterableDataset |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 8359,
"end": 8516
} | class ____(models.Model):
uuid_primary_key = models.UUIDField(primary_key=True, default=uuid.uuid1)
field1 = models.CharField(max_length=30)
| UUIDPlainA |
python | fluentpython__example-code | 06-dp-1class-func/classic_strategy.py | {
"start": 2468,
"end": 2784
} | class ____(Promotion): # second Concrete Strategy
"""10% discount for each LineItem with 20 or more units"""
def discount(self, order):
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
| BulkItemPromo |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/optics/pyoptic.py | {
"start": 17094,
"end": 17627
} | class ____(QtCore.QObject):
"""
Simple ray tracer.
Initialize with a list of rays and optics;
calling trace() will cause rays to be extended by propagating them through
each optic in sequence.
"""
def __init__(self, rays, optics):
QtCore.QObject.__init__(self)
self.optics = optics
self.rays = rays
for o in self.optics:
o.sigStateChanged.connect(self.trace)
self.trace()
def trace(self):
trace(self.rays, self.optics)
| Tracer |
python | huggingface__transformers | examples/modular-transformers/configuration_duplicated_method.py | {
"start": 733,
"end": 9723
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DuplicatedMethodModel`]. It is used to instantiate an DuplicatedMethod
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the DuplicatedMethod-7B.
e.g. [meta-duplicated_method/DuplicatedMethod-2-7b-hf](https://huggingface.co/meta-duplicated_method/DuplicatedMethod-2-7b-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the DuplicatedMethod model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DuplicatedMethodModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. DuplicatedMethod 1 supports up to 2048 tokens,
DuplicatedMethod 2 up to 4096, CodeLlama up to 16384.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
pretraining_tp (`int`, *optional*, defaults to 1):
Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
mlp_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
head_dim (`int`, *optional*):
The attention head dimension. If None, it will default to hidden_size // num_attention_heads
```python
>>> from transformers import DuplicatedMethodModel, DuplicatedMethodConfig
>>> # Initializing a DuplicatedMethod duplicated_method-7b style configuration
>>> configuration = DuplicatedMethodConfig()
>>> # Initializing a model from the duplicated_method-7b style configuration
>>> model = DuplicatedMethodModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "duplicated_method"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `DuplicatedMethodModel`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 11008,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 2048,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
pretraining_tp: Optional[int] = 1,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters | dict[RopeParameters]] = None,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
mlp_bias: Optional[bool] = False,
head_dim: Optional[int] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.mlp_bias = mlp_bias
self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
# Try to set `rope_scaling` if available, otherwise use `rope_parameters`
rope_scaling = kwargs.pop("rope_scaling", None)
self.rope_parameters = rope_scaling or rope_parameters
# Validate the correctness of rotary position embeddings parameters
rope_theta = kwargs.get("rope_theta", 10000.0)
standardize_rope_params(self, rope_theta=rope_theta)
rope_config_validation(self)
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
@property
def vocab_size(self):
return 45
@vocab_size.setter
def vocab_size(self, value):
self.vocab_size = value
| DuplicatedMethodConfig |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 3787,
"end": 4574
} | class ____(DagsterError):
"""Indicates that you have attempted to construct a Pythonic config or resource class with an invalid value."""
def __init__(
self,
config_class: Optional[type],
field_name: Optional[str],
invalid_type: Any,
is_resource: bool = False,
**kwargs,
):
self.invalid_type = invalid_type
self.field_name = field_name
self.config_class = config_class
super().__init__(
_generate_pythonic_config_error_message(
config_class=config_class,
field_name=field_name,
invalid_type=invalid_type,
is_resource=is_resource,
),
**kwargs,
)
| DagsterInvalidPythonicConfigDefinitionError |
python | pytest-dev__pytest | src/_pytest/_code/code.py | {
"start": 16230,
"end": 30168
} | class ____(Generic[E]):
"""Wraps sys.exc_info() objects and offers help for navigating the traceback."""
_assert_start_repr: ClassVar = "AssertionError('assert "
_excinfo: tuple[type[E], E, TracebackType] | None
_striptext: str
_traceback: Traceback | None
def __init__(
self,
excinfo: tuple[type[E], E, TracebackType] | None,
striptext: str = "",
traceback: Traceback | None = None,
*,
_ispytest: bool = False,
) -> None:
check_ispytest(_ispytest)
self._excinfo = excinfo
self._striptext = striptext
self._traceback = traceback
@classmethod
def from_exception(
cls,
# Ignoring error: "Cannot use a covariant type variable as a parameter".
# This is OK to ignore because this class is (conceptually) readonly.
# See https://github.com/python/mypy/issues/7049.
exception: E, # type: ignore[misc]
exprinfo: str | None = None,
) -> ExceptionInfo[E]:
"""Return an ExceptionInfo for an existing exception.
The exception must have a non-``None`` ``__traceback__`` attribute,
otherwise this function fails with an assertion error. This means that
the exception must have been raised, or added a traceback with the
:py:meth:`~BaseException.with_traceback()` method.
:param exprinfo:
A text string helping to determine if we should strip
``AssertionError`` from the output. Defaults to the exception
message/``__str__()``.
.. versionadded:: 7.4
"""
assert exception.__traceback__, (
"Exceptions passed to ExcInfo.from_exception(...)"
" must have a non-None __traceback__."
)
exc_info = (type(exception), exception, exception.__traceback__)
return cls.from_exc_info(exc_info, exprinfo)
@classmethod
def from_exc_info(
cls,
exc_info: tuple[type[E], E, TracebackType],
exprinfo: str | None = None,
) -> ExceptionInfo[E]:
"""Like :func:`from_exception`, but using old-style exc_info tuple."""
_striptext = ""
if exprinfo is None and isinstance(exc_info[1], AssertionError):
exprinfo = getattr(exc_info[1], "msg", None)
if exprinfo is None:
exprinfo = saferepr(exc_info[1])
if exprinfo and exprinfo.startswith(cls._assert_start_repr):
_striptext = "AssertionError: "
return cls(exc_info, _striptext, _ispytest=True)
@classmethod
def from_current(cls, exprinfo: str | None = None) -> ExceptionInfo[BaseException]:
"""Return an ExceptionInfo matching the current traceback.
.. warning::
Experimental API
:param exprinfo:
A text string helping to determine if we should strip
``AssertionError`` from the output. Defaults to the exception
message/``__str__()``.
"""
tup = sys.exc_info()
assert tup[0] is not None, "no current exception"
assert tup[1] is not None, "no current exception"
assert tup[2] is not None, "no current exception"
exc_info = (tup[0], tup[1], tup[2])
return ExceptionInfo.from_exc_info(exc_info, exprinfo)
@classmethod
def for_later(cls) -> ExceptionInfo[E]:
"""Return an unfilled ExceptionInfo."""
return cls(None, _ispytest=True)
def fill_unfilled(self, exc_info: tuple[type[E], E, TracebackType]) -> None:
"""Fill an unfilled ExceptionInfo created with ``for_later()``."""
assert self._excinfo is None, "ExceptionInfo was already filled"
self._excinfo = exc_info
@property
def type(self) -> type[E]:
"""The exception class."""
assert self._excinfo is not None, (
".type can only be used after the context manager exits"
)
return self._excinfo[0]
@property
def value(self) -> E:
"""The exception value."""
assert self._excinfo is not None, (
".value can only be used after the context manager exits"
)
return self._excinfo[1]
@property
def tb(self) -> TracebackType:
"""The exception raw traceback."""
assert self._excinfo is not None, (
".tb can only be used after the context manager exits"
)
return self._excinfo[2]
@property
def typename(self) -> str:
"""The type name of the exception."""
assert self._excinfo is not None, (
".typename can only be used after the context manager exits"
)
return self.type.__name__
@property
def traceback(self) -> Traceback:
"""The traceback."""
if self._traceback is None:
self._traceback = Traceback(self.tb)
return self._traceback
@traceback.setter
def traceback(self, value: Traceback) -> None:
self._traceback = value
def __repr__(self) -> str:
if self._excinfo is None:
return "<ExceptionInfo for raises contextmanager>"
return f"<{self.__class__.__name__} {saferepr(self._excinfo[1])} tblen={len(self.traceback)}>"
def exconly(self, tryshort: bool = False) -> str:
"""Return the exception as a string.
When 'tryshort' resolves to True, and the exception is an
AssertionError, only the actual exception part of the exception
representation is returned (so 'AssertionError: ' is removed from
the beginning).
"""
def _get_single_subexc(
eg: BaseExceptionGroup[BaseException],
) -> BaseException | None:
if len(eg.exceptions) != 1:
return None
if isinstance(e := eg.exceptions[0], BaseExceptionGroup):
return _get_single_subexc(e)
return e
if (
tryshort
and isinstance(self.value, BaseExceptionGroup)
and (subexc := _get_single_subexc(self.value)) is not None
):
return f"{subexc!r} [single exception in {type(self.value).__name__}]"
lines = format_exception_only(self.type, self.value)
text = "".join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext) :]
return text
def errisinstance(self, exc: EXCEPTION_OR_MORE) -> bool:
"""Return True if the exception is an instance of exc.
Consider using ``isinstance(excinfo.value, exc)`` instead.
"""
return isinstance(self.value, exc)
def _getreprcrash(self) -> ReprFileLocation | None:
# Find last non-hidden traceback entry that led to the exception of the
# traceback, or None if all hidden.
for i in range(-1, -len(self.traceback) - 1, -1):
entry = self.traceback[i]
if not entry.ishidden(self):
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
exconly = self.exconly(tryshort=True)
return ReprFileLocation(path, lineno + 1, exconly)
return None
def getrepr(
self,
showlocals: bool = False,
style: TracebackStyle = "long",
abspath: bool = False,
tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] = True,
funcargs: bool = False,
truncate_locals: bool = True,
truncate_args: bool = True,
chain: bool = True,
) -> ReprExceptionInfo | ExceptionChainRepr:
"""Return str()able representation of this exception info.
:param bool showlocals:
Show locals per traceback entry.
Ignored if ``style=="native"``.
:param str style:
long|short|line|no|native|value traceback style.
:param bool abspath:
If paths should be changed to absolute or left unchanged.
:param tbfilter:
A filter for traceback entries.
* If false, don't hide any entries.
* If true, hide internal entries and entries that contain a local
variable ``__tracebackhide__ = True``.
* If a callable, delegates the filtering to the callable.
Ignored if ``style`` is ``"native"``.
:param bool funcargs:
Show fixtures ("funcargs" for legacy purposes) per traceback entry.
:param bool truncate_locals:
With ``showlocals==True``, make sure locals can be safely represented as strings.
:param bool truncate_args:
With ``showargs==True``, make sure args can be safely represented as strings.
:param bool chain:
If chained exceptions in Python 3 should be shown.
.. versionchanged:: 3.9
Added the ``chain`` parameter.
"""
if style == "native":
return ReprExceptionInfo(
reprtraceback=ReprTracebackNative(
format_exception(
self.type,
self.value,
self.traceback[0]._rawentry if self.traceback else None,
)
),
reprcrash=self._getreprcrash(),
)
fmt = FormattedExcinfo(
showlocals=showlocals,
style=style,
abspath=abspath,
tbfilter=tbfilter,
funcargs=funcargs,
truncate_locals=truncate_locals,
truncate_args=truncate_args,
chain=chain,
)
return fmt.repr_excinfo(self)
def match(self, regexp: str | re.Pattern[str]) -> Literal[True]:
"""Check whether the regular expression `regexp` matches the string
representation of the exception using :func:`python:re.search`.
If it matches `True` is returned, otherwise an `AssertionError` is raised.
"""
__tracebackhide__ = True
value = stringify_exception(self.value)
msg = (
f"Regex pattern did not match.\n"
f" Expected regex: {regexp!r}\n"
f" Actual message: {value!r}"
)
if regexp == value:
msg += "\n Did you mean to `re.escape()` the regex?"
assert re.search(regexp, value), msg
# Return True to allow for "assert excinfo.match()".
return True
def _group_contains(
self,
exc_group: BaseExceptionGroup[BaseException],
expected_exception: EXCEPTION_OR_MORE,
match: str | re.Pattern[str] | None,
target_depth: int | None = None,
current_depth: int = 1,
) -> bool:
"""Return `True` if a `BaseExceptionGroup` contains a matching exception."""
if (target_depth is not None) and (current_depth > target_depth):
# already descended past the target depth
return False
for exc in exc_group.exceptions:
if isinstance(exc, BaseExceptionGroup):
if self._group_contains(
exc, expected_exception, match, target_depth, current_depth + 1
):
return True
if (target_depth is not None) and (current_depth != target_depth):
# not at the target depth, no match
continue
if not isinstance(exc, expected_exception):
continue
if match is not None:
value = stringify_exception(exc)
if not re.search(match, value):
continue
return True
return False
def group_contains(
self,
expected_exception: EXCEPTION_OR_MORE,
*,
match: str | re.Pattern[str] | None = None,
depth: int | None = None,
) -> bool:
"""Check whether a captured exception group contains a matching exception.
:param Type[BaseException] | Tuple[Type[BaseException]] expected_exception:
The expected exception type, or a tuple if one of multiple possible
exception types are expected.
:param str | re.Pattern[str] | None match:
If specified, a string containing a regular expression,
or a regular expression object, that is tested against the string
representation of the exception and its `PEP-678 <https://peps.python.org/pep-0678/>` `__notes__`
using :func:`re.search`.
To match a literal string that may contain :ref:`special characters
<re-syntax>`, the pattern can first be escaped with :func:`re.escape`.
:param Optional[int] depth:
If `None`, will search for a matching exception at any nesting depth.
If >= 1, will only match an exception if it's at the specified depth (depth = 1 being
the exceptions contained within the topmost exception group).
.. versionadded:: 8.0
.. warning::
This helper makes it easy to check for the presence of specific exceptions,
but it is very bad for checking that the group does *not* contain
*any other exceptions*.
You should instead consider using :class:`pytest.RaisesGroup`
"""
msg = "Captured exception is not an instance of `BaseExceptionGroup`"
assert isinstance(self.value, BaseExceptionGroup), msg
msg = "`depth` must be >= 1 if specified"
assert (depth is None) or (depth >= 1), msg
return self._group_contains(self.value, expected_exception, match, depth)
# Type alias for the `tbfilter` setting:
# bool: If True, it should be filtered using Traceback.filter()
# callable: A callable that takes an ExceptionInfo and returns the filtered traceback.
TracebackFilter: TypeAlias = bool | Callable[[ExceptionInfo[BaseException]], Traceback]
@dataclasses.dataclass
| ExceptionInfo |
python | bokeh__bokeh | src/bokeh/util/token.py | {
"start": 8197,
"end": 8391
} | class ____(json.JSONEncoder):
def default(self, o: Any) -> Any:
if isinstance(o, bytes):
return dict(bytes=_base64_encode(o))
return super().default(o)
| _BytesEncoder |
python | google__jax | jax/_src/pallas/pipelining/schedule_api.py | {
"start": 1372,
"end": 2066
} | class ____:
"""Constructs a synchronous pipeline stage."""
def __init__(self, func, max_in_flight: int):
self.func = func
self.max_in_flight = max_in_flight
def trace(
self, abstract_refs, state_avals, grid
) -> internal.PipelineStage:
jaxpr, effs = trace_fun(
self.func, abstract_refs, state_avals, grid
)
name = getattr(self.func, "__name__", str(self.func))
return internal.PipelineStage(
jaxpr=jaxpr,
effects=set(effs),
properties=internal.SchedulingProperties(
max_in_flight=self.max_in_flight,
is_async_start=False,
is_async_done=False,
),
name=name,
)
| SyncStage |
python | sympy__sympy | sympy/physics/quantum/operator.py | {
"start": 14728,
"end": 19657
} | class ____(Operator):
"""An operator for representing the differential operator, i.e. d/dx
It is initialized by passing two arguments. The first is an arbitrary
expression that involves a function, such as ``Derivative(f(x), x)``. The
second is the function (e.g. ``f(x)``) which we are to replace with the
``Wavefunction`` that this ``DifferentialOperator`` is applied to.
Parameters
==========
expr : Expr
The arbitrary expression which the appropriate Wavefunction is to be
substituted into
func : Expr
A function (e.g. f(x)) which is to be replaced with the appropriate
Wavefunction when this DifferentialOperator is applied
Examples
========
You can define a completely arbitrary expression and specify where the
Wavefunction is to be substituted
>>> from sympy import Derivative, Function, Symbol
>>> from sympy.physics.quantum.operator import DifferentialOperator
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy.physics.quantum.qapply import qapply
>>> f = Function('f')
>>> x = Symbol('x')
>>> d = DifferentialOperator(1/x*Derivative(f(x), x), f(x))
>>> w = Wavefunction(x**2, x)
>>> d.function
f(x)
>>> d.variables
(x,)
>>> qapply(d*w)
Wavefunction(2, x)
"""
@property
def variables(self):
"""
Returns the variables with which the function in the specified
arbitrary expression is evaluated
Examples
========
>>> from sympy.physics.quantum.operator import DifferentialOperator
>>> from sympy import Symbol, Function, Derivative
>>> x = Symbol('x')
>>> f = Function('f')
>>> d = DifferentialOperator(1/x*Derivative(f(x), x), f(x))
>>> d.variables
(x,)
>>> y = Symbol('y')
>>> d = DifferentialOperator(Derivative(f(x, y), x) +
... Derivative(f(x, y), y), f(x, y))
>>> d.variables
(x, y)
"""
return self.args[-1].args
@property
def function(self):
"""
Returns the function which is to be replaced with the Wavefunction
Examples
========
>>> from sympy.physics.quantum.operator import DifferentialOperator
>>> from sympy import Function, Symbol, Derivative
>>> x = Symbol('x')
>>> f = Function('f')
>>> d = DifferentialOperator(Derivative(f(x), x), f(x))
>>> d.function
f(x)
>>> y = Symbol('y')
>>> d = DifferentialOperator(Derivative(f(x, y), x) +
... Derivative(f(x, y), y), f(x, y))
>>> d.function
f(x, y)
"""
return self.args[-1]
@property
def expr(self):
"""
Returns the arbitrary expression which is to have the Wavefunction
substituted into it
Examples
========
>>> from sympy.physics.quantum.operator import DifferentialOperator
>>> from sympy import Function, Symbol, Derivative
>>> x = Symbol('x')
>>> f = Function('f')
>>> d = DifferentialOperator(Derivative(f(x), x), f(x))
>>> d.expr
Derivative(f(x), x)
>>> y = Symbol('y')
>>> d = DifferentialOperator(Derivative(f(x, y), x) +
... Derivative(f(x, y), y), f(x, y))
>>> d.expr
Derivative(f(x, y), x) + Derivative(f(x, y), y)
"""
return self.args[0]
@property
def free_symbols(self):
"""
Return the free symbols of the expression.
"""
return self.expr.free_symbols
def _apply_operator_Wavefunction(self, func, **options):
from sympy.physics.quantum.state import Wavefunction
var = self.variables
wf_vars = func.args[1:]
f = self.function
new_expr = self.expr.subs(f, func(*var))
new_expr = new_expr.doit()
return Wavefunction(new_expr, *wf_vars)
def _eval_derivative(self, symbol):
new_expr = Derivative(self.expr, symbol)
return DifferentialOperator(new_expr, self.args[-1])
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print(self, printer, *args):
return '%s(%s)' % (
self._print_operator_name(printer, *args),
self._print_label(printer, *args)
)
def _print_pretty(self, printer, *args):
pform = self._print_operator_name_pretty(printer, *args)
label_pform = self._print_label_pretty(printer, *args)
label_pform = prettyForm(
*label_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(label_pform))
return pform
| DifferentialOperator |
python | huggingface__transformers | src/transformers/models/segformer/modeling_segformer.py | {
"start": 11516,
"end": 13327
} | class ____(nn.Module):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config, hidden_size, num_attention_heads, drop_path, sequence_reduction_ratio, mlp_ratio):
super().__init__()
self.layer_norm_1 = nn.LayerNorm(hidden_size)
self.attention = SegformerAttention(
config,
hidden_size=hidden_size,
num_attention_heads=num_attention_heads,
sequence_reduction_ratio=sequence_reduction_ratio,
)
self.drop_path = SegformerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.layer_norm_2 = nn.LayerNorm(hidden_size)
mlp_hidden_size = int(hidden_size * mlp_ratio)
self.mlp = SegformerMixFFN(config, in_features=hidden_size, hidden_features=mlp_hidden_size)
def forward(self, hidden_states, height, width, output_attentions=False):
self_attention_outputs = self.attention(
self.layer_norm_1(hidden_states), # in Segformer, layernorm is applied before self-attention
height,
width,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
# first residual connection (with stochastic depth)
attention_output = self.drop_path(attention_output)
hidden_states = attention_output + hidden_states
mlp_output = self.mlp(self.layer_norm_2(hidden_states), height, width)
# second residual connection (with stochastic depth)
mlp_output = self.drop_path(mlp_output)
layer_output = mlp_output + hidden_states
outputs = (layer_output,) + outputs
return outputs
| SegformerLayer |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1130196,
"end": 1130630
} | class ____(ScaleInvalidDataShowAsangle):
"""
ScaleInvalidDataShowAsValueangle schema wrapper.
Parameters
----------
value : float
The rotation angle of the text, in degrees.
"""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAsValue<"angle">'}
def __init__(self, value: Optional[float] = Undefined, **kwds):
super().__init__(value=value, **kwds)
| ScaleInvalidDataShowAsValueangle |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_alloy_db.py | {
"start": 26486,
"end": 31254
} | class ____:
def setup_method(self):
self.operator = AlloyDBDeleteClusterOperator(
task_id=TEST_TASK_ID,
cluster_id=TEST_CLUSTER_ID,
etag=TEST_ETAG,
force=TEST_FORCE,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
gcp_conn_id=TEST_GCP_CONN_ID,
request_id=TEST_REQUEST_ID,
validate_request=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_init(self):
assert self.operator.cluster_id == TEST_CLUSTER_ID
assert self.operator.etag == TEST_ETAG
assert self.operator.force == TEST_FORCE
def test_template_fields(self):
expected_template_fields = {"cluster_id", "etag", "force"} | set(
AlloyDBWriteBaseOperator.template_fields
)
assert set(AlloyDBDeleteClusterOperator.template_fields) == expected_template_fields
@mock.patch(OPERATOR_MODULE_PATH.format("AlloyDBDeleteClusterOperator.get_operation_result"))
@mock.patch(OPERATOR_MODULE_PATH.format("AlloyDBDeleteClusterOperator.log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute(self, mock_hook, mock_log, mock_get_operation_result):
mock_delete_cluster = mock_hook.return_value.delete_cluster
mock_operation = mock_delete_cluster.return_value
mock_context = mock.MagicMock()
result = self.operator.execute(context=mock_context)
mock_delete_cluster.assert_called_once_with(
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
etag=TEST_ETAG,
force=TEST_FORCE,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result is None
mock_log.info.assert_has_calls(
[
call("Deleting an AlloyDB cluster."),
call("AlloyDB cluster %s was successfully removed.", TEST_CLUSTER_ID),
]
)
@mock.patch(OPERATOR_MODULE_PATH.format("AlloyDBDeleteClusterOperator.get_operation_result"))
@mock.patch(OPERATOR_MODULE_PATH.format("AlloyDBDeleteClusterOperator.log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_validate_request(self, mock_hook, mock_log, mock_get_operation_result):
mock_delete_cluster = mock_hook.return_value.delete_cluster
mock_operation = mock_delete_cluster.return_value
mock_context = mock.MagicMock()
self.operator.validate_request = True
result = self.operator.execute(context=mock_context)
mock_delete_cluster.assert_called_once_with(
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
etag=TEST_ETAG,
force=TEST_FORCE,
request_id=TEST_REQUEST_ID,
validate_only=True,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_operation_result.assert_called_once_with(mock_operation)
assert result is None
mock_log.info.assert_called_once_with("Validating a Delete AlloyDB cluster request.")
@mock.patch(OPERATOR_MODULE_PATH.format("AlloyDBDeleteClusterOperator.get_operation_result"))
@mock.patch(OPERATOR_MODULE_PATH.format("AlloyDBDeleteClusterOperator.log"))
@mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock)
def test_execute_exception(self, mock_hook, mock_log, mock_get_operation_result):
mock_delete_cluster = mock_hook.return_value.delete_cluster
mock_delete_cluster.side_effect = Exception
mock_context = mock.MagicMock()
with pytest.raises(AirflowException):
_ = self.operator.execute(context=mock_context)
mock_delete_cluster.assert_called_once_with(
cluster_id=TEST_CLUSTER_ID,
project_id=TEST_GCP_PROJECT,
location=TEST_GCP_REGION,
etag=TEST_ETAG,
force=TEST_FORCE,
request_id=TEST_REQUEST_ID,
validate_only=TEST_VALIDATE_ONLY,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert not mock_get_operation_result.called
mock_log.info.assert_called_once_with("Deleting an AlloyDB cluster.")
| TestAlloyDBDeleteClusterOperator |
python | python__mypy | mypyc/test/test_emitfunc.py | {
"start": 34278,
"end": 36172
} | class ____(unittest.TestCase):
def setUp(self) -> None:
self.arg = RuntimeArg("arg", int_rprimitive)
self.reg = Register(int_rprimitive, "arg")
self.block = BasicBlock(0)
def test_simple(self) -> None:
self.block.ops.append(Return(self.reg))
fn = FuncIR(
FuncDecl("myfunc", None, "mod", FuncSignature([self.arg], int_rprimitive)),
[self.reg],
[self.block],
)
value_names = generate_names_for_ir(fn.arg_regs, fn.blocks)
emitter = Emitter(EmitterContext(NameGenerator([["mod"]])), value_names)
generate_native_function(fn, emitter, "prog.py", "prog")
result = emitter.fragments
assert_string_arrays_equal(
["CPyTagged CPyDef_myfunc(CPyTagged cpy_r_arg) {\n", " return cpy_r_arg;\n", "}\n"],
result,
msg="Generated code invalid",
)
def test_register(self) -> None:
reg = Register(int_rprimitive)
op = Assign(reg, Integer(5))
self.block.ops.append(op)
self.block.ops.append(Unreachable())
fn = FuncIR(
FuncDecl("myfunc", None, "mod", FuncSignature([self.arg], list_rprimitive)),
[self.reg],
[self.block],
)
value_names = generate_names_for_ir(fn.arg_regs, fn.blocks)
emitter = Emitter(EmitterContext(NameGenerator([["mod"]])), value_names)
generate_native_function(fn, emitter, "prog.py", "prog")
result = emitter.fragments
assert_string_arrays_equal(
[
"PyObject *CPyDef_myfunc(CPyTagged cpy_r_arg) {\n",
" CPyTagged cpy_r_r0;\n",
" cpy_r_r0 = 10;\n",
" CPy_Unreachable();\n",
"}\n",
],
result,
msg="Generated code invalid",
)
| TestGenerateFunction |
python | PrefectHQ__prefect | src/prefect/serializers.py | {
"start": 5445,
"end": 7831
} | class ____(Serializer[D]):
"""
Serializes data to JSON.
Input types must be compatible with the stdlib json library.
Wraps the `json` library to serialize to UTF-8 bytes instead of string types.
"""
type: str = Field(default="json", frozen=True)
jsonlib: str = "json"
object_encoder: Optional[str] = Field(
default="prefect.serializers.prefect_json_object_encoder",
description=(
"An optional callable to use when serializing objects that are not "
"supported by the JSON encoder. By default, this is set to a callable that "
"adds support for all types supported by "
),
)
object_decoder: Optional[str] = Field(
default="prefect.serializers.prefect_json_object_decoder",
description=(
"An optional callable to use when deserializing objects. This callable "
"is passed each dictionary encountered during JSON deserialization. "
"By default, this is set to a callable that deserializes content created "
"by our default `object_encoder`."
),
)
dumps_kwargs: dict[str, Any] = Field(default_factory=dict)
loads_kwargs: dict[str, Any] = Field(default_factory=dict)
@field_validator("dumps_kwargs")
def dumps_kwargs_cannot_contain_default(
cls, value: dict[str, Any]
) -> dict[str, Any]:
return validate_dump_kwargs(value)
@field_validator("loads_kwargs")
def loads_kwargs_cannot_contain_object_hook(
cls, value: dict[str, Any]
) -> dict[str, Any]:
return validate_load_kwargs(value)
def dumps(self, obj: D) -> bytes:
json = from_qualified_name(self.jsonlib)
kwargs = self.dumps_kwargs.copy()
if self.object_encoder:
kwargs["default"] = from_qualified_name(self.object_encoder)
result = json.dumps(obj, **kwargs)
if isinstance(result, str):
# The standard library returns str but others may return bytes directly
result = result.encode()
return result
def loads(self, blob: bytes) -> D:
json = from_qualified_name(self.jsonlib)
kwargs = self.loads_kwargs.copy()
if self.object_decoder:
kwargs["object_hook"] = from_qualified_name(self.object_decoder)
return json.loads(blob.decode(), **kwargs)
| JSONSerializer |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 83667,
"end": 94948
} | class ____(BigBirdPegasusPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BigBirdPegasusDecoderLayer`]
Args:
config: BigBirdPegasusConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: BigBirdPegasusConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = BigBirdPegasusScaledWordEmbedding(
config.vocab_size, config.d_model, self.padding_idx, embed_scale=embed_scale
)
self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList(
[BigBirdPegasusDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]
)
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# retrieve input_ids and inputs_embeds
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
# initialize `past_key_values`
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
batch_size, seq_length = inputs_embeds.size()[:-1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if attention_mask is None and not is_torchdynamo_compiling():
# required mask seq length can be calculated via length of past cache
mask_seq_length = past_key_values_length + seq_length
attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)
self_attn_cache = (
past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values
)
attention_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=self_attn_cache,
)
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
# embed positions
positions = self.embed_positions(input, past_key_values_length, position_ids=cache_position)
positions = positions.to(inputs_embeds.device)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layernorm_embedding(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@auto_docstring
| BigBirdPegasusDecoder |
python | google__jax | jax/experimental/mosaic/gpu/constraints.py | {
"start": 1635,
"end": 1817
} | class ____(Constant):
"""Wraps a known TMEM layout."""
value: tcgen05.TMEMLayout
def __str__(self):
return f"C({self.value})"
@dataclasses.dataclass(frozen=True)
| TMEMLayout |
python | python-markdown__markdown | markdown/inlinepatterns.py | {
"start": 16944,
"end": 18093
} | class ____(InlineProcessor):
""" Return a `<code>` element containing the escaped matching text. """
def __init__(self, pattern: str):
InlineProcessor.__init__(self, pattern)
self.ESCAPED_BSLASH = '{}{}{}'.format(util.STX, ord('\\'), util.ETX)
self.tag = 'code'
""" The tag of the rendered element. """
def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str, int, int]:
"""
If the match contains `group(3)` of a pattern, then return a `code`
[`Element`][xml.etree.ElementTree.Element] which contains HTML escaped text (with
[`code_escape`][markdown.util.code_escape]) as an [`AtomicString`][markdown.util.AtomicString].
If the match does not contain `group(3)` then return the text of `group(1)` backslash escaped.
"""
if m.group(3):
el = etree.Element(self.tag)
el.text = util.AtomicString(util.code_escape(m.group(3).strip()))
return el, m.start(0), m.end(0)
else:
return m.group(1).replace('\\\\', self.ESCAPED_BSLASH), m.start(0), m.end(0)
| BacktickInlineProcessor |
python | django__django | tests/model_forms/models.py | {
"start": 9619,
"end": 10079
} | class ____(models.CharField):
def __init__(self, *args, **kwargs):
kwargs["max_length"] = 20
super().__init__(*args, **kwargs)
def formfield(self, **kwargs):
# don't allow this field to be used in form (real use-case might be
# that you know the markup will always be X, but it is among an app
# that allows the user to say it could be something else)
# regressed at r10062
return None
| MarkupField |
python | pytest-dev__pytest | testing/test_assertion.py | {
"start": 13137,
"end": 14216
} | class ____:
def test_pytest_assertrepr_compare_called(self, pytester: Pytester) -> None:
pytester.makeconftest(
"""
import pytest
values = []
def pytest_assertrepr_compare(op, left, right):
values.append((op, left, right))
@pytest.fixture
def list(request):
return values
"""
)
pytester.makepyfile(
"""
def test_hello():
assert 0 == 1
def test_check(list):
assert list == [("==", 0, 1)]
"""
)
result = pytester.runpytest("-v")
result.stdout.fnmatch_lines(["*test_hello*FAIL*", "*test_check*PASS*"])
def callop(op: str, left: Any, right: Any, verbose: int = 0) -> list[str] | None:
config = mock_config(verbose=verbose)
return plugin.pytest_assertrepr_compare(config, op, left, right)
def callequal(left: Any, right: Any, verbose: int = 0) -> list[str] | None:
return callop("==", left, right, verbose)
| TestBinReprIntegration |
python | PyCQA__pylint | pylint/pyreverse/diagrams.py | {
"start": 2082,
"end": 10104
} | class ____(Figure, FilterMixIn):
"""Main class diagram handling."""
TYPE = "class"
def __init__(self, title: str, mode: str) -> None:
FilterMixIn.__init__(self, mode)
Figure.__init__(self)
self.title = title
# TODO: Specify 'Any' after refactor of `DiagramEntity`
self.objects: list[Any] = []
self.relationships: dict[str, list[Relationship]] = {}
self._nodes: dict[nodes.NodeNG, DiagramEntity] = {}
def get_relationships(self, role: str) -> Iterable[Relationship]:
# sorted to get predictable (hence testable) results
return sorted(
self.relationships.get(role, ()),
key=lambda x: (x.from_object.fig_id, x.to_object.fig_id),
)
def add_relationship(
self,
from_object: DiagramEntity,
to_object: DiagramEntity,
relation_type: str,
name: str | None = None,
) -> None:
"""Create a relationship."""
rel = Relationship(from_object, to_object, relation_type, name)
self.relationships.setdefault(relation_type, []).append(rel)
def get_relationship(
self, from_object: DiagramEntity, relation_type: str
) -> Relationship:
"""Return a relationship or None."""
for rel in self.relationships.get(relation_type, ()):
if rel.from_object is from_object:
return rel
raise KeyError(relation_type)
def get_attrs(self, node: nodes.ClassDef) -> list[str]:
"""Return visible attributes, possibly with class name."""
attrs = []
# Collect functions decorated with @property
properties = {
local_name: local_node
for local_name, local_node in node.items()
if isinstance(local_node, nodes.FunctionDef)
and decorated_with_property(local_node)
}
# Add instance attributes to properties
for attr_name, attr_type in list(node.locals_type.items()) + list(
node.instance_attrs_type.items()
):
if attr_name not in properties:
properties[attr_name] = attr_type
for node_name, associated_nodes in properties.items():
if not self.show_attr(node_name):
continue
# Handle property methods differently to correctly extract return type
if isinstance(
associated_nodes, nodes.FunctionDef
) and decorated_with_property(associated_nodes):
if associated_nodes.returns:
type_annotation = get_annotation_label(associated_nodes.returns)
node_name = f"{node_name} : {type_annotation}"
# Handle regular attributes
else:
names = self.class_names(associated_nodes)
if names:
node_name = f"{node_name} : {', '.join(names)}"
attrs.append(node_name)
return sorted(attrs)
def get_methods(self, node: nodes.ClassDef) -> list[nodes.FunctionDef]:
"""Return visible methods."""
methods = [
m
for m in node.values()
if isinstance(m, nodes.FunctionDef)
and not isinstance(m, objects.Property)
and not decorated_with_property(m)
and self.show_attr(m.name)
]
return sorted(methods, key=lambda n: n.name)
def add_object(self, title: str, node: nodes.ClassDef) -> None:
"""Create a diagram object."""
assert node not in self._nodes
ent = ClassEntity(title, node)
self._nodes[node] = ent
self.objects.append(ent)
def class_names(self, nodes_lst: Iterable[nodes.NodeNG]) -> list[str]:
"""Return class names if needed in diagram."""
names = []
for node in nodes_lst:
if isinstance(node, astroid.Instance):
node = node._proxied
if (
isinstance(
node, (nodes.ClassDef, nodes.Name, nodes.Subscript, nodes.BinOp)
)
and hasattr(node, "name")
and not self.has_node(node)
):
if node.name not in names:
node_name = node.name
names.append(node_name)
# sorted to get predictable (hence testable) results
return sorted(
name
for name in names
if all(name not in other or name == other for other in names)
)
def has_node(self, node: nodes.NodeNG) -> bool:
"""Return true if the given node is included in the diagram."""
return node in self._nodes
def object_from_node(self, node: nodes.NodeNG) -> DiagramEntity:
"""Return the diagram object mapped to node."""
return self._nodes[node]
def classes(self) -> list[ClassEntity]:
"""Return all class nodes in the diagram."""
return [o for o in self.objects if isinstance(o, ClassEntity)]
def classe(self, name: str) -> ClassEntity:
"""Return a class by its name, raise KeyError if not found."""
for klass in self.classes():
if klass.node.name == name:
return klass
raise KeyError(name)
def extract_relationships(self) -> None:
"""Extract relationships between nodes in the diagram."""
for obj in self.classes():
node = obj.node
obj.attrs = self.get_attrs(node)
obj.methods = self.get_methods(node)
obj.shape = "class"
# inheritance link
for par_node in node.ancestors(recurs=False):
try:
par_obj = self.object_from_node(par_node)
self.add_relationship(obj, par_obj, "specialization")
except KeyError:
continue
# Track processed attributes to avoid duplicates
processed_attrs = set()
# Process in priority order: Composition > Aggregation > Association
# 1. Composition links (highest priority)
for name, values in list(node.compositions_type.items()):
if not self.show_attr(name):
continue
for value in values:
self.assign_association_relationship(
value, obj, name, "composition"
)
processed_attrs.add(name)
# 2. Aggregation links (medium priority)
for name, values in list(node.aggregations_type.items()):
if not self.show_attr(name) or name in processed_attrs:
continue
for value in values:
self.assign_association_relationship(
value, obj, name, "aggregation"
)
processed_attrs.add(name)
# 3. Association links (lowest priority)
associations = node.associations_type.copy()
for name, values in node.locals_type.items():
if name not in associations:
associations[name] = values
for name, values in associations.items():
if not self.show_attr(name) or name in processed_attrs:
continue
for value in values:
self.assign_association_relationship(
value, obj, name, "association"
)
def assign_association_relationship(
self, value: nodes.NodeNG, obj: ClassEntity, name: str, type_relationship: str
) -> None:
if isinstance(value, util.UninferableBase):
return
if isinstance(value, astroid.Instance):
value = value._proxied
try:
associated_obj = self.object_from_node(value)
self.add_relationship(associated_obj, obj, type_relationship, name)
except KeyError:
return
| ClassDiagram |
python | kamyu104__LeetCode-Solutions | Python/verify-preorder-sequence-in-binary-search-tree.py | {
"start": 29,
"end": 465
} | class ____(object):
# @param {integer[]} preorder
# @return {boolean}
def verifyPreorder(self, preorder):
low, i = float("-inf"), -1
for p in preorder:
if p < low:
return False
while i >= 0 and p > preorder[i]:
low = preorder[i]
i -= 1
i += 1
preorder[i] = p
return True
# Time: O(n)
# Space: O(h)
| Solution |
python | huggingface__transformers | src/transformers/models/mpt/modeling_mpt.py | {
"start": 6166,
"end": 6934
class ____(nn.Module):
    """MPT feed-forward block: Linear(d -> 4d, no bias) -> exact GELU ->
    Linear(4d -> d, no bias), then dropout and a residual add."""
    def __init__(self, config: MptConfig):
        super().__init__()
        hidden_size = config.hidden_size
        self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False)
        self.act = nn.GELU(approximate="none")
        self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False)
        # Dropout probability is taken from the attention sub-config.
        self.hidden_dropout = config.attn_config.attn_pdrop
    def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
        hidden_states = self.act(self.up_proj(hidden_states))
        intermediate_output = self.down_proj(hidden_states)
        # Dropout is active only while self.training is True.
        output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training)
        output = output + residual
        return output
| MptMLP |
python | kamyu104__LeetCode-Solutions | Python/sum-of-weighted-modes-in-subarrays.py | {
"start": 905,
"end": 1521
} | class ____(object):
def modeWeight(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
cnt = collections.defaultdict(int)
max_heap = []
result = 0
for i in xrange(len(nums)):
cnt[nums[i]] += 1
heapq.heappush(max_heap, (-cnt[nums[i]], nums[i]))
if i >= k-1:
while -max_heap[0][0] != cnt[max_heap[0][1]]:
heapq.heappop(max_heap)
result += -max_heap[0][0]*max_heap[0][1]
cnt[nums[i-k+1]] -= 1
return result
| Solution2 |
python | scipy__scipy | scipy/optimize/_optimize.py | {
"start": 18446,
"end": 40469
} | class ____(RuntimeError):
pass
def _wrap_scalar_function_maxfun_validation(function, args, maxfun):
# wraps a minimizer function to count number of evaluations
# and to easily provide an args kwd.
ncalls = [0]
if function is None:
return ncalls, None
def function_wrapper(x, *wrapper_args):
if ncalls[0] >= maxfun:
raise _MaxFuncCallError("Too many function calls")
ncalls[0] += 1
# A copy of x is sent to the user function (gh13740)
fx = function(np.copy(x), *(wrapper_args + args))
# Ideally, we'd like to a have a true scalar returned from f(x). For
# backwards-compatibility, also allow np.array([1.3]),
# np.array([[1.3]]) etc.
if not np.isscalar(fx):
try:
fx = np.asarray(fx).item()
except (TypeError, ValueError) as e:
raise ValueError("The user-provided objective function "
"must return a scalar value.") from e
return fx
return ncalls, function_wrapper
def fmin(func, x0, args=(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None, initial_simplex=None):
"""
Minimize a function using the downhill simplex algorithm.
This algorithm only uses function values, not derivatives or second
derivatives.
Parameters
----------
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to func, i.e., ``f(x,*args)``.
xtol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
ftol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
maxiter : int, optional
Maximum number of iterations to perform.
maxfun : number, optional
Maximum number of function evaluations to make.
full_output : bool, optional
Set to True if fopt and warnflag outputs are desired.
disp : bool, optional
Set to True to print convergence messages.
retall : bool, optional
Set to True to return list of solutions at each iteration.
callback : callable, optional
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
initial_simplex : array_like of shape (N + 1, N), optional
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the jth vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
Returns
-------
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'Nelder-Mead' `method` in particular.
Notes
-----
Uses a Nelder-Mead simplex algorithm to find the minimum of function of
one or more variables.
This algorithm has a long history of successful use in applications.
But it will usually be slower than an algorithm that uses first or
second derivative information. In practice, it can have poor
performance in high-dimensional problems and is not robust to
minimizing complicated functions. Additionally, there currently is no
complete theory describing when the algorithm will successfully
converge to the minimum, or how fast it will if it does. Both the ftol and
xtol criteria must be met for convergence.
Examples
--------
>>> def f(x):
... return x**2
>>> from scipy import optimize
>>> minimum = optimize.fmin(f, 1)
Optimization terminated successfully.
Current function value: 0.000000
Iterations: 17
Function evaluations: 34
>>> minimum[0]
-8.8817841970012523e-16
References
----------
.. [1] Nelder, J.A. and Mead, R. (1965), "A simplex method for function
minimization", The Computer Journal, 7, pp. 308-313
.. [2] Wright, M.H. (1996), "Direct Search Methods: Once Scorned, Now
Respectable", in Numerical Analysis 1995, Proceedings of the
1995 Dundee Biennial Conference in Numerical Analysis, D.F.
Griffiths and G.A. Watson (Eds.), Addison Wesley Longman,
Harlow, UK, pp. 191-208.
"""
opts = {'xatol': xtol,
'fatol': ftol,
'maxiter': maxiter,
'maxfev': maxfun,
'disp': disp,
'return_all': retall,
'initial_simplex': initial_simplex}
callback = _wrap_callback(callback)
res = _minimize_neldermead(func, x0, args, callback=callback, **opts)
if full_output:
retlist = res['x'], res['fun'], res['nit'], res['nfev'], res['status']
if retall:
retlist += (res['allvecs'], )
return retlist
else:
if retall:
return res['x'], res['allvecs']
else:
return res['x']
def _minimize_neldermead(func, x0, args=(), callback=None,
maxiter=None, maxfev=None, disp=False,
return_all=False, initial_simplex=None,
xatol=1e-4, fatol=1e-4, adaptive=False, bounds=None,
**unknown_options):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
disp : bool
Set to True to print convergence messages.
maxiter, maxfev : int
Maximum allowed number of iterations and function evaluations.
Will default to ``N*200``, where ``N`` is the number of
variables, if neither `maxiter` or `maxfev` is set. If both
`maxiter` and `maxfev` are set, minimization will stop at the
first reached.
return_all : bool, optional
Set to True to return a list of the best solution at each of the
iterations.
initial_simplex : array_like of shape (N + 1, N)
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the jth vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
xatol : float, optional
Absolute error in xopt between iterations that is acceptable for
convergence.
fatol : number, optional
Absolute error in func(xopt) between iterations that is acceptable for
convergence.
adaptive : bool, optional
Adapt algorithm parameters to dimensionality of problem. Useful for
high-dimensional minimization [1]_.
bounds : sequence or `Bounds`, optional
Bounds on variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
Note that this just clips all vertices in simplex based on
the bounds.
References
----------
.. [1] Gao, F. and Han, L.
Implementing the Nelder-Mead simplex algorithm with adaptive
parameters. 2012. Computational Optimization and Applications.
51:1, pp. 259-277
"""
_check_unknown_options(unknown_options)
maxfun = maxfev
retall = return_all
x0 = np.atleast_1d(x0).flatten()
dtype = x0.dtype if np.issubdtype(x0.dtype, np.inexact) else np.float64
x0 = np.asarray(x0, dtype=dtype)
if adaptive:
dim = float(len(x0))
rho = 1
chi = 1 + 2/dim
psi = 0.75 - 1/(2*dim)
sigma = 1 - 1/dim
else:
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
nonzdelt = 0.05
zdelt = 0.00025
if bounds is not None:
lower_bound, upper_bound = bounds.lb, bounds.ub
# check bounds
if (lower_bound > upper_bound).any():
raise ValueError("Nelder Mead - one of the lower bounds "
"is greater than an upper bound.")
if np.any(lower_bound > x0) or np.any(x0 > upper_bound):
warnings.warn("Initial guess is not within the specified bounds",
OptimizeWarning, stacklevel=3)
if bounds is not None:
x0 = np.clip(x0, lower_bound, upper_bound)
if initial_simplex is None:
N = len(x0)
sim = np.empty((N + 1, N), dtype=x0.dtype)
sim[0] = x0
for k in range(N):
y = np.array(x0, copy=True)
if y[k] != 0:
y[k] = (1 + nonzdelt)*y[k]
else:
y[k] = zdelt
sim[k + 1] = y
else:
sim = np.atleast_2d(initial_simplex).copy()
dtype = sim.dtype if np.issubdtype(sim.dtype, np.inexact) else np.float64
sim = np.asarray(sim, dtype=dtype)
if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
raise ValueError("`initial_simplex` should be an array of shape (N+1,N)")
if len(x0) != sim.shape[1]:
raise ValueError("Size of `initial_simplex` is not consistent with `x0`")
N = sim.shape[1]
if retall:
allvecs = [sim[0]]
# If neither are set, then set both to default
if maxiter is None and maxfun is None:
maxiter = N * 200
maxfun = N * 200
elif maxiter is None:
# Convert remaining Nones, to np.inf, unless the other is np.inf, in
# which case use the default to avoid unbounded iteration
if maxfun == np.inf:
maxiter = N * 200
else:
maxiter = np.inf
elif maxfun is None:
if maxiter == np.inf:
maxfun = N * 200
else:
maxfun = np.inf
if bounds is not None:
# The default simplex construction may make all entries (for a given
# parameter) greater than an upper bound if x0 is very close to the
# upper bound. If one simply clips the simplex to the bounds this could
# make the simplex entries degenerate. If that occurs reflect into the
# interior.
msk = sim > upper_bound
# reflect into the interior
sim = np.where(msk, 2*upper_bound - sim, sim)
# but make sure the reflection is no less than the lower_bound
sim = np.clip(sim, lower_bound, upper_bound)
one2np1 = list(range(1, N + 1))
fsim = np.full((N + 1,), np.inf, dtype=float)
fcalls, func = _wrap_scalar_function_maxfun_validation(func, args, maxfun)
try:
for k in range(N + 1):
fsim[k] = func(sim[k])
except _MaxFuncCallError:
pass
finally:
ind = np.argsort(fsim)
sim = np.take(sim, ind, 0)
fsim = np.take(fsim, ind, 0)
ind = np.argsort(fsim)
fsim = np.take(fsim, ind, 0)
# sort so sim[0,:] has the lowest function value
sim = np.take(sim, ind, 0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
try:
if (np.max(np.ravel(np.abs(sim[1:] - sim[0]))) <= xatol and
np.max(np.abs(fsim[0] - fsim[1:])) <= fatol):
break
xbar = np.add.reduce(sim[:-1], 0) / N
xr = (1 + rho) * xbar - rho * sim[-1]
if bounds is not None:
xr = np.clip(xr, lower_bound, upper_bound)
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1 + rho * chi) * xbar - rho * chi * sim[-1]
if bounds is not None:
xe = np.clip(xe, lower_bound, upper_bound)
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
if bounds is not None:
xc = np.clip(xc, lower_bound, upper_bound)
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
else:
# Perform an inside contraction
xcc = (1 - psi) * xbar + psi * sim[-1]
if bounds is not None:
xcc = np.clip(xcc, lower_bound, upper_bound)
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
if bounds is not None:
sim[j] = np.clip(
sim[j], lower_bound, upper_bound)
fsim[j] = func(sim[j])
iterations += 1
except _MaxFuncCallError:
pass
ind = np.argsort(fsim)
sim = np.take(sim, ind, 0)
fsim = np.take(fsim, ind, 0)
if retall:
allvecs.append(sim[0])
intermediate_result = OptimizeResult(x=sim[0], fun=fsim[0])
if _call_callback_maybe_halt(callback, intermediate_result):
break
x = sim[0]
fval = np.min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
msg = _status_message['maxfev']
if disp:
warnings.warn(msg, RuntimeWarning, stacklevel=3)
elif iterations >= maxiter:
warnflag = 2
msg = _status_message['maxiter']
if disp:
warnings.warn(msg, RuntimeWarning, stacklevel=3)
else:
msg = _status_message['success']
if disp:
print(msg)
print(f" Current function value: {fval:f}")
print(f" Iterations: {iterations:d}")
print(f" Function evaluations: {fcalls[0]:d}")
result = OptimizeResult(fun=fval, nit=iterations, nfev=fcalls[0],
status=warnflag, success=(warnflag == 0),
message=msg, x=x, final_simplex=(sim, fsim))
if retall:
result['allvecs'] = allvecs
return result
def approx_fprime(xk, f, epsilon=_epsilon, *args):
"""Finite difference approximation of the derivatives of a
scalar or vector-valued function.
If a function maps from :math:`R^n` to :math:`R^m`, its derivatives form
an m-by-n matrix
called the Jacobian, where an element :math:`(i, j)` is a partial
derivative of f[i] with respect to ``xk[j]``.
Parameters
----------
xk : array_like
The coordinate vector at which to determine the gradient of `f`.
f : callable
Function of which to estimate the derivatives of. Has the signature
``f(xk, *args)`` where `xk` is the argument in the form of a 1-D array
and `args` is a tuple of any additional fixed parameters needed to
completely specify the function. The argument `xk` passed to this
function is an ndarray of shape (n,) (never a scalar even if n=1).
It must return a 1-D array_like of shape (m,) or a scalar.
Suppose the callable has signature ``f0(x, *my_args, **my_kwargs)``, where
``my_args`` and ``my_kwargs`` are required positional and keyword arguments.
Rather than passing ``f0`` as the callable, wrap it to accept
only ``x``; e.g., pass ``fun=lambda x: f0(x, *my_args, **my_kwargs)`` as the
callable, where ``my_args`` (tuple) and ``my_kwargs`` (dict) have been
gathered before invoking this function.
.. versionchanged:: 1.9.0
`f` is now able to return a 1-D array-like, with the :math:`(m, n)`
Jacobian being estimated.
epsilon : {float, array_like}, optional
Increment to `xk` to use for determining the function gradient.
If a scalar, uses the same finite difference delta for all partial
derivatives. If an array, should contain one value per element of
`xk`. Defaults to ``sqrt(np.finfo(float).eps)``, which is approximately
1.49e-08.
\\*args : args, optional
Any other arguments that are to be passed to `f`.
Returns
-------
jac : ndarray
The partial derivatives of `f` to `xk`.
See Also
--------
check_grad : Check correctness of gradient function against approx_fprime.
Notes
-----
The function gradient is determined by the forward finite difference
formula::
f(xk[i] + epsilon[i]) - f(xk[i])
f'[i] = ---------------------------------
epsilon[i]
Examples
--------
>>> import numpy as np
>>> from scipy import optimize
>>> def func(x, c0, c1):
... "Coordinate vector `x` should be an array of size two."
... return c0 * x[0]**2 + c1*x[1]**2
>>> x = np.ones(2)
>>> c0, c1 = (1, 200)
>>> eps = np.sqrt(np.finfo(float).eps)
>>> optimize.approx_fprime(x, func, [eps, np.sqrt(200) * eps], c0, c1)
array([ 2. , 400.00004208])
"""
xk = np.asarray(xk, float)
f0 = f(xk, *args)
return approx_derivative(f, xk, method='2-point', abs_step=epsilon,
args=args, f0=f0)
@_transition_to_rng("seed", position_num=6)
def check_grad(func, grad, x0, *args, epsilon=_epsilon,
direction='all', rng=None):
r"""Check the correctness of a gradient function by comparing it against a
(forward) finite-difference approximation of the gradient.
Parameters
----------
func : callable ``func(x0, *args)``
Function whose derivative is to be checked.
grad : callable ``grad(x0, *args)``
Jacobian of `func`.
x0 : ndarray
Points to check `grad` against forward difference approximation of grad
using `func`.
args : \\*args, optional
Extra arguments passed to `func` and `grad`.
epsilon : float, optional
Step size used for the finite difference approximation. It defaults to
``sqrt(np.finfo(float).eps)``, which is approximately 1.49e-08.
direction : str, optional
If set to ``'random'``, then gradients along a random vector
are used to check `grad` against forward difference approximation
using `func`. By default it is ``'all'``, in which case, all
the one hot direction vectors are considered to check `grad`.
If `func` is a vector valued function then only ``'all'`` can be used.
rng : `numpy.random.Generator`, optional
Pseudorandom number generator state. When `rng` is None, a new
`numpy.random.Generator` is created using entropy from the
operating system. Types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a ``Generator``.
The random numbers generated affect the random vector along which gradients
are computed to check ``grad``. Note that `rng` is only used when `direction`
argument is set to `'random'`.
Returns
-------
err : float
The square root of the sum of squares (i.e., the 2-norm) of the
difference between ``grad(x0, *args)`` and the finite difference
approximation of `grad` using func at the points `x0`.
See Also
--------
approx_fprime
Examples
--------
>>> import numpy as np
>>> def func(x):
... return x[0]**2 - 0.5 * x[1]**3
>>> def grad(x):
... return [2 * x[0], -1.5 * x[1]**2]
>>> from scipy.optimize import check_grad
>>> check_grad(func, grad, [1.5, -1.5])
2.9802322387695312e-08 # may vary
>>> rng = np.random.default_rng()
>>> check_grad(func, grad, [1.5, -1.5],
... direction='random', seed=rng)
2.9802322387695312e-08
"""
step = epsilon
x0 = np.asarray(x0)
def g(w, func, x0, v, *args):
return func(x0 + w*v, *args)
if direction == 'random':
_grad = np.asanyarray(grad(x0, *args))
if _grad.ndim > 1:
raise ValueError("'random' can only be used with scalar valued"
" func")
rng_gen = check_random_state(rng)
v = rng_gen.standard_normal(size=(x0.shape))
_args = (func, x0, v) + args
_func = g
vars = np.zeros((1,))
analytical_grad = np.dot(_grad, v)
elif direction == 'all':
_args = args
_func = func
vars = x0
analytical_grad = grad(x0, *args)
else:
raise ValueError(f"{direction} is not a valid string for "
"``direction`` argument")
return np.sqrt(np.sum(np.abs(
(analytical_grad - approx_fprime(vars, _func, step, *_args))**2
)))
def approx_fhess_p(x0, p, fprime, epsilon, *args):
    """Forward-difference approximation of the Hessian-vector product
    ``H(x0) @ p`` from two gradient evaluations."""
    # Evaluate fprime(x0) first, as this may be cached by ScalarFunction.
    grad_at_x0 = fprime(x0, *args)
    grad_shifted = fprime(x0 + epsilon * p, *args)
    return (grad_shifted - grad_at_x0) / epsilon
| _MaxFuncCallError |
python | huggingface__transformers | tests/models/flava/test_modeling_flava.py | {
"start": 35722,
"end": 42001
} | class ____(FlavaModelTester):
model_class = FlavaForPreTraining
def prepare_config_and_inputs_for_common(self):
_, pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs()
_, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs()
config = self.get_config()
input_ids_masked = input_ids.detach().clone()
input_ids_masked[:, 1:3] = 100
mlm_labels = input_ids.detach().clone()
mlm_labels[:, :] = config.ce_ignore_index
mlm_labels[:, 1:3] = input_ids[:, 1:3]
mim_labels = torch.randint(
0, self.image_model_tester.vocab_size, bool_masked_pos.size(), device=bool_masked_pos.device
).long()
mim_labels[bool_masked_pos.ne(True)] = config.ce_ignore_index
itm_labels = torch.ones(mlm_labels.size(0), device=bool_masked_pos.device).long()
return config, {
"input_ids": input_ids,
"input_ids_masked": input_ids_masked,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"pixel_values": pixel_values,
"bool_masked_pos": bool_masked_pos,
"mlm_labels": mlm_labels,
"mim_labels": mim_labels,
"itm_labels": itm_labels,
"return_loss": True,
}
def _test_model(self, config, inputs, test_image=False, test_text=False):
model = self.model_class(config).to(torch_device).eval()
with torch.no_grad():
result = model(
input_ids=inputs["input_ids"] if test_text else None,
input_ids_masked=inputs["input_ids_masked"] if test_text else None,
attention_mask=inputs["attention_mask"] if test_text else None,
token_type_ids=inputs["token_type_ids"] if test_text else None,
pixel_values=inputs["pixel_values"] if test_image else None,
bool_masked_pos=inputs["bool_masked_pos"] if test_image else None,
mlm_labels=inputs["mlm_labels"],
mim_labels=inputs["mim_labels"],
itm_labels=inputs["itm_labels"],
return_loss=inputs["return_loss"],
)
image_size = (self.image_model_tester.image_size, self.image_model_tester.image_size)
patch_size = (self.image_model_tester.patch_size, self.image_model_tester.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
if test_image:
self.parent.assertEqual(
result.image_embeddings.shape,
(self.image_model_tester.batch_size, num_patches + 1, self.image_model_tester.hidden_size),
)
if not test_text:
self.parent.assertEqual(
result.loss_info.mim.dim(),
0,
)
self.parent.assertEqual(
result.mim_logits.shape,
(inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size),
)
else:
self.parent.assertIsNone(result.image_embeddings)
if test_text:
self.parent.assertEqual(
result.text_embeddings.shape,
(
self.text_model_tester.batch_size,
self.text_model_tester.seq_length,
self.text_model_tester.hidden_size,
),
)
if not test_image:
self.parent.assertEqual(result.loss_info.mlm.dim(), 0)
self.parent.assertEqual(
result.mlm_logits.shape,
(
(inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(),
self.text_model_tester.vocab_size,
),
)
else:
self.parent.assertIsNone(result.text_embeddings)
if test_image and test_text:
self.parent.assertEqual(
result.multimodal_masked_embeddings.shape,
(
self.multimodal_model_tester.batch_size,
self.text_model_tester.seq_length + num_patches + 2,
self.multimodal_model_tester.hidden_size,
),
)
self.parent.assertEqual(
result.itm_logits.shape,
(self.text_model_tester.batch_size, 2),
)
self.parent.assertEqual(
result.mmm_text_logits.shape,
(
(inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(),
self.text_model_tester.vocab_size,
),
)
self.parent.assertEqual(
result.mmm_image_logits.shape,
(inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size),
)
self.parent.assertEqual(
result.contrastive_logits_per_image.shape,
(self.image_model_tester.batch_size, self.text_model_tester.batch_size),
)
self.parent.assertEqual(
result.contrastive_logits_per_text.shape,
(self.text_model_tester.batch_size, self.image_model_tester.batch_size),
)
for item in [
result.loss_info.global_contrastive,
result.loss_info.itm,
result.loss_info.mmm_text,
result.loss_info.mmm_image,
]:
self.parent.assertEqual(item.dim(), 0)
for item in [result.loss_info.mim, result.loss_info.mlm]:
self.parent.assertIsNone(item)
else:
self.parent.assertIsNone(result.multimodal_masked_embeddings)
for item in [
result.loss_info.global_contrastive,
result.loss_info.itm,
result.loss_info.mmm_text,
result.loss_info.mmm_image,
]:
self.parent.assertIsNone(item)
self.parent.assertIsNone(result.multimodal_embeddings)
@require_torch
| FlavaForPreTrainingTester |
python | facebook__pyre-check | scripts/build_pypi_sanity_test.py | {
"start": 386,
"end": 6165
} | class ____(Exception):
pass
def production_assert(value: bool, *args: Any) -> None:
    """Assertion that survives ``python -O``: raise ``AssertionError(*args)``
    whenever ``value`` is falsy."""
    if value:
        return
    raise AssertionError(*args)
def validate_configuration(temporary_project_path: Path) -> None:
    """Check that `pyre init` wrote a parseable `.pyre_configuration` and that
    any explicit `typeshed`/`binary` entries point at real filesystem
    locations.  Raises AssertionError on any failure."""
    configuration_path = temporary_project_path / ".pyre_configuration"
    try:
        configuration = json.loads(configuration_path.read_text())
    except json.JSONDecodeError:
        # Surface parse failures as sanity-test assertion failures.
        raise AssertionError(f"Invalid configuration at `{configuration_path}`")
    LOG.warning(f"Successfully created configuration at `{configuration_path}`:")
    LOG.warning(json.dumps(configuration, indent=2))
    # Confirm configuration explicit typeshed and binary are valid. Missing fields are
    # expected if typeshed and binary can be found in a standard location.
    typeshed_path = configuration.get("typeshed")
    if typeshed_path:
        typeshed_path = Path(typeshed_path)
        production_assert(typeshed_path.is_dir(), "Explicit typeshed path is invalid.")
        production_assert(
            (typeshed_path / "stdlib").is_dir(),
            "`stdlib` was not included in typeshed.",
        )
    binary_path = configuration.get("binary")
    if binary_path:
        binary_path = Path(binary_path)
        production_assert(binary_path.is_file(), "Explicit binary path is invalid.")
def run_sanity_test(version: str, use_wheel: bool) -> None:
message = "wheel" if use_wheel else "source distribution"
LOG.warning(f"Sanity testing {message}")
with tempfile.TemporaryDirectory() as temporary_venv:
venv = Path(temporary_venv)
builder = EnvBuilder(system_site_packages=False, clear=True, with_pip=True)
builder.create(venv)
pyre_path = venv / "bin" / "pyre"
pyre_bin_path = venv / "bin" / "pyre.bin"
pyre_upgrade_path = venv / "bin" / "pyre-upgrade"
# Confirm that pypi package can be successfully installed
LOG.warning("Testing PyPi package installation...")
wheel_flag = "--only-binary" if use_wheel else "--no-binary"
subprocess.run(
[
venv / "bin" / "pip",
"install",
"--proxy=http://fwdproxy:8080/",
"--index-url",
"https://test.pypi.org/simple/",
"--extra-index-url",
"https://pypi.org/simple",
wheel_flag,
"pyre-check",
f"pyre-check=={version}",
]
)
production_assert(pyre_path.exists(), "Pyre (client) was not installed.")
production_assert(
pyre_bin_path.exists(),
"Pyre binary (pyre.bin executable) was not installed.",
)
production_assert(pyre_upgrade_path.exists(), "Pyre upgrade was not installed.")
# Create test project.
with tempfile.TemporaryDirectory() as temporary_project:
temporary_project_path = Path(temporary_project)
python_file_path = temporary_project_path / "a.py"
python_file_path.touch()
python_file_path.write_text("# pyre-strict \ndef foo():\n\treturn 1")
# Confirm we can run `pyre init` successfully.
LOG.warning("Testing `pyre init`...")
init_process = subprocess.run(
[str(pyre_path), "init"],
cwd=temporary_project_path,
input=b"n\n.\n",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
error_message = init_process.stderr.decode()
production_assert(
init_process.returncode == 0,
f"Failed to run `pyre init` successfully: {error_message}",
)
validate_configuration(temporary_project_path)
# Confirm `pyre` reports errors as expected.
LOG.warning("Testing `pyre` error reporting...")
result = subprocess.run(
[pyre_path, "--binary", pyre_bin_path, "--output=json", "check"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=temporary_project_path,
)
try:
errors = json.loads(result.stdout)
except json.JSONDecodeError:
error_message = result.stderr.decode()
raise AssertionError(
f"Pyre did not successfully finish type checking: {error_message}"
)
production_assert(
errors and errors[0]["name"] == "Missing return annotation",
"Incorrect pyre errors returned."
if errors
else "Expected pyre errors but none returned.",
)
# Confirm `pyre-upgrade` runs successfully.
LOG.warning("Testing `pyre upgrade`...")
upgrade_process = subprocess.run(
[str(pyre_upgrade_path), "fixme"],
cwd=temporary_project_path,
input=b"[]",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
error_message = upgrade_process.stderr.decode()
production_assert(
upgrade_process.returncode == 0,
f"Failed to run `pyre-upgrade` successfully: {error_message}",
)
def main() -> None:
parser = argparse.ArgumentParser(
description="Test wheel & source distribution for basic functionality."
)
parser.add_argument("version", type=str)
arguments = parser.parse_args()
version: str = arguments.version
run_sanity_test(version, use_wheel=True)
# TODO(T94611472): Fix PyPi source distribution
# run_sanity_test(version, use_wheel=False)
if __name__ == "__main__":
main()
| AssertionError |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-gaudi/llama_index/llms/gaudi/base.py | {
"start": 835,
"end": 18273
} | class ____(HuggingFaceLLM):
r"""
GaudiLLM LLM.
Examples:
`pip install llama-index-llms-gaudi`
```python
from llama_index.llms.gaudi import GaudiLLM
import argparse
import os, logging
def setup_parser(parser):
# Arguments management
parser.add_argument(
"--device", "-d", type=str, choices=["hpu"], help="Device to run", default="hpu"
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
# required=True,
help="Path to pre-trained model (on the HF Hub or locally).",
)
parser.add_argument(
"--bf16",
default=True,
action="store_true",
help="Whether to perform generation in bf16 precision.",
)
parser.add_argument(
"--max_new_tokens", type=int, default=100, help="Number of tokens to generate."
)
parser.add_argument(
"--max_input_tokens",
type=int,
default=0,
help="If > 0 then pad and truncate the input sequences to this specified length of tokens. \
if == 0, then truncate to 16 (original default) \
if < 0, then do not truncate, use full input prompt",
)
parser.add_argument("--batch_size", type=int, default=1, help="Input batch size.")
parser.add_argument(
"--warmup",
type=int,
default=3,
help="Number of warmup iterations for benchmarking.",
)
parser.add_argument(
"--n_iterations",
type=int,
default=5,
help="Number of inference iterations for benchmarking.",
)
parser.add_argument(
"--local_rank", type=int, default=0, metavar="N", help="Local process rank."
)
parser.add_argument(
"--use_kv_cache",
default=True,
action="store_true",
help="Whether to use the key/value cache for decoding. It should speed up generation.",
)
parser.add_argument(
"--use_hpu_graphs",
default=True,
action="store_true",
help="Whether to use HPU graphs or not. Using HPU graphs should give better latencies.",
)
parser.add_argument(
"--dataset_name",
default=None,
type=str,
help="Optional argument if you want to assess your model on a given dataset of the HF Hub.",
)
parser.add_argument(
"--column_name",
default=None,
type=str,
help="If `--dataset_name` was given, this will be the name of the column to use as prompts for generation.",
)
parser.add_argument(
"--do_sample",
action="store_true",
help="Whether to use sampling for generation.",
)
parser.add_argument(
"--num_beams",
default=1,
type=int,
help="Number of beams used for beam search generation. 1 means greedy search will be performed.",
)
parser.add_argument(
"--trim_logits",
action="store_true",
help="Calculate logits only for the last token to save memory in the first step.",
)
parser.add_argument(
"--seed",
default=27,
type=int,
help="Seed to use for random generation. Useful to reproduce your runs with `--do_sample`.",
)
parser.add_argument(
"--profiling_warmup_steps",
default=0,
type=int,
help="Number of steps to ignore for profiling.",
)
parser.add_argument(
"--profiling_steps",
default=0,
type=int,
help="Number of steps to capture for profiling.",
)
parser.add_argument(
"--profiling_record_shapes",
default=False,
type=bool,
help="Record shapes when enabling profiling.",
)
parser.add_argument(
"--prompt",
default=None,
type=str,
nargs="*",
help='Optional argument to give a prompt of your choice as input. Can be a single string (eg: --prompt "Hello world"), or a list of space-separated strings (eg: --prompt "Hello world" "How are you?")',
)
parser.add_argument(
"--bad_words",
default=None,
type=str,
nargs="+",
help="Optional argument list of words that are not allowed to be generated.",
)
parser.add_argument(
"--force_words",
default=None,
type=str,
nargs="+",
help="Optional argument list of words that must be generated.",
)
parser.add_argument(
"--assistant_model",
default=None,
type=str,
help="Optional argument to give a path to a draft/assistant model for assisted decoding.",
)
parser.add_argument(
"--peft_model",
default=None,
type=str,
help="Optional argument to give a path to a PEFT model.",
)
parser.add_argument("--num_return_sequences", type=int, default=1)
parser.add_argument(
"--token",
default=None,
type=str,
help="The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `huggingface-cli login` (stored in `~/.huggingface`).",
)
parser.add_argument(
"--model_revision",
default="main",
type=str,
help="The specific model version to use (can be a branch name, tag name or commit id).",
)
parser.add_argument(
"--attn_softmax_bf16",
action="store_true",
help="Whether to run attention softmax layer in lower precision provided that the model supports it and "
"is also running in lower precision.",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
help="Output directory to store results in.",
)
parser.add_argument(
"--bucket_size",
default=-1,
type=int,
help="Bucket size to maintain static shapes. If this number is negative (default is -1) \
then we use `shape = prompt_length + max_new_tokens`. If a positive number is passed \
we increase the bucket in steps of `bucket_size` instead of allocating to max (`prompt_length + max_new_tokens`).",
)
parser.add_argument(
"--bucket_internal",
action="store_true",
help="Split kv sequence into buckets in decode phase. It improves throughput when max_new_tokens is large.",
)
parser.add_argument(
"--dataset_max_samples",
default=-1,
type=int,
help="If a negative number is passed (default = -1) perform inference on the whole dataset, else use only `dataset_max_samples` samples.",
)
parser.add_argument(
"--limit_hpu_graphs",
action="store_true",
help="Skip HPU Graph usage for first token to save memory",
)
parser.add_argument(
"--reuse_cache",
action="store_true",
help="Whether to reuse key/value cache for decoding. It should save memory.",
)
parser.add_argument(
"--verbose_workers",
action="store_true",
help="Enable output from non-master workers",
)
parser.add_argument(
"--simulate_dyn_prompt",
default=None,
type=int,
nargs="*",
help="If empty, static prompt is used. If a comma separated list of integers is passed, we warmup and use those shapes for prompt length.",
)
parser.add_argument(
"--reduce_recompile",
action="store_true",
help="Preprocess on cpu, and some other optimizations. Useful to prevent recompilations when using dynamic prompts (simulate_dyn_prompt)",
)
parser.add_argument(
"--use_flash_attention",
action="store_true",
help="Whether to enable Habana Flash Attention, provided that the model supports it.",
)
parser.add_argument(
"--flash_attention_recompute",
action="store_true",
help="Whether to enable Habana Flash Attention in recompute mode on first token generation. This gives an opportunity of splitting graph internally which helps reduce memory consumption.",
)
parser.add_argument(
"--flash_attention_causal_mask",
action="store_true",
help="Whether to enable Habana Flash Attention in causal mode on first token generation.",
)
parser.add_argument(
"--flash_attention_fast_softmax",
action="store_true",
help="Whether to enable Habana Flash Attention in fast softmax mode.",
)
parser.add_argument(
"--book_source",
action="store_true",
help="Whether to use project Guttenberg books data as input. Useful for testing large sequence lengths.",
)
parser.add_argument(
"--torch_compile",
action="store_true",
help="Whether to use torch compiled model or not.",
)
parser.add_argument(
"--ignore_eos",
default=True,
action=argparse.BooleanOptionalAction,
help="Whether to ignore eos, set False to disable it",
)
parser.add_argument(
"--temperature",
default=1.0,
type=float,
help="Temperature value for text generation",
)
parser.add_argument(
"--top_p",
default=1.0,
type=float,
help="Top_p value for generating text via sampling",
)
parser.add_argument(
"--const_serialization_path",
"--csp",
type=str,
help="Path to serialize const params. Const params will be held on disk memory instead of being allocated on host memory.",
)
parser.add_argument(
"--disk_offload",
action="store_true",
help="Whether to enable device map auto. In case no space left on cpu, weights will be offloaded to disk.",
)
parser.add_argument(
"--trust_remote_code",
action="store_true",
help="Whether or not to allow for custom models defined on the Hub in their own modeling files.",
)
args = parser.parse_args()
if args.torch_compile:
args.use_hpu_graphs = False
if not args.use_hpu_graphs:
args.limit_hpu_graphs = False
args.quant_config = os.getenv("QUANT_CONFIG", "")
if args.quant_config == "" and args.disk_offload:
logger.warning(
"`--disk_offload` was tested only with fp8, it may not work with full precision. If error raises try to remove the --disk_offload flag."
)
return args
def messages_to_prompt(messages):
prompt = ""
for message in messages:
if message.role == 'system':
prompt += f"<|system|>\n{message.content}</s>\n"
elif message.role == 'user':
prompt += f"<|user|>\n{message.content}</s>\n"
elif message.role == 'assistant':
prompt += f"<|assistant|>\n{message.content}</s>\n"
# ensure we start with a system prompt, insert blank if needed
if not prompt.startswith("<|system|>\n"):
prompt = "<|system|>\n</s>\n" + prompt
# add final assistant prompt
prompt = prompt + "<|assistant|>\n"
return prompt
def completion_to_prompt(completion):
return f"<|system|>\n</s>\n<|user|>\n{completion}</s>\n<|assistant|>\n"
import torch
from llama_index.core.prompts import PromptTemplate
from llama_index.llms.optimum-intel import GaudiLLM
parser = argparse.ArgumentParser(description="GaudiLLM Basic Usage Example")
args = setup_parser(parser)
args.model_name_or_path = "HuggingFaceH4/zephyr-7b-alpha"
llm = GaudiLLM(
args=args,
logger=logger,
model_name="HuggingFaceH4/zephyr-7b-alpha",
tokenizer_name="HuggingFaceH4/zephyr-7b-alpha",
query_wrapper_prompt=PromptTemplate(
"<|system|>\n</s>\n<|user|>\n{query_str}</s>\n<|assistant|>\n"
),
context_window=3900,
max_new_tokens=256,
generate_kwargs={"temperature": 0.7, "top_k": 50, "top_p": 0.95},
messages_to_prompt=messages_to_prompt,
device_map="auto",
)
response = llm.complete("What is the meaning of life?")
print(str(response))
```
"""
model_name: str = Field(
default=DEFAULT_HUGGINGFACE_MODEL,
description=(
"The model name to use from HuggingFace. "
"Unused if `model` is passed in directly."
),
)
tokenizer_name: str = Field(
default=DEFAULT_HUGGINGFACE_MODEL,
description=(
"The name of the tokenizer to use from HuggingFace. "
"Unused if `tokenizer` is passed in directly."
),
)
def __init__(
self,
args,
logger,
context_window: int = DEFAULT_CONTEXT_WINDOW,
max_new_tokens: int = DEFAULT_NUM_OUTPUTS,
query_wrapper_prompt: Union[str, PromptTemplate] = "{query_str}",
tokenizer_name: str = DEFAULT_HUGGINGFACE_MODEL,
model_name: str = DEFAULT_HUGGINGFACE_MODEL,
model: Optional[Any] = None,
tokenizer: Optional[Any] = None,
device_map: Optional[str] = "auto",
stopping_ids: Optional[List[int]] = None,
tokenizer_kwargs: Optional[dict] = None,
tokenizer_outputs_to_remove: Optional[list] = None,
model_kwargs: Optional[dict] = None,
generate_kwargs: Optional[dict] = None,
is_chat_model: Optional[bool] = False,
callback_manager: Optional[CallbackManager] = None,
system_prompt: str = "",
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
"""Initialize params."""
model_kwargs = model_kwargs or {}
model, _, tokenizer, _ = initialize_model(args, logger)
super().__init__(
context_window=context_window,
max_new_tokens=max_new_tokens,
query_wrapper_prompt=query_wrapper_prompt,
tokenizer_name=tokenizer_name,
model_name=model_name,
model=model,
tokenizer=tokenizer,
device_map=device_map,
stopping_ids=stopping_ids or [],
tokenizer_kwargs=tokenizer_kwargs or {},
tokenizer_outputs_to_remove=tokenizer_outputs_to_remove or [],
model_kwargs=model_kwargs or {},
generate_kwargs=generate_kwargs or {},
is_chat_model=is_chat_model,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "GaudiLLM"
| GaudiLLM |
python | davidhalter__jedi | test/completion/classes.py | {
"start": 4425,
"end": 4574
} | class ____():
def __call__(self):
return 1
#? int()
CallClass()()
# -----------------
# variable assignments
# -----------------
| CallClass |
python | google__pytype | pytype/tests/test_match1.py | {
"start": 143,
"end": 4498
} | class ____(test_base.BaseTest):
"""Tests for matching types."""
def test_type_against_callable(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Callable
def f(x: Callable) -> str: ...
""",
)
ty = self.Infer(
"""
import foo
def f():
return foo.f(int)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
def f() -> str: ...
""",
)
def test_match_static(self):
ty = self.Infer("""
s = {1}
def f(x):
# set.intersection is a static method:
return s.intersection(x)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Set
s = ... # type: Set[int]
def f(x) -> Set[int]: ...
""",
)
def test_generic_hierarchy(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Iterable
def f(x: Iterable[str]) -> str: ...
""",
)
ty = self.Infer(
"""
import a
x = a.f(["a", "b", "c"])
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: str
""",
)
def test_generic(self):
with test_utils.Tempdir() as d:
d.create_file(
"a.pyi",
"""
from typing import Generic, Iterable
K = TypeVar("K")
V = TypeVar("V")
Q = TypeVar("Q")
class A(Iterable[V], Generic[K, V]): ...
class B(A[K, V]):
def __init__(self):
self = B[bool, str]
def f(x: Iterable[Q]) -> Q: ...
""",
)
ty = self.Infer(
"""
import a
x = a.f(a.B())
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import a
x = ... # type: str
""",
)
def test_match_identity_function(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import TypeVar
T = TypeVar("T")
def f(x: T) -> T: ...
""",
)
ty = self.Infer(
"""
import foo
v = foo.f(__any_object__)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
from typing import Any
import foo
v = ... # type: Any
""",
)
def test_callable_return(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Callable, TypeVar
T = TypeVar("T")
def foo(func: Callable[[], T]) -> T: ...
""",
)
self.Check(
"""
import foo
class Foo:
def __init__(self):
self.x = 42
foo.foo(Foo).x
""",
pythonpath=[d.path],
)
def test_callable_union_return(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Callable, TypeVar, Union
T1 = TypeVar("T1")
T2 = TypeVar("T2")
def foo(func: Callable[[], T1]) -> Union[T1, T2]: ...
""",
)
self.Check(
"""
import foo
class Foo:
def __init__(self):
self.x = 42
v = foo.foo(Foo)
if isinstance(v, Foo):
v.x
""",
pythonpath=[d.path],
)
def test_any_base_class(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Any
class Foo(Any): pass
class Bar: pass
def f(x: Bar) -> None: ...
""",
)
self.Check(
"""
import foo
foo.f(foo.Foo())
""",
pythonpath=[d.path],
)
def test_maybe_parameterized(self):
self.Check("""
import collections.abc
class Foo(collections.abc.MutableMapping):
pass
def f(x: Foo):
dict.__delitem__(x, __any_object__) # pytype: disable=wrong-arg-types
""")
if __name__ == "__main__":
test_base.main()
| MatchTest |
python | huggingface__transformers | src/transformers/models/blenderbot/modeling_blenderbot.py | {
"start": 3368,
"end": 4990
} | class ____(nn.Embedding):
"""
This module overrides nn.Embeddings' forward by multiplying with embeddings scale.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: Optional[float] = 1.0):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.embed_scale = embed_scale
def forward(self, input_ids: torch.Tensor):
return super().forward(input_ids) * self.embed_scale
# Copied from transformers.models.bert.modeling_bert.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Blenderbot
| BlenderbotScaledWordEmbedding |
python | getsentry__sentry | src/sentry/users/models/lostpasswordhash.py | {
"start": 718,
"end": 4157
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
user = FlexibleForeignKey(settings.AUTH_USER_MODEL, unique=True)
hash = models.CharField(max_length=32)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_lostpasswordhash"
__repr__ = sane_repr("user_id", "hash")
def save(self, *args: Any, **kwargs: Any) -> None:
if not self.hash:
self.set_hash()
super().save(*args, **kwargs)
def set_hash(self) -> None:
self.hash = get_secure_token()
def is_valid(self) -> bool:
return self.date_added > timezone.now() - timedelta(hours=1)
@classmethod
def send_recover_password_email(cls, user: User, hash: str, ip_address: str) -> None:
extra = {
"ip_address": ip_address,
}
cls._send_email("recover_password", user, hash, extra)
@classmethod
def send_relocate_account_email(
cls, user: User | RpcUser, hash: str, orgs: Iterable[str]
) -> None:
cls._send_email("relocate_account", user, hash, {"orgs": orgs})
@classmethod
def send_set_password_email(cls, user: User | RpcUser, hash: str, **kwargs: Any) -> None:
cls._send_email("set_password", user, hash, extra=kwargs)
@classmethod
def _send_email(cls, mode: str, user: User | RpcUser, hash: str, extra: dict[str, Any]) -> None:
from sentry import options
from sentry.http import get_server_hostname
from sentry.utils.email import MessageBuilder
context = {
"user": user,
"domain": get_server_hostname(),
"url": cls.get_lostpassword_url(user.id, hash, mode),
"datetime": timezone.now(),
**extra,
}
subject = "Password Recovery"
template = "recover_account"
if mode == "set_password":
subject = "Set Password for your Sentry.io Account"
template = "set_password"
elif mode == "relocate_account":
template = "relocate_account"
subject = "Set Username and Password for Your Relocated Sentry.io Account"
msg = MessageBuilder(
subject="{}{}".format(options.get("mail.subject-prefix"), subject),
template=f"sentry/emails/{template}.txt",
html_template=f"sentry/emails/{template}.html",
type="user.password_recovery",
context=context,
)
msg.send_async([user.email])
# Duplicated from RpcLostPasswordHash
def get_absolute_url(self, mode: str = "recover") -> str:
return LostPasswordHash.get_lostpassword_url(self.user_id, self.hash, mode)
@classmethod
def get_lostpassword_url(self, user_id: int, hash: str, mode: str = "recover") -> str:
url_key = "sentry-account-recover-confirm"
if mode == "set_password":
url_key = "sentry-account-set-password-confirm"
elif mode == "relocate_account":
url_key = "sentry-account-relocate-confirm"
return absolute_uri(reverse(url_key, args=[user_id, hash]))
@classmethod
def for_user(cls, user: User) -> RpcLostPasswordHash:
from sentry.users.services.lost_password_hash import lost_password_hash_service
password_hash = lost_password_hash_service.get_or_create(user_id=user.id)
return password_hash
| LostPasswordHash |
python | google__pytype | pytype/typegraph/typegraph_serializer.py | {
"start": 2358,
"end": 7352
} | class ____(json.JSONEncoder):
"""Implements the JSONEncoder behavior for typegraph objects."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._bindings: dict[int, cfg.Binding] = {}
def _encode_program(self, program: cfg.Program) -> dict[str, Any]:
# Surprisingly, program.cfg_nodes and program.variables are not guaranteed
# to be sorted. Remove this surprise by sorting them here.
cfg_nodes = sorted(
[self._encode_cfgnode(n) for n in program.cfg_nodes],
key=lambda n: n["id"],
)
variables = sorted(
[self._encode_variable(v) for v in program.variables],
key=lambda v: v["id"],
)
# After visiting every Variable, self._bindings contains every Binding.
bindings = sorted(self._bindings.values(), key=lambda b: b.id)
return {
"_type": "Program",
"cfg_nodes": cfg_nodes,
"variables": variables,
"entrypoint": program.entrypoint.id,
"bindings": [self._encode_binding(b) for b in bindings],
"queries": self._encode_queries(program),
}
def _encode_cfgnode(self, node: cfg.CFGNode) -> dict[str, Any]:
return {
"_type": "CFGNode",
"id": node.id,
"name": node.name,
"incoming": [n.id for n in node.incoming],
"outgoing": [n.id for n in node.outgoing],
"bindings": [b.id for b in node.bindings],
"condition": node.condition.id if node.condition else None,
}
def _encode_variable(self, variable: cfg.Variable) -> dict[str, Any]:
self._bindings.update((b.id, b) for b in variable.bindings)
return {
"_type": "Variable",
"id": variable.id,
"bindings": [b.id for b in variable.bindings],
}
def _encode_binding_data(self, binding: cfg.Binding) -> str:
data = binding.data
return pytd_utils.Print(data.to_pytd_type()) if data else "None"
def _encode_binding(self, binding: cfg.Binding) -> dict[str, Any]:
return {
"_type": "Binding",
"id": binding.id,
"variable": binding.variable.id,
"data": self._encode_binding_data(binding),
"origins": [self._encode_origin(o) for o in binding.origins],
}
def _encode_origin(self, origin: cfg.Origin) -> dict[str, Any]:
return {
"_type": "Origin",
"where": origin.where.id,
"source_sets": [[b.id for b in s] for s in origin.source_sets],
}
def _encode_queries(self, program: cfg.Program) -> list[dict[str, Any]]:
"""Encodes information about solver queries from a Program's metrics.
The queries are numbered in the order they were recorded.
Args:
program: a cfg.Program.
Returns:
A list of dictionaries that correspond to SerializedQuery.
"""
metrics = program.calculate_metrics()
solvers = metrics.solver_metrics
enc_queries = []
query_id = -1
for solver_idx, solver in enumerate(solvers):
for query in solver.query_metrics:
query_id += 1
steps = []
for step in query.steps:
steps.append({
"_type": "QueryStep",
"node": step.node,
"depth": step.depth,
"bindings": step.bindings,
})
enc_queries.append({
"_type": "Query",
"solver_idx": solver_idx,
"start_node": query.start_node,
"end_node": query.end_node,
"initial_binding_count": query.initial_binding_count,
"shortcircuited": query.shortcircuited,
"from_cache": query.from_cache,
"steps": steps,
})
return enc_queries
def default(self, o):
if isinstance(o, cfg.Program):
return self._encode_program(o)
elif isinstance(o, cfg.CFGNode):
return self._encode_cfgnode(o)
elif isinstance(o, cfg.Variable):
return self._encode_variable(o)
elif isinstance(o, cfg.Binding):
return self._encode_binding(o)
elif isinstance(o, cfg.Origin):
return self._encode_origin(o)
else:
return super().default(o)
_TYP_MAP = {
"Program": SerializedProgram,
"CFGNode": SerializedCFGNode,
"Variable": SerializedVariable,
"Binding": SerializedBinding,
"Origin": SerializedOrigin,
"QueryStep": SerializedQueryStep,
"Query": SerializedQuery,
}
def _decode(obj):
typ = obj.pop("_type")
return _TYP_MAP[typ](**obj)
def object_hook(obj: dict[str, Any]) -> Any:
"""An object hook for json.load that produces serialized CFG objects."""
if "_type" in obj:
return _decode(obj)
return obj
def encode_program(program: cfg.Program) -> str:
return json.dumps(program, cls=TypegraphEncoder)
def decode_program(json_str: str) -> SerializedProgram:
prog = json.loads(json_str, object_hook=object_hook)
assert isinstance(prog, SerializedProgram)
return prog
def to_serialized_program(program: cfg.Program) -> SerializedProgram:
return decode_program(encode_program(program))
| TypegraphEncoder |
python | vyperlang__vyper | vyper/ast/utils.py | {
"start": 152,
"end": 1881
} | class ____:
"""
Class to convert between character offsets in a text string, and pairs (line, column) of 1-based
line and 0-based column numbers.
Vendored from asttokens.
"""
def __init__(self, text: str) -> None:
# a list of character offsets of each line's first character
self._line_offsets = [m.start(0) for m in re.finditer(r"^", text, re.M)]
self._text_len = len(text)
def offset_to_line(self, offset: int) -> Tuple[int, int]:
"""
Converts 0-based character offset to pair (line, col) of 1-based line and 0-based column
numbers.
"""
offset = max(0, min(self._text_len, offset))
line_index = bisect.bisect_right(self._line_offsets, offset) - 1
return (line_index + 1, offset - self._line_offsets[line_index])
def ast_to_dict(ast_struct: Union[vy_ast.VyperNode, List]) -> Union[Dict, List]:
"""
Converts a Vyper AST node, or list of nodes, into a dictionary suitable for
output to the user.
"""
if isinstance(ast_struct, vy_ast.VyperNode):
return ast_struct.to_dict()
elif isinstance(ast_struct, list):
return [i.to_dict() for i in ast_struct]
else:
raise CompilerPanic(f'Unknown Vyper AST node provided: "{type(ast_struct)}".')
def dict_to_ast(ast_struct: Union[Dict, List]) -> Union[vy_ast.VyperNode, List]:
"""
Converts an AST dict, or list of dicts, into Vyper AST node objects.
"""
if isinstance(ast_struct, dict):
return vy_ast.get_node(ast_struct)
if isinstance(ast_struct, list):
return [vy_ast.get_node(i) for i in ast_struct]
raise CompilerPanic(f'Unknown ast_struct provided: "{type(ast_struct)}".')
| LineNumbers |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 143217,
"end": 143600
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(StarOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| StarOrder |
python | kamyu104__LeetCode-Solutions | Python/extra-characters-in-a-string.py | {
"start": 113,
"end": 861
} | class ____(object):
def minExtraChar(self, s, dictionary):
"""
:type s: str
:type dictionary: List[str]
:rtype: int
"""
_trie = lambda: collections.defaultdict(_trie)
trie = _trie()
for word in dictionary:
reduce(dict.__getitem__, word, trie).setdefault("_end")
dp = [float("inf")]*(len(s)+1)
dp[0] = 0
for i in xrange(len(s)):
dp[i+1] = min(dp[i+1], dp[i]+1)
curr = trie
for j in xrange(i, len(s)):
if s[j] not in curr:
break
curr = curr[s[j]]
if "_end" in curr:
dp[j+1] = min(dp[j+1], dp[i])
return dp[-1]
| Solution |
python | huggingface__transformers | tests/models/phi4_multimodal/test_modeling_phi4_multimodal.py | {
"start": 1575,
"end": 6563
} | class ____:
def __init__(
self,
parent,
batch_size=2,
seq_length=12,
image_seq_length=275,
audio_seq_length=8,
is_training=True,
num_hidden_layers=2,
vocab_size=49,
hidden_size=32,
intermediate_size=64,
num_attention_heads=8,
num_key_value_heads=4,
bos_token_id=0,
eos_token_id=0,
pad_token_id=0,
image_token_id=1,
audio_token_id=2,
image_size=16,
audio_size=12,
audio_config=Phi4MultimodalAudioConfig(
num_blocks=2,
hidden_size=32,
num_attention_heads=8,
intermediate_size=48,
depthwise_separable_out_channel=128,
nemo_conv_channels=128,
initializer_range=1e-5,
),
vision_config=Phi4MultimodalVisionConfig(
num_hidden_layers=2,
hidden_size=32,
intermediate_size=64,
num_attention_heads=8,
crop_size=16,
initializer_range=1e-5,
),
):
self.parent = parent
self.num_hidden_layers = num_hidden_layers
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.eos_token_id = eos_token_id
self.image_token_id = image_token_id
self.audio_token_id = audio_token_id
self.audio_config = audio_config
self.vision_config = vision_config
self.is_training = is_training
self.batch_size = batch_size
self.seq_length = seq_length + image_seq_length + audio_seq_length
self.image_seq_length = image_seq_length
self.audio_seq_length = audio_seq_length
self.image_size = image_size
self.audio_size = audio_size
self.num_channels = 3
def get_config(self):
return Phi4MultimodalConfig(
num_hidden_layers=self.num_hidden_layers,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
intermediate_size=self.intermediate_size,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
vision_config=self.vision_config,
audio_config=self.audio_config,
)
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# The shapes corresponds to the inputs for image of size 16x16
image_pixel_values = floats_tensor([self.batch_size, 2, self.num_channels, self.image_size, self.image_size])
image_attention_mask = torch.ones(self.batch_size, 2, 1, 1)
image_sizes = torch.tensor(
[[self.image_size, self.image_size]] * self.batch_size, dtype=torch.long, device=torch_device
)
# Feature sizes returned by an audio of size 10000
audio_input_features = floats_tensor([self.batch_size, 61, 80])
audio_embed_sizes = torch.tensor([self.audio_seq_length] * self.batch_size, dtype=torch.long)
input_ids[input_ids == self.pad_token_id] = self.pad_token_id + 1 # random value but not pad token
input_ids[-1, 0] = self.pad_token_id # mask the last text token
input_ids[:, -self.image_seq_length - self.audio_seq_length : -self.audio_seq_length] = self.image_token_id
input_ids[:, -self.audio_seq_length :] = self.audio_token_id
attention_mask = torch.ones_like(input_ids)
attention_mask[-1, 0] = 0 # mask the last text token
config = self.get_config()
return (
config,
input_ids,
attention_mask,
image_pixel_values,
image_attention_mask,
image_sizes,
audio_input_features,
audio_embed_sizes,
)
def prepare_config_and_inputs_for_common(self):
(
config,
input_ids,
attention_mask,
image_pixel_values,
image_attention_mask,
image_sizes,
audio_input_features,
audio_embed_sizes,
) = self.prepare_config_and_inputs()
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"image_pixel_values": image_pixel_values,
"image_attention_mask": image_attention_mask,
"image_sizes": image_sizes,
"audio_input_features": audio_input_features,
"audio_embed_sizes": audio_embed_sizes,
}
return config, inputs_dict
@require_torch
| Phi4MultimodalModelTester |
python | run-llama__llama_index | llama-index-core/llama_index/core/vector_stores/types.py | {
"start": 9485,
"end": 12804
} | class ____(BaseComponent, ABC):
"""Abstract vector store protocol."""
model_config = ConfigDict(arbitrary_types_allowed=True)
stores_text: bool
is_embedding_query: bool = True
@property
@abstractmethod
def client(self) -> Any:
"""Get client."""
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes from vector store."""
raise NotImplementedError("get_nodes not implemented")
async def aget_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Asynchronously get nodes from vector store."""
return self.get_nodes(node_ids, filters)
@abstractmethod
def add(
self,
nodes: Sequence[BaseNode],
**kwargs: Any,
) -> List[str]:
"""Add nodes to vector store."""
async def async_add(
self,
nodes: Sequence[BaseNode],
**kwargs: Any,
) -> List[str]:
"""
Asynchronously add nodes to vector store.
NOTE: this is not implemented for all vector stores. If not implemented,
it will just call add synchronously.
"""
return self.add(nodes, **kwargs)
@abstractmethod
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id."""
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
NOTE: this is not implemented for all vector stores. If not implemented,
it will just call delete synchronously.
"""
self.delete(ref_doc_id, **delete_kwargs)
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""Delete nodes from vector store."""
raise NotImplementedError("delete_nodes not implemented")
async def adelete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
"""Asynchronously delete nodes from vector store."""
self.delete_nodes(node_ids, filters)
def clear(self) -> None:
"""Clear all nodes from configured vector store."""
raise NotImplementedError("clear not implemented")
async def aclear(self) -> None:
"""Asynchronously clear all nodes from configured vector store."""
self.clear()
@abstractmethod
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store."""
async def aquery(
self, query: VectorStoreQuery, **kwargs: Any
) -> VectorStoreQueryResult:
"""
Asynchronously query vector store.
NOTE: this is not implemented for all vector stores. If not implemented,
it will just call query synchronously.
"""
return self.query(query, **kwargs)
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
return None
| BasePydanticVectorStore |
python | readthedocs__readthedocs.org | readthedocs/gold/views.py | {
"start": 1976,
"end": 2279
} | class ____(PrivateViewMixin):
def get_gold_user(self):
return get_object_or_404(GoldUser, user=self.request.user)
def get_gold_projects(self):
return self.get_gold_user().projects.all()
def get_success_url(self):
return reverse_lazy("gold_projects")
| GoldProjectsMixin |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 117794,
"end": 118192
} | class ____(sgqlc.types.Enum):
"""The possible team member roles; either 'maintainer' or 'member'.
Enumeration Choices:
* `MAINTAINER`: A team maintainer has permission to add and remove
team members.
* `MEMBER`: A team member has no administrative permissions on the
team.
"""
__schema__ = github_schema
__choices__ = ("MAINTAINER", "MEMBER")
| TeamMemberRole |
python | numba__numba | numba/experimental/structref.py | {
"start": 585,
"end": 9999
} | class ____:
"""Internal builder-code utils for structref definitions.
"""
def __init__(self, context, builder, struct_type):
"""
Parameters
----------
context :
a numba target context
builder :
a llvmlite IRBuilder
struct_type : numba.core.types.StructRef
"""
self.context = context
self.builder = builder
self.struct_type = struct_type
def new_struct_ref(self, mi):
"""Encapsulate the MemInfo from a `StructRefPayload` in a `StructRef`
"""
context = self.context
builder = self.builder
struct_type = self.struct_type
st = cgutils.create_struct_proxy(struct_type)(context, builder)
st.meminfo = mi
return st
def get_struct_ref(self, val):
"""Return a helper for accessing a StructRefType
"""
context = self.context
builder = self.builder
struct_type = self.struct_type
return cgutils.create_struct_proxy(struct_type)(
context, builder, value=val
)
def get_data_pointer(self, val):
"""Get the data pointer to the payload from a `StructRefType`.
"""
context = self.context
builder = self.builder
struct_type = self.struct_type
structval = self.get_struct_ref(val)
meminfo = structval.meminfo
data_ptr = context.nrt.meminfo_data(builder, meminfo)
valtype = struct_type.get_data_type()
model = context.data_model_manager[valtype]
alloc_type = model.get_value_type()
data_ptr = builder.bitcast(data_ptr, alloc_type.as_pointer())
return data_ptr
def get_data_struct(self, val):
"""Get a getter/setter helper for accessing a `StructRefPayload`
"""
context = self.context
builder = self.builder
struct_type = self.struct_type
data_ptr = self.get_data_pointer(val)
valtype = struct_type.get_data_type()
dataval = cgutils.create_struct_proxy(valtype)(
context, builder, ref=data_ptr
)
return dataval
def define_attributes(struct_typeclass):
"""Define attributes on `struct_typeclass`.
Defines both setters and getters in jit-code.
This is called directly in `register()`.
"""
@infer_getattr
class StructAttribute(AttributeTemplate):
key = struct_typeclass
def generic_resolve(self, typ, attr):
if attr in typ.field_dict:
attrty = typ.field_dict[attr]
return attrty
@lower_getattr_generic(struct_typeclass)
def struct_getattr_impl(context, builder, typ, val, attr):
utils = _Utils(context, builder, typ)
dataval = utils.get_data_struct(val)
ret = getattr(dataval, attr)
fieldtype = typ.field_dict[attr]
return imputils.impl_ret_borrowed(context, builder, fieldtype, ret)
@lower_setattr_generic(struct_typeclass)
def struct_setattr_impl(context, builder, sig, args, attr):
[inst_type, val_type] = sig.args
[instance, val] = args
utils = _Utils(context, builder, inst_type)
dataval = utils.get_data_struct(instance)
# cast val to the correct type
field_type = inst_type.field_dict[attr]
casted = context.cast(builder, val, val_type, field_type)
# read old
old_value = getattr(dataval, attr)
# incref new value
context.nrt.incref(builder, val_type, casted)
# decref old value (must be last in case new value is old value)
context.nrt.decref(builder, val_type, old_value)
# write new
setattr(dataval, attr, casted)
def define_boxing(struct_type, obj_class):
"""Define the boxing & unboxing logic for `struct_type` to `obj_class`.
Defines both boxing and unboxing.
- boxing turns an instance of `struct_type` into a PyObject of `obj_class`
- unboxing turns an instance of `obj_class` into an instance of
`struct_type` in jit-code.
Use this directly instead of `define_proxy()` when the user does not
want any constructor to be defined.
"""
if struct_type is types.StructRef:
raise ValueError(f"cannot register {types.StructRef}")
obj_ctor = obj_class._numba_box_
@box(struct_type)
def box_struct_ref(typ, val, c):
"""
Convert a raw pointer to a Python int.
"""
utils = _Utils(c.context, c.builder, typ)
struct_ref = utils.get_struct_ref(val)
meminfo = struct_ref.meminfo
mip_type = types.MemInfoPointer(types.voidptr)
boxed_meminfo = c.box(mip_type, meminfo)
ctor_pyfunc = c.pyapi.unserialize(c.pyapi.serialize_object(obj_ctor))
ty_pyobj = c.pyapi.unserialize(c.pyapi.serialize_object(typ))
res = c.pyapi.call_function_objargs(
ctor_pyfunc, [ty_pyobj, boxed_meminfo],
)
c.pyapi.decref(ctor_pyfunc)
c.pyapi.decref(ty_pyobj)
c.pyapi.decref(boxed_meminfo)
return res
@unbox(struct_type)
def unbox_struct_ref(typ, obj, c):
mi_obj = c.pyapi.object_getattr_string(obj, "_meminfo")
mip_type = types.MemInfoPointer(types.voidptr)
mi = c.unbox(mip_type, mi_obj).value
utils = _Utils(c.context, c.builder, typ)
struct_ref = utils.new_struct_ref(mi)
out = struct_ref._getvalue()
c.pyapi.decref(mi_obj)
return NativeValue(out)
def define_constructor(py_class, struct_typeclass, fields):
"""Define the jit-code constructor for `struct_typeclass` using the
Python type `py_class` and the required `fields`.
Use this instead of `define_proxy()` if the user does not want boxing
logic defined.
"""
# Build source code for the constructor
params = ', '.join(fields)
indent = ' ' * 8
init_fields_buf = []
for k in fields:
init_fields_buf.append(f"st.{k} = {k}")
init_fields = f'\n{indent}'.join(init_fields_buf)
source = f"""
def ctor({params}):
struct_type = struct_typeclass(list(zip({list(fields)}, [{params}])))
def impl({params}):
st = new(struct_type)
{init_fields}
return st
return impl
"""
glbs = dict(struct_typeclass=struct_typeclass, new=new)
exec(source, glbs)
ctor = glbs['ctor']
# Make it an overload
overload(py_class)(ctor)
def define_proxy(py_class, struct_typeclass, fields):
"""Defines a PyObject proxy for a structref.
This makes `py_class` a valid constructor for creating a instance of
`struct_typeclass` that contains the members as defined by `fields`.
Parameters
----------
py_class : type
The Python class for constructing an instance of `struct_typeclass`.
struct_typeclass : numba.core.types.Type
The structref type class to bind to.
fields : Sequence[str]
A sequence of field names.
Returns
-------
None
"""
define_constructor(py_class, struct_typeclass, fields)
define_boxing(struct_typeclass, py_class)
def register(struct_type):
"""Register a `numba.core.types.StructRef` for use in jit-code.
This defines the data-model for lowering an instance of `struct_type`.
This defines attributes accessor and mutator for an instance of
`struct_type`.
Parameters
----------
struct_type : type
A subclass of `numba.core.types.StructRef`.
Returns
-------
struct_type : type
Returns the input argument so this can act like a decorator.
Examples
--------
.. code-block::
class MyStruct(numba.core.types.StructRef):
... # the simplest subclass can be empty
numba.experimental.structref.register(MyStruct)
"""
if struct_type is types.StructRef:
raise ValueError(f"cannot register {types.StructRef}")
default_manager.register(struct_type, models.StructRefModel)
define_attributes(struct_type)
return struct_type
@intrinsic
def new(typingctx, struct_type):
"""new(struct_type)
A jit-code only intrinsic. Used to allocate an **empty** mutable struct.
The fields are zero-initialized and must be set manually after calling
the function.
Example:
instance = new(MyStruct)
instance.field = field_value
"""
from numba.experimental.jitclass.base import imp_dtor
inst_type = struct_type.instance_type
def codegen(context, builder, signature, args):
# FIXME: mostly the same as jitclass ctor_impl()
model = context.data_model_manager[inst_type.get_data_type()]
alloc_type = model.get_value_type()
alloc_size = context.get_abi_sizeof(alloc_type)
meminfo = context.nrt.meminfo_alloc_dtor(
builder,
context.get_constant(types.uintp, alloc_size),
imp_dtor(context, builder.module, inst_type),
)
data_pointer = context.nrt.meminfo_data(builder, meminfo)
data_pointer = builder.bitcast(data_pointer, alloc_type.as_pointer())
# Nullify all data
builder.store(cgutils.get_null_value(alloc_type), data_pointer)
inst_struct = context.make_helper(builder, inst_type)
inst_struct.meminfo = meminfo
return inst_struct._getvalue()
sig = inst_type(struct_type)
return sig, codegen
| _Utils |
python | boto__boto3 | boto3/dynamodb/conditions.py | {
"start": 6602,
"end": 6705
} | class ____(ConditionBase):
expression_operator = 'NOT'
expression_format = '({operator} {0})'
| Not |
python | xlwings__xlwings | xlwings/conversion/framework.py | {
"start": 2519,
"end": 2873
} | class ____:
@classmethod
def reader(cls, options):
return Pipeline()
@classmethod
def writer(cls, options):
return Pipeline()
@classmethod
def register(cls, *types):
for type in types:
accessors[type] = cls
@classmethod
def router(cls, value, rng, options):
return cls
| Accessor |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/recurrent.py | {
"start": 46623,
"end": 52362
} | class ____(object):
"""Object that hold dropout related fields for RNN Cell.
This class is not a standalone RNN cell. It suppose to be used with a RNN cell
by multiple inheritance. Any cell that mix with class should have following
fields:
dropout: a float number within range [0, 1). The ratio that the input
tensor need to dropout.
recurrent_dropout: a float number within range [0, 1). The ratio that the
recurrent state weights need to dropout.
This object will create and cache created dropout masks, and reuse them for
the incoming data, so that the same mask is used for every batch input.
"""
def __init__(self, *args, **kwargs):
self._create_non_trackable_mask_cache()
super(DropoutRNNCellMixin, self).__init__(*args, **kwargs)
@trackable.no_automatic_dependency_tracking
def _create_non_trackable_mask_cache(self):
"""Create the cache for dropout and recurrent dropout mask.
Note that the following two masks will be used in "graph function" mode,
e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`
tensors will be generated differently than in the "graph function" case,
and they will be cached.
Also note that in graph mode, we still cache those masks only because the
RNN could be created with `unroll=True`. In that case, the `cell.call()`
function will be invoked multiple times, and we want to ensure same mask
is used every time.
Also the caches are created without tracking. Since they are not picklable
by python when deepcopy, we don't want `layer._obj_reference_counts_dict`
to track it by default.
"""
self._dropout_mask_cache = backend.ContextValueCache(
self._create_dropout_mask)
self._recurrent_dropout_mask_cache = backend.ContextValueCache(
self._create_recurrent_dropout_mask)
def reset_dropout_mask(self):
"""Reset the cached dropout masks if any.
This is important for the RNN layer to invoke this in it `call()` method so
that the cached mask is cleared before calling the `cell.call()`. The mask
should be cached across the timestep within the same batch, but shouldn't
be cached between batches. Otherwise it will introduce unreasonable bias
against certain index of data within the batch.
"""
self._dropout_mask_cache.clear()
def reset_recurrent_dropout_mask(self):
"""Reset the cached recurrent dropout masks if any.
This is important for the RNN layer to invoke this in it call() method so
that the cached mask is cleared before calling the cell.call(). The mask
should be cached across the timestep within the same batch, but shouldn't
be cached between batches. Otherwise it will introduce unreasonable bias
against certain index of data within the batch.
"""
self._recurrent_dropout_mask_cache.clear()
def _create_dropout_mask(self, inputs, training, count=1):
return _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=count)
def _create_recurrent_dropout_mask(self, inputs, training, count=1):
return _generate_dropout_mask(
array_ops.ones_like(inputs),
self.recurrent_dropout,
training=training,
count=count)
def get_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the dropout mask for RNN cell's input.
It will create mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: The input tensor whose shape will be used to generate dropout
mask.
training: Boolean tensor, whether its in training mode, dropout will be
ignored in non-training mode.
count: Int, how many dropout mask will be generated. It is useful for cell
that has internal weights fused together.
Returns:
List of mask tensor, generated or cached mask based on context.
"""
if self.dropout == 0:
return None
init_kwargs = dict(inputs=inputs, training=training, count=count)
return self._dropout_mask_cache.setdefault(kwargs=init_kwargs)
def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the recurrent dropout mask for RNN cell.
It will create mask based on context if there isn't any existing cached
mask. If a new mask is generated, it will update the cache in the cell.
Args:
inputs: The input tensor whose shape will be used to generate dropout
mask.
training: Boolean tensor, whether its in training mode, dropout will be
ignored in non-training mode.
count: Int, how many dropout mask will be generated. It is useful for cell
that has internal weights fused together.
Returns:
List of mask tensor, generated or cached mask based on context.
"""
if self.recurrent_dropout == 0:
return None
init_kwargs = dict(inputs=inputs, training=training, count=count)
return self._recurrent_dropout_mask_cache.setdefault(kwargs=init_kwargs)
def __getstate__(self):
# Used for deepcopy. The caching can't be pickled by python, since it will
# contain tensor and graph.
state = super(DropoutRNNCellMixin, self).__getstate__()
state.pop('_dropout_mask_cache', None)
state.pop('_recurrent_dropout_mask_cache', None)
return state
def __setstate__(self, state):
state['_dropout_mask_cache'] = backend.ContextValueCache(
self._create_dropout_mask)
state['_recurrent_dropout_mask_cache'] = backend.ContextValueCache(
self._create_recurrent_dropout_mask)
super(DropoutRNNCellMixin, self).__setstate__(state)
| DropoutRNNCellMixin |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/sanity/__init__.py | {
"start": 44165,
"end": 44907
} | class ____(SanityTest, metaclass=abc.ABCMeta):
"""Base class for sanity test plugins which are independent of the python version being used."""
@abc.abstractmethod
def test(self, args: SanityConfig, targets: SanityTargets) -> TestResult:
"""Run the sanity test and return the result."""
def load_processor(self, args: SanityConfig) -> SanityIgnoreProcessor:
"""Load the ignore processor for this sanity test."""
return SanityIgnoreProcessor(args, self, None)
@property
def supported_python_versions(self) -> t.Optional[tuple[str, ...]]:
"""A tuple of supported Python versions or None if the test does not depend on specific Python versions."""
return None
| SanityVersionNeutral |
python | psf__requests | src/requests/exceptions.py | {
"start": 3359,
"end": 3481
} | class ____(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
| ChunkedEncodingError |
python | huggingface__transformers | src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py | {
"start": 14127,
"end": 14781
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
intermediate_size = int(config.hidden_size * config.intermediate_multiple_size)
self.dense_h_to_4h = nn.Linear(config.hidden_size, intermediate_size, bias=False)
# Project back to h.
self.dense_4h_to_h = nn.Linear(intermediate_size, config.hidden_size, bias=False)
self.act = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
intermediate = self.dense_h_to_4h(hidden_states)
intermediate = self.act(intermediate)
output = self.dense_4h_to_h(intermediate)
return output
| GPTNeoXJapaneseMLP |
python | apache__airflow | providers/databricks/tests/unit/databricks/sensors/test_databricks.py | {
"start": 1354,
"end": 8489
} | class ____:
"""
Validate and test the functionality of the DatabricksSQLStatementsSensor. This Sensor borrows heavily
from the DatabricksSQLStatementOperator, meaning that much of the testing logic is also reused.
"""
def test_init_statement(self):
"""Test initialization for traditional use-case (statement)."""
op = DatabricksSQLStatementsSensor(task_id=TASK_ID, statement=STATEMENT, warehouse_id=WAREHOUSE_ID)
assert op.statement == STATEMENT
assert op.warehouse_id == WAREHOUSE_ID
def test_init_statement_id(self):
"""Test initialization when a statement_id is passed, rather than a statement."""
op = DatabricksSQLStatementsSensor(
task_id=TASK_ID, statement_id=STATEMENT_ID, warehouse_id=WAREHOUSE_ID
)
assert op.statement_id == STATEMENT_ID
assert op.warehouse_id == WAREHOUSE_ID
@mock.patch("airflow.providers.databricks.sensors.databricks.DatabricksHook")
def test_exec_success(self, db_mock_class):
"""
Test the execute function for non-deferrable execution. This same exact behavior is expected when the
statement itself fails, so no test_exec_failure_statement is implemented.
"""
expected_json = {
"statement": STATEMENT,
"warehouse_id": WAREHOUSE_ID,
"catalog": None,
"schema": None,
"parameters": None,
"wait_timeout": "0s",
}
op = DatabricksSQLStatementsSensor(task_id=TASK_ID, statement=STATEMENT, warehouse_id=WAREHOUSE_ID)
db_mock = db_mock_class.return_value
db_mock.post_sql_statement.return_value = STATEMENT_ID
op.execute(None) # No context is being passed in
db_mock_class.assert_called_once_with(
DEFAULT_CONN_ID,
retry_limit=op.databricks_retry_limit,
retry_delay=op.databricks_retry_delay,
retry_args=None,
caller="DatabricksSQLStatementsSensor",
)
# Since a statement is being passed in rather than a statement_id, we're asserting that the
# post_sql_statement method is called once
db_mock.post_sql_statement.assert_called_once_with(expected_json)
assert op.statement_id == STATEMENT_ID
@mock.patch("airflow.providers.databricks.sensors.databricks.DatabricksHook")
def test_on_kill(self, db_mock_class):
"""
Test the on_kill method. This is actually part of the DatabricksSQLStatementMixin, so the
test logic will match that with the same name for DatabricksSQLStatementOperator.
"""
# Behavior here will remain the same whether a statement or statement_id is passed
op = DatabricksSQLStatementsSensor(task_id=TASK_ID, statement=STATEMENT, warehouse_id=WAREHOUSE_ID)
db_mock = db_mock_class.return_value
op.statement_id = STATEMENT_ID
# When on_kill is executed, it should call the cancel_sql_statement method
op.on_kill()
db_mock.cancel_sql_statement.assert_called_once_with(STATEMENT_ID)
def test_wait_for_termination_is_default(self):
"""Validate that the default value for wait_for_termination is True."""
op = DatabricksSQLStatementsSensor(
task_id=TASK_ID, statement="select * from test.test;", warehouse_id=WAREHOUSE_ID
)
assert op.wait_for_termination
@pytest.mark.parametrize(
argnames=("statement_state", "expected_poke_result"),
argvalues=[
("RUNNING", False),
("SUCCEEDED", True),
],
)
@mock.patch("airflow.providers.databricks.sensors.databricks.DatabricksHook")
def test_poke(self, db_mock_class, statement_state, expected_poke_result):
op = DatabricksSQLStatementsSensor(
task_id=TASK_ID,
statement=STATEMENT,
warehouse_id=WAREHOUSE_ID,
)
db_mock = db_mock_class.return_value
db_mock.get_sql_statement_state.return_value = SQLStatementState(statement_state)
poke_result = op.poke(None)
assert poke_result == expected_poke_result
@mock.patch("airflow.providers.databricks.sensors.databricks.DatabricksHook")
def test_poke_failure(self, db_mock_class):
op = DatabricksSQLStatementsSensor(
task_id=TASK_ID,
statement=STATEMENT,
warehouse_id=WAREHOUSE_ID,
)
db_mock = db_mock_class.return_value
db_mock.get_sql_statement_state.return_value = SQLStatementState("FAILED")
with pytest.raises(AirflowException):
op.poke(None)
@mock.patch("airflow.providers.databricks.sensors.databricks.DatabricksHook")
def test_execute_task_deferred(self, db_mock_class):
"""
Test that the statement is successfully deferred. This behavior will remain the same whether a
statement or a statement_id is passed.
"""
op = DatabricksSQLStatementsSensor(
task_id=TASK_ID,
statement=STATEMENT,
warehouse_id=WAREHOUSE_ID,
deferrable=True,
)
db_mock = db_mock_class.return_value
db_mock.get_sql_statement_state.return_value = SQLStatementState("RUNNING")
with pytest.raises(TaskDeferred) as exc:
op.execute(None)
assert isinstance(exc.value.trigger, DatabricksSQLStatementExecutionTrigger)
assert exc.value.method_name == "execute_complete"
def test_execute_complete_success(self):
"""
Test the execute_complete function in case the Trigger has returned a successful completion event.
This method is part of the DatabricksSQLStatementsMixin. Note that this is only being tested when
in deferrable mode.
"""
event = {
"statement_id": STATEMENT_ID,
"state": SQLStatementState("SUCCEEDED").to_json(),
"error": {},
}
op = DatabricksSQLStatementsSensor(
task_id=TASK_ID,
statement=STATEMENT,
warehouse_id=WAREHOUSE_ID,
deferrable=True,
)
assert op.execute_complete(context=None, event=event) is None
@mock.patch("airflow.providers.databricks.sensors.databricks.DatabricksHook")
def test_execute_complete_failure(self, db_mock_class):
"""Test execute_complete function in case the Trigger has returned a failure completion event."""
event = {
"statement_id": STATEMENT_ID,
"state": SQLStatementState("FAILED").to_json(),
"error": SQLStatementState(
state="FAILED", error_code="500", error_message="Something Went Wrong"
).to_json(),
}
op = DatabricksSQLStatementsSensor(
task_id=TASK_ID,
statement=STATEMENT,
warehouse_id=WAREHOUSE_ID,
deferrable=True,
)
with pytest.raises(AirflowException, match="^SQL Statement execution failed with terminal state: .*"):
op.execute_complete(context=None, event=event)
| TestDatabricksSQLStatementsSensor |
python | pandas-dev__pandas | pandas/core/interchange/dataframe_protocol.py | {
"start": 1279,
"end": 1872
} | class ____(enum.IntEnum):
"""
Integer enum for null type representation.
Attributes
----------
NON_NULLABLE : int
Non-nullable column.
USE_NAN : int
Use explicit float NaN value.
USE_SENTINEL : int
Sentinel value besides NaN/NaT.
USE_BITMASK : int
The bit is set/unset representing a null on a certain position.
USE_BYTEMASK : int
The byte is set/unset representing a null on a certain position.
"""
NON_NULLABLE = 0
USE_NAN = 1
USE_SENTINEL = 2
USE_BITMASK = 3
USE_BYTEMASK = 4
| ColumnNullType |
python | doocs__leetcode | lcci/05.01.Insert Into Bits/Solution.py | {
"start": 0,
"end": 168
} | class ____:
def insertBits(self, N: int, M: int, i: int, j: int) -> int:
for k in range(i, j + 1):
N &= ~(1 << k)
return N | M << i
| Solution |
python | huggingface__transformers | tests/models/mvp/test_modeling_mvp.py | {
"start": 8147,
"end": 15694
} | class ____(unittest.TestCase):
vocab_size = 99
def _get_config_and_data(self):
input_ids = torch.tensor(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
],
dtype=torch.long,
device=torch_device,
)
batch_size = input_ids.shape[0]
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
return config, input_ids, batch_size
def test_sequence_classification_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
labels = _long_tensor([2] * batch_size).to(torch_device)
config.num_labels = 3
model = MvpForSequenceClassification(config)
model.to(torch_device)
outputs = model(input_ids=input_ids, decoder_input_ids=input_ids, labels=labels)
expected_shape = torch.Size((batch_size, config.num_labels))
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_question_answering_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
sequence_labels = ids_tensor([batch_size], 2).to(torch_device)
model = MvpForQuestionAnswering(config)
model.to(torch_device)
outputs = model(
input_ids=input_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.assertEqual(outputs["start_logits"].shape, input_ids.shape)
self.assertEqual(outputs["end_logits"].shape, input_ids.shape)
self.assertIsInstance(outputs["loss"].item(), float)
@timeout_decorator.timeout(1)
def test_lm_forward(self):
config, input_ids, batch_size = self._get_config_and_data()
lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
lm_model = MvpForConditionalGeneration(config)
lm_model.to(torch_device)
outputs = lm_model(input_ids=input_ids, labels=lm_labels)
expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
self.assertIsInstance(outputs["loss"].item(), float)
def test_lm_uneven_forward(self):
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=14,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=8,
decoder_ffn_dim=8,
max_position_embeddings=48,
)
lm_model = MvpForConditionalGeneration(config).to(torch_device)
context = torch.tensor(
[[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], device=torch_device, dtype=torch.long
)
summary = torch.tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], device=torch_device, dtype=torch.long)
outputs = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
expected_shape = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape, expected_shape)
def test_generate_beam_search(self):
input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], device=torch_device, dtype=torch.long)
config = MvpConfig(
vocab_size=self.vocab_size,
d_model=24,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=2,
decoder_attention_heads=2,
encoder_ffn_dim=32,
decoder_ffn_dim=32,
max_position_embeddings=48,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
)
lm_model = MvpForConditionalGeneration(config).to(torch_device)
lm_model.eval()
max_length = 5
generated_ids = lm_model.generate(
input_ids.clone(),
do_sample=True,
num_return_sequences=1,
num_beams=2,
no_repeat_ngram_size=3,
max_length=max_length,
)
self.assertEqual(generated_ids.shape, (input_ids.shape[0], max_length))
def test_shift_tokens_right(self):
input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long)
shifted = shift_tokens_right(input_ids, 1, 2)
n_pad_before = input_ids.eq(1).float().sum()
n_pad_after = shifted.eq(1).float().sum()
self.assertEqual(shifted.shape, input_ids.shape)
self.assertEqual(n_pad_after, n_pad_before - 1)
self.assertTrue(torch.eq(shifted[:, 0], 2).all())
@slow
def test_tokenization(self):
tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
examples = [" Hello world", " DomDramg"] # need leading spaces for equality
fairseq_results = [
torch.tensor([0, 20920, 232, 2]),
torch.tensor([0, 11349, 495, 4040, 571, 2]),
]
for ex, desired_result in zip(examples, fairseq_results):
mvp_toks = tokenizer.encode(ex, return_tensors="pt").squeeze()
assert_tensors_close(desired_result.long(), mvp_toks, prefix=ex)
@require_torch_fp16
def test_generate_fp16(self):
config, input_ids, batch_size = self._get_config_and_data()
attention_mask = input_ids.ne(1).to(torch_device)
model = MvpForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_dummy_inputs(self):
config, *_ = self._get_config_and_data()
model = MvpForConditionalGeneration(config).eval().to(torch_device)
model(**model.dummy_inputs)
def test_resize_tokens_embeddings_more(self):
config, input_ids, _ = self._get_config_and_data()
def _get_embs(m):
return (m.get_input_embeddings().weight.data.clone(), m.get_output_embeddings().weight.data.clone())
model = MvpForConditionalGeneration(config).eval().to(torch_device)
input, output = _get_embs(model)
self.assertTrue(torch.eq(input, output).all())
new_vocab_size = 45
model.resize_token_embeddings(new_vocab_size)
input_new, output_new = _get_embs(model)
self.assertEqual(input_new.shape, (new_vocab_size, config.d_model))
self.assertEqual(output_new.shape, (new_vocab_size, config.d_model))
self.assertTrue(torch.eq(input_new, output_new).all())
@require_torch
| MvpHeadTests |
python | huggingface__transformers | src/transformers/models/shieldgemma2/modeling_shieldgemma2.py | {
"start": 1085,
"end": 1352
} | class ____(ImageClassifierOutputWithNoAttention):
"""ShieldGemma2 classifies imags as violative or not relative to a specific policy
Args:
"""
probabilities: Optional[torch.Tensor] = None
@auto_docstring
| ShieldGemma2ImageClassifierOutputWithNoAttention |
python | PrefectHQ__prefect | src/prefect/server/utilities/messaging/memory.py | {
"start": 2261,
"end": 2371
} | class ____:
data: Union[bytes, str]
attributes: Mapping[str, Any]
retry_count: int = 0
| MemoryMessage |
python | ansible__ansible | test/integration/targets/delegate_to/connection_plugins/fakelocal.py | {
"start": 1006,
"end": 2426
} | class ____(ConnectionBase):
""" Local based connections """
transport = 'fakelocal'
has_pipelining = True
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
self.cwd = None
def _connect(self):
""" verify """
if self.get_option('remote_user') == 'invaliduser' and self.get_option('password') == 'badpassword':
raise AnsibleConnectionFailure('Got invaliduser and badpassword')
if not self._connected:
display.vvv(u"ESTABLISH FAKELOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
self._connected = True
return self
def exec_command(self, cmd, in_data=None, sudoable=True):
""" run a command on the local host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
return 0, '{"msg": "ALL IS GOOD"}', ''
def put_file(self, in_path, out_path):
""" transfer a file from local to local """
super(Connection, self).put_file(in_path, out_path)
def fetch_file(self, in_path, out_path):
""" fetch a file from local to local -- for compatibility """
super(Connection, self).fetch_file(in_path, out_path)
def close(self):
""" terminate the connection; nothing to do here """
self._connected = False
| Connection |
python | wandb__wandb | wandb/sdk/lib/printer.py | {
"start": 7262,
"end": 7790
} | class ____(abc.ABC):
"""A handle to a block of text that's allowed to change."""
@abc.abstractmethod
def set_text(self, text: str) -> None:
r"""Change the text.
Args:
text: The text to put in the block, with lines separated
by \n characters. The text should not end in \n unless
a blank line at the end of the block is desired.
May include styled output from methods on the Printer
that created this.
"""
| DynamicText |
python | walkccc__LeetCode | solutions/1826. Faulty Sensor/1826.py | {
"start": 0,
"end": 644
} | class ____:
def badSensor(self, sensor1: list[int], sensor2: list[int]) -> int:
# A -> B, so B is defect
def canReplace(A, B):
i = 0 # A's index
j = 0 # B's index
droppedValue = -1
while i < len(A):
if A[i] == B[j]:
i += 1
j += 1
else:
droppedValue = A[i]
i += 1
return j == len(B) - 1 and B[-1] != droppedValue
oneDefect = canReplace(sensor2, sensor1)
twoDefect = canReplace(sensor1, sensor2)
if oneDefect and twoDefect:
return -1
if not oneDefect and not twoDefect:
return -1
return 1 if oneDefect else 2
| Solution |
python | spyder-ide__spyder | spyder/plugins/pylint/plugin.py | {
"start": 973,
"end": 1036
} | class ____:
AnalyzeCurrentFile = 'run analysis'
| PylintActions |
python | doocs__leetcode | lcci/16.25.LRU Cache/Solution.py | {
"start": 0,
"end": 148
} | class ____:
def __init__(self, key=0, val=0):
self.key = key
self.val = val
self.prev = None
self.next = None
| Node |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.