| language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
| stringclasses (1 value) | stringclasses (346 values) | stringlengths (6 to 201) | dict | stringlengths (21 to 2.38M) | stringlengths (1 to 96) |
| python | scrapy__scrapy | tests/CrawlerRunner/custom_loop_different.py | {"start": 206, "end": 684} |
class ____(Spider):
name = "no_request"
custom_settings = {
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": "uvloop.Loop",
}
async def start(self):
return
yield
def main(reactor):
configure_logging()
runner = CrawlerRunner()
return runner.crawl(NoRequestsSpider)
install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
| NoRequestsSpider |
| python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurepostgresql/llama_index/vector_stores/azure_postgres/common/_shared.py | {"start": 1212, "end": 1547} |
class ____(BaseModel):
"""Basic username/password authentication for Azure Database for PostgreSQL connections.
:param username: Username for the connection.
:type username: str
:param password: Password for the connection.
:type password: str
"""
username: str = "postgres"
password: str = ""
| BasicAuth |
| python | joke2k__faker | faker/providers/person/ar_DZ/__init__.py | {"start": 70, "end": 6808} |
class ____(PersonProvider):
formats_female: Tuple[str, ...] = ("{{first_name_female}} {{last_name}}",)
formats_male: Tuple[str, ...] = ("{{first_name_male}} {{last_name}}",)
formats = formats_male + formats_female
# Translated from: https://studentsoftheworld.info/penpals/stats_fr.php?Pays=ALG
# Last checked: 2025-09-28
first_names_female: Tuple[str, ...] = (
"آية",
"أماني",
"أمل",
"أمينة",
"أميرة",
"أناييس",
"أنيسة",
"أسماء",
"إكرام",
"إيمان",
"إيناس",
"بشرى",
"تينهينان",
"حياة",
"خديجة",
"داليا",
"دنيا",
"رانية",
"رشا",
"روز",
"ريم",
"ريما",
"زينة",
"زينب",
"سارة",
"سعاد",
"سرين",
"سلمى",
"سليمة",
"سميرة",
"سومية",
"سيليا",
"سيلين",
"شيراز",
"صبرينة",
"صفية",
"صوفيا",
"فاطمة",
"فرح",
"فريال",
"فوزية",
"فلة",
"كاتيا",
"كاهينة",
"ليديا",
"ليزا",
"ليلى",
"ليليا",
"ليندة",
"لينا",
"ماريا",
"مايا",
"ملاك",
"مروة",
"مريم",
"مزرية",
"مينة",
"ميرا",
"ميليسا",
"نادية",
"نسرين",
"نجمة",
"نريمان",
"نوال",
"نهاد",
"نور",
"هاجر",
"هانية",
"هدى",
"هناء",
"وفاء",
"ياسمين",
"ياسمينة",
"يسرى",
)
# Translated from: https://studentsoftheworld.info/penpals/stats_fr.php?Pays=ALG
# Last checked: 2025-09-28
first_names_male: Tuple[str, ...] = (
"آدم",
"أسامة",
"أحمد",
"أرزقي",
"أكرم",
"أمين",
"أمير",
"أنيس",
"أيمن",
"أيوب",
"إبراهيم",
"إلياس",
"عبد الرحمن",
"عبد الرؤوف",
"عبد القادر",
"علاء الدين",
"عادل",
"علي",
"عمر",
"فاتح",
"فارس",
"فاروق",
"فريد",
"فرحات",
"فضيل",
"غِلاس",
"قادة",
"خالد",
"خليل",
"رابح",
"رضا",
"رشدي",
"رشيد",
"رمزي",
"رياض",
"زكريا",
"سعد",
"سعيد",
"سامي",
"سمير",
"سفيان",
"سليم",
"صالح",
"شعبان",
"شريف",
"طه",
"علي",
"علاء الدين",
"غِلاس",
"قادة",
"لطفي",
"لمين",
"مالك",
"ماسِينيسا",
"مراد",
"محند",
"محمد",
"مصطفى",
"منير",
"مهدي",
"مولود",
"موسى",
"ناصر",
"ناظم",
"نادر",
"نسيم",
"وليد",
"وسيم",
"وناس",
"ياسر",
"ياسين",
"يانيس",
"يحيى",
"يوسف",
"يونس",
"يوبا",
)
first_names = first_names_male + first_names_female
# Translated from: https://fr.geneawiki.com/wiki/Noms_de_famille_alg%C3%A9riens
# Last checked: 2025-09-28
last_names: Tuple[str, ...] = (
"أعراب",
"إبراهيمي",
"إخلف",
"إسماعيل",
"باشا",
"باي",
"بحري",
"بختي",
"بخوش",
"بغدادي",
"بركان",
"بركاني",
"بلبشير",
"بلحاج",
"بلخير",
"بلخيري",
"بلعربي",
"بلعيد",
"بلعيدي",
"بلقادي",
"بلقاسم",
"بلقاسمي",
"بن أحمد",
"بن زيان",
"بن سالم",
"بن سعيد",
"بن سليمان",
"بن شيخ",
"بن صالح",
"بن عامر",
"بن عبد الله",
"بن علي",
"بن عمار",
"بن عمارة",
"بن عودة",
"بن عيسى",
"بن موسى",
"بن يحيى",
"بن يمينة",
"بن يوسف",
"بو عافية",
"بو عبد الله",
"بوبكر",
"بوتالب",
"بوجمعة",
"بوخاتم",
"بوخاري",
"بوخلفة",
"بودراع",
"بوزيان",
"بوزيد",
"بوزيدي",
"بوسعيد",
"بوشامة",
"بوشارب",
"بوعزيز",
"بوعلام",
"بوعلي",
"بومدين",
"بومعزة",
"بوناب",
"تابت",
"تومي",
"تواتي",
"جبار",
"جلال",
"جلولي",
"جودي",
"داود",
"داوودي",
"دحمان",
"دحماني",
"دراجـي",
"دربال",
"دركاوي",
"درويش",
"ديب",
"دياف",
"ديف",
"رابحي",
"رحال",
"رحمون",
"رحماني",
"رحموني",
"ربيعة",
"رشدي",
"رزّيق",
"رمضاني",
"زايدي",
"زاوي",
"زروال",
"زرّوقي",
"زواوي",
"زيان",
"زياني",
"زيتوني",
"زيدان",
"ساسي",
"سالم",
"سالمي",
"سعد",
"سعدي",
"سعودي",
"سعيد",
"سعيداني",
"سعيدي",
"سلامي",
"سلطاني",
"سليماني",
"سهلي",
"سوداني",
"سياح",
"شايب",
"شاوي",
"شريف",
"شريفي",
"شرقي",
"شعبان",
"شعيباني",
"شيخ",
"صالح",
"صالحي",
"صحراوي",
"صدِّيقي",
"طالب",
"طالبي",
"طايبي",
"طحراوي",
"طهري",
"طويل",
"عثماني",
"فارس",
"فرحات",
"فلاح",
"فيلالي",
"قاسم",
"قاسمي",
"قاسي",
"قبائلي",
"قدور",
"قدوري",
"قرفي",
"قريشي",
"قندوز",
"قويدري",
"لحمر",
"لخضر",
"لعربي",
"لعريبي",
"لعمري",
"لوسيف",
"لونيس",
"ماحي",
"ماعوش",
"مالك",
"مالكي",
"مباركي",
"مخلوف",
"مخلوفي",
"مداح",
"مداني",
"مداهد",
"مرابط",
"مراح",
"مرزوق",
"مرزوقي",
"مرسلي",
"مزيان",
"مزياني",
"مسعودي",
"مشري",
"مصباح",
"مصطفاوي",
"مفتاح",
"مقدم",
"مقران",
"مقراني",
"محمدي",
"محمودي",
"مختاري",
"معزوز",
"معزوزي",
"ميلودي",
"ميموني",
"ميهوبي",
"مولاي",
"موساوي",
"موسى",
"نايلي",
"ناصر",
"نجار",
"نصري",
"نوار",
"نوري",
"نوي",
"هاشمي",
"هني",
"هواري",
"يحبَوي",
"يحيى",
"يوسفي",
)
| Provider |
| python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {"start": 127672, "end": 128237} |
class ____(BaseModel):
"""
Configuration for sparse inverted index.
"""
full_scan_threshold: Optional[int] = Field(
default=None,
description="We prefer a full scan search upto (excluding) this number of vectors. Note: this is number of vectors, not KiloBytes.",
)
index_type: "SparseIndexType" = Field(..., description="Configuration for sparse inverted index.")
datatype: Optional["VectorStorageDatatype"] = Field(
default=None, description="Datatype used to store weights in the index."
)
| SparseIndexConfig |
| python | tiangolo__fastapi | docs_src/dependencies/tutorial004_an_py310.py | {"start": 171, "end": 647} |
class ____:
def __init__(self, q: str | None = None, skip: int = 0, limit: int = 100):
self.q = q
self.skip = skip
self.limit = limit
@app.get("/items/")
async def read_items(commons: Annotated[CommonQueryParams, Depends()]):
response = {}
if commons.q:
response.update({"q": commons.q})
items = fake_items_db[commons.skip : commons.skip + commons.limit]
response.update({"items": items})
return response
| CommonQueryParams |
| python | gevent__gevent | src/gevent/_config.py | {"start": 18652, "end": 18789} |
class ____(AresSettingMixin, Setting):
name = 'ares_tcp_port'
default = None
environment_key = 'GEVENTARES_TCP_PORT'
| AresTCPPort |
| python | PrefectHQ__prefect | src/prefect/server/utilities/server.py | {"start": 851, "end": 2091} |
class ____(APIRoute):
"""
A FastAPIRoute class which attaches an async stack to requests that exits before
a response is returned.
Requests already have `request.scope['fastapi_astack']` which is an async stack for
the full scope of the request. This stack is used for managing contexts of FastAPI
dependencies. If we want to close a dependency before the request is complete
(i.e. before returning a response to the user), we need a stack with a different
scope. This extension adds this stack at `request.state.response_scoped_stack`.
"""
def get_route_handler(self) -> Callable[[Request], Coroutine[Any, Any, Response]]:
default_handler = super().get_route_handler()
async def handle_response_scoped_depends(request: Request) -> Response:
# Create a new stack scoped to exit before the response is returned
response = None
async with AsyncExitStack() as stack:
request.state.response_scoped_stack = stack
response = await default_handler(request)
if TYPE_CHECKING:
assert response is not None
return response
return handle_response_scoped_depends
| PrefectAPIRoute |
| python | sqlalchemy__sqlalchemy | test/sql/test_operators.py | {"start": 3645, "end": 12510} |
class ____(
testing.AssertsCompiledSQL, fixtures.TestBase
):
dialect = __dialect__ = "default_enhanced"
@testing.combinations((operators.desc_op, desc), (operators.asc_op, asc))
def test_scalar(self, operator, compare_to):
left = column("left")
assert left.comparator.operate(operator).compare(compare_to(left))
self._loop_test(operator)
right_column = column("right")
@testing.combinations(
(operators.add, right_column),
(operators.is_, None),
(operators.is_not, None),
(operators.isnot, None), # deprecated 1.4; See #5429
(operators.is_, null()),
(operators.is_, true()),
(operators.is_, false()),
(operators.eq, True),
(operators.ne, True),
(operators.is_distinct_from, True),
(operators.is_distinct_from, False),
(operators.is_distinct_from, None),
(operators.is_not_distinct_from, True),
(operators.isnot_distinct_from, True), # deprecated 1.4; See #5429
(operators.is_, True),
(operators.is_not, True),
(operators.isnot, True), # deprecated 1.4; See #5429
(operators.is_, False),
(operators.is_not, False),
(operators.isnot, False), # deprecated 1.4; See #5429
(operators.like_op, right_column),
(operators.not_like_op, right_column),
(operators.notlike_op, right_column), # deprecated 1.4; See #5435
(operators.ilike_op, right_column),
(operators.not_ilike_op, right_column),
(operators.notilike_op, right_column), # deprecated 1.4; See #5435
(operators.is_, right_column),
(operators.is_not, right_column),
(operators.isnot, right_column), # deprecated 1.4; See #5429
(operators.concat_op, right_column),
id_="ns",
)
def test_operate(self, operator, right):
left = column("left")
if operators.is_comparison(operator):
type_ = sqltypes.BOOLEANTYPE
else:
type_ = sqltypes.NULLTYPE
assert left.comparator.operate(operator, right).compare(
BinaryExpression(
coercions.expect(roles.WhereHavingRole, left),
coercions.expect(roles.WhereHavingRole, right),
operator,
type_=type_,
)
)
modifiers = operator(left, right).modifiers
assert operator(left, right).compare(
BinaryExpression(
coercions.expect(roles.WhereHavingRole, left),
coercions.expect(roles.WhereHavingRole, right),
operator,
modifiers=modifiers,
type_=type_,
)
)
self._loop_test(operator, right)
if operators.is_comparison(operator):
is_(
left.comparator.operate(operator, right).type,
sqltypes.BOOLEANTYPE,
)
def _loop_test(self, operator, *arg):
loop = LoopOperate()
is_(operator(loop, *arg), operator)
@testing.combinations(
operators.add,
operators.and_,
operators.or_,
operators.mul,
argnames="op",
)
def test_nonsensical_negations(self, op):
opstring = compiler.OPERATORS[op]
self.assert_compile(
select(~op(column("x"), column("q"))),
f"SELECT NOT (x{opstring}q) AS anon_1",
)
def test_null_true_false_is_sanity_checks(self):
d = default.DefaultDialect()
d.supports_native_boolean = True
self.assert_compile(
column("q") == None,
"q IS NULL",
)
self.assert_compile(
column("q") == null(),
"q IS NULL",
)
# IS coercion only occurs from left to right (just discovered this)
self.assert_compile(
null() == column("q"),
"NULL = q",
)
self.assert_compile(column("q") == true(), "q = true", dialect=d)
self.assert_compile(true() == column("q"), "true = q", dialect=d)
self.assert_compile(column("q") == True, "q = true", dialect=d)
# this comes out reversed; no choice, column.__eq__() is called
# and we don't get to know it's "reverse"
self.assert_compile(True == column("q"), "q = true", dialect=d)
def test_no_getitem(self):
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
self.test_operate,
operators.getitem,
column("right"),
)
assert_raises_message(
NotImplementedError,
"Operator 'getitem' is not supported on this expression",
lambda: column("left")[3],
)
def test_in(self):
left = column("left")
assert left.comparator.operate(operators.in_op, [1, 2, 3]).compare(
BinaryExpression(
left,
BindParameter(
"left", value=[1, 2, 3], unique=True, expanding=True
),
operators.in_op,
type_=sqltypes.BOOLEANTYPE,
)
)
self._loop_test(operators.in_op, [1, 2, 3])
def test_not_in(self):
left = column("left")
assert left.comparator.operate(operators.not_in_op, [1, 2, 3]).compare(
BinaryExpression(
left,
BindParameter(
"left", value=[1, 2, 3], unique=True, expanding=True
),
operators.not_in_op,
type_=sqltypes.BOOLEANTYPE,
)
)
self._loop_test(operators.not_in_op, [1, 2, 3])
def test_in_no_accept_list_of_non_column_element(self):
left = column("left")
foo = ClauseList()
assert_raises_message(
exc.ArgumentError,
r"IN expression list, SELECT construct, or bound parameter "
r"object expected, got .*ClauseList",
left.in_,
[foo],
)
def test_in_no_accept_non_list_non_selectable(self):
left = column("left")
right = column("right")
assert_raises_message(
exc.ArgumentError,
r"IN expression list, SELECT construct, or bound parameter "
r"object expected, got .*ColumnClause",
left.in_,
right,
)
def test_in_no_accept_non_list_thing_with_getitem(self):
# test [ticket:2726]
class HasGetitem(String):
class comparator_factory(String.Comparator):
def __getitem__(self, value):
return value
left = column("left")
right = column("right", HasGetitem)
assert_raises_message(
exc.ArgumentError,
r"IN expression list, SELECT construct, or bound parameter "
r"object expected, got .*ColumnClause",
left.in_,
right,
)
def test_collate(self):
left = column("left")
right = "some collation"
left.comparator.operate(operators.collate, right).compare(
collate(left, right)
)
def test_default_adapt(self):
class TypeOne(TypeEngine):
operator_classes = OperatorClass.ANY
class TypeTwo(TypeEngine):
operator_classes = OperatorClass.ANY
expr = column("x", TypeOne()) - column("y", TypeTwo())
is_(expr.type._type_affinity, TypeOne)
def test_concatenable_adapt(self):
class TypeOne(Concatenable, TypeEngine):
operator_classes = OperatorClass.ANY
class TypeTwo(Concatenable, TypeEngine):
operator_classes = OperatorClass.ANY
class TypeThree(TypeEngine):
operator_classes = OperatorClass.ANY
expr = column("x", TypeOne()) - column("y", TypeTwo())
is_(expr.type._type_affinity, TypeOne)
is_(expr.operator, operator.sub)
expr = column("x", TypeOne()) + column("y", TypeTwo())
is_(expr.type._type_affinity, TypeOne)
is_(expr.operator, operators.concat_op)
expr = column("x", TypeOne()) - column("y", TypeThree())
is_(expr.type._type_affinity, TypeOne)
is_(expr.operator, operator.sub)
expr = column("x", TypeOne()) + column("y", TypeThree())
is_(expr.type._type_affinity, TypeOne)
is_(expr.operator, operator.add)
def test_contains_override_raises(self):
for col in [
Column("x", String),
Column("x", ARRAY(Integer)),
]:
assert_raises_message(
NotImplementedError,
"Operator 'contains' is not supported on this expression",
lambda: "foo" in col,
)
| DefaultColumnComparatorTest |
| python | allegroai__clearml | clearml/backend_api/services/v2_20/queues.py | {"start": 16329, "end": 19799} |
class ____(Request):
"""
Add or update queue metadata
:param queue: ID of the queue
:type queue: str
:param metadata: Metadata items to add or update
:type metadata: Sequence[MetadataItem]
:param replace_metadata: If set then all the metadata items will be replaced with the provided ones.
Otherwise only the provided metadata items will be updated or added
:type replace_metadata: bool
"""
_service = "queues"
_action = "add_or_update_metadata"
_version = "2.20"
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": ["string", "null"],
},
"type": {
"description": "The type of the metadata item",
"type": ["string", "null"],
},
"value": {
"description": "The value stored in the metadata item",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"metadata": {
"description": "Metadata items to add or update",
"items": {"$ref": "#/definitions/metadata_item"},
"type": "array",
},
"queue": {"description": "ID of the queue", "type": "string"},
"replace_metadata": {
"default": False,
"description": "If set then the all the metadata items will be replaced with the provided ones. Otherwise only the provided metadata items will be updated or added",
"type": "boolean",
},
},
"required": ["queue", "metadata"],
"type": "object",
}
def __init__(
self, queue: str, metadata: List[Any], replace_metadata: Optional[bool] = False, **kwargs: Any
) -> None:
super(AddOrUpdateMetadataRequest, self).__init__(**kwargs)
self.queue = queue
self.metadata = metadata
self.replace_metadata = replace_metadata
@schema_property("queue")
def queue(self) -> str:
return self._property_queue
@queue.setter
def queue(self, value: str) -> None:
if value is None:
self._property_queue = None
return
self.assert_isinstance(value, "queue", six.string_types)
self._property_queue = value
@schema_property("metadata")
def metadata(self) -> List[Any]:
return self._property_metadata
@metadata.setter
def metadata(self, value: List[Any]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (dict,))
self._property_metadata = value
@schema_property("replace_metadata")
def replace_metadata(self) -> Optional[bool]:
return self._property_replace_metadata
@replace_metadata.setter
def replace_metadata(self, value: Optional[bool]) -> None:
if value is None:
self._property_replace_metadata = None
return
self.assert_isinstance(value, "replace_metadata", (bool,))
self._property_replace_metadata = value
| AddOrUpdateMetadataRequest |
| python | ApeWorX__ape | src/ape_test/config.py | {"start": 3735, "end": 4462} |
class ____(PluginConfig):
enable_session: bool = True
"""
Set to ``False`` to disable session isolation.
"""
enable_package: bool = True
"""
Set to ``False`` to disable package isolation.
"""
enable_module: bool = True
"""
Set to ``False`` to disable module isolation.
"""
enable_class: bool = True
"""
Set to ``False`` to disable class isolation.
"""
enable_function: bool = True
"""
Set to ``False`` to disable function isolation.
"""
model_config = SettingsConfigDict(extra="allow", env_prefix="APE_TEST_")
def get_isolation(self, scope: "Scope") -> bool:
return getattr(self, f"enable_{scope.name.lower()}")
| IsolationConfig |
| python | pytorch__pytorch | torch/_functorch/_aot_autograd/aot_autograd_result.py | {"start": 21243, "end": 21469} |
class ____(GenericAOTAutogradResult[CompiledForward, CompiledBackward]):
"""
Regular AOTAutogradResult: saves the forward/backward FxGraphCache keys
and looks them up in FxGraphCache on load
"""
| AOTAutogradResult |
| python | tensorflow__tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | {"start": 37217, "end": 41124} |
class ____(test.TestCase):
# Static shape inference
@test_util.run_deprecated_v1
def testStaticShape(self):
placeholder = array_ops.placeholder(dtypes.int32)
ensure_shape_op = check_ops.ensure_shape(placeholder, (3, 3, 3))
self.assertEqual(ensure_shape_op.get_shape(), (3, 3, 3))
@test_util.run_deprecated_v1
def testStaticShape_MergesShapes(self):
placeholder = array_ops.placeholder(dtypes.int32, shape=(None, None, 3))
ensure_shape_op = check_ops.ensure_shape(placeholder, (5, 4, None))
self.assertEqual(ensure_shape_op.get_shape(), (5, 4, 3))
@test_util.run_deprecated_v1
def testStaticShape_RaisesErrorWhenRankIncompatible(self):
placeholder = array_ops.placeholder(dtypes.int32, shape=(None, None, 3))
with self.assertRaises(ValueError):
check_ops.ensure_shape(placeholder, (2, 3))
@test_util.run_deprecated_v1
def testStaticShape_RaisesErrorWhenDimIncompatible(self):
placeholder = array_ops.placeholder(dtypes.int32, shape=(None, None, 3))
with self.assertRaises(ValueError):
check_ops.ensure_shape(placeholder, (2, 2, 4))
@test_util.run_deprecated_v1
def testStaticShape_CanSetUnknownShape(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
ensure_shape_op = check_ops.ensure_shape(derived, None)
self.assertEqual(ensure_shape_op.get_shape(), None)
# Dynamic shape check
@test_util.run_deprecated_v1
@test_util.disable_xla(
"b/123337890") # Dynamic shapes not supported now with XLA
def testEnsuresDynamicShape_RaisesError(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = math_ops.divide(placeholder, 3, name="MyDivide")
derived = check_ops.ensure_shape(derived, (3, 3, 3))
feed_val = [[1], [2]]
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"Shape of tensor MyDivide \[2,1\] is not compatible with "
r"expected shape \[3,3,3\]."):
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
@test_util.disable_xla(
"b/123337890") # Dynamic shapes not supported now with XLA
def testEnsuresDynamicShape_RaisesErrorDimUnknown(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
derived = check_ops.ensure_shape(derived, (None, None, 3))
feed_val = [[1], [2]]
with self.cached_session() as sess:
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"Shape of tensor [A-Za-z_]* \[2,1\] is not compatible with "
r"expected shape \[\?,\?,3\]."):
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
def testEnsuresDynamicShape(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
derived = check_ops.ensure_shape(derived, (2, 1))
feed_val = [[1], [2]]
with self.cached_session() as sess:
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
def testEnsuresDynamicShape_WithUnknownDims(self):
placeholder = array_ops.placeholder(dtypes.int32)
derived = placeholder / 3
derived = check_ops.ensure_shape(derived, (None, None))
feed_val = [[1], [2]]
with self.cached_session() as sess:
sess.run(derived, feed_dict={placeholder: feed_val})
@test_util.run_deprecated_v1
def testGradient(self):
placeholder = array_ops.placeholder(dtypes.float32)
derived = check_ops.ensure_shape(placeholder, (None, None))
gradient = gradients.gradients(derived, placeholder)
feed_val = [[4.0], [-1.0]]
with self.cached_session() as sess:
gradient_values, = sess.run(gradient, feed_dict={placeholder: feed_val})
expected = [[1.0], [1.0]]
self.assertAllEqual(gradient_values, expected)
| EnsureShapeTest |
| python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-visited-cells-in-a-grid.py | {"start": 970, "end": 2163} |
class ____(object):
def minimumVisitedCells(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m, n = len(grid), len(grid[0])
uf1 = [UnionFind(n+1) for _ in xrange(m)]
uf2 = [UnionFind(m+1) for _ in xrange(n)]
d, i, j = 1, 0, 0
q = [(i, j)]
uf1[i].union_set(j, j+1)
uf2[j].union_set(i, i+1)
while q:
new_q = []
for i, j in q:
if (i, j) == (m-1, n-1):
return d
while uf1[i].right_set(j) <= min(j+grid[i][j], n-1):
k = uf1[i].right_set(j)
new_q.append((i, k))
uf2[k].union_set(i, i+1)
uf1[i].union_set(k, k+1)
while uf2[j].right_set(i) <= min(i+grid[i][j], m-1):
k = uf2[j].right_set(i)
new_q.append((k, j))
uf1[k].union_set(j, j+1)
uf2[j].union_set(k, k+1)
q = new_q
d += 1
return -1
# Time: O(m * n * log(m + n))
# Space: O(m * n)
from sortedcontainers import SortedList
# bfs, sorted list
| Solution |
| python | walkccc__LeetCode | solutions/642. Design Search Autocomplete System/642.py | {"start": 0, "end": 455} |
class ____:
def __init__(self):
self.children: dict[str, TrieNode] = {}
self.s: str | None = None
self.time = 0
self.top3: list[TrieNode] = []
def __lt__(self, other):
if self.time == other.time:
return self.s < other.s
return self.time > other.time
def update(self, node) -> None:
if node not in self.top3:
self.top3.append(node)
self.top3.sort()
if len(self.top3) > 3:
self.top3.pop()
| TrieNode |
| python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/generated/2-shell-command-empty.py | {"start": 22, "end": 418} |
class ____(dg.Component, dg.Model, dg.Resolvable):
"""COMPONENT SUMMARY HERE.
COMPONENT DESCRIPTION HERE.
"""
# added fields here will define params when instantiated in Python, and yaml schema via Resolvable
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
# Add definition construction logic here.
return dg.Definitions()
| ShellCommand |
| python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {"start": 1883, "end": 1952} |
class ____(Model):
x: int
OverrideModel(x=1, y='b')
| OverrideModel |
| python | django__django | tests/gis_tests/geo3d/tests.py | {"start": 9795, "end": 14005} |
class ____(FuncTestMixin, Geo3DLoadingHelper, TestCase):
def test_kml(self):
"""
Test KML() function with Z values.
"""
self._load_city_data()
h = City3D.objects.annotate(kml=AsKML("point", precision=6)).get(name="Houston")
# KML should be 3D.
# `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
ref_kml_regex = re.compile(
r"^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$"
)
self.assertTrue(ref_kml_regex.match(h.kml))
def test_geojson(self):
"""
Test GeoJSON() function with Z values.
"""
self._load_city_data()
h = City3D.objects.annotate(geojson=AsGeoJSON("point", precision=6)).get(
name="Houston"
)
# GeoJSON should be 3D
# `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d
# WHERE name='Houston';`
ref_json_regex = re.compile(
r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$'
)
self.assertTrue(ref_json_regex.match(h.geojson))
def test_perimeter(self):
"""
Testing Perimeter() function on 3D fields.
"""
self._load_polygon_data()
# Reference query for values below:
# `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly)
# FROM geo3d_polygon3d;`
ref_perim_3d = 76859.2620451
ref_perim_2d = 76859.2577803
tol = 6
poly2d = Polygon2D.objects.annotate(perimeter=Perimeter("poly")).get(
name="2D BBox"
)
self.assertAlmostEqual(ref_perim_2d, poly2d.perimeter.m, tol)
poly3d = Polygon3D.objects.annotate(perimeter=Perimeter("poly")).get(
name="3D BBox"
)
self.assertAlmostEqual(ref_perim_3d, poly3d.perimeter.m, tol)
def test_length(self):
"""
Testing Length() function on 3D fields.
"""
# ST_Length_Spheroid Z-aware, and thus does not need to use
# a separate function internally.
# `SELECT ST_Length_Spheroid(
# line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
# FROM geo3d_interstate[2d|3d];`
self._load_interstate_data()
tol = 3
ref_length_2d = 4368.1721949481
ref_length_3d = 4368.62547052088
inter2d = Interstate2D.objects.annotate(length=Length("line")).get(name="I-45")
self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
inter3d = Interstate3D.objects.annotate(length=Length("line")).get(name="I-45")
self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
# Making sure `ST_Length3D` is used on for a projected
# and 3D model rather than `ST_Length`.
# `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
ref_length_2d = 4367.71564892392
# `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
ref_length_3d = 4368.16897234101
inter2d = InterstateProj2D.objects.annotate(length=Length("line")).get(
name="I-45"
)
self.assertAlmostEqual(ref_length_2d, inter2d.length.m, tol)
inter3d = InterstateProj3D.objects.annotate(length=Length("line")).get(
name="I-45"
)
self.assertAlmostEqual(ref_length_3d, inter3d.length.m, tol)
def test_scale(self):
"""
Testing Scale() function on Z values.
"""
self._load_city_data()
# Mapping of City name to reference Z values.
zscales = (-3, 4, 23)
for zscale in zscales:
for city in City3D.objects.annotate(scale=Scale("point", 1.0, 1.0, zscale)):
self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)
def test_translate(self):
"""
Testing Translate() function on Z values.
"""
self._load_city_data()
ztranslations = (5.23, 23, -17)
for ztrans in ztranslations:
for city in City3D.objects.annotate(
translate=Translate("point", 0, 0, ztrans)
):
self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| Geo3DFunctionsTests |
| python | pypa__warehouse | warehouse/manage/views/organizations.py | {"start": 26152, "end": 28956} |
class ____:
def __init__(self, organization, request):
self.organization = organization
self.request = request
self.organization_service = request.find_service(
IOrganizationService, context=None
)
@property
def default_response(self):
return {
"organization": self.organization,
"create_team_form": CreateTeamForm(
self.request.POST,
organization_service=self.organization_service,
organization_id=self.organization.id,
),
}
@view_config(request_method="GET", permission=Permissions.OrganizationsRead)
def manage_teams(self):
return self.default_response
@view_config(request_method="POST")
def create_team(self):
# Get and validate form from default response.
default_response = self.default_response
form = default_response["create_team_form"]
if not form.validate():
return default_response
# Add team to organization.
team = self.organization_service.add_team(
organization_id=self.organization.id,
name=form.name.data,
)
# Record events.
self.organization.record_event(
tag=EventTag.Organization.TeamCreate,
request=self.request,
additional={
"created_by_user_id": str(self.request.user.id),
"team_name": team.name,
},
)
team.record_event(
tag=EventTag.Team.TeamCreate,
request=self.request,
additional={
"created_by_user_id": str(self.request.user.id),
},
)
# Send notification emails.
owner_and_manager_users = set(
organization_owners(self.request, self.organization)
+ organization_managers(self.request, self.organization)
)
send_team_created_email(
self.request,
owner_and_manager_users,
organization_name=self.organization.name,
team_name=team.name,
)
# Display notification message.
self.request.session.flash(
f"Created team {team.name!r} in {self.organization.name!r}",
queue="success",
)
# Refresh teams list.
return HTTPSeeOther(self.request.path)
@view_defaults(
route_name="manage.organization.projects",
context=Organization,
renderer="warehouse:templates/manage/organization/projects.html",
uses_session=True,
require_active_organization=True,
require_csrf=True,
require_methods=False,
permission=Permissions.OrganizationsManage,
has_translations=True,
require_reauth=True,
)
| ManageOrganizationTeamsViews |
| python | dagster-io__dagster | python_modules/dagster/dagster/_core/events/__init__.py | {"start": 67005, "end": 67119} |
class ____:
asset_key: AssetKey
partition_keys: Optional[Sequence[str]]
@whitelist_for_serdes
| AssetWipedData |
| python | neetcode-gh__leetcode | python/0304-range-sum-query-2d-immutable.py | {"start": 0, "end": 606} |
class ____:
def __init__(self, matrix):
self.sum_ = [[0] * (len(matrix[0]) + 1) for _ in range(len(matrix) + 1)]
for i, line in enumerate(matrix):
previous = 0
for j, num in enumerate(line):
previous += num
above = self.sum_[i][j + 1]
self.sum_[i + 1][j + 1] = previous + above
def sumRegion(self, row1, col1, row2, col2):
sum_col2 = self.sum_[row2 + 1][col2 + 1] - self.sum_[row1][col2 + 1]
sum_col1 = self.sum_[row2 + 1][col1] - self.sum_[row1][col1]
return sum_col2 - sum_col1
| NumMatrix |
| python | google__jax | jax/_src/core.py | {"start": 113957, "end": 117742} |
class ____(effects.Effect):
"""A side-effect introducing a new named axis into the current scope."""
name: AxisName
effects.control_flow_allowed_effects.add_type(NamedAxisEffect)
effects.custom_derivatives_allowed_effects.add_type(NamedAxisEffect)
effects.lowerable_effects.add_type(NamedAxisEffect)
effects.remat_allowed_effects.add_type(NamedAxisEffect)
def filter_named_axis_effects(
effects: Effects, names: Collection[AxisName]
) -> Effects:
return {e for e in effects
if not isinstance(e, NamedAxisEffect) or e.name not in names}
def remove_named_axis_effects(
jaxpr: Jaxpr, names: Collection[AxisName]
) -> Jaxpr:
if not names or not jaxpr.effects:
return jaxpr
return jaxpr.replace(effects=filter_named_axis_effects(jaxpr.effects, names))
def used_axis_names_jaxpr(jaxpr: Jaxpr | ClosedJaxpr):
return {e.name for e in jaxpr.effects if isinstance(e, NamedAxisEffect)}
def replace_jaxpr_effects(jaxpr: ClosedJaxpr, effects: Effects):
return _replace_jaxpr_effects(jaxpr, frozenset(effects))
@weakref_lru_cache
def _replace_jaxpr_effects(jaxpr: ClosedJaxpr, effects: frozenset[Effect]):
return jaxpr.replace(jaxpr=jaxpr.jaxpr.replace(effects=set(effects)))
# ------------------- Jaxpr checking -------------------
def typecheck(aval: AbstractValue, x) -> bool:
return typecompat(aval, get_aval(x))
def typecompat(aval_ref: AbstractValue, aval: AbstractValue) -> bool:
"""Determine whether `aval` conforms to `aval_ref`. Ignores weak_type."""
try:
return typematch(aval_ref, aval)
except TypeError:
return False
def typematch(t1: AbstractValue, t2: AbstractValue) -> bool:
"""Determine whether `t1` and `t2` are equivalent. Ignores weak_type."""
t1 = t1.normalize()
t2 = t2.normalize()
from jax._src.state.types import AbstractRef # pytype: disable=import-error
if t1 == t2:
return True
elif (isinstance(t1, (ShapedArray, DShapedArray)) and
isinstance(t2, (ShapedArray, DShapedArray))):
# This case handles DShapedArray and shape polynomials. Alternatively we
# could try normalizing first and then doing simple equality.
cmp = (t1.dtype == t2.dtype and definitely_equal_shape(t1.shape, t2.shape)
and t1.vma == t2.vma and t1.memory_space == t2.memory_space) # type: ignore
# TODO(yashkatariya): Expand this to Manual and Auto mode.
# See https://github.com/jax-ml/jax/issues/26474
if (not t1.sharding.mesh.empty and not t2.sharding.mesh.empty and
(t1.sharding.mesh._any_axis_explicit or
t2.sharding.mesh._any_axis_explicit)):
sh_eq = t1.sharding == t2.sharding
else:
sh_eq = True
return cmp and sh_eq
elif isinstance(t1, AbstractRef) and isinstance(t2, AbstractRef):
# We want to use the regular typecheck for ShapedArray here.
return (typematch(t1.inner_aval, t2.inner_aval) and # type: ignore
(t1.memory_space is None or t2.memory_space is None or # type: ignore
t1.memory_space == t2.memory_space)) # type: ignore
else:
return False
def aval_mismatch_extra(a1: AbstractValue, a2: AbstractValue) -> str:
assert not typematch(a1, a2)
if isinstance(a1, ShapedArray) and isinstance(a2, ShapedArray):
mismatches = []
if a1.dtype != a2.dtype:
mismatches.append('the dtypes do not match')
if a1.shape != a2.shape:
mismatches.append('the shapes do not match')
if a1.vma != a2.vma:
mismatches.append('the varying manual axes do not match')
# TODO(yashkatariya,mattjj): add check for sharding-in-types mismatch
if len(mismatches) == 0:
return ''
elif len(mismatches) == 1:
return ', so ' + mismatches[0]
else:
return ', so ' + ', '.join(mismatches[:-1]) + ', and ' + mismatches[-1]
return ''
| NamedAxisEffect |
| python | pytest-dev__pytest | testing/test_runner.py | {"start": 617, "end": 5981} |
class ____:
def test_setup(self, pytester: Pytester) -> None:
item = pytester.getitem("def test_func(): pass")
ss = item.session._setupstate
values = [1]
ss.setup(item)
ss.addfinalizer(values.pop, item)
assert values
ss.teardown_exact(None)
assert not values
def test_teardown_exact_stack_empty(self, pytester: Pytester) -> None:
item = pytester.getitem("def test_func(): pass")
ss = item.session._setupstate
ss.setup(item)
ss.teardown_exact(None)
ss.teardown_exact(None)
ss.teardown_exact(None)
def test_setup_fails_and_failure_is_cached(self, pytester: Pytester) -> None:
item = pytester.getitem(
"""
def setup_module(mod):
raise ValueError(42)
def test_func(): pass
"""
)
ss = item.session._setupstate
with pytest.raises(ValueError):
ss.setup(item)
with pytest.raises(ValueError):
ss.setup(item)
def test_teardown_multiple_one_fails(self, pytester: Pytester) -> None:
r = []
def fin1():
r.append("fin1")
def fin2():
raise Exception("oops")
def fin3():
r.append("fin3")
item = pytester.getitem("def test_func(): pass")
ss = item.session._setupstate
ss.setup(item)
ss.addfinalizer(fin1, item)
ss.addfinalizer(fin2, item)
ss.addfinalizer(fin3, item)
with pytest.raises(Exception) as err:
ss.teardown_exact(None)
assert err.value.args == ("oops",)
assert r == ["fin3", "fin1"]
def test_teardown_multiple_fail(self, pytester: Pytester) -> None:
def fin1():
raise Exception("oops1")
def fin2():
raise Exception("oops2")
item = pytester.getitem("def test_func(): pass")
ss = item.session._setupstate
ss.setup(item)
ss.addfinalizer(fin1, item)
ss.addfinalizer(fin2, item)
with pytest.raises(ExceptionGroup) as err:
ss.teardown_exact(None)
# Note that finalizers are run LIFO, but because FIFO is more intuitive for
# users we reverse the order of messages, and see the error from fin1 first.
err1, err2 = err.value.exceptions
assert err1.args == ("oops1",)
assert err2.args == ("oops2",)
def test_teardown_multiple_scopes_one_fails(self, pytester: Pytester) -> None:
module_teardown = []
def fin_func():
raise Exception("oops1")
def fin_module():
module_teardown.append("fin_module")
item = pytester.getitem("def test_func(): pass")
mod = item.listchain()[-2]
ss = item.session._setupstate
ss.setup(item)
ss.addfinalizer(fin_module, mod)
ss.addfinalizer(fin_func, item)
with pytest.raises(Exception, match="oops1"):
ss.teardown_exact(None)
assert module_teardown == ["fin_module"]
def test_teardown_multiple_scopes_several_fail(self, pytester) -> None:
def raiser(exc):
raise exc
item = pytester.getitem("def test_func(): pass")
mod = item.listchain()[-2]
ss = item.session._setupstate
ss.setup(item)
ss.addfinalizer(partial(raiser, KeyError("from module scope")), mod)
ss.addfinalizer(partial(raiser, TypeError("from function scope 1")), item)
ss.addfinalizer(partial(raiser, ValueError("from function scope 2")), item)
with pytest.raises(ExceptionGroup, match="errors during test teardown") as e:
ss.teardown_exact(None)
mod, func = e.value.exceptions
assert isinstance(mod, KeyError)
assert isinstance(func.exceptions[0], TypeError)
assert isinstance(func.exceptions[1], ValueError)
def test_cached_exception_doesnt_get_longer(self, pytester: Pytester) -> None:
"""Regression test for #12204 (the "BTW" case)."""
pytester.makepyfile(test="")
# If the collector.setup() raises, all collected items error with this
# exception.
pytester.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
def runtest(self) -> None: pass
class MyBadCollector(pytest.Collector):
def collect(self):
return [
MyItem.from_parent(self, name="one"),
MyItem.from_parent(self, name="two"),
MyItem.from_parent(self, name="three"),
]
def setup(self):
1 / 0
def pytest_collect_file(file_path, parent):
if file_path.name == "test.py":
return MyBadCollector.from_parent(parent, name='bad')
"""
)
result = pytester.runpytest_inprocess("--tb=native")
assert result.ret == ExitCode.TESTS_FAILED
failures = result.reprec.getfailures() # type: ignore[attr-defined]
assert len(failures) == 3
lines1 = failures[1].longrepr.reprtraceback.reprentries[0].lines
lines2 = failures[2].longrepr.reprtraceback.reprentries[0].lines
assert len(lines1) == len(lines2)
| TestSetupState |
| python | ray-project__ray | doc/source/ray-core/doc_code/actor_checkpointing.py | {"start": 1713, "end": 2692} |
class ____:
def __init__(self, checkpoint_file):
self.checkpoint_file = checkpoint_file
if os.path.exists(self.checkpoint_file):
# Restore from a checkpoint
with open(self.checkpoint_file, "r") as f:
self.state = json.load(f)
else:
self.state = {}
def update(self, key, value):
import random
if random.randrange(10) < 5:
sys.exit(1)
self.state[key] = value
# Checkpoint the latest state
with open(self.checkpoint_file, "w") as f:
json.dump(self.state, f)
def get(self, key):
return self.state[key]
checkpoint_dir = tempfile.mkdtemp()
actor = ImmortalActor.remote(os.path.join(checkpoint_dir, "checkpoint.json"))
ray.get(actor.update.remote("1", 1))
ray.get(actor.update.remote("2", 2))
assert ray.get(actor.get.remote("1")) == 1
shutil.rmtree(checkpoint_dir)
# __actor_checkpointing_auto_restart_end__
| ImmortalActor |
| python | pandas-dev__pandas | asv_bench/benchmarks/strings.py | {"start": 4596, "end": 5832} |
class ____:
params = ([0, 3], [None, ","], [None, "-"], [0.0, 0.001, 0.15])
param_names = ["other_cols", "sep", "na_rep", "na_frac"]
def setup(self, other_cols, sep, na_rep, na_frac):
N = 10**5
mask_gen = lambda: np.random.choice([True, False], N, p=[1 - na_frac, na_frac])
self.s = Series(Index([f"i-{i}" for i in range(N)], dtype=object)).where(
mask_gen()
)
if other_cols == 0:
# str.cat self-concatenates only for others=None
self.others = None
else:
self.others = DataFrame(
{
i: Index([f"i-{i}" for i in range(N)], dtype=object).where(
mask_gen()
)
for i in range(other_cols)
}
)
def time_cat(self, other_cols, sep, na_rep, na_frac):
# before the concatenation (one caller + other_cols columns), the total
# expected fraction of rows containing any NaN is:
# reduce(lambda t, _: t + (1 - t) * na_frac, range(other_cols + 1), 0)
# for other_cols=3 and na_frac=0.15, this works out to ~48%
self.s.str.cat(others=self.others, sep=sep, na_rep=na_rep)
| Cat |
| python | marshmallow-code__marshmallow | tests/test_serialization.py | {"start": 35621, "end": 37606} |
class ____:
def test_serialize_with_missing_param_value(self):
class AliasingUserSerializer(Schema):
name = fields.String()
birthdate = fields.DateTime(dump_default=dt.datetime(2017, 9, 29))
data = {"name": "Mick"}
result = AliasingUserSerializer().dump(data)
assert result["name"] == "Mick"
assert result["birthdate"] == "2017-09-29T00:00:00"
def test_serialize_with_missing_param_callable(self):
class AliasingUserSerializer(Schema):
name = fields.String()
birthdate = fields.DateTime(dump_default=lambda: dt.datetime(2017, 9, 29))
data = {"name": "Mick"}
result = AliasingUserSerializer().dump(data)
assert result["name"] == "Mick"
assert result["birthdate"] == "2017-09-29T00:00:00"
def test_serializing_named_tuple():
field = fields.Raw()
p = Point(x=4, y=2)
assert field.serialize("x", p) == 4
def test_serializing_named_tuple_with_meta():
p = Point(x=4, y=2)
class PointSerializer(Schema):
x = fields.Int()
y = fields.Int()
serialized = PointSerializer().dump(p)
assert serialized["x"] == 4
assert serialized["y"] == 2
def test_serializing_slice():
values = [{"value": value} for value in range(5)]
sliced = itertools.islice(values, None)
class ValueSchema(Schema):
value = fields.Int()
serialized = ValueSchema(many=True).dump(sliced)
assert serialized == values
# https://github.com/marshmallow-code/marshmallow/issues/1163
def test_nested_field_many_serializing_generator():
class MySchema(Schema):
name = fields.Str()
class OtherSchema(Schema):
objects = fields.Nested(MySchema, many=True)
def gen():
yield {"name": "foo"}
yield {"name": "bar"}
obj = {"objects": gen()}
data = OtherSchema().dump(obj)
assert data.get("objects") == [{"name": "foo"}, {"name": "bar"}]
| TestSchemaSerialization |
| python | Pylons__pyramid | src/pyramid/config/tweens.py | {"start": 329, "end": 6979} |
class ____:
def add_tween(self, tween_factory, under=None, over=None):
"""
.. versionadded:: 1.2
Add a 'tween factory'. A :term:`tween` (a contraction of 'between')
is a bit of code that sits between the Pyramid router's main request
handling function and the upstream WSGI component that uses
:app:`Pyramid` as its 'app'. Tweens are a feature that may be used
by Pyramid framework extensions, to provide, for example,
Pyramid-specific view timing support, bookkeeping code that examines
exceptions before they are returned to the upstream WSGI application,
or a variety of other features. Tweens behave a bit like
:term:`WSGI` 'middleware' but they have the benefit of running in a
context in which they have access to the Pyramid :term:`application
registry` as well as the Pyramid rendering machinery.
.. note:: You can view the tween ordering configured into a given
Pyramid application by using the ``ptweens``
command. See :ref:`displaying_tweens`.
The ``tween_factory`` argument must be a :term:`dotted Python name`
to a global object representing the tween factory.
The ``under`` and ``over`` arguments allow the caller of
``add_tween`` to provide a hint about where in the tween chain this
tween factory should be placed when an implicit tween chain is used.
These hints are only used when an explicit tween chain is not used
(when the ``pyramid.tweens`` configuration value is not set).
Allowable values for ``under`` or ``over`` (or both) are:
- ``None`` (the default).
- A :term:`dotted Python name` to a tween factory: a string
representing the dotted name of a tween factory added in a call to
``add_tween`` in the same configuration session.
- One of the constants :attr:`pyramid.tweens.MAIN`,
:attr:`pyramid.tweens.INGRESS`, or :attr:`pyramid.tweens.EXCVIEW`.
- An iterable of any combination of the above. This allows the user
to specify fallbacks if the desired tween is not included, as well
as compatibility with multiple other tweens.
``under`` means 'closer to the main Pyramid application than',
``over`` means 'closer to the request ingress than'.
For example, calling ``add_tween('myapp.tfactory',
over=pyramid.tweens.MAIN)`` will attempt to place the tween factory
represented by the dotted name ``myapp.tfactory`` directly 'above'
(in ``ptweens`` order) the main Pyramid request handler.
Likewise, calling ``add_tween('myapp.tfactory',
over=pyramid.tweens.MAIN, under='mypkg.someothertween')`` will
attempt to place this tween factory 'above' the main handler but
'below' (a fictional) 'mypkg.someothertween' tween factory.
If all options for ``under`` (or ``over``) cannot be found in the
current configuration, it is an error. If some options are specified
purely for compatibility with other tweens, just add a fallback of
MAIN or INGRESS. For example, ``under=('mypkg.someothertween',
'mypkg.someothertween2', INGRESS)``. This constraint will require
the tween to be located under both the 'mypkg.someothertween' tween,
the 'mypkg.someothertween2' tween, and INGRESS. If any of these is
not in the current configuration, this constraint will only organize
itself based on the tweens that are present.
Specifying neither ``over`` nor ``under`` is equivalent to specifying
``under=INGRESS``.
Implicit tween ordering is obviously only best-effort. Pyramid will
attempt to present an implicit order of tweens as best it can, but
the only surefire way to get any particular ordering is to use an
explicit tween order. A user may always override the implicit tween
ordering by using an explicit ``pyramid.tweens`` configuration value
setting.
``under``, and ``over`` arguments are ignored when an explicit tween
chain is specified using the ``pyramid.tweens`` configuration value.
For more information, see :ref:`registering_tweens`.
"""
return self._add_tween(
tween_factory, under=under, over=over, explicit=False
)
def add_default_tweens(self):
self.add_tween(EXCVIEW)
@action_method
def _add_tween(self, tween_factory, under=None, over=None, explicit=False):
if not isinstance(tween_factory, str):
raise ConfigurationError(
'The "tween_factory" argument to add_tween must be a '
'dotted name to a globally importable object, not %r'
% tween_factory
)
name = tween_factory
if name in (MAIN, INGRESS):
raise ConfigurationError('%s is a reserved tween name' % name)
tween_factory = self.maybe_dotted(tween_factory)
for t, p in [('over', over), ('under', under)]:
if p is not None:
if not is_string_or_iterable(p):
raise ConfigurationError(
f'"{t}" must be a string or iterable, not {p}'
)
if over is INGRESS or is_nonstr_iter(over) and INGRESS in over:
raise ConfigurationError('%s cannot be over INGRESS' % name)
if under is MAIN or is_nonstr_iter(under) and MAIN in under:
raise ConfigurationError('%s cannot be under MAIN' % name)
registry = self.registry
introspectables = []
tweens = registry.queryUtility(ITweens)
if tweens is None:
tweens = Tweens()
registry.registerUtility(tweens, ITweens)
def register():
if explicit:
tweens.add_explicit(name, tween_factory)
else:
tweens.add_implicit(
name, tween_factory, under=under, over=over
)
discriminator = ('tween', name, explicit)
tween_type = explicit and 'explicit' or 'implicit'
intr = self.introspectable(
'tweens', discriminator, name, '%s tween' % tween_type
)
intr['name'] = name
intr['factory'] = tween_factory
intr['type'] = tween_type
intr['under'] = under
intr['over'] = over
introspectables.append(intr)
self.action(discriminator, register, introspectables=introspectables)
@implementer(ITweens)
| TweensConfiguratorMixin |
| python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {"start": 858313, "end": 858511} |
class ____(VegaLiteSchema):
"""ParameterName schema wrapper."""
_schema = {"$ref": "#/definitions/ParameterName"}
def __init__(self, *args):
super().__init__(*args)
| ParameterName |
| python | dagster-io__dagster | helm/dagster/schema/schema/charts/dagster/subschema/postgresql.py | {"start": 162, "end": 433} |
class ____(BaseModel):
image: ExternalImage
enabled: bool
postgresqlHost: str
postgresqlUsername: str
postgresqlPassword: str
postgresqlDatabase: str
postgresqlParams: dict
postgresqlScheme: Optional[str] = None
service: Service
| PostgreSQL |
| python | ray-project__ray | rllib/connectors/module_to_env/unbatch_to_individual_items.py | {"start": 472, "end": 4829} |
class ____(ConnectorV2):
"""Unbatches the given `data` back into the individual-batch-items format.
Note: This is one of the default module-to-env ConnectorV2 pieces that
are added automatically by RLlib into every module-to-env connector pipeline,
unless `config.add_default_connectors_to_module_to_env_pipeline` is set to
False.
The default module-to-env connector pipeline is:
[
GetActions,
TensorToNumpy,
UnBatchToIndividualItems,
ModuleToAgentUnmapping, # only in multi-agent setups!
RemoveSingleTsTimeRankFromBatch,
[0 or more user defined ConnectorV2 pieces],
NormalizeAndClipActions,
ListifyDataForVectorEnv,
]
"""
@override(ConnectorV2)
def __call__(
self,
*,
rl_module: RLModule,
batch: Dict[str, Any],
episodes: List[EpisodeType],
explore: Optional[bool] = None,
shared_data: Optional[dict] = None,
**kwargs,
) -> Any:
memorized_map_structure = shared_data.get("memorized_map_structure")
episode_map_structure = shared_data.get("vector_env_episodes_map", {})
# Simple case (no structure stored): Just unbatch.
if memorized_map_structure is None:
return tree.map_structure(lambda s: unbatch_fn(s), batch)
# Single agent case: Memorized structure is a list, whose indices map to
# eps_id values.
elif isinstance(memorized_map_structure, list):
for column, column_data in batch.copy().items():
column_data = unbatch_fn(column_data)
new_column_data = defaultdict(list)
for i, eps_id in enumerate(memorized_map_structure):
# Keys are always tuples to resemble multi-agent keys, which
# have the structure (eps_id, agent_id, module_id).
key = (eps_id,)
new_column_data[key].append(column_data[i])
batch[column] = dict(new_column_data)
# Multi-agent case: Memorized structure is dict mapping module_ids to lists of
# (eps_id, agent_id)-tuples, such that the original individual-items-based form
# can be constructed.
else:
for module_id, module_data in batch.copy().items():
if module_id not in memorized_map_structure:
raise KeyError(
f"ModuleID={module_id} not found in `memorized_map_structure`!"
)
for column, column_data in module_data.items():
column_data = unbatch_fn(column_data)
new_column_data = defaultdict(list)
for i, (eps_id, agent_id) in enumerate(
memorized_map_structure[module_id]
):
key = (eps_id, agent_id, module_id)
# Check, if an agent episode is already done. For this we need
# to get the corresponding episode in the `EnvRunner`s list of
# episodes.
eps_id = episode_map_structure.get(eps_id, eps_id)
episode = next(
(eps for eps in episodes if eps.id_ == eps_id), None
)
if episode is None:
raise ValueError(
f"No episode found that matches the ID={eps_id}. Check "
"shared_data['vector_env_episodes_map'] for a missing ",
"mapping.",
)
# If an episode has not just started and the agent's episode
# is done do not return data.
# This should not be `True` for new `MultiAgentEpisode`s.
if (
episode.agent_episodes
and episode.agent_episodes[agent_id].is_done
and not episode.is_done
):
continue
new_column_data[key].append(column_data[i])
module_data[column] = dict(new_column_data)
return batch
| UnBatchToIndividualItems |
| python | pola-rs__polars | py-polars/src/polars/dataframe/_html.py | {"start": 534, "end": 1372} |
class ____:
"""Class for representing an HTML tag."""
def __init__(
self,
elements: list[str],
tag: str,
attributes: dict[str, str] | None = None,
) -> None:
self.tag = tag
self.elements = elements
self.attributes = attributes
def __enter__(self) -> None:
if self.attributes is not None:
s = f"<{self.tag} "
for k, v in self.attributes.items():
s += f'{k}="{v}" '
s = f"{s.rstrip()}>"
self.elements.append(s)
else:
self.elements.append(f"<{self.tag}>")
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
self.elements.append(f"</{self.tag}>")
| Tag |
| python | huggingface__transformers | src/transformers/models/openai/modeling_openai.py | {"start": 20794, "end": 26565} |
class ____(OpenAIGPTPreTrainedModel):
_tied_weights_keys = {"transformer.tokens_embed.weight": "lm_head.weight"}
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = OpenAIGPTSequenceSummary(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
mc_token_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
mc_labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], OpenAIGPTDoubleHeadsModelOutput]:
r"""
mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input):
Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) -
1]`.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-1, 0, ..., config.vocab_size]` All labels set to `-100` are
ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above)
Examples:
```python
>>> from transformers import AutoTokenizer, OpenAIGPTDoubleHeadsModel
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/openai-gpt")
>>> model = OpenAIGPTDoubleHeadsModel.from_pretrained("openai-community/openai-gpt")
>>> tokenizer.add_special_tokens(
... {"cls_token": "[CLS]"}
... ) # Add a [CLS] to the vocabulary (we should train it also!)
>>> model.resize_token_embeddings(len(tokenizer))
>>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> mc_token_ids = torch.tensor([input_ids.size(-1) - 1, input_ids.size(-1) - 1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, mc_token_ids=mc_token_ids)
>>> lm_logits = outputs.logits
>>> mc_logits = outputs.mc_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
lm_loss, mc_loss = None, None
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
if not return_dict:
output = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_loss is not None:
output = (mc_loss,) + output
return ((lm_loss,) + output) if lm_loss is not None else output
return OpenAIGPTDoubleHeadsModelOutput(
loss=lm_loss,
mc_loss=mc_loss,
logits=lm_logits,
mc_logits=mc_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The Original OpenAI GPT Model transformer with a sequence classification head on top (linear layer).
    [`OpenAIGPTForSequenceClassification`] uses the last token in order to do the classification, as other causal
    models (e.g. GPT-2) do. Since it does classification on the last token, it needs to know the position of the
    last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding
    token in each row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since
    it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes
    the last value in each row of the batch).
"""
)
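# Illustrative sketch (not part of the model code): the intro above explains that the
# sequence-classification head scores the last non-padding token of each row. Assuming
# right-padded `input_ids` and a known `pad_token_id` (both hypothetical values below),
# that index can be recovered roughly like this:
#
#     import torch
#     pad_token_id = 0
#     input_ids = torch.tensor([[5, 6, 0, 0], [7, 8, 9, 0]])
#     last_token_idx = (input_ids != pad_token_id).sum(-1) - 1   # -> tensor([1, 2])
#     # the classification logits would then be gathered at `last_token_idx` per row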
|
OpenAIGPTDoubleHeadsModel
|
python
|
dagster-io__dagster
|
python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/test_ai_review_analyze.py
|
{
"start": 95,
"end": 1782
}
|
class ____:
"""Basic smoke tests for the ai-review-analyze command."""
def test_import_and_basic_structure(self):
"""Test that command can be imported and has expected structure."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
assert ai_review_analyze is not None
assert ai_review_analyze.name == "ai-review-analyze"
assert callable(ai_review_analyze)
def test_help_command(self):
"""Test that help command works."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
result = runner.invoke(ai_review_analyze, ["--help"])
assert result.exit_code == 0
assert "ai-review-analyze" in result.output
assert "--human" in result.output
assert "--json" in result.output
def test_basic_command_execution(self):
"""Test that command executes successfully when dependencies are available."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
result = runner.invoke(ai_review_analyze, ["--json"])
# Command should execute successfully in test environment
# Note: This may fail if gt/dagster-dev tools are unavailable
if result.exit_code == 0:
# Success case - verify JSON output format
import json
data = json.loads(result.output)
assert "current_branch" in data
assert "repository_state" in data
else:
# Failure case - should be graceful
assert "Error" in result.output
|
TestAiReviewAnalyze
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_iter.py
|
{
"start": 3430,
"end": 3511
}
|
class ____:
def __iter__(self):
raise ZeroDivisionError
|
BadIterableClass
|
python
|
encode__django-rest-framework
|
rest_framework/schemas/coreapi.py
|
{
"start": 1478,
"end": 2619
}
|
class ____(dict):
def __init__(self):
self.links = []
self.methods_counter = Counter()
super().__init__()
def get_available_key(self, preferred_key):
if preferred_key not in self:
return preferred_key
while True:
current_val = self.methods_counter[preferred_key]
self.methods_counter[preferred_key] += 1
key = f'{preferred_key}_{current_val}'
if key not in self:
return key
def insert_into(target, keys, value):
"""
Nested dictionary insertion.
>>> example = {}
>>> insert_into(example, ['a', 'b', 'c'], 123)
>>> example
    LinkNode({'a': LinkNode({'b': LinkNode({'c': LinkNode(links=[123])})})})
"""
for key in keys[:-1]:
if key not in target:
target[key] = LinkNode()
target = target[key]
try:
target.links.append((keys[-1], value))
except TypeError:
msg = INSERT_INTO_COLLISION_FMT.format(
value_url=value.url,
target_url=target.url,
keys=keys
)
raise ValueError(msg)
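# Illustrative sketch (not part of the original module): how `get_available_key` and
# `insert_into` behave together. The keys and the link value below are hypothetical.
#
#     node = LinkNode()
#     node.get_available_key("list")    # -> 'list'   (unused, returned unchanged)
#     node["list"] = LinkNode()
#     node.get_available_key("list")    # -> 'list_0' (collision, counter suffix appended)
#
#     tree = LinkNode()
#     insert_into(tree, ["users", "list"], link)    # `link` is a placeholder Link object
#     # tree["users"].links is now [("list", link)]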
|
LinkNode
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 80565,
"end": 81768
}
|
class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
start_date: str,
access_key: str,
base: Optional[str] = None,
ignore_weekends: Optional[bool] = None,
):
"""Airbyte Source for Exchange Rates.
Documentation can be found at https://docs.airbyte.com/integrations/sources/exchange-rates
Args:
name (str): The name of the destination.
start_date (str): Start getting data from that date.
access_key (str): Your API Key. See here. The key is case sensitive.
base (Optional[str]): ISO reference currency. See here. Free plan doesn't support Source Currency Switching, default base currency is EUR
ignore_weekends (Optional[bool]): Ignore weekends? (Exchanges don't run on weekends)
"""
self.start_date = check.str_param(start_date, "start_date")
self.access_key = check.str_param(access_key, "access_key")
self.base = check.opt_str_param(base, "base")
self.ignore_weekends = check.opt_bool_param(ignore_weekends, "ignore_weekends")
super().__init__("Exchange Rates", name)
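    # Illustrative sketch (not part of the generated source): constructing this source.
    # The name, start date and credential below are placeholders, not real values.
    #
    #     source = ExchangeRatesSource(
    #         name="exchange-rates",
    #         start_date="2023-01-01",
    #         access_key="<YOUR_API_KEY>",
    #         base="USD",
    #         ignore_weekends=True,
    #     )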
|
ExchangeRatesSource
|
python
|
realpython__materials
|
queue/src/queues.py
|
{
"start": 1100,
"end": 1863
}
|
class ____(IterableMixin):
def __init__(self):
super().__init__()
self._elements_by_value = {}
self._elements = []
self._counter = count()
def __setitem__(self, unique_value, priority):
if unique_value in self._elements_by_value:
self._elements_by_value[unique_value].priority = priority
heapify(self._elements)
else:
element = Element(priority, next(self._counter), unique_value)
self._elements_by_value[unique_value] = element
heappush(self._elements, element)
def __getitem__(self, unique_value):
return self._elements_by_value[unique_value].priority
def dequeue(self):
return heappop(self._elements).value
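    # Illustrative sketch (not part of the original module): the mapping-style API lets
    # callers insert or re-prioritize elements by value and dequeue them lowest-priority
    # first. The element names and priorities below are hypothetical.
    #
    #     heap = MutableMinHeap()
    #     heap["banana"] = 3
    #     heap["apple"] = 1
    #     heap["banana"] = 0     # updating an existing value re-heapifies in place
    #     heap.dequeue()         # -> 'banana'
    #     heap.dequeue()         # -> 'apple'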
|
MutableMinHeap
|
python
|
gevent__gevent
|
src/gevent/tests/test__core_timer.py
|
{
"start": 283,
"end": 1927
}
|
class ____(TestCase):
__timeout__ = LARGE_TIMEOUT
repeat = 0
timer_duration = 0.001
def setUp(self):
super(Test, self).setUp()
self.called = []
self.loop = config.loop(default=False)
self.timer = self.loop.timer(self.timer_duration, repeat=self.repeat)
assert not self.loop.default
def cleanup(self):
# cleanup instead of tearDown to cooperate well with
# leakcheck.py
self.timer.close()
# cycle the loop so libuv close callbacks fire
self.loop.run()
self.loop.destroy()
self.loop = None
self.timer = None
def f(self, x=None):
self.called.append(1)
if x is not None:
x.stop()
def assertTimerInKeepalive(self):
if CFFI_BACKEND:
self.assertIn(self.timer, self.loop._keepaliveset)
def assertTimerNotInKeepalive(self):
if CFFI_BACKEND:
self.assertNotIn(self.timer, self.loop._keepaliveset)
def test_main(self):
loop = self.loop
x = self.timer
x.start(self.f)
self.assertTimerInKeepalive()
self.assertTrue(x.active, x)
with self.assertRaises((AttributeError, ValueError)):
x.priority = 1
loop.run()
self.assertEqual(x.pending, 0)
self.assertEqual(self.called, [1])
self.assertIsNone(x.callback)
self.assertIsNone(x.args)
if x.priority is not None:
self.assertEqual(x.priority, 0)
x.priority = 1
self.assertEqual(x.priority, 1)
x.stop()
self.assertTimerNotInKeepalive()
|
Test
|
python
|
streamlit__streamlit
|
lib/streamlit/vendor/pympler/asizeof.py
|
{
"start": 52762,
"end": 87966
}
|
class ____(object):
"""Sizer state and options to accumulate sizes."""
_above_ = 1024 # rank only objs of size 1K+
_align_ = 8 # alignment, power-of-2
_clip_ = 80
_code_ = False
_cutoff_ = 0 # in percent
_derive_ = False
_detail_ = 0 # for Asized only
_frames_ = False
_infer_ = False
_limit_ = 100
_stats_ = 0
_depth = 0 # deepest recursion
_excl_d = None # {}
_ign_d = _kind_ignored
_incl = _NN # or ' (incl. code)'
_mask = 7 # see _align_
_missed = 0 # due to errors
_profile = False # no profiling
_profs = None # {}
_ranked = 0
_ranks = [] # type: List[_Rank] # sorted by decreasing size
_seen = None # {}
_stream = None # I/O stream for printing
_total = 0 # total size
def __init__(self, **opts):
"""New **Asizer** accumulator.
See this module documentation for more details.
See method **reset** for all available options and defaults.
"""
self._excl_d = {}
self.reset(**opts)
def _c100(self, stats):
"""Cutoff as percentage (for backward compatibility)"""
s = int(stats)
c = int((stats - s) * 100.0 + 0.5) or self.cutoff
return s, c
def _clear(self):
"""Clear state."""
self._depth = 0 # recursion depth reached
self._incl = _NN # or ' (incl. code)'
self._missed = 0 # due to errors
self._profile = False
self._profs = {}
self._ranked = 0
self._ranks = []
self._seen = _Seen()
self._total = 0 # total size
for k in _keys(self._excl_d):
self._excl_d[k] = 0
# don't size, profile or rank private, possibly large objs
m = sys.modules[__name__]
self.exclude_objs(
self,
self._excl_d,
self._profs,
self._ranks,
self._seen,
m,
m.__dict__,
m.__doc__,
_typedefs,
)
def _nameof(self, obj):
"""Return the object's name."""
return _nameof(obj, _NN) or self._repr(obj)
def _prepr(self, obj):
"""Like **prepr()**."""
return _prepr(obj, clip=self._clip_)
def _printf(self, fmt, *args, **print3options):
"""Print to sys.stdout or the configured stream if any is
specified and if the file keyword argument is not already
set in the **print3options** for this specific call.
"""
if self._stream and not print3options.get("file", None):
if args:
fmt = fmt % args
_printf(fmt, file=self._stream, **print3options)
else:
_printf(fmt, *args, **print3options)
def _prof(self, key):
"""Get _Prof object."""
p = self._profs.get(key, None)
if not p:
self._profs[key] = p = _Prof()
self.exclude_objs(p) # XXX superfluous?
return p
def _rank(self, key, obj, size, deep, pid):
"""Rank 100 largest objects by size."""
rs = self._ranks
# bisect, see <http://GitHub.com/python/cpython/blob/master/Lib/bisect.py>
i, j = 0, len(rs)
while i < j:
m = (i + j) // 2
if size < rs[m].size:
i = m + 1
else:
j = m
if i < 100:
r = _Rank(key, obj, size, deep, pid)
rs.insert(i, r)
self.exclude_objs(r) # XXX superfluous?
while len(rs) > 100:
rs.pop()
# self._ranks[:] = rs[:100]
self._ranked += 1
def _repr(self, obj):
"""Like ``repr()``."""
return _repr(obj, clip=self._clip_)
def _sizer(self, obj, pid, deep, sized): # MCCABE 19
"""Size an object, recursively."""
s, f, i = 0, 0, id(obj)
if i not in self._seen:
self._seen[i] = 1
elif deep or self._seen[i]:
# skip obj if seen before
# or if ref of a given obj
self._seen.again(i)
if sized:
s = sized(s, f, name=self._nameof(obj))
self.exclude_objs(s)
return s # zero
else: # deep == seen[i] == 0
self._seen.again(i)
try:
k, rs = _objkey(obj), []
if k in self._excl_d:
self._excl_d[k] += 1
else:
v = _typedefs.get(k, None)
if not v: # new typedef
_typedefs[k] = v = _typedef(
obj,
derive=self._derive_,
frames=self._frames_,
infer=self._infer_,
)
if (v.both or self._code_) and v.kind is not self._ign_d:
s = f = v.flat(obj, self._mask) # flat size
if self._profile:
# profile based on *flat* size
self._prof(k).update(obj, s)
# recurse, but not for nested modules
if v.refs and deep < self._limit_ and not (deep and ismodule(obj)):
# add sizes of referents
z, d = self._sizer, deep + 1
if sized and deep < self._detail_:
# use named referents
self.exclude_objs(rs)
for o in v.refs(obj, True):
if isinstance(o, _NamedRef):
r = z(o.ref, i, d, sized)
r.name = o.name
else:
r = z(o, i, d, sized)
r.name = self._nameof(o)
rs.append(r)
s += r.size
else: # just size and accumulate
for o in v.refs(obj, False):
s += z(o, i, d, None)
# deepest recursion reached
if self._depth < d:
self._depth = d
if self._stats_ and s > self._above_ > 0:
# rank based on *total* size
self._rank(k, obj, s, deep, pid)
except RuntimeError: # XXX RecursionLimitExceeded:
self._missed += 1
if not deep:
self._total += s # accumulate
if sized:
s = sized(s, f, name=self._nameof(obj), refs=rs)
self.exclude_objs(s)
return s
def _sizes(self, objs, sized=None):
"""Return the size or an **Asized** instance for each
given object plus the total size. The total includes
the size of duplicates only once.
"""
self.exclude_refs(*objs) # skip refs to objs
s, t = {}, []
self.exclude_objs(s, t)
for o in objs:
i = id(o)
if i in s: # duplicate
self._seen.again(i)
else:
s[i] = self._sizer(o, 0, 0, sized)
t.append(s[i])
return tuple(t)
@property
def above(self):
"""Get the large object size threshold (int)."""
return self._above_
@property
def align(self):
"""Get the size alignment (int)."""
return self._align_
def asized(self, *objs, **opts):
"""Size each object and return an **Asized** instance with
size information and referents up to the given detail
level (and with modified options, see method **set**).
If only one object is given, the return value is the
**Asized** instance for that object. The **Asized** size
of duplicate and ignored objects will be zero.
"""
if opts:
self.set(**opts)
t = self._sizes(objs, Asized)
return t[0] if len(t) == 1 else t
def asizeof(self, *objs, **opts):
"""Return the combined size of the given objects
(with modified options, see method **set**).
"""
if opts:
self.set(**opts)
self.exclude_refs(*objs) # skip refs to objs
return sum(self._sizer(o, 0, 0, None) for o in objs)
def asizesof(self, *objs, **opts):
"""Return the individual sizes of the given objects
(with modified options, see method **set**).
The size of duplicate and ignored objects will be zero.
"""
if opts:
self.set(**opts)
return self._sizes(objs, None)
@property
def clip(self):
"""Get the clipped string length (int)."""
return self._clip_
@property
def code(self):
"""Size (byte) code (bool)."""
return self._code_
@property
def cutoff(self):
"""Stats cutoff (int)."""
return self._cutoff_
@property
def derive(self):
"""Derive types (bool)."""
return self._derive_
@property
def detail(self):
"""Get the detail level for **Asized** refs (int)."""
return self._detail_
@property
def duplicate(self):
"""Get the number of duplicate objects seen so far (int)."""
return sum(1 for v in _values(self._seen) if v > 1) # == len
def exclude_objs(self, *objs):
"""Exclude the specified objects from sizing, profiling and ranking."""
for o in objs:
self._seen.setdefault(id(o), -1)
def exclude_refs(self, *objs):
"""Exclude any references to the specified objects from sizing.
While any references to the given objects are excluded, the
objects will be sized if specified as positional arguments
in subsequent calls to methods **asizeof** and **asizesof**.
"""
for o in objs:
self._seen.setdefault(id(o), 0)
def exclude_types(self, *objs):
"""Exclude the specified object instances and types from sizing.
All instances and types of the given objects are excluded,
even objects specified as positional arguments in subsequent
calls to methods **asizeof** and **asizesof**.
"""
for o in objs:
for t in _key2tuple(o):
if t and t not in self._excl_d:
self._excl_d[t] = 0
@property
def excluded(self):
"""Get the types being excluded (tuple)."""
return tuple(_keys(self._excl_d))
@property
def frames(self):
"""Ignore stack frames (bool)."""
return self._frames_
@property
def ignored(self):
"""Ignore certain types (bool)."""
return True if self._ign_d else False
@property
def infer(self):
"""Infer types (bool)."""
return self._infer_
@property
def limit(self):
"""Get the recursion limit (int)."""
return self._limit_
@property
def missed(self):
"""Get the number of objects missed due to errors (int)."""
return self._missed
def print_largest(self, w=0, cutoff=0, **print3options):
"""Print the largest objects.
The available options and defaults are:
*w=0* -- indentation for each line
*cutoff=100* -- number of largest objects to print
*print3options* -- some keyword arguments, like Python 3+ print
"""
c = int(cutoff) if cutoff else self._cutoff_
n = min(len(self._ranks), max(c, 0))
s = self._above_
if n > 0 and s > 0:
self._printf(
"%s%*d largest object%s (of %d over %d bytes%s)",
linesep,
w,
n,
_plural(n),
self._ranked,
s,
_SI(s),
**print3options,
)
id2x = dict((r.id, i) for i, r in enumerate(self._ranks))
for r in self._ranks[:n]:
s, t = r.size, r.format(self._clip_, id2x)
self._printf("%*d bytes%s: %s", w, s, _SI(s), t, **print3options)
def print_profiles(self, w=0, cutoff=0, **print3options):
"""Print the profiles above *cutoff* percentage.
The available options and defaults are:
*w=0* -- indentation for each line
*cutoff=0* -- minimum percentage printed
*print3options* -- some keyword arguments, like Python 3+ print
"""
# get the profiles with non-zero size or count
t = [(v, k) for k, v in _items(self._profs) if v.total > 0 or v.number > 1]
if (len(self._profs) - len(t)) < 9: # just show all
t = [(v, k) for k, v in _items(self._profs)]
if t:
s = _NN
if self._total:
s = " (% of grand total)"
c = int(cutoff) if cutoff else self._cutoff_
C = int(c * 0.01 * self._total)
else:
C = c = 0
self._printf(
"%s%*d profile%s: total%s, average, and largest flat size%s: largest object",
linesep,
w,
len(t),
_plural(len(t)),
s,
self._incl,
**print3options,
)
r = len(t)
t = [
(v, self._prepr(k)) for v, k in t
] # replace types with str for Python 3.11+
for v, k in sorted(t, reverse=True):
s = (
"object%(plural)s: %(total)s, %(avg)s, %(high)s: %(obj)s%(lengstr)s"
% v.format(self._clip_, self._total)
)
self._printf("%*d %s %s", w, v.number, k, s, **print3options)
r -= 1
if r > 1 and v.total < C:
self._printf("%+*d profiles below cutoff (%.0f%%)", w, r, c)
break
z = len(self._profs) - len(t)
if z > 0:
self._printf(
"%+*d %r object%s", w, z, "zero", _plural(z), **print3options
)
def print_stats(
self, objs=(), opts={}, sized=(), sizes=(), stats=3, **print3options
):
"""Prints the statistics.
The available options and defaults are:
*w=0* -- indentation for each line
*objs=()* -- optional, list of objects
*opts={}* -- optional, dict of options used
*sized=()* -- optional, tuple of **Asized** instances returned
*sizes=()* -- optional, tuple of sizes returned
*stats=3* -- print stats, see function **asizeof**
*print3options* -- some keyword arguments, like Python 3+ print
"""
s = min(opts.get("stats", stats) or 0, self.stats)
if s > 0: # print stats
w = len(str(self.missed + self.seen + self.total)) + 1
t = c = _NN
o = _kwdstr(**opts)
if o and objs:
c = ", "
# print header line(s)
if sized and objs:
n = len(objs)
if n > 1:
self._printf(
"%sasized(...%s%s) ...", linesep, c, o, **print3options
)
for i in range(n): # no enumerate in Python 2.2.3
self._printf("%*d: %s", w - 1, i, sized[i], **print3options)
else:
self._printf("%sasized(%s): %s", linesep, o, sized, **print3options)
elif sizes and objs:
self._printf("%sasizesof(...%s%s) ...", linesep, c, o, **print3options)
for z, o in zip(sizes, objs):
self._printf(
"%*d bytes%s%s: %s",
w,
z,
_SI(z),
self._incl,
self._repr(o),
**print3options,
)
else:
if objs:
t = self._repr(objs)
self._printf("%sasizeof(%s%s%s) ...", linesep, t, c, o, **print3options)
# print summary
self.print_summary(w=w, objs=objs, **print3options)
# for backward compatibility, cutoff from fractional stats
s, c = self._c100(s)
self.print_largest(w=w, cutoff=c if s < 2 else 10, **print3options)
if s > 1: # print profile
self.print_profiles(w=w, cutoff=c, **print3options)
if s > 2: # print typedefs
self.print_typedefs(w=w, **print3options) # PYCHOK .print_largest?
def print_summary(self, w=0, objs=(), **print3options):
"""Print the summary statistics.
The available options and defaults are:
*w=0* -- indentation for each line
*objs=()* -- optional, list of objects
*print3options* -- some keyword arguments, like Python 3+ print
"""
self._printf(
"%*d bytes%s%s",
w,
self._total,
_SI(self._total),
self._incl,
**print3options,
)
if self._mask:
self._printf("%*d byte aligned", w, self._mask + 1, **print3options)
self._printf("%*d byte sizeof(void*)", w, _sizeof_Cvoidp, **print3options)
n = len(objs or ())
self._printf("%*d object%s %s", w, n, _plural(n), "given", **print3options)
n = self.sized
self._printf("%*d object%s %s", w, n, _plural(n), "sized", **print3options)
if self._excl_d:
n = sum(_values(self._excl_d))
self._printf(
"%*d object%s %s", w, n, _plural(n), "excluded", **print3options
)
n = self.seen
self._printf("%*d object%s %s", w, n, _plural(n), "seen", **print3options)
n = self.ranked
if n > 0:
self._printf("%*d object%s %s", w, n, _plural(n), "ranked", **print3options)
n = self.missed
self._printf("%*d object%s %s", w, n, _plural(n), "missed", **print3options)
n = self.duplicate
self._printf("%*d duplicate%s", w, n, _plural(n), **print3options)
if self._depth > 0:
self._printf("%*d deepest recursion", w, self._depth, **print3options)
def print_typedefs(self, w=0, **print3options):
"""Print the types and dict tables.
The available options and defaults are:
*w=0* -- indentation for each line
*print3options* -- some keyword arguments, like Python 3+ print
"""
for k in _all_kinds:
# XXX Python 3+ doesn't sort type objects
t = [
(self._prepr(a), v)
for a, v in _items(_typedefs)
if v.kind == k and (v.both or self._code_)
]
if t:
self._printf(
"%s%*d %s type%s: basicsize, itemsize, _len_(), _refs()",
linesep,
w,
len(t),
k,
_plural(len(t)),
**print3options,
)
for a, v in sorted(t):
self._printf("%*s %s: %s", w, _NN, a, v, **print3options)
# dict and dict-like classes
t = sum(len(v) for v in _values(_dict_types))
if t:
self._printf("%s%*d dict/-like classes:", linesep, w, t, **print3options)
for m, v in _items(_dict_types):
self._printf("%*s %s: %s", w, _NN, m, self._prepr(v), **print3options)
@property
def ranked(self):
"""Get the number objects ranked by size so far (int)."""
return self._ranked
def reset(
self,
above=1024,
align=8,
clip=80,
code=False, # PYCHOK too many args
cutoff=10,
derive=False,
detail=0,
frames=False,
ignored=True,
infer=False,
limit=100,
stats=0,
stream=None,
**extra,
):
"""Reset sizing options, state, etc. to defaults.
The available options and default values are:
*above=0* -- threshold for largest objects stats
*align=8* -- size alignment
*code=False* -- incl. (byte)code size
*cutoff=10* -- limit large objects or profiles stats
*derive=False* -- derive from super type
*detail=0* -- **Asized** refs level
*frames=False* -- ignore frame objects
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0* -- print statistics, see function **asizeof**
*stream=None* -- output stream for printing
See function **asizeof** for a description of the options.
"""
if extra:
raise _OptionError(self.reset, Error=KeyError, **extra)
# options
self._above_ = above
self._align_ = align
self._clip_ = clip
self._code_ = code
self._cutoff_ = cutoff
self._derive_ = derive
self._detail_ = detail # for Asized only
self._frames_ = frames
self._infer_ = infer
self._limit_ = limit
self._stats_ = stats
self._stream = stream
if ignored:
self._ign_d = _kind_ignored
else:
self._ign_d = None
# clear state
self._clear()
self.set(align=align, code=code, cutoff=cutoff, stats=stats)
@property
def seen(self):
"""Get the number objects seen so far (int)."""
return sum(v for v in _values(self._seen) if v > 0)
def set(
self,
above=None,
align=None,
code=None,
cutoff=None,
frames=None,
detail=None,
limit=None,
stats=None,
):
"""Set some sizing options. See also **reset**.
The available options are:
*above* -- threshold for largest objects stats
*align* -- size alignment
*code* -- incl. (byte)code size
*cutoff* -- limit large objects or profiles stats
*detail* -- **Asized** refs level
*frames* -- size or ignore frame objects
*limit* -- recursion limit
*stats* -- print statistics, see function **asizeof**
Any options not set remain unchanged from the previous setting.
"""
# adjust
if above is not None:
self._above_ = int(above)
if align is not None:
if align > 1:
m = align - 1
if m & align:
raise _OptionError(self.set, align=align)
else:
m = 0
self._align_ = align
self._mask = m
if code is not None:
self._code_ = code
if code: # incl. (byte)code
self._incl = " (incl. code)"
if detail is not None:
self._detail_ = detail
if frames is not None:
self._frames_ = frames
if limit is not None:
self._limit_ = limit
if stats is not None:
if stats < 0:
raise _OptionError(self.set, stats=stats)
# for backward compatibility, cutoff from fractional stats
s, c = self._c100(stats)
self._cutoff_ = int(cutoff) if cutoff else c
self._stats_ = s
self._profile = s > 1 # profile types
@property
def sized(self):
"""Get the number objects sized so far (int)."""
return sum(1 for v in _values(self._seen) if v > 0)
@property
def stats(self):
"""Get the stats and cutoff setting (float)."""
return self._stats_ # + (self._cutoff_ * 0.01)
@property
def total(self):
"""Get the total size (in bytes) accumulated so far."""
return self._total
def amapped(percentage=None):
"""Set/get approximate mapped memory usage as a percentage
of the mapped file size.
Sets the new percentage if not None and returns the
previously set percentage.
Applies only to *numpy.memmap* objects.
"""
global _amapped
p = _amapped * 100.0
if percentage is not None:
_amapped = max(0, min(1, percentage * 0.01))
return p
_amapped = 0.01 # 0 <= percentage <= 1.0
_asizer = Asizer()
def asized(*objs, **opts):
"""Return a tuple containing an **Asized** instance for each
object passed as positional argument.
The available options and defaults are:
*above=0* -- threshold for largest objects stats
*align=8* -- size alignment
*code=False* -- incl. (byte)code size
*cutoff=10* -- limit large objects or profiles stats
*derive=False* -- derive from super type
*detail=0* -- Asized refs level
*frames=False* -- ignore stack frame objects
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0* -- print statistics
If only one object is given, the return value is the **Asized**
instance for that object. Otherwise, the length of the returned
tuple matches the number of given objects.
The **Asized** size of duplicate and ignored objects will be zero.
Set *detail* to the desired referents level and *limit* to the
maximum recursion depth.
See function **asizeof** for descriptions of the other options.
"""
_asizer.reset(**opts)
if objs:
t = _asizer.asized(*objs)
_asizer.print_stats(objs, opts=opts, sized=t) # show opts as _kwdstr
_asizer._clear()
else:
t = ()
return t
def asizeof(*objs: Any, **opts: Any) -> int:
"""Return the combined size (in bytes) of all objects passed
as positional arguments.
The available options and defaults are:
*above=0* -- threshold for largest objects stats
*align=8* -- size alignment
*clip=80* -- clip ``repr()`` strings
*code=False* -- incl. (byte)code size
*cutoff=10* -- limit large objects or profiles stats
*derive=False* -- derive from super type
*frames=False* -- ignore stack frame objects
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0* -- print statistics
Set *align* to a power of 2 to align sizes. Any value less
than 2 avoids size alignment.
    If *all* is True and if no positional arguments are supplied,
size all current gc objects, including module, global and stack
frame objects.
A positive *clip* value truncates all repr() strings to at
most *clip* characters.
The (byte)code size of callable objects like functions,
methods, classes, etc. is included only if *code* is True.
If *derive* is True, new types are handled like an existing
(super) type provided there is one and only of those.
By default certain base types like object, super, etc. are
ignored. Set *ignored* to False to include those.
If *infer* is True, new types are inferred from attributes
(only implemented for dict types on callable attributes
as get, has_key, items, keys and values).
Set *limit* to a positive value to accumulate the sizes of
the referents of each object, recursively up to the limit.
Using *limit=0* returns the sum of the flat sizes of the
given objects. High *limit* values may cause runtime errors
and miss objects for sizing.
A positive value for *stats* prints up to 9 statistics, (1)
a summary of the number of objects sized and seen and a list
    of the largest objects with size over *above* bytes, (2) a
simple profile of the sized objects by type and (3+) up to 6
tables showing the static, dynamic, derived, ignored, inferred
and dict types used, found respectively installed.
The fractional part of the *stats* value (x 100) is the number
    of largest objects shown for (*stats*=1.+) or the cutoff
percentage for simple profiles for (*stats*=2.+). For example,
*stats=1.10* shows the summary and the 10 largest objects,
also the default.
See this module documentation for the definition of flat size.
"""
t, p, x = _objs_opts_x(asizeof, objs, **opts)
_asizer.reset(**p)
if t:
if x: # don't size, profile or rank _getobjects tuple
_asizer.exclude_objs(t)
s = _asizer.asizeof(*t)
_asizer.print_stats(objs=t, opts=opts) # show opts as _kwdstr
_asizer._clear()
else:
s = 0
return s
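# Illustrative sketch (not part of the vendored module): calling the module-level helpers
# on an ordinary nested object. The variable and the resulting numbers are hypothetical
# and depend on the Python build.
#
#     data = {"rows": [list(range(100)) for _ in range(10)], "label": "example"}
#     total = asizeof(data)                   # combined deep size in bytes
#     flat_only = asizeof(data, limit=0)      # flat sizes only, no referents
#     per_obj = asizesof(data, data["rows"])  # one size per positional argument
#     asizeof(data, stats=1)                  # additionally prints a summary to stdout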
def asizesof(*objs, **opts):
"""Return a tuple containing the size (in bytes) of all objects
passed as positional arguments.
The available options and defaults are:
*above=1024* -- threshold for largest objects stats
*align=8* -- size alignment
*clip=80* -- clip ``repr()`` strings
*code=False* -- incl. (byte)code size
*cutoff=10* -- limit large objects or profiles stats
*derive=False* -- derive from super type
*frames=False* -- ignore stack frame objects
*ignored=True* -- ignore certain types
*infer=False* -- try to infer types
*limit=100* -- recursion limit
*stats=0* -- print statistics
See function **asizeof** for a description of the options.
The length of the returned tuple equals the number of given
objects.
The size of duplicate and ignored objects will be zero.
"""
_asizer.reset(**opts)
if objs:
t = _asizer.asizesof(*objs)
_asizer.print_stats(objs, opts=opts, sizes=t) # show opts as _kwdstr
_asizer._clear()
else:
t = ()
return t
def _typedefof(obj, save=False, **opts):
"""Get the typedef for an object."""
k = _objkey(obj)
v = _typedefs.get(k, None)
if not v: # new typedef
v = _typedef(obj, **opts)
if save:
_typedefs[k] = v
return v
def basicsize(obj, **opts):
"""Return the basic size of an object (in bytes).
The available options and defaults are:
*derive=False* -- derive type from super type
*infer=False* -- try to infer types
*save=False* -- save the object's type definition if new
See this module documentation for the definition of *basic size*.
"""
b = t = _typedefof(obj, **opts)
if t:
b = t.base
return b
def flatsize(obj, align=0, **opts):
"""Return the flat size of an object (in bytes), optionally aligned
to the given power-of-2.
See function **basicsize** for a description of other available options.
See this module documentation for the definition of *flat size*.
"""
f = t = _typedefof(obj, **opts)
if t:
if align > 1:
m = align - 1
if m & align:
raise _OptionError(flatsize, align=align)
else:
m = 0
f = t.flat(obj, mask=m)
return f
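# Illustrative sketch (not part of the vendored module): the *align* option rounds the
# flat size up to the next multiple of the given power of 2.
#
#     s = flatsize("abc")              # unaligned flat size of the str object
#     s8 = flatsize("abc", align=8)    # same size rounded up to a multiple of 8
#     assert s8 >= s and s8 % 8 == 0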
def itemsize(obj, **opts):
"""Return the item size of an object (in bytes).
See function **basicsize** for a description of the available options.
See this module documentation for the definition of *item size*.
"""
i = t = _typedefof(obj, **opts)
if t:
i, v = t.item, t.vari
if v and i == _sizeof_Cbyte:
i = getattr(obj, v, i)
return i
def leng(obj, **opts):
"""Return the length of an object, in number of *items*.
See function **basicsize** for a description of the available options.
"""
n = t = _typedefof(obj, **opts)
if t:
n = t.leng
if n and callable(n):
i, v, n = t.item, t.vari, n(obj)
if v and i == _sizeof_Cbyte:
i = getattr(obj, v, i)
if i > _sizeof_Cbyte:
n = n // i
return n
def named_refs(obj, **opts):
"""Return all named **referents** of an object (re-using
functionality from **asizeof**).
Does not return un-named *referents*, e.g. objects in a list.
See function **basicsize** for a description of the available options.
"""
rs = []
v = _typedefof(obj, **opts)
if v:
v = v.refs
if v and callable(v):
for r in v(obj, True):
try:
rs.append((r.name, r.ref))
except AttributeError:
pass
return rs
def refs(obj, **opts):
"""Return (a generator for) specific *referents* of an object.
See function **basicsize** for a description of the available options.
"""
v = _typedefof(obj, **opts)
if v:
v = v.refs
if v and callable(v):
v = v(obj, False)
return v
# License from the initial version of this source file follows:
# --------------------------------------------------------------------
# Copyright (c) 2002-2022 -- ProphICy Semiconductor, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# - Neither the name of ProphICy Semiconductor, Inc. nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# --------------------------------------------------------------------
|
Asizer
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-aws/prefect_aws/workers/ecs_worker.py
|
{
"start": 8146,
"end": 15455
}
|
class ____(BaseJobConfiguration):
"""
Job configuration for an ECS worker.
"""
aws_credentials: Optional[AwsCredentials] = Field(default_factory=AwsCredentials)
task_definition: Dict[str, Any] = Field(
default_factory=dict,
json_schema_extra=dict(template=_default_task_definition_template()),
)
task_run_request: Dict[str, Any] = Field(
default_factory=dict,
json_schema_extra=dict(template=_default_task_run_request_template()),
)
configure_cloudwatch_logs: Optional[bool] = Field(default=None)
cloudwatch_logs_options: Dict[str, str] = Field(default_factory=dict)
cloudwatch_logs_prefix: Optional[str] = Field(default=None)
network_configuration: Dict[str, Any] = Field(default_factory=dict)
stream_output: Optional[bool] = Field(
default=None,
json_schema_extra=dict(template=False),
deprecated="This field is no longer used and will be removed in a future release.",
)
task_start_timeout_seconds: int = Field(
default=300,
json_schema_extra=dict(template=0),
deprecated="This field is no longer used and will be removed in a future release.",
)
task_watch_poll_interval: float = Field(
default=5.0,
json_schema_extra=dict(template=0),
deprecated="This field is no longer used and will be removed in a future release.",
)
auto_deregister_task_definition: bool = Field(default=False)
vpc_id: Optional[str] = Field(default=None)
container_name: Optional[str] = Field(default=None)
cluster: Optional[str] = Field(default=None)
match_latest_revision_in_family: bool = Field(default=False)
prefect_api_key_secret_arn: Optional[str] = Field(default=None)
prefect_api_auth_string_secret_arn: Optional[str] = Field(default=None)
execution_role_arn: Optional[str] = Field(
title="Execution Role ARN",
default=None,
description=(
"An execution role to use for the task. This controls the permissions of "
"the task when it is launching. If this value is not null, it will "
"override the value in the task definition. An execution role must be "
"provided to capture logs from the container."
),
)
@classmethod
def json_template(cls) -> dict[str, Any]:
"""Returns a dict with job configuration as keys and the corresponding templates as values
Defaults to using the job configuration parameter name as the template variable name.
e.g.
```python
{
key1: '{{ key1 }}', # default variable template
key2: '{{ template2 }}', # `template2` specifically provide as template
}
```
"""
# This is overridden because the base class was incorrectly handling `False`
# TODO: Update the base class, remove this override, and bump the minimum `prefect` version
configuration: dict[str, Any] = {}
properties = cls.model_json_schema()["properties"]
for k, v in properties.items():
if v.get("template") is not None:
template = v["template"]
else:
template = "{{ " + k + " }}"
configuration[k] = template
return configuration
def prepare_for_flow_run(
self,
flow_run: "FlowRun",
deployment: "DeploymentResponse | None" = None,
flow: "APIFlow | None" = None,
work_pool: "WorkPool | None" = None,
worker_name: str | None = None,
) -> None:
super().prepare_for_flow_run(flow_run, deployment, flow, work_pool, worker_name)
if self.prefect_api_key_secret_arn:
# Remove the PREFECT_API_KEY from the environment variables because it will be provided via a secret
del self.env["PREFECT_API_KEY"]
if self.prefect_api_auth_string_secret_arn:
# Remove the PREFECT_API_AUTH_STRING from the environment variables because it will be provided via a secret
if "PREFECT_API_AUTH_STRING" in self.env:
del self.env["PREFECT_API_AUTH_STRING"]
@model_validator(mode="after")
def task_run_request_requires_arn_if_no_task_definition_given(self) -> Self:
"""
If no task definition is provided, a task definition ARN must be present on the
task run request.
"""
if (
not (self.task_run_request or {}).get("taskDefinition")
and not self.task_definition
):
raise ValueError(
"A task definition must be provided if a task definition ARN is not "
"present on the task run request."
)
return self
@model_validator(mode="after")
def container_name_default_from_task_definition(self) -> Self:
"""
Infers the container name from the task definition if not provided.
"""
if self.container_name is None:
self.container_name = _container_name_from_task_definition(
self.task_definition
)
# We may not have a name here still; for example if someone is using a task
# definition arn. In that case, we'll perform similar logic later to find
# the name to treat as the "orchestration" container.
return self
@model_validator(mode="after")
def configure_cloudwatch_logs_requires_execution_role_arn(
self,
) -> Self:
"""
Enforces that an execution role arn is provided (or could be provided by a
runtime task definition) when configuring logging.
"""
if (
self.configure_cloudwatch_logs
and not self.execution_role_arn
# TODO: Does not match
# Do not raise if they've linked to another task definition or provided
# it without using our shortcuts
and not (self.task_run_request or {}).get("taskDefinition")
and not (self.task_definition or {}).get("executionRoleArn")
):
raise ValueError(
"An `execution_role_arn` must be provided to use "
"`configure_cloudwatch_logs` or `stream_logs`."
)
return self
@model_validator(mode="after")
def cloudwatch_logs_options_requires_configure_cloudwatch_logs(
self,
) -> Self:
"""
Enforces that an execution role arn is provided (or could be provided by a
runtime task definition) when configuring logging.
"""
if self.cloudwatch_logs_options and not self.configure_cloudwatch_logs:
raise ValueError(
"`configure_cloudwatch_log` must be enabled to use "
"`cloudwatch_logs_options`."
)
return self
@model_validator(mode="after")
def network_configuration_requires_vpc_id(self) -> Self:
"""
Enforces a `vpc_id` is provided when custom network configuration mode is
enabled for network settings.
"""
if self.network_configuration and not self.vpc_id:
raise ValueError(
"You must provide a `vpc_id` to enable custom `network_configuration`."
)
return self
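# Illustrative sketch (not part of the worker module): a minimal configuration that
# satisfies the validators above by pointing the task run request at an already
# registered task definition. The ARN and cluster name are placeholders.
#
#     config = ECSJobConfiguration(
#         task_run_request={
#             "taskDefinition": "arn:aws:ecs:us-east-1:123456789012:task-definition/example:1"
#         },
#         cluster="example-cluster",
#     )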
|
ECSJobConfiguration
|
python
|
huggingface__transformers
|
src/transformers/models/swin2sr/modeling_swin2sr.py
|
{
"start": 18953,
"end": 24791
}
|
class ____(nn.Module):
def __init__(
self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0, pretrained_window_size=0
):
super().__init__()
self.input_resolution = input_resolution
window_size, shift_size = self._compute_window_shift(
(config.window_size, config.window_size), (shift_size, shift_size)
)
self.window_size = window_size[0]
self.shift_size = shift_size[0]
self.attention = Swin2SRAttention(
config=config,
dim=dim,
num_heads=num_heads,
window_size=self.window_size,
pretrained_window_size=pretrained_window_size
if isinstance(pretrained_window_size, collections.abc.Iterable)
else (pretrained_window_size, pretrained_window_size),
)
self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.drop_path = Swin2SRDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.intermediate = Swin2SRIntermediate(config, dim)
self.output = Swin2SROutput(config, dim)
self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
def _compute_window_shift(self, target_window_size, target_shift_size) -> tuple[tuple[int, int], tuple[int, int]]:
window_size = [min(r, w) for r, w in zip(self.input_resolution, target_window_size)]
shift_size = [0 if r <= w else s for r, w, s in zip(self.input_resolution, window_size, target_shift_size)]
return window_size, shift_size
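    # Illustrative sketch (not part of the model code): with a hypothetical input
    # resolution of (6, 6), a configured window size of 8 and a requested shift of 4,
    # the helper above clamps the window to the resolution and drops the shift, since
    # shifting is pointless when a single window already covers the axis:
    #
    #     window_size = [min(6, 8), min(6, 8)]      # -> [6, 6]
    #     shift_size = [0 if 6 <= 6 else 4] * 2     # -> [0, 0]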
def get_attn_mask(self, height, width, dtype):
if self.shift_size > 0:
# calculate attention mask for shifted window multihead self attention
img_mask = torch.zeros((1, height, width, 1), dtype=dtype)
height_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
width_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
count = 0
for height_slice in height_slices:
for width_slice in width_slices:
img_mask[:, height_slice, width_slice, :] = count
count += 1
mask_windows = window_partition(img_mask, self.window_size)
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0)
else:
attn_mask = None
return attn_mask
def maybe_pad(self, hidden_states, height, width):
pad_right = (self.window_size - width % self.window_size) % self.window_size
pad_bottom = (self.window_size - height % self.window_size) % self.window_size
pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
hidden_states = nn.functional.pad(hidden_states, pad_values)
return hidden_states, pad_values
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, torch.Tensor]:
height, width = input_dimensions
batch_size, _, channels = hidden_states.size()
shortcut = hidden_states
# pad hidden_states to multiples of window size
hidden_states = hidden_states.view(batch_size, height, width, channels)
hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)
_, height_pad, width_pad, _ = hidden_states.shape
# cyclic shift
if self.shift_size > 0:
shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_hidden_states = hidden_states
# partition windows
hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype)
if attn_mask is not None:
attn_mask = attn_mask.to(hidden_states_windows.device)
attention_outputs = self.attention(hidden_states_windows, attn_mask, output_attentions=output_attentions)
attention_output = attention_outputs[0]
attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)
# reverse cyclic shift
if self.shift_size > 0:
attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
attention_windows = shifted_windows
was_padded = pad_values[3] > 0 or pad_values[5] > 0
if was_padded:
attention_windows = attention_windows[:, :height, :width, :].contiguous()
attention_windows = attention_windows.view(batch_size, height * width, channels)
hidden_states = self.layernorm_before(attention_windows)
hidden_states = shortcut + self.drop_path(hidden_states)
layer_output = self.intermediate(hidden_states)
layer_output = self.output(layer_output)
layer_output = hidden_states + self.drop_path(self.layernorm_after(layer_output))
layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
return layer_outputs
|
Swin2SRLayer
|
python
|
sqlalchemy__sqlalchemy
|
test/dialect/oracle/test_types.py
|
{
"start": 49995,
"end": 54144
}
|
class ____(fixtures.TestBase):
__only_on__ = ("oracle+cx_oracle", "oracle+oracledb")
__backend__ = True
@testing.combinations(
(SmallInteger, 25, int, False),
(Integer, 25, int, False),
(Numeric(10, 8), decimal.Decimal("25.34534"), None, False),
(oracle.FLOAT(15), 25.34534, None, False),
(oracle.BINARY_DOUBLE, 25.34534, "NATIVE_FLOAT", False),
(oracle.BINARY_FLOAT, 25.34534, "NATIVE_FLOAT", False),
(oracle.DOUBLE_PRECISION, 25.34534, None, False),
(Unicode(30), "test", "NCHAR", True),
(UnicodeText(), "test", "DB_TYPE_NVARCHAR", True),
(Unicode(30), "test", None, False),
(UnicodeText(), "test", "DB_TYPE_NVARCHAR", False),
(String(30), "test", None, False),
(CHAR(30), "test", "FIXED_CHAR", False),
(NCHAR(30), "test", "FIXED_NCHAR", False),
(oracle.LONG(), "test", None, False),
argnames="datatype, value, sis_value_text, set_nchar_flag",
)
def test_setinputsizes(
self, metadata, datatype, value, sis_value_text, set_nchar_flag
):
if isinstance(sis_value_text, str):
sis_value = getattr(testing.db.dialect.dbapi, sis_value_text)
else:
sis_value = sis_value_text
class TestTypeDec(TypeDecorator):
impl = NullType()
cache_ok = True
def load_dialect_impl(self, dialect):
if dialect.name == "oracle":
return dialect.type_descriptor(datatype)
else:
return self.impl
m = metadata
# Oracle can have only one column of type LONG so we make three
# tables rather than one table w/ three columns
t1 = Table("t1", m, Column("foo", datatype))
t2 = Table(
"t2", m, Column("foo", NullType().with_variant(datatype, "oracle"))
)
t3 = Table("t3", m, Column("foo", TestTypeDec()))
class CursorWrapper:
# cx_oracle cursor can't be modified so we have to
# invent a whole wrapping scheme
def __init__(self, connection_fairy):
self.cursor = connection_fairy.dbapi_connection.cursor()
self.mock = mock.Mock()
connection_fairy.info["mock"] = self.mock
def setinputsizes(self, *arg, **kw):
self.mock.setinputsizes(*arg, **kw)
self.cursor.setinputsizes(*arg, **kw)
def __getattr__(self, key):
return getattr(self.cursor, key)
if set_nchar_flag:
engine = testing_engine(options={"use_nchar_for_unicode": True})
else:
engine = testing.db
with engine.connect() as conn:
conn.begin()
m.create_all(conn, checkfirst=False)
connection_fairy = conn.connection
for tab in [t1, t2, t3]:
with mock.patch.object(
connection_fairy,
"cursor",
lambda: CursorWrapper(connection_fairy),
):
conn.execute(tab.insert(), {"foo": value})
if sis_value:
eq_(
conn.info["mock"].mock_calls,
[mock.call.setinputsizes(foo=sis_value)],
)
else:
eq_(
conn.info["mock"].mock_calls,
[mock.call.setinputsizes()],
)
def test_event_no_native_float(self, metadata):
def _remove_type(inputsizes, cursor, statement, parameters, context):
for param, dbapitype in list(inputsizes.items()):
if dbapitype is testing.db.dialect.dbapi.NATIVE_FLOAT:
del inputsizes[param]
event.listen(testing.db, "do_setinputsizes", _remove_type)
try:
self.test_setinputsizes(
metadata, oracle.BINARY_FLOAT, 25.34534, None, False
)
finally:
event.remove(testing.db, "do_setinputsizes", _remove_type)
|
SetInputSizesTest
|
python
|
apache__airflow
|
docker-tests/tests/docker_tests/test_prod_image.py
|
{
"start": 2110,
"end": 3521
}
|
class ____:
def test_without_command(self, default_docker_image):
"""Checking the image without a command. It should return non-zero exit code."""
with pytest.raises(DockerException) as ctx:
run_cmd_in_docker(image=default_docker_image)
assert ctx.value.return_code == 2
def test_airflow_command(self, default_docker_image):
"""Checking 'airflow' command. It should return non-zero exit code."""
with pytest.raises(DockerException) as ctx:
run_airflow_cmd_in_docker(image=default_docker_image)
assert ctx.value.return_code == 2
def test_airflow_version(self, default_docker_image):
"""Checking 'airflow version' command. It should return zero exit code."""
output = run_airflow_cmd_in_docker(["version"], image=default_docker_image)
assert "3." in output
def test_python_version(self, default_docker_image):
"""Checking 'python --version' command. It should return zero exit code."""
output = run_cmd_in_docker(cmd=["python", "--version"], image=default_docker_image)
assert "Python 3." in output
def test_bash_version(self, default_docker_image):
"""Checking 'bash --version' command It should return zero exit code."""
output = run_cmd_in_docker(cmd=["bash", "--version"], image=default_docker_image)
assert "GNU bash," in output
|
TestCommands
|
python
|
getsentry__sentry
|
tests/sentry/uptime/endpoints/test_validators.py
|
{
"start": 1638,
"end": 3941
}
|
class ____(TestCase):
def get_valid_data(self, **kwargs):
return {
"url": kwargs.get("url", "https://www.google.com"),
"interval_seconds": kwargs.get(
"interval_seconds", UptimeSubscription.IntervalSeconds.ONE_MINUTE
),
"timeout_ms": kwargs.get("timeout_ms", 30000),
"method": kwargs.get("method", UptimeSubscription.SupportedHTTPMethods.GET),
"headers": kwargs.get("headers", []),
"trace_sampling": kwargs.get("trace_sampling", False),
"body": kwargs.get("body", None),
}
def setUp(self):
self.context = {
"organization": self.project.organization,
"project": self.project,
"request": self.make_request(),
}
def test_simple(self):
validator = UptimeMonitorDataSourceValidator(
data=self.get_valid_data(), context=self.context
)
assert validator.is_valid()
def test_bad_interval(self):
data = self.get_valid_data(interval_seconds=3700)
validator = UptimeMonitorDataSourceValidator(data=data, context=self.context)
assert not validator.is_valid()
def test_bad_method(self):
data = self.get_valid_data(method="GOT")
validator = UptimeMonitorDataSourceValidator(data=data, context=self.context)
assert not validator.is_valid()
def test_too_many_urls(self):
for _ in range(0, 100):
self.create_uptime_subscription(
url="https://www.google.com",
interval_seconds=3600,
timeout_ms=30000,
url_domain="google",
url_domain_suffix="com",
)
data = self.get_valid_data(url="https://www.google.com")
validator = UptimeMonitorDataSourceValidator(data=data, context=self.context)
assert not validator.is_valid()
assert "You cannot create any additional alerts for this domain" in str(
validator.errors["url"]
)
def test_too_big_request(self):
data = self.get_valid_data(body="0" * 1000)
validator = UptimeMonitorDataSourceValidator(data=data, context=self.context)
assert not validator.is_valid()
|
UptimeMonitorDataSourceValidatorTest
|
python
|
PrefectHQ__prefect
|
src/prefect/client/schemas/filters.py
|
{
"start": 18292,
"end": 18702
}
|
class ____(PrefectBaseModel):
"""Filter by `Log.level`."""
ge_: Optional[int] = Field(
default=None,
description="Include logs with a level greater than or equal to this level",
examples=[20],
)
le_: Optional[int] = Field(
default=None,
description="Include logs with a level less than or equal to this level",
examples=[50],
)
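    # Illustrative sketch (not part of the schema module): selecting WARNING-and-above
    # logs by combining both bounds; the numeric levels follow the standard `logging`
    # constants and are hypothetical choices here.
    #
    #     level_filter = LogFilterLevel(ge_=30, le_=50)   # WARNING (30) .. CRITICAL (50)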
|
LogFilterLevel
|
python
|
run-llama__llama_index
|
llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/firecrawl_web/base.py
|
{
"start": 250,
"end": 37193
}
|
class ____(BasePydanticReader):
"""
    Turn a URL into LLM-accessible markdown with `Firecrawl.dev`.
Args:
api_key (str): The Firecrawl API key.
api_url (Optional[str]): Optional base URL for Firecrawl deployment
mode (Optional[str]):
The mode to run the loader in. Default is "crawl".
Options include "scrape" (single url),
"crawl" (all accessible sub pages),
"map" (map all accessible sub pages),
"search" (search for content), and
"extract" (extract structured data from URLs using a prompt).
params (Optional[dict]): The parameters to pass to the Firecrawl API.
Examples include crawlerOptions.
For more details, visit: https://docs.firecrawl.dev/sdks/python
"""
firecrawl: Any
api_key: str
api_url: Optional[str]
mode: Optional[str]
params: Optional[dict]
_metadata_fn: Optional[Callable[[str], Dict]] = PrivateAttr()
# --------------------
# Aux methods (init)
# --------------------
def _import_firecrawl(self) -> Any:
try:
from firecrawl import Firecrawl # type: ignore
except Exception as exc:
raise ImportError(
"firecrawl not found, please run `pip install 'firecrawl-py>=4.3.3'`"
) from exc
return Firecrawl
def _init_client(self, api_key: str, api_url: Optional[str]) -> Any:
Firecrawl = self._import_firecrawl()
client_kwargs: Dict[str, Any] = {"api_key": api_key}
if api_url is not None:
client_kwargs["api_url"] = api_url
return Firecrawl(**client_kwargs)
def _params_copy(self) -> Dict[str, Any]:
params: Dict[str, Any] = self.params.copy() if self.params else {}
return params
# --------------------
# Aux helpers (common)
# --------------------
def _safe_get_attr(self, obj: Any, *names: str) -> Optional[Any]:
for name in names:
try:
val = getattr(obj, name, None)
except Exception:
val = None
if val:
return val
return None
def _to_dict_best_effort(self, obj: Any) -> Dict[str, Any]:
# pydantic v2
if hasattr(obj, "model_dump") and callable(obj.model_dump):
try:
return obj.model_dump() # type: ignore[attr-defined]
except Exception:
pass
# pydantic v1
if hasattr(obj, "dict") and callable(obj.dict):
try:
return obj.dict() # type: ignore[attr-defined]
except Exception:
pass
# dataclass or simple object
if hasattr(obj, "__dict__"):
try:
return {k: v for k, v in vars(obj).items() if not k.startswith("_")}
except Exception:
pass
# reflect over attributes
result: Dict[str, Any] = {}
try:
for attr in dir(obj):
if attr.startswith("_"):
continue
try:
val = getattr(obj, attr)
except Exception:
continue
if callable(val):
continue
result[attr] = val
except Exception:
pass
return result
# --------------------
# Aux handlers (SCRAPE)
# --------------------
def _scrape_get_first(self, data_obj: Dict[str, Any], *keys: str) -> Optional[Any]:
for k in keys:
if isinstance(data_obj, dict) and k in data_obj and data_obj.get(k):
return data_obj.get(k)
return None
def _scrape_from_dict(
self, firecrawl_docs: Dict[str, Any]
    ) -> tuple[str, Dict[str, Any]]:
data_obj = firecrawl_docs.get("data", firecrawl_docs)
text_value = (
self._scrape_get_first(
data_obj,
"markdown",
"content",
"html",
"raw_html",
"rawHtml",
"summary",
)
or ""
)
meta_obj = data_obj.get("metadata", {}) if isinstance(data_obj, dict) else {}
metadata_value: Dict[str, Any] = {}
if isinstance(meta_obj, dict):
metadata_value = meta_obj
else:
try:
metadata_value = self._to_dict_best_effort(meta_obj)
except Exception:
metadata_value = {"metadata": str(meta_obj)}
if isinstance(data_obj, dict):
for extra_key in (
"links",
"actions",
"screenshot",
"warning",
"changeTracking",
):
if extra_key in data_obj and data_obj.get(extra_key) is not None:
metadata_value[extra_key] = data_obj.get(extra_key)
if "success" in firecrawl_docs:
metadata_value["success"] = firecrawl_docs.get("success")
if "warning" in firecrawl_docs and firecrawl_docs.get("warning") is not None:
metadata_value["warning_top"] = firecrawl_docs.get("warning")
return text_value, metadata_value
    def _scrape_from_obj(self, firecrawl_docs: Any) -> tuple[str, Dict[str, Any]]:
text_value = (
self._safe_get_attr(
firecrawl_docs,
"markdown",
"content",
"html",
"raw_html",
"summary",
)
or ""
)
meta_obj = getattr(firecrawl_docs, "metadata", None)
metadata_value: Dict[str, Any] = {}
if meta_obj is not None:
try:
metadata_value = self._to_dict_best_effort(meta_obj)
except Exception:
metadata_value = {"metadata": str(meta_obj)}
for extra_attr in (
"links",
"actions",
"screenshot",
"warning",
"change_tracking",
):
try:
extra_val = getattr(firecrawl_docs, extra_attr, None)
except Exception:
extra_val = None
if extra_val is not None:
metadata_value[extra_attr] = extra_val
return text_value, metadata_value
    def _handle_scrape_response(self, firecrawl_docs: Any) -> tuple[str, Dict[str, Any]]:
if isinstance(firecrawl_docs, dict):
return self._scrape_from_dict(firecrawl_docs)
else:
return self._scrape_from_obj(firecrawl_docs)
# --------------------
# Aux handlers (CRAWL)
# --------------------
def _normalize_crawl_response(self, firecrawl_docs: Any) -> List[Dict[str, Any]]:
return firecrawl_docs.get("data", firecrawl_docs)
# --------------------
# Aux handlers (MAP)
# --------------------
def _handle_map_error_or_links(self, response: Any, url: str) -> List[Document]:
docs: List[Document] = []
if (
isinstance(response, dict)
and "error" in response
and not response.get("success", False)
):
error_message = response.get("error", "Unknown error")
docs.append(
Document(
text=f"Map request failed: {error_message}",
metadata={"source": "map", "url": url, "error": error_message},
)
)
return docs
links = response.links or []
for link in links:
link_url = link.url
title = link.title
description = link.description
text_content = title or description or link_url
docs.append(
Document(
text=text_content,
metadata={
"source": "map",
"url": link_url,
"title": title,
"description": description,
},
)
)
return docs
# --------------------
# Aux handlers (SEARCH)
# --------------------
def _process_search_dict(
self, search_response: Dict[str, Any], query: str
) -> List[Document]:
documents: List[Document] = []
if search_response.get("success", False):
search_results = search_response.get("data", [])
for result in search_results:
text = result.get("markdown", "")
if not text:
text = result.get("description", "")
metadata = {
"title": result.get("title", ""),
"url": result.get("url", ""),
"description": result.get("description", ""),
"source": "search",
"query": query,
}
if "metadata" in result and isinstance(result["metadata"], dict):
metadata.update(result["metadata"])
documents.append(Document(text=text, metadata=metadata))
else:
warning = search_response.get("warning", "Unknown error")
print(f"Search was unsuccessful: {warning}")
documents.append(
Document(
text=f"Search for '{query}' was unsuccessful: {warning}",
metadata={"source": "search", "query": query, "error": warning},
)
)
return documents
def _process_search_items(
self, result_list: Any, result_type: str, query: str
) -> List[Document]:
docs: List[Document] = []
if not result_list:
return docs
for item in result_list:
item_url = getattr(item, "url", "")
item_title = getattr(item, "title", "")
item_description = getattr(item, "description", "")
text_content = item_title or item_description or item_url
metadata = {
"title": item_title,
"url": item_url,
"description": item_description,
"source": "search",
"search_type": result_type,
"query": query,
}
base_keys = set(metadata.keys())
extra_attrs = self._to_dict_best_effort(item)
for k, v in extra_attrs.items():
if k not in base_keys:
metadata[k] = v
docs.append(Document(text=text_content, metadata=metadata))
return docs
def _process_search_sdk(self, search_response: Any, query: str) -> List[Document]:
documents: List[Document] = []
documents += self._process_search_items(
getattr(search_response, "web", None), "web", query
) # type: ignore[attr-defined]
documents += self._process_search_items(
getattr(search_response, "news", None), "news", query
) # type: ignore[attr-defined]
documents += self._process_search_items(
getattr(search_response, "images", None), "images", query
) # type: ignore[attr-defined]
return documents
# --------------------
# Aux handlers (EXTRACT)
# --------------------
def _format_extract_text(self, extract_data: Dict[str, Any]) -> str:
text_parts = []
for key, value in extract_data.items():
text_parts.append(f"{key}: {value}")
return "\n".join(text_parts)
# --------------------
# __init__ (unchanged behavior)
# --------------------
def __init__(
self,
api_key: str,
api_url: Optional[str] = None,
mode: Optional[str] = "crawl",
params: Optional[dict] = None,
) -> None:
"""Initialize with parameters."""
# Ensure firecrawl client is installed and instantiate
try:
from firecrawl import Firecrawl # type: ignore
except Exception as exc:
raise ImportError(
"firecrawl not found, please run `pip install 'firecrawl-py>=4.3.3'`"
) from exc
# Instantiate the new Firecrawl client
client_kwargs: Dict[str, Any] = {"api_key": api_key}
if api_url is not None:
client_kwargs["api_url"] = api_url
firecrawl = Firecrawl(**client_kwargs)
params = params or {}
params["integration"] = "llamaindex"
super().__init__(
firecrawl=firecrawl,
api_key=api_key,
api_url=api_url,
mode=mode,
params=params,
)
@classmethod
def class_name(cls) -> str:
return "Firecrawl_reader"
def load_data(
self,
url: Optional[str] = None,
query: Optional[str] = None,
urls: Optional[List[str]] = None,
) -> List[Document]:
"""
        Load data from Firecrawl using the configured mode (scrape, crawl, map, search, or extract).
Args:
url (Optional[str]): URL to scrape or crawl.
query (Optional[str]): Query to search for.
urls (Optional[List[str]]): List of URLs for extract mode.
Returns:
List[Document]: List of documents.
Raises:
ValueError: If invalid combination of parameters is provided.
"""
if sum(x is not None for x in [url, query, urls]) != 1:
raise ValueError("Exactly one of url, query, or urls must be provided.")
documents = []
if self.mode == "scrape":
# [SCRAPE] params: https://docs.firecrawl.dev/api-reference/endpoint/scrape
if url is None:
raise ValueError("URL must be provided for scrape mode.")
# Map params to new client call signature
scrape_params = self._params_copy()
firecrawl_docs = self.firecrawl.scrape(url, **scrape_params)
# Support both dict and SDK object responses
text_value = ""
metadata_value: Dict[str, Any] = {}
if isinstance(firecrawl_docs, dict):
# Newer API may return { success, data: {...} }
data_obj = firecrawl_docs.get("data", firecrawl_docs)
def _get_first(*keys: str) -> Optional[Any]:
for k in keys:
if (
isinstance(data_obj, dict)
and k in data_obj
and data_obj.get(k)
):
return data_obj.get(k)
return None
text_value = (
_get_first(
"markdown", "content", "html", "raw_html", "rawHtml", "summary"
)
or ""
)
meta_obj = (
data_obj.get("metadata", {}) if isinstance(data_obj, dict) else {}
)
if isinstance(meta_obj, dict):
metadata_value = meta_obj
else:
# Convert metadata object to dict if needed
try:
if hasattr(meta_obj, "model_dump") and callable(
meta_obj.model_dump
):
metadata_value = meta_obj.model_dump() # type: ignore[attr-defined]
elif hasattr(meta_obj, "dict") and callable(meta_obj.dict):
metadata_value = meta_obj.dict() # type: ignore[attr-defined]
elif hasattr(meta_obj, "__dict__"):
metadata_value = {
k: v
for k, v in vars(meta_obj).items()
if not k.startswith("_")
}
except Exception:
metadata_value = {"metadata": str(meta_obj)}
# Capture other helpful fields into metadata
if isinstance(data_obj, dict):
for extra_key in (
"links",
"actions",
"screenshot",
"warning",
"changeTracking",
):
if (
extra_key in data_obj
and data_obj.get(extra_key) is not None
):
metadata_value[extra_key] = data_obj.get(extra_key)
# Bubble up success/warning if at top-level
if "success" in firecrawl_docs:
metadata_value["success"] = firecrawl_docs.get("success")
if (
"warning" in firecrawl_docs
and firecrawl_docs.get("warning") is not None
):
metadata_value["warning_top"] = firecrawl_docs.get("warning")
else:
# SDK object with attributes
def _safe_get(obj: Any, *names: str) -> Optional[Any]:
for name in names:
try:
val = getattr(obj, name, None)
except Exception:
val = None
if val:
return val
return None
text_value = (
_safe_get(
firecrawl_docs,
"markdown",
"content",
"html",
"raw_html",
"summary",
)
or ""
)
meta_obj = getattr(firecrawl_docs, "metadata", None)
if meta_obj is not None:
try:
if hasattr(meta_obj, "model_dump") and callable(
meta_obj.model_dump
):
metadata_value = meta_obj.model_dump() # type: ignore[attr-defined]
elif hasattr(meta_obj, "dict") and callable(meta_obj.dict):
metadata_value = meta_obj.dict() # type: ignore[attr-defined]
elif hasattr(meta_obj, "__dict__"):
metadata_value = {
k: v
for k, v in vars(meta_obj).items()
if not k.startswith("_")
}
else:
metadata_value = {"metadata": str(meta_obj)}
except Exception:
metadata_value = {"metadata": str(meta_obj)}
# Attach extra top-level attributes if present on SDK object
for extra_attr in (
"links",
"actions",
"screenshot",
"warning",
"change_tracking",
):
try:
extra_val = getattr(firecrawl_docs, extra_attr, None)
except Exception:
extra_val = None
if extra_val is not None:
metadata_value[extra_attr] = extra_val
documents.append(Document(text=text_value or "", metadata=metadata_value))
elif self.mode == "crawl":
# [CRAWL] params: https://docs.firecrawl.dev/api-reference/endpoint/crawl-post
if url is None:
raise ValueError("URL must be provided for crawl mode.")
crawl_params = self._params_copy()
# Remove deprecated/unsupported parameters
if "maxDepth" in crawl_params:
crawl_params.pop("maxDepth", None)
firecrawl_docs = self.firecrawl.crawl(url, **crawl_params)
# Normalize Crawl response across SDK versions
items: List[Any] = []
if isinstance(firecrawl_docs, dict):
data = firecrawl_docs.get("data", firecrawl_docs)
if isinstance(data, list):
items = data
else:
# Try common list-bearing attributes first
for attr_name in ("data", "results", "documents", "items", "pages"):
try:
candidate = getattr(firecrawl_docs, attr_name, None)
except Exception:
candidate = None
if isinstance(candidate, list) and candidate:
items = candidate
break
# Fallback to model dump reflection
if not items:
try:
if hasattr(firecrawl_docs, "model_dump") and callable(
firecrawl_docs.model_dump
):
dump_obj = firecrawl_docs.model_dump() # type: ignore[attr-defined]
elif hasattr(firecrawl_docs, "dict") and callable(
firecrawl_docs.dict
):
dump_obj = firecrawl_docs.dict() # type: ignore[attr-defined]
else:
dump_obj = {}
except Exception:
dump_obj = {}
if isinstance(dump_obj, dict):
data = (
dump_obj.get("data")
or dump_obj.get("results")
or dump_obj.get("documents")
)
if isinstance(data, list):
items = data
for doc in items:
if isinstance(doc, dict):
text_val = (
doc.get("markdown")
or doc.get("content")
or doc.get("text")
or ""
)
metadata_val = doc.get("metadata", {})
else:
text_val = (
self._safe_get_attr(
doc,
"markdown",
"content",
"text",
"html",
"raw_html",
"rawHtml",
"summary",
)
or ""
)
meta_obj = getattr(doc, "metadata", None)
if isinstance(meta_obj, dict):
metadata_val = meta_obj
elif meta_obj is not None:
try:
metadata_val = self._to_dict_best_effort(meta_obj)
except Exception:
metadata_val = {"metadata": str(meta_obj)}
else:
metadata_val = {}
documents.append(Document(text=text_val, metadata=metadata_val))
elif self.mode == "map":
# [MAP] params: https://docs.firecrawl.dev/api-reference/endpoint/map
# Expected response: { "success": true, "links": [{"url":..., "title":..., "description":...}, ...] }
if url is None:
raise ValueError("URL must be provided for map mode.")
map_params = self._params_copy()
# Pass through optional parameters like sitemap, includeSubdomains, ignoreQueryParameters, limit, timeout, search
response = self.firecrawl.map(url, **map_params) # type: ignore[attr-defined]
# Handle error response format: { "error": "..." }
if (
isinstance(response, dict)
and "error" in response
and not response.get("success", False)
):
error_message = response.get("error", "Unknown error")
documents.append(
Document(
text=f"Map request failed: {error_message}",
metadata={"source": "map", "url": url, "error": error_message},
)
)
return documents
# Extract links from success response
links = response.links or []
for link in links:
link_url = link.url
title = link.title
description = link.description
text_content = title or description or link_url
documents.append(
Document(
text=text_content,
metadata={
"source": "map",
"url": link_url,
"title": title,
"description": description,
},
)
)
elif self.mode == "search":
# [SEARCH] params: https://docs.firecrawl.dev/api-reference/endpoint/search
if query is None:
raise ValueError("Query must be provided for search mode.")
# Remove query from params if it exists to avoid duplicate
search_params = self._params_copy()
if "query" in search_params:
del search_params["query"]
# Get search results
search_response = self.firecrawl.search(query, **search_params)
# Handle the search response format
if isinstance(search_response, dict):
# Check for success
if search_response.get("success", False):
# Get the data array
search_results = search_response.get("data", [])
# Process each search result
for result in search_results:
# Extract text content (prefer markdown if available)
text = result.get("markdown", "")
if not text:
# Fall back to description if markdown is not available
text = result.get("description", "")
# Extract metadata
metadata = {
"title": result.get("title", ""),
"url": result.get("url", ""),
"description": result.get("description", ""),
"source": "search",
"query": query,
}
# Add additional metadata if available
if "metadata" in result and isinstance(
result["metadata"], dict
):
metadata.update(result["metadata"])
# Create document
documents.append(
Document(
text=text,
metadata=metadata,
)
)
else:
# Handle unsuccessful response
warning = search_response.get("warning", "Unknown error")
print(f"Search was unsuccessful: {warning}")
documents.append(
Document(
text=f"Search for '{query}' was unsuccessful: {warning}",
metadata={
"source": "search",
"query": query,
"error": warning,
},
)
)
elif (
hasattr(search_response, "web")
or hasattr(search_response, "news")
or hasattr(search_response, "images")
):
# New SDK object response like: web=[SearchResultWeb(...)] news=None images=None
def _process_results(result_list, result_type: str) -> None:
if not result_list:
return
for item in result_list:
# Try to access attributes with safe fallbacks
item_url = getattr(item, "url", "")
item_title = getattr(item, "title", "")
item_description = getattr(item, "description", "")
text_content = item_title or item_description or item_url
metadata = {
"title": item_title,
"url": item_url,
"description": item_description,
"source": "search",
"search_type": result_type,
"query": query,
}
# Collect all other attributes dynamically without whitelisting
base_keys = set(metadata.keys())
def _item_to_dict(obj: Any) -> Dict[str, Any]:
# pydantic v2
if hasattr(obj, "model_dump") and callable(obj.model_dump):
try:
return obj.model_dump() # type: ignore[attr-defined]
except Exception:
pass
# pydantic v1
if hasattr(obj, "dict") and callable(obj.dict):
try:
return obj.dict() # type: ignore[attr-defined]
except Exception:
pass
# dataclass or simple object
if hasattr(obj, "__dict__"):
try:
return {
k: v
for k, v in vars(obj).items()
if not k.startswith("_")
}
except Exception:
pass
# Fallback: reflect over attributes
result: Dict[str, Any] = {}
try:
for attr in dir(obj):
if attr.startswith("_"):
continue
try:
val = getattr(obj, attr)
except Exception:
continue
if callable(val):
continue
result[attr] = val
except Exception:
pass
return result
extra_attrs = _item_to_dict(item)
for k, v in extra_attrs.items():
if k not in base_keys:
metadata[k] = v
documents.append(
Document(
text=text_content,
metadata=metadata,
)
)
_process_results(getattr(search_response, "web", None), "web") # type: ignore[attr-defined]
_process_results(getattr(search_response, "news", None), "news") # type: ignore[attr-defined]
_process_results(getattr(search_response, "images", None), "images") # type: ignore[attr-defined]
else:
# Handle unexpected response format
print(f"Unexpected search response format: {type(search_response)}")
documents.append(
Document(
text=str(search_response),
metadata={"source": "search", "query": query},
)
)
elif self.mode == "extract":
# [EXTRACT] params: https://docs.firecrawl.dev/api-reference/endpoint/extract
if urls is None:
# For backward compatibility, convert single URL to list if provided
if url is not None:
urls = [url]
else:
raise ValueError("URLs must be provided for extract mode.")
# Ensure we have a prompt in params
extract_params = self._params_copy()
if "prompt" not in extract_params:
raise ValueError("A 'prompt' parameter is required for extract mode.")
# Prepare the payload according to the new API structure
payload = {"prompt": extract_params.pop("prompt")}
payload["integration"] = "llamaindex"
# Call the extract method with the urls and params
extract_response = self.firecrawl.extract(urls=urls, **payload)
# Handle the extract response format
if isinstance(extract_response, dict):
# Check for success
if extract_response.get("success", False):
# Get the data from the response
extract_data = extract_response.get("data", {})
# Get the sources if available
sources = extract_response.get("sources", {})
# Convert the extracted data to text
if extract_data:
# Convert the data to a formatted string
text_parts = []
for key, value in extract_data.items():
text_parts.append(f"{key}: {value}")
text = "\n".join(text_parts)
# Create metadata
metadata = {
"urls": urls,
"source": "extract",
"status": extract_response.get("status"),
"expires_at": extract_response.get("expiresAt"),
}
# Add sources to metadata if available
if sources:
metadata["sources"] = sources
# Create document
documents.append(
Document(
text=text,
metadata=metadata,
)
)
else:
# Handle empty data in successful response
print("Extract response successful but no data returned")
documents.append(
Document(
text="Extraction was successful but no data was returned",
metadata={"urls": urls, "source": "extract"},
)
)
else:
# Handle unsuccessful response
warning = extract_response.get("warning", "Unknown error")
print(f"Extraction was unsuccessful: {warning}")
documents.append(
Document(
text=f"Extraction was unsuccessful: {warning}",
metadata={
"urls": urls,
"source": "extract",
"error": warning,
},
)
)
else:
# Handle unexpected response format
print(f"Unexpected extract response format: {type(extract_response)}")
documents.append(
Document(
text=str(extract_response),
metadata={"urls": urls, "source": "extract"},
)
)
else:
raise ValueError(
"Invalid mode. Please choose 'scrape', 'crawl', 'search', or 'extract'."
)
return documents
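A minimal usage sketch for the reader above. The import path, the placeholder API key, and the example URL/query are assumptions for illustration only; running it also requires the firecrawl-py package and a valid Firecrawl account.

# Hypothetical usage of the reader defined above (import path assumed).
from llama_index.readers.web import FireCrawlWebReader

# Scrape a single page: exactly one of url, query, or urls may be passed.
reader = FireCrawlWebReader(api_key="fc-YOUR-KEY", mode="scrape")
docs = reader.load_data(url="https://example.com")
for doc in docs:
    print(doc.metadata.get("url"), len(doc.text))

# Search mode takes a query instead of a URL and yields one Document per hit.
search_reader = FireCrawlWebReader(api_key="fc-YOUR-KEY", mode="search")
hits = search_reader.load_data(query="firecrawl llamaindex reader")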
|
FireCrawlWebReader
|
python
|
getsentry__sentry
|
tests/sentry/data_secrecy/test_service.py
|
{
"start": 458,
"end": 5030
}
|
class ____(TestCase):
def setUp(self) -> None:
self.organization = self.create_organization()
self.organization_2 = self.create_organization()
def test_get_effective_waiver_status_with_active_grant(self) -> None:
now = datetime.now(tz=timezone.utc)
grant_start = now - timedelta(hours=1)
grant_end = now + timedelta(hours=1)
self.create_data_access_grant(
organization_id=self.organization.id,
grant_type=DataAccessGrant.GrantType.ZENDESK,
ticket_id="TICKET-123",
grant_start=grant_start,
grant_end=grant_end,
)
result = data_access_grant_service.get_effective_grant_status(
organization_id=self.organization.id
)
assert result is not None
assert result.organization_id == self.organization.id
assert result.access_start == grant_start
assert result.access_end == grant_end
def test_get_effective_waiver_status_with_no_grants(self) -> None:
result = data_access_grant_service.get_effective_grant_status(
organization_id=self.organization.id
)
assert result is None
def test_get_effective_waiver_status_with_expired_grant(self) -> None:
now = datetime.now(tz=timezone.utc)
grant_start = now - timedelta(hours=2)
grant_end = now - timedelta(hours=1) # Expired
self.create_data_access_grant(
organization_id=self.organization.id,
grant_type=DataAccessGrant.GrantType.ZENDESK,
ticket_id="TICKET-123",
grant_start=grant_start,
grant_end=grant_end,
)
result = data_access_grant_service.get_effective_grant_status(
organization_id=self.organization.id
)
assert result is None
def test_get_effective_waiver_status_with_future_grant(self) -> None:
now = datetime.now(tz=timezone.utc)
grant_start = now + timedelta(hours=1) # Future
grant_end = now + timedelta(hours=2)
self.create_data_access_grant(
organization_id=self.organization.id,
grant_type=DataAccessGrant.GrantType.ZENDESK,
ticket_id="TICKET-123",
grant_start=grant_start,
grant_end=grant_end,
)
result = data_access_grant_service.get_effective_grant_status(
organization_id=self.organization.id
)
assert result is None
def test_get_effective_waiver_status_with_revoked_grant(self) -> None:
now = datetime.now(tz=timezone.utc)
grant_start = now - timedelta(hours=1)
grant_end = now + timedelta(hours=1)
self.create_data_access_grant(
organization_id=self.organization.id,
grant_type=DataAccessGrant.GrantType.ZENDESK,
ticket_id="TICKET-123",
grant_start=grant_start,
grant_end=grant_end,
revocation_date=now,
revocation_reason=DataAccessGrant.RevocationReason.MANUAL_REVOCATION,
)
result = data_access_grant_service.get_effective_grant_status(
organization_id=self.organization.id
)
assert result is None
def test_get_effective_waiver_status_with_multiple_grants(self) -> None:
now = datetime.now(tz=timezone.utc)
# Grant 1: Earlier start, earlier end
grant1_start = now - timedelta(hours=2)
grant1_end = now + timedelta(hours=1)
# Grant 2: Later start, later end
grant2_start = now - timedelta(hours=1)
grant2_end = now + timedelta(hours=2)
self.create_data_access_grant(
organization_id=self.organization.id,
grant_type=DataAccessGrant.GrantType.ZENDESK,
ticket_id="TICKET-123",
grant_start=grant1_start,
grant_end=grant1_end,
)
self.create_data_access_grant(
organization_id=self.organization.id,
grant_type=DataAccessGrant.GrantType.MANUAL,
granted_by_user=self.user,
grant_start=grant2_start,
grant_end=grant2_end,
)
result = data_access_grant_service.get_effective_grant_status(
organization_id=self.organization.id
)
assert result is not None
assert result.organization_id == self.organization.id
# Should use earliest start and latest end
assert result.access_start == grant1_start
assert result.access_end == grant2_end
|
TestDataAccessGrantService
|
python
|
agronholm__apscheduler
|
src/apscheduler/eventbrokers/local.py
|
{
"start": 183,
"end": 613
}
|
class ____(BaseEventBroker):
"""
Asynchronous, local event broker.
This event broker only broadcasts within the process it runs in, and is therefore
not suitable for multi-node or multiprocess use cases.
Does not serialize events.
"""
def __repr__(self) -> str:
return create_repr(self)
async def publish(self, event: Event) -> None:
await self.publish_local(event)
|
LocalEventBroker
|
python
|
PyCQA__pylint
|
tests/functional/ext/typing/typing_deprecated_alias.py
|
{
"start": 2227,
"end": 2325
}
|
class ____(TypedDict):
my_var: List[int] # [deprecated-typing-alias]
@dataclass
|
CustomTypedDict2
|
python
|
huggingface__transformers
|
src/transformers/models/oneformer/convert_to_hf_oneformer.py
|
{
"start": 7733,
"end": 9145
}
|
class ____:
def __call__(self, original_config: object, model_repo: str) -> OneFormerProcessor:
model = original_config.MODEL
model_input = original_config.INPUT
dataset_catalog = MetadataCatalog.get(original_config.DATASETS.TEST_PANOPTIC[0])
if "ade20k" in model_repo:
class_info_file = "ade20k_panoptic.json"
elif "coco" in model_repo:
class_info_file = "coco_panoptic.json"
elif "cityscapes" in model_repo:
class_info_file = "cityscapes_panoptic.json"
else:
raise ValueError("Invalid Dataset!")
image_processor = OneFormerImageProcessor(
image_mean=(torch.tensor(model.PIXEL_MEAN) / 255).tolist(),
image_std=(torch.tensor(model.PIXEL_STD) / 255).tolist(),
size=model_input.MIN_SIZE_TEST,
max_size=model_input.MAX_SIZE_TEST,
num_labels=model.SEM_SEG_HEAD.NUM_CLASSES,
ignore_index=dataset_catalog.ignore_label,
class_info_file=class_info_file,
)
tokenizer = CLIPTokenizer.from_pretrained(model_repo)
return OneFormerProcessor(
image_processor=image_processor,
tokenizer=tokenizer,
task_seq_length=original_config.INPUT.TASK_SEQ_LEN,
max_seq_length=original_config.INPUT.MAX_SEQ_LEN,
)
|
OriginalOneFormerConfigToProcessorConverter
|
python
|
getsentry__sentry
|
tests/snuba/api/endpoints/test_organization_metrics_meta.py
|
{
"start": 217,
"end": 4840
}
|
class ____(MetricsEnhancedPerformanceTestCase):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1)
self.two_min_ago = before_now(minutes=2)
self.features = {
"organizations:performance-use-metrics": True,
}
self.login_as(user=self.user)
# Don't create any txn on this, don't set its DS rules, it shouldn't show up anywhere
self.bad_project = self.create_project()
def test_unparameterized_transactions(self) -> None:
# Make current project incompatible
self.store_transaction_metric(
1, tags={"transaction": "<< unparameterized >>"}, timestamp=self.min_ago
)
url = reverse(
"sentry-api-0-organization-metrics-compatibility",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
self.assertCountEqual(
response.json()["incompatible_projects"], [self.project.id, self.bad_project.id]
)
assert response.json()["compatible_projects"] == []
def test_null_transaction(self) -> None:
# Make current project incompatible
self.store_transaction_metric(1, tags={}, timestamp=self.min_ago)
url = reverse(
"sentry-api-0-organization-metrics-compatibility",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
self.assertCountEqual(
response.json()["incompatible_projects"], [self.project.id, self.bad_project.id]
)
assert response.json()["compatible_projects"] == []
def test_no_transaction(self) -> None:
# Make current project incompatible by having nothing
url = reverse(
"sentry-api-0-organization-metrics-compatibility",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
self.assertCountEqual(
response.json()["incompatible_projects"], [self.project.id, self.bad_project.id]
)
assert response.json()["compatible_projects"] == []
def test_has_transaction(self) -> None:
self.store_transaction_metric(
1, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago
)
url = reverse(
"sentry-api-0-organization-metrics-compatibility",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.json()["incompatible_projects"] == [self.bad_project.id]
assert response.json()["compatible_projects"] == [self.project.id]
def test_multiple_projects(self) -> None:
project2 = self.create_project()
project3 = self.create_project()
project4 = self.create_project()
self.store_transaction_metric(
1, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago
)
self.store_transaction_metric(
1, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago, project=project4.id
)
self.store_transaction_metric(
1,
tags={"transaction": "<< unparameterized >>"},
timestamp=self.min_ago,
project=project2.id,
)
self.store_transaction_metric(
1,
tags={},
timestamp=self.min_ago,
project=project3.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "transaction": "foo_transaction"},
project_id=self.project.id,
)
url = reverse(
"sentry-api-0-organization-metrics-compatibility",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
self.assertCountEqual(
response.json()["incompatible_projects"],
[project2.id, project3.id, self.bad_project.id],
)
self.assertCountEqual(
response.json()["compatible_projects"], [self.project.id, project4.id]
)
|
OrganizationMetricsCompatiblity
|
python
|
cython__cython
|
Cython/Debugger/libpython.py
|
{
"start": 5749,
"end": 14710
}
|
class ____:
"""
Class wrapping a gdb.Value that's either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyBytesObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
        In Python 2, these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
        so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == int(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
ob_type = self.type()
tp_name = ob_type.field('tp_name')
return tp_name.string()
# NullPyObjectPtr: NULL tp_name?
# RuntimeError: Can't even read the object at all?
# UnicodeDecodeError: Failed to decode tp_name bytestring
except (NullPyObjectPtr, RuntimeError, UnicodeDecodeError):
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr:
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
int(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
'''
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
'''
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
# RuntimeError: NULL pointers
# UnicodeDecodeError: string() fails to decode the bytestring
except (RuntimeError, UnicodeDecodeError):
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
return cls
#print('tp_flags = 0x%08x' % tp_flags)
#print('tp_name = %r' % tp_name)
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
'method-wrapper': wrapperobject,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
return PyBytesObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
#if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
# return PyTypeObjectPtr
# Use the base class:
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return int(self._gdbval)
|
PyObjectPtr
|
python
|
sympy__sympy
|
sympy/physics/mechanics/pathway.py
|
{
"start": 9692,
"end": 17832
}
|
class ____(PathwayBase):
"""Obstacle-set pathway between a set of attachment points.
Explanation
===========
An obstacle-set pathway forms a series of straight-line segment between
pairs of consecutive points in a set of points. It is similar to multiple
linear pathways joined end-to-end. It will not interact with any other
objects in the system, i.e. an ``ObstacleSetPathway`` will intersect other
objects to ensure that the path between its pairs of points (its
attachments) is the shortest possible.
Examples
========
To construct an obstacle-set pathway, three or more points are required to
be passed to the ``attachments`` parameter as a ``tuple``.
>>> from sympy.physics.mechanics import ObstacleSetPathway, Point
>>> pA, pB, pC, pD = Point('pA'), Point('pB'), Point('pC'), Point('pD')
>>> obstacle_set_pathway = ObstacleSetPathway(pA, pB, pC, pD)
>>> obstacle_set_pathway
ObstacleSetPathway(pA, pB, pC, pD)
The pathway created above isn't very interesting without the positions and
    velocities of its attachment points being described. Without this it's not
possible to describe how the pathway moves, i.e. its length or its
extension velocity.
>>> from sympy import cos, sin
>>> from sympy.physics.mechanics import ReferenceFrame
>>> from sympy.physics.vector import dynamicsymbols
>>> N = ReferenceFrame('N')
>>> q = dynamicsymbols('q')
>>> pO = Point('pO')
>>> pA.set_pos(pO, N.y)
>>> pB.set_pos(pO, -N.x)
>>> pC.set_pos(pA, cos(q) * N.x - (sin(q) + 1) * N.y)
>>> pD.set_pos(pA, sin(q) * N.x + (cos(q) - 1) * N.y)
>>> pB.pos_from(pA)
- N.x - N.y
>>> pC.pos_from(pA)
cos(q(t))*N.x + (-sin(q(t)) - 1)*N.y
>>> pD.pos_from(pA)
sin(q(t))*N.x + (cos(q(t)) - 1)*N.y
A pathway's length can be accessed via its ``length`` attribute.
>>> obstacle_set_pathway.length.simplify()
sqrt(2)*(sqrt(cos(q(t)) + 1) + 2)
A pathway's extension velocity can be accessed similarly via its
``extension_velocity`` attribute.
>>> obstacle_set_pathway.extension_velocity.simplify()
-sqrt(2)*sin(q(t))*Derivative(q(t), t)/(2*sqrt(cos(q(t)) + 1))
Parameters
==========
attachments : tuple[Point, ...]
The set of ``Point`` objects that define the segmented obstacle-set
pathway.
"""
def __init__(self, *attachments):
"""Initializer for ``ObstacleSetPathway``.
Parameters
==========
attachments : tuple[Point, ...]
The set of ``Point`` objects that define the segmented obstacle-set
pathway.
"""
super().__init__(*attachments)
@property
def attachments(self):
"""The set of points defining a pathway's segmented path."""
return self._attachments
@attachments.setter
def attachments(self, attachments):
if hasattr(self, '_attachments'):
msg = (
f'Can\'t set attribute `attachments` to {repr(attachments)} '
f'as it is immutable.'
)
raise AttributeError(msg)
if len(attachments) <= 2:
msg = (
f'Value {repr(attachments)} passed to `attachments` was an '
f'iterable of length {len(attachments)}, must be an iterable '
f'of length 3 or greater.'
)
raise ValueError(msg)
for i, point in enumerate(attachments):
if not isinstance(point, Point):
msg = (
f'Value {repr(point)} passed to `attachments` at index '
f'{i} was of type {type(point)}, must be {Point}.'
)
raise TypeError(msg)
self._attachments = tuple(attachments)
@property
def length(self):
"""Exact analytical expression for the pathway's length."""
length = S.Zero
attachment_pairs = zip(self.attachments[:-1], self.attachments[1:])
for attachment_pair in attachment_pairs:
length += _point_pair_length(*attachment_pair)
return length
@property
def extension_velocity(self):
"""Exact analytical expression for the pathway's extension velocity."""
extension_velocity = S.Zero
attachment_pairs = zip(self.attachments[:-1], self.attachments[1:])
for attachment_pair in attachment_pairs:
extension_velocity += _point_pair_extension_velocity(*attachment_pair)
return extension_velocity
def to_loads(self, force):
"""Loads required by the equations of motion method classes.
Explanation
===========
``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be
passed to the ``loads`` parameters of its ``kanes_equations`` method
when constructing the equations of motion. This method acts as a
        utility to produce the correctly-structured pairs of points and vectors
required so that these can be easily concatenated with other items in
the list of loads and passed to ``KanesMethod.kanes_equations``. These
loads are also in the correct form to also be passed to the other
equations of motion method classes, e.g. ``LagrangesMethod``.
Examples
========
The below example shows how to generate the loads produced in an
actuator that follows an obstacle-set pathway between four points and
produces an expansile force ``F``. First, create a pair of reference
frames, ``A`` and ``B``, in which the four points ``pA``, ``pB``,
``pC``, and ``pD`` will be located. The first two points in frame ``A``
and the second two in frame ``B``. Frame ``B`` will also be oriented
such that it relates to ``A`` via a rotation of ``q`` about an axis
``N.z`` in a global frame (``N.z``, ``A.z``, and ``B.z`` are parallel).
>>> from sympy.physics.mechanics import (ObstacleSetPathway, Point,
... ReferenceFrame)
>>> from sympy.physics.vector import dynamicsymbols
>>> q = dynamicsymbols('q')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'axis', (0, N.x))
>>> B = A.orientnew('B', 'axis', (q, N.z))
>>> pO = Point('pO')
>>> pA, pB, pC, pD = Point('pA'), Point('pB'), Point('pC'), Point('pD')
>>> pA.set_pos(pO, A.x)
>>> pB.set_pos(pO, -A.y)
>>> pC.set_pos(pO, B.y)
>>> pD.set_pos(pO, B.x)
>>> obstacle_set_pathway = ObstacleSetPathway(pA, pB, pC, pD)
Now create a symbol ``F`` to describe the magnitude of the (expansile)
force that will be produced along the pathway. The list of loads that
``KanesMethod`` requires can be produced by calling the pathway's
``to_loads`` method with ``F`` passed as the only argument.
>>> from sympy import Symbol
>>> F = Symbol('F')
>>> obstacle_set_pathway.to_loads(F)
[(pA, sqrt(2)*F/2*A.x + sqrt(2)*F/2*A.y),
(pB, - sqrt(2)*F/2*A.x - sqrt(2)*F/2*A.y),
(pB, - F/sqrt(2*cos(q(t)) + 2)*A.y - F/sqrt(2*cos(q(t)) + 2)*B.y),
(pC, F/sqrt(2*cos(q(t)) + 2)*A.y + F/sqrt(2*cos(q(t)) + 2)*B.y),
(pC, - sqrt(2)*F/2*B.x + sqrt(2)*F/2*B.y),
(pD, sqrt(2)*F/2*B.x - sqrt(2)*F/2*B.y)]
Parameters
==========
force : Expr
The force acting along the length of the pathway. It is assumed
that this ``Expr`` represents an expansile force.
"""
loads = []
attachment_pairs = zip(self.attachments[:-1], self.attachments[1:])
for attachment_pair in attachment_pairs:
relative_position = _point_pair_relative_position(*attachment_pair)
length = _point_pair_length(*attachment_pair)
loads.extend([
Force(attachment_pair[0], -force*relative_position/length),
Force(attachment_pair[1], force*relative_position/length),
])
return loads
|
ObstacleSetPathway
|
python
|
pypa__pipenv
|
pipenv/vendor/click/core.py
|
{
"start": 44709,
"end": 56511
}
|
class ____(BaseCommand):
"""Commands are the basic building block of command line interfaces in
Click. A basic command handles command line parsing and might dispatch
more parsing to commands nested below it.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
:param callback: the callback to invoke. This is optional.
:param params: the parameters to register with this command. This can
be either :class:`Option` or :class:`Argument` objects.
:param help: the help string to use for this command.
:param epilog: like the help string but it's printed at the end of the
help page after everything else.
:param short_help: the short help to use for this command. This is
shown on the command listing of the parent command.
:param add_help_option: by default each command registers a ``--help``
option. This can be disabled by this parameter.
:param no_args_is_help: this controls what happens if no arguments are
provided. This option is disabled by default.
If enabled this will add ``--help`` as argument
if no arguments are passed
:param hidden: hide this command from help outputs.
:param deprecated: issues a message indicating that
the command is deprecated.
.. versionchanged:: 8.1
``help``, ``epilog``, and ``short_help`` are stored unprocessed,
all formatting is done when outputting help text, not at init,
and is done even if not using the ``@command`` decorator.
.. versionchanged:: 8.0
Added a ``repr`` showing the command name.
.. versionchanged:: 7.1
Added the ``no_args_is_help`` parameter.
.. versionchanged:: 2.0
Added the ``context_settings`` parameter.
"""
def __init__(
self,
name: t.Optional[str],
context_settings: t.Optional[t.MutableMapping[str, t.Any]] = None,
callback: t.Optional[t.Callable[..., t.Any]] = None,
params: t.Optional[t.List["Parameter"]] = None,
help: t.Optional[str] = None,
epilog: t.Optional[str] = None,
short_help: t.Optional[str] = None,
options_metavar: t.Optional[str] = "[OPTIONS]",
add_help_option: bool = True,
no_args_is_help: bool = False,
hidden: bool = False,
deprecated: bool = False,
) -> None:
super().__init__(name, context_settings)
#: the callback to execute when the command fires. This might be
#: `None` in which case nothing happens.
self.callback = callback
#: the list of parameters for this command in the order they
#: should show up in the help page and execute. Eager parameters
#: will automatically be handled before non eager ones.
self.params: t.List["Parameter"] = params or []
self.help = help
self.epilog = epilog
self.options_metavar = options_metavar
self.short_help = short_help
self.add_help_option = add_help_option
self.no_args_is_help = no_args_is_help
self.hidden = hidden
self.deprecated = deprecated
def to_info_dict(self, ctx: Context) -> t.Dict[str, t.Any]:
info_dict = super().to_info_dict(ctx)
info_dict.update(
params=[param.to_info_dict() for param in self.get_params(ctx)],
help=self.help,
epilog=self.epilog,
short_help=self.short_help,
hidden=self.hidden,
deprecated=self.deprecated,
)
return info_dict
def get_usage(self, ctx: Context) -> str:
"""Formats the usage line into a string and returns it.
Calls :meth:`format_usage` internally.
"""
formatter = ctx.make_formatter()
self.format_usage(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_params(self, ctx: Context) -> t.List["Parameter"]:
rv = self.params
help_option = self.get_help_option(ctx)
if help_option is not None:
rv = [*rv, help_option]
return rv
def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the usage line into the formatter.
This is a low-level method called by :meth:`get_usage`.
"""
pieces = self.collect_usage_pieces(ctx)
formatter.write_usage(ctx.command_path, " ".join(pieces))
def collect_usage_pieces(self, ctx: Context) -> t.List[str]:
"""Returns all the pieces that go into the usage line and returns
it as a list of strings.
"""
rv = [self.options_metavar] if self.options_metavar else []
for param in self.get_params(ctx):
rv.extend(param.get_usage_pieces(ctx))
return rv
def get_help_option_names(self, ctx: Context) -> t.List[str]:
"""Returns the names for the help option."""
all_names = set(ctx.help_option_names)
for param in self.params:
all_names.difference_update(param.opts)
all_names.difference_update(param.secondary_opts)
return list(all_names)
def get_help_option(self, ctx: Context) -> t.Optional["Option"]:
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return None
def show_help(ctx: Context, param: "Parameter", value: str) -> None:
if value and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help=_("Show this message and exit."),
)
def make_parser(self, ctx: Context) -> OptionParser:
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
def get_help(self, ctx: Context) -> str:
"""Formats the help into a string and returns it.
Calls :meth:`format_help` internally.
"""
formatter = ctx.make_formatter()
self.format_help(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_short_help_str(self, limit: int = 45) -> str:
"""Gets short help for the command or makes it by shortening the
long help string.
"""
if self.short_help:
text = inspect.cleandoc(self.short_help)
elif self.help:
text = make_default_short_help(self.help, limit)
else:
text = ""
if self.deprecated:
text = _("(Deprecated) {text}").format(text=text)
return text.strip()
def format_help(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the help into the formatter if it exists.
This is a low-level method called by :meth:`get_help`.
This calls the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter)
def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the help text to the formatter if it exists."""
if self.help is not None:
# truncate the help text to the first form feed
text = inspect.cleandoc(self.help).partition("\f")[0]
else:
text = ""
if self.deprecated:
text = _("(Deprecated) {text}").format(text=text)
if text:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(text)
def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes all the options into the formatter if they exist."""
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
opts.append(rv)
if opts:
with formatter.section(_("Options")):
formatter.write_dl(opts)
def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None:
"""Writes the epilog into the formatter if it exists."""
if self.epilog:
epilog = inspect.cleandoc(self.epilog)
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(epilog)
def parse_args(self, ctx: Context, args: t.List[str]) -> t.List[str]:
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
parser = self.make_parser(ctx)
opts, args, param_order = parser.parse_args(args=args)
for param in iter_params_for_processing(param_order, self.get_params(ctx)):
value, args = param.handle_parse_result(ctx, opts, args)
if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
ctx.fail(
ngettext(
"Got unexpected extra argument ({args})",
"Got unexpected extra arguments ({args})",
len(args),
).format(args=" ".join(map(str, args)))
)
ctx.args = args
ctx._opt_prefixes.update(parser._opt_prefixes)
return args
def invoke(self, ctx: Context) -> t.Any:
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
if self.deprecated:
message = _(
"DeprecationWarning: The command {name!r} is deprecated."
).format(name=self.name)
echo(style(message, fg="red"), err=True)
if self.callback is not None:
return ctx.invoke(self.callback, **ctx.params)
def shell_complete(self, ctx: Context, incomplete: str) -> t.List["CompletionItem"]:
"""Return a list of completions for the incomplete value. Looks
at the names of options and chained multi-commands.
:param ctx: Invocation context for this command.
:param incomplete: Value being completed. May be empty.
.. versionadded:: 8.0
"""
from pipenv.vendor.click.shell_completion import CompletionItem
results: t.List["CompletionItem"] = []
if incomplete and not incomplete[0].isalnum():
for param in self.get_params(ctx):
if (
not isinstance(param, Option)
or param.hidden
or (
not param.multiple
and ctx.get_parameter_source(param.name) # type: ignore
is ParameterSource.COMMANDLINE
)
):
continue
results.extend(
CompletionItem(name, help=param.help)
for name in [*param.opts, *param.secondary_opts]
if name.startswith(incomplete)
)
results.extend(super().shell_complete(ctx, incomplete))
return results
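A rough sketch of constructing and invoking the class above directly, without the @click.command decorator. It uses the public click package rather than the vendored copy shown here, and the command name, option, and callback are made up for illustration.

import click

def greet(name: str) -> None:
    # Callback invoked by Command.invoke() with the parsed parameters.
    click.echo(f"Hello, {name}!")

cmd = click.Command(
    "greet",
    params=[click.Option(["--name"], default="world", help="Who to greet.")],
    callback=greet,
    help="Print a greeting.",
)

# standalone_mode=False makes main() return instead of calling sys.exit().
cmd.main(["--name", "Ada"], standalone_mode=False)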
|
Command
|
python
|
pallets__werkzeug
|
src/werkzeug/datastructures/structures.py
|
{
"start": 27446,
"end": 32510
}
|
class ____(ImmutableMultiDictMixin[K, V], MultiDict[K, V]): # type: ignore[misc]
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
This works for all read operations and will raise a `TypeError` for
methods that usually change data which isn't possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol: t.SupportsIndex) -> t.Any:
return type(self), (self.dicts,)
def __init__(self, dicts: cabc.Iterable[MultiDict[K, V]] | None = None) -> None:
super().__init__()
self.dicts: list[MultiDict[K, V]] = list(dicts or ())
@classmethod
def fromkeys(cls, keys: t.Any, value: t.Any = None) -> t.NoReturn:
raise TypeError(f"cannot create {cls.__name__!r} instances by fromkeys")
def __getitem__(self, key: K) -> V:
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
@t.overload # type: ignore[override]
def get(self, key: K) -> V | None: ...
@t.overload
def get(self, key: K, default: V) -> V: ...
@t.overload
def get(self, key: K, default: T) -> V | T: ...
@t.overload
def get(self, key: str, type: cabc.Callable[[V], T]) -> T | None: ...
@t.overload
def get(self, key: str, default: T, type: cabc.Callable[[V], T]) -> T: ...
def get( # type: ignore[misc]
self,
key: K,
default: V | T | None = None,
type: cabc.Callable[[V], T] | None = None,
) -> V | T | None:
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except (ValueError, TypeError):
continue
return d[key]
return default
@t.overload
def getlist(self, key: K) -> list[V]: ...
@t.overload
def getlist(self, key: K, type: cabc.Callable[[V], T]) -> list[T]: ...
def getlist(
self, key: K, type: cabc.Callable[[V], T] | None = None
) -> list[V] | list[T]:
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type)) # type: ignore[arg-type]
return rv
def _keys_impl(self) -> set[K]:
"""This function exists so __len__ can be implemented more efficiently,
saving one list creation from an iterator.
"""
return set(k for d in self.dicts for k in d)
def keys(self) -> cabc.Iterable[K]: # type: ignore[override]
return self._keys_impl()
def __iter__(self) -> cabc.Iterator[K]:
return iter(self._keys_impl())
@t.overload # type: ignore[override]
def items(self) -> cabc.Iterable[tuple[K, V]]: ...
@t.overload
def items(self, multi: t.Literal[True]) -> cabc.Iterable[tuple[K, list[V]]]: ...
def items(
self, multi: bool = False
) -> cabc.Iterable[tuple[K, V]] | cabc.Iterable[tuple[K, list[V]]]:
found = set()
for d in self.dicts:
for key, value in d.items(multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self) -> cabc.Iterable[V]: # type: ignore[override]
for _, value in self.items():
yield value
def lists(self) -> cabc.Iterable[tuple[K, list[V]]]:
rv: dict[K, list[V]] = {}
for d in self.dicts:
for key, values in d.lists():
rv.setdefault(key, []).extend(values)
return rv.items()
def listvalues(self) -> cabc.Iterable[list[V]]:
return (x[1] for x in self.lists())
def copy(self) -> MultiDict[K, V]: # type: ignore[override]
"""Return a shallow mutable copy of this object.
This returns a :class:`MultiDict` representing the data at the
time of copying. The copy will no longer reflect changes to the
wrapped dicts.
.. versionchanged:: 0.15
Return a mutable :class:`MultiDict`.
"""
return MultiDict(self)
def __len__(self) -> int:
return len(self._keys_impl())
def __contains__(self, key: K) -> bool: # type: ignore[override]
for d in self.dicts:
if key in d:
return True
return False
def __repr__(self) -> str:
return f"{type(self).__name__}({self.dicts!r})"
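A short, illustrative addition to the doctest above showing the type-conversion fallthrough implemented in get(): when the converter raises ValueError or TypeError for the first wrapped dict, lookup continues with the next one.

from werkzeug.datastructures import CombinedMultiDict, MultiDict

first = MultiDict([("n", "abc")])    # int("abc") fails, so this dict is skipped
second = MultiDict([("n", "42")])
combined = CombinedMultiDict([first, second])
print(combined.get("n", type=int))   # -> 42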
|
CombinedMultiDict
|
python
|
django__django
|
tests/aggregation/models.py
|
{
"start": 1031,
"end": 1286
}
|
class ____(models.Model):
name = models.CharField(max_length=255)
books = models.ManyToManyField(Book)
original_opening = models.DateTimeField()
friday_night_closing = models.TimeField()
def __str__(self):
return self.name
|
Store
|
python
|
ray-project__ray
|
python/ray/_private/ray_logging/__init__.py
|
{
"start": 7791,
"end": 13219
}
|
class ____:
def __init__(
self,
agg_window_s: int,
allow_re: Optional[str],
skip_re: Optional[str],
*,
_timesource=None,
):
self.agg_window_s = agg_window_s
if allow_re:
self.allow_re = re.compile(allow_re)
else:
self.allow_re = None
if skip_re:
self.skip_re = re.compile(skip_re)
else:
self.skip_re = None
# Buffer of up to RAY_DEDUP_LOGS_AGG_WINDOW_S recent log patterns.
# This buffer is cleared if the pattern isn't seen within the window.
self.recent: Dict[str, DedupState] = {}
self.timesource = _timesource or (lambda: time.time())
run_callback_on_events_in_ipython("post_execute", self.flush)
def deduplicate(self, batch: LogBatch) -> List[LogBatch]:
"""Rewrite a batch of lines to reduce duplicate log messages.
Args:
batch: The batch of lines from a single source.
Returns:
List of batches from this and possibly other previous sources to print.
"""
if not RAY_DEDUP_LOGS:
return [batch]
now = self.timesource()
metadata = batch.copy()
del metadata["lines"]
source = (metadata.get("ip"), metadata.get("pid"))
output: List[LogBatch] = [dict(**metadata, lines=[])]
# Decide which lines to emit from the input batch. Put the outputs in the
# first output log batch (output[0]).
for line in batch["lines"]:
if RAY_TQDM_MAGIC in line or (self.allow_re and self.allow_re.search(line)):
output[0]["lines"].append(line)
continue
elif self.skip_re and self.skip_re.search(line):
continue
dedup_key = _canonicalise_log_line(line)
if dedup_key == "":
# Don't dedup messages that are empty after canonicalization.
# Because that's all the information users want to see.
output[0]["lines"].append(line)
continue
if dedup_key in self.recent:
sources = self.recent[dedup_key].sources
sources.add(source)
# We deduplicate the warnings/error messages from raylet by default.
if len(sources) > 1 or batch["pid"] == "raylet":
state = self.recent[dedup_key]
self.recent[dedup_key] = DedupState(
state.timestamp,
state.count + 1,
line,
metadata,
sources,
)
else:
# Don't dedup messages from the same source, just print.
output[0]["lines"].append(line)
else:
self.recent[dedup_key] = DedupState(now, 0, line, metadata, {source})
output[0]["lines"].append(line)
# Flush patterns from the buffer that are older than the aggregation window.
while self.recent:
if now - next(iter(self.recent.values())).timestamp < self.agg_window_s:
break
dedup_key = next(iter(self.recent))
state = self.recent.pop(dedup_key)
# we already logged an instance of this line immediately when received,
# so don't log for count == 0
if state.count > 1:
# (Actor pid=xxxx) [repeated 2x across cluster] ...
output.append(dict(**state.metadata, lines=[state.formatted()]))
# Continue aggregating for this key but reset timestamp and count.
state.timestamp = now
state.count = 0
self.recent[dedup_key] = state
elif state.count > 0:
# Aggregation wasn't fruitful, print the line and stop aggregating.
output.append(dict(state.metadata, lines=[state.line]))
return output
def flush(self) -> List[dict]:
"""Return all buffered log messages and clear the buffer.
Returns:
List of log batches to print.
"""
output = []
for state in self.recent.values():
if state.count > 1:
output.append(
dict(
state.metadata,
lines=[state.formatted()],
)
)
elif state.count > 0:
output.append(dict(state.metadata, **{"lines": [state.line]}))
self.recent.clear()
return output
def _warn_once() -> str:
if log_once("log_dedup_warning"):
return (
" (Ray deduplicates logs by default. Set RAY_DEDUP_LOGS=0 to "
"disable log deduplication, or see https://docs.ray.io/en/master/"
"ray-observability/user-guides/configure-logging.html#log-deduplication "
"for more options.)"
)
else:
return ""
def _color(msg: str) -> str:
return "{}{}{}".format(colorama.Fore.GREEN, msg, colorama.Style.RESET_ALL)
stdout_deduplicator = LogDeduplicator(
RAY_DEDUP_LOGS_AGG_WINDOW_S, RAY_DEDUP_LOGS_ALLOW_REGEX, RAY_DEDUP_LOGS_SKIP_REGEX
)
stderr_deduplicator = LogDeduplicator(
RAY_DEDUP_LOGS_AGG_WINDOW_S, RAY_DEDUP_LOGS_ALLOW_REGEX, RAY_DEDUP_LOGS_SKIP_REGEX
)
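# Illustrative usage sketch (editor addition, not part of the original file):
# feeding one batch of lines from a single worker through the module-level
# deduplicator above. The dict shape ("ip", "pid", "lines") is the LogBatch
# layout that deduplicate() reads; the values here are made up.
# out_batches = stdout_deduplicator.deduplicate(
#     {"ip": "10.0.0.1", "pid": 1234, "lines": ["step 1 of 100", "step 2 of 100"]}
# )
# for batch in out_batches:
#     for line in batch["lines"]:
#         print(line)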
|
LogDeduplicator
|
python
|
scipy__scipy
|
scipy/stats/_multivariate.py
|
{
"start": 37100,
"end": 49867
}
|
class ____(multi_rv_generic):
r"""A matrix normal random variable.
The `mean` keyword specifies the mean. The `rowcov` keyword specifies the
among-row covariance matrix. The `colcov` keyword specifies the
among-column covariance matrix.
Methods
-------
pdf(X, mean=None, rowcov=1, colcov=1)
Probability density function.
logpdf(X, mean=None, rowcov=1, colcov=1)
Log of the probability density function.
rvs(mean=None, rowcov=1, colcov=1, size=1, random_state=None)
Draw random samples.
entropy(rowcov=1, colcov=1)
Differential entropy.
Parameters
----------
%(_matnorm_doc_default_callparams)s
%(_doc_random_state)s
Notes
-----
%(_matnorm_doc_callparams_note)s
The covariance matrices specified by `rowcov` and `colcov` must be
(symmetric) positive definite. If the samples in `X` are
:math:`m \times n`, then `rowcov` must be :math:`m \times m` and
`colcov` must be :math:`n \times n`. `mean` must be the same shape as `X`.
The probability density function for `matrix_normal` is
.. math::
f(X) = (2 \pi)^{-\frac{mn}{2}}|U|^{-\frac{n}{2}} |V|^{-\frac{m}{2}}
\exp\left( -\frac{1}{2} \mathrm{Tr}\left[ U^{-1} (X-M) V^{-1}
(X-M)^T \right] \right),
where :math:`M` is the mean, :math:`U` the among-row covariance matrix,
:math:`V` the among-column covariance matrix.
The `allow_singular` behaviour of the `multivariate_normal`
distribution is not currently supported. Covariance matrices must be
full rank.
The `matrix_normal` distribution is closely related to the
`multivariate_normal` distribution. Specifically, :math:`\mathrm{Vec}(X)`
(the vector formed by concatenating the columns of :math:`X`) has a
multivariate normal distribution with mean :math:`\mathrm{Vec}(M)`
and covariance :math:`V \otimes U` (where :math:`\otimes` is the Kronecker
product). Sampling and pdf evaluation are
:math:`\mathcal{O}(m^3 + n^3 + m^2 n + m n^2)` for the matrix normal, but
:math:`\mathcal{O}(m^3 n^3)` for the equivalent multivariate normal,
making this equivalent form algorithmically inefficient.
.. versionadded:: 0.17.0
Examples
--------
>>> import numpy as np
>>> from scipy.stats import matrix_normal
>>> M = np.arange(6).reshape(3,2); M
array([[0, 1],
[2, 3],
[4, 5]])
>>> U = np.diag([1,2,3]); U
array([[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> V = 0.3*np.identity(2); V
array([[ 0.3, 0. ],
[ 0. , 0.3]])
>>> X = M + 0.1; X
array([[ 0.1, 1.1],
[ 2.1, 3.1],
[ 4.1, 5.1]])
>>> matrix_normal.pdf(X, mean=M, rowcov=U, colcov=V)
0.023410202050005054
>>> # Equivalent multivariate normal
>>> from scipy.stats import multivariate_normal
>>> vectorised_X = X.T.flatten()
>>> equiv_mean = M.T.flatten()
>>> equiv_cov = np.kron(V,U)
>>> multivariate_normal.pdf(vectorised_X, mean=equiv_mean, cov=equiv_cov)
0.023410202050005054
Alternatively, the object may be called (as a function) to fix the mean
and covariance parameters, returning a "frozen" matrix normal
random variable:
>>> rv = matrix_normal(mean=None, rowcov=1, colcov=1)
>>> # Frozen object with the same methods but holding the given
>>> # mean and covariance fixed.
"""
def __init__(self, seed=None):
super().__init__(seed)
self.__doc__ = doccer.docformat(self.__doc__, matnorm_docdict_params)
def __call__(self, mean=None, rowcov=1, colcov=1, seed=None):
"""Create a frozen matrix normal distribution.
See `matrix_normal_frozen` for more information.
"""
return matrix_normal_frozen(mean, rowcov, colcov, seed=seed)
def _process_parameters(self, mean, rowcov, colcov):
"""
Infer dimensionality from mean or covariance matrices. Handle
defaults. Ensure compatible dimensions.
"""
# Process mean
if mean is not None:
mean = np.asarray(mean, dtype=float)
meanshape = mean.shape
if len(meanshape) != 2:
raise ValueError("Array `mean` must be two dimensional.")
if np.any(meanshape == 0):
raise ValueError("Array `mean` has invalid shape.")
# Process among-row covariance
rowcov = np.asarray(rowcov, dtype=float)
if rowcov.ndim == 0:
if mean is not None:
rowcov = rowcov * np.identity(meanshape[0])
else:
rowcov = rowcov * np.identity(1)
elif rowcov.ndim == 1:
rowcov = np.diag(rowcov)
rowshape = rowcov.shape
if len(rowshape) != 2:
raise ValueError("`rowcov` must be a scalar or a 2D array.")
if rowshape[0] != rowshape[1]:
raise ValueError("Array `rowcov` must be square.")
if rowshape[0] == 0:
raise ValueError("Array `rowcov` has invalid shape.")
numrows = rowshape[0]
# Process among-column covariance
colcov = np.asarray(colcov, dtype=float)
if colcov.ndim == 0:
if mean is not None:
colcov = colcov * np.identity(meanshape[1])
else:
colcov = colcov * np.identity(1)
elif colcov.ndim == 1:
colcov = np.diag(colcov)
colshape = colcov.shape
if len(colshape) != 2:
raise ValueError("`colcov` must be a scalar or a 2D array.")
if colshape[0] != colshape[1]:
raise ValueError("Array `colcov` must be square.")
if colshape[0] == 0:
raise ValueError("Array `colcov` has invalid shape.")
numcols = colshape[0]
# Ensure mean and covariances compatible
if mean is not None:
if meanshape[0] != numrows:
raise ValueError("Arrays `mean` and `rowcov` must have the "
"same number of rows.")
if meanshape[1] != numcols:
raise ValueError("Arrays `mean` and `colcov` must have the "
"same number of columns.")
else:
mean = np.zeros((numrows, numcols))
dims = (numrows, numcols)
return dims, mean, rowcov, colcov
def _process_quantiles(self, X, dims):
"""
Adjust quantiles array so that last two axes labels the components of
each data point.
"""
X = np.asarray(X, dtype=float)
if X.ndim == 2:
X = X[np.newaxis, :]
if X.shape[-2:] != dims:
raise ValueError("The shape of array `X` is not compatible "
"with the distribution parameters.")
return X
def _logpdf(self, dims, X, mean, row_prec_rt, log_det_rowcov,
col_prec_rt, log_det_colcov):
"""Log of the matrix normal probability density function.
Parameters
----------
dims : tuple
Dimensions of the matrix variates
X : ndarray
Points at which to evaluate the log of the probability
density function
mean : ndarray
Mean of the distribution
row_prec_rt : ndarray
A decomposition such that np.dot(row_prec_rt, row_prec_rt.T)
is the inverse of the among-row covariance matrix
log_det_rowcov : float
Logarithm of the determinant of the among-row covariance matrix
col_prec_rt : ndarray
A decomposition such that np.dot(col_prec_rt, col_prec_rt.T)
is the inverse of the among-column covariance matrix
log_det_colcov : float
Logarithm of the determinant of the among-column covariance matrix
Notes
-----
As this function does no argument checking, it should not be
called directly; use 'logpdf' instead.
"""
numrows, numcols = dims
roll_dev = np.moveaxis(X-mean, -1, 0)
scale_dev = np.tensordot(col_prec_rt.T,
np.dot(roll_dev, row_prec_rt), 1)
maha = np.sum(np.sum(np.square(scale_dev), axis=-1), axis=0)
return -0.5 * (numrows*numcols*_LOG_2PI + numcols*log_det_rowcov
+ numrows*log_det_colcov + maha)
def logpdf(self, X, mean=None, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
logpdf : ndarray
Log of the probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
X = self._process_quantiles(X, dims)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U,
colpsd.log_pdet)
return _squeeze_output(out)
def pdf(self, X, mean=None, rowcov=1, colcov=1):
"""Matrix normal probability density function.
Parameters
----------
X : array_like
Quantiles, with the last two axes of `X` denoting the components.
%(_matnorm_doc_default_callparams)s
Returns
-------
pdf : ndarray
Probability density function evaluated at `X`
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
return np.exp(self.logpdf(X, mean, rowcov, colcov))
def rvs(self, mean=None, rowcov=1, colcov=1, size=1, random_state=None):
"""Draw random samples from a matrix normal distribution.
Parameters
----------
%(_matnorm_doc_default_callparams)s
size : integer, optional
Number of samples to draw (default 1).
%(_doc_random_state)s
Returns
-------
rvs : ndarray or scalar
Random variates of size (`size`, `dims`), where `dims` is the
dimension of the random matrices.
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
size = int(size)
dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov,
colcov)
rowchol = scipy.linalg.cholesky(rowcov, lower=True)
colchol = scipy.linalg.cholesky(colcov, lower=True)
random_state = self._get_random_state(random_state)
# We aren't generating standard normal variates with size=(size,
# dims[0], dims[1]) directly to ensure random variates remain backwards
# compatible. See https://github.com/scipy/scipy/pull/12312 for more
# details.
std_norm = random_state.standard_normal(
size=(dims[1], size, dims[0])
).transpose(1, 2, 0)
out = mean + np.einsum('jp,ipq,kq->ijk',
rowchol, std_norm, colchol,
optimize=True)
if size == 1:
out = out.reshape(mean.shape)
return out
def entropy(self, rowcov=1, colcov=1):
"""Log of the matrix normal probability density function.
Parameters
----------
rowcov : array_like, optional
Among-row covariance matrix of the distribution (default: ``1``)
colcov : array_like, optional
Among-column covariance matrix of the distribution (default: ``1``)
Returns
-------
entropy : float
Entropy of the distribution
Notes
-----
%(_matnorm_doc_callparams_note)s
"""
dummy_mean = np.zeros((rowcov.shape[0], colcov.shape[0]))
dims, _, rowcov, colcov = self._process_parameters(dummy_mean,
rowcov,
colcov)
rowpsd = _PSD(rowcov, allow_singular=False)
colpsd = _PSD(colcov, allow_singular=False)
return self._entropy(dims, rowpsd.log_pdet, colpsd.log_pdet)
def _entropy(self, dims, row_cov_logdet, col_cov_logdet):
n, p = dims
return (0.5 * n * p * (1 + _LOG_2PI) + 0.5 * p * row_cov_logdet +
0.5 * n * col_cov_logdet)
matrix_normal = matrix_normal_gen()
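# Illustrative usage sketch (editor addition, not part of the original file):
# drawing samples from the distribution used in the docstring example above
# and checking the (size, m, n) shape convention of rvs(). Assumes numpy is
# available as np, as it is at the top of this module.
_example_M = np.arange(6).reshape(3, 2)
_example_U = np.diag([1.0, 2.0, 3.0])
_example_V = 0.3 * np.identity(2)
_example_samples = matrix_normal.rvs(mean=_example_M, rowcov=_example_U, colcov=_example_V, size=100, random_state=0)
assert _example_samples.shape == (100, 3, 2)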
|
matrix_normal_gen
|
python
|
ZoranPandovski__al-go-rithms
|
others/synchronization/ProducerConsumer/Python/producer_consumer.py
|
{
"start": 998,
"end": 1533
}
|
class ____(Thread):
def run(self):
global Q
while True:
condition.acquire()
#lock.acquire()
if not Q:
print("Q empty : consumer is waiting!")
condition.wait()
print("producer added some item and notified the consumer")
item = Q.pop(0)
print("consumed", item)
condition.release()
#lock.release()
time.sleep(random.random())
Producer().start()
Consumer().start()
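# Illustrative sketch (editor addition): the snippet above starts a Producer
# thread that is not shown here. A matching producer using the same shared Q
# and condition objects would plausibly look like the following; this is an
# assumption for illustration, not the original code.
# class Producer(Thread):
#     def run(self):
#         global Q
#         while True:
#             condition.acquire()
#             item = random.randint(1, 10)
#             Q.append(item)
#             print("produced", item)
#             condition.notify()
#             condition.release()
#             time.sleep(random.random())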
|
Consumer
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-dbt/prefect_dbt/core/settings.py
|
{
"start": 650,
"end": 3857
}
|
class ____(BaseSettings):
"""
dbt settings that directly affect the PrefectDbtRunner.
These settings will be collected automatically from their corresponding 'DBT_'-prefixed environment variables.
If a setting is not set in the environment or in the fields of this class, the default value will be used.
All other dbt settings should be used as normal, e.g. in the dbt_project.yml file, env vars, or kwargs to `invoke()`.
"""
model_config = SettingsConfigDict(env_prefix="DBT_")
profiles_dir: Path = Field(
default_factory=find_profiles_dir,
description="The directory containing the dbt profiles.yml file.",
)
project_dir: Path = Field(
default_factory=Path.cwd,
description="The directory containing the dbt project.",
)
log_level: EventLevel = Field(
default_factory=lambda: EventLevel(
get_current_settings().logging.level.lower()
),
description="The log level of the dbt CLI. Uses Prefect's logging level if not set.",
)
target_path: Path = Field(
default=Path("target"),
description="The path to the dbt target directory (relative to project_dir).",
)
def load_profiles_yml(self) -> dict[str, Any]:
"""
Load and parse the profiles.yml file.
Returns:
Dict containing the parsed profiles.yml contents
Raises:
ValueError: If profiles.yml is not found
"""
profiles_path = self.profiles_dir / "profiles.yml"
if not profiles_path.exists():
raise ValueError(f"No profiles.yml found at {profiles_path}")
with open(profiles_path, "r") as f:
return yaml.safe_load(f)
@contextlib.contextmanager
def resolve_profiles_yml(self) -> Generator[str, None, None]:
"""
Context manager that creates a temporary directory with a resolved profiles.yml file.
Yields:
str: Path to temporary directory containing the resolved profiles.yml.
Directory and contents are automatically cleaned up after context exit.
Example:
```python
with resolve_profiles_yml() as temp_dir:
# temp_dir contains resolved profiles.yml
# use temp_dir for dbt operations
# temp_dir is automatically cleaned up
```
"""
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir_path = Path(temp_dir)
profiles_yml: dict[str, Any] = self.load_profiles_yml()
profiles_yml = run_coro_as_sync(
resolve_block_document_references(
profiles_yml, value_transformer=replace_with_env_var_call
)
)
profiles_yml = run_coro_as_sync(resolve_variables(profiles_yml))
temp_profiles_path = temp_dir_path / "profiles.yml"
temp_profiles_path.write_text(
yaml.dump(profiles_yml, default_style=None, default_flow_style=False)
)
yield str(temp_dir_path)
|
PrefectDbtSettings
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 145792,
"end": 146762
}
|
class ____(BaseModel):
"""
Config of single vector data storage
"""
size: int = Field(..., description="Size/dimensionality of the vectors used")
distance: "Distance" = Field(..., description="Config of single vector data storage")
storage_type: "VectorStorageType" = Field(..., description="Config of single vector data storage")
index: "Indexes" = Field(..., description="Config of single vector data storage")
quantization_config: Optional["QuantizationConfig"] = Field(
default=None, description="Vector specific quantization config that overrides collection config"
)
multivector_config: Optional["MultiVectorConfig"] = Field(
default=None, description="Vector specific configuration to enable multiple vectors per point"
)
datatype: Optional["VectorStorageDatatype"] = Field(
default=None, description="Vector specific configuration to set specific storage element type"
)
|
VectorDataConfig
|
python
|
google__pytype
|
pytype/rewrite/abstract/functions.py
|
{
"start": 1226,
"end": 2145
}
|
class ____(Protocol):
"""Protocol for a VM frame."""
name: str
final_locals: Mapping[str, base.BaseValue]
stack: Sequence['FrameType']
functions: Sequence['InterpreterFunction']
classes: Sequence[Any]
def make_child_frame(
self,
func: 'InterpreterFunction',
initial_locals: Mapping[str, _Var],
) -> 'FrameType': ...
def run(self) -> None: ...
def get_return_value(self) -> base.BaseValue: ...
def load_attr(self, target_var: _Var, attr_name: str) -> _Var: ...
_FrameT = TypeVar('_FrameT', bound=FrameType)
def _unpack_splats(elts):
"""Unpack any concrete splats and splice them into the sequence."""
ret = []
for e in elts:
try:
splat = e.get_atomic_value(internal.Splat)
ret.extend(splat.get_concrete_iterable())
except ValueError:
# Leave an indefinite splat intact
ret.append(e)
return tuple(ret)
@dataclasses.dataclass
|
FrameType
|
python
|
ray-project__ray
|
python/ray/llm/tests/batch/cpu/processor/test_processor_base.py
|
{
"start": 9116,
"end": 12342
}
|
class ____:
def test_valid_concurrency(self):
config = vLLMEngineProcessorConfig(
model_source="unsloth/Llama-3.2-1B-Instruct",
concurrency=(1, 2),
)
assert config.concurrency == (1, 2)
config = vLLMEngineProcessorConfig(
model_source="unsloth/Llama-3.2-1B-Instruct",
)
assert config.concurrency == 1
def test_invalid_concurrency(self):
with pytest.raises(pydantic.ValidationError):
vLLMEngineProcessorConfig(
model_source="unsloth/Llama-3.2-1B-Instruct",
concurrency=1.1,
)
with pytest.raises(pydantic.ValidationError):
vLLMEngineProcessorConfig(
model_source="unsloth/Llama-3.2-1B-Instruct",
concurrency=[1, 2, 3],
)
@pytest.mark.parametrize("n", [1, 2, 10])
def test_positive_int_not_fail(self, n):
conf = ProcessorConfig(concurrency=n)
assert conf.concurrency == n
def test_positive_int_unusual_not_fail(self):
assert ProcessorConfig(concurrency="1").concurrency == 1
assert ProcessorConfig(concurrency=1.0).concurrency == 1
assert ProcessorConfig(concurrency="1.0").concurrency == 1
@pytest.mark.parametrize("pair", [(1, 1), (1, 2), (2, 8)])
def test_valid_tuple_not_fail(self, pair):
conf = ProcessorConfig(concurrency=pair)
assert conf.concurrency == pair
def test_valid_tuple_unusual_not_fail(self):
assert ProcessorConfig(concurrency=("1", 2)).concurrency == (1, 2)
assert ProcessorConfig(concurrency=(1, "2")).concurrency == (1, 2)
assert ProcessorConfig(concurrency=[1, "2"]).concurrency == (1, 2)
@pytest.mark.parametrize(
"bad,msg_part",
[
(0, "positive integer"),
(-5, "positive integer"),
((1, 2, 3), "at most 2 items"),
((0, 1), "positive integers"),
((1, 0), "positive integers"),
((-1, 2), "positive integers"),
((1, -2), "positive integers"),
((1, 2.5), "a number with a fractional part"),
("2.1", "unable to parse string"),
((5, 2), "min > max"),
],
)
def test_invalid_inputs_raise(self, bad, msg_part):
with pytest.raises(pydantic.ValidationError) as e:
ProcessorConfig(concurrency=bad)
assert msg_part in str(e.value)
@pytest.mark.parametrize(
"n,expected", [(1, (1, 1)), (4, (1, 4)), (10, (1, 10)), ("10", (1, 10))]
)
def test_with_int_concurrency_scaling(self, n, expected):
conf = ProcessorConfig(concurrency=n)
assert conf.get_concurrency() == expected
@pytest.mark.parametrize("n,expected", [(1, (1, 1)), (4, (4, 4)), (10, (10, 10))])
def test_with_int_concurrency_fixed(self, n, expected):
conf = ProcessorConfig(concurrency=n)
assert conf.get_concurrency(autoscaling_enabled=False) == expected
@pytest.mark.parametrize("pair", [(1, 1), (1, 3), (2, 8)])
def test_with_tuple_concurrency(self, pair):
conf = ProcessorConfig(concurrency=pair)
assert conf.get_concurrency() == pair
|
TestProcessorConfig
|
python
|
allegroai__clearml
|
clearml/debugging/log.py
|
{
"start": 2949,
"end": 9576
}
|
class ____(object):
__base_logger = None
@classmethod
def get_base_logger(
cls,
level: Optional[Union[str, int]] = None,
stream: Union[None, TextIO] = sys.stdout,
colored: bool = False,
) -> PickledLogger:
if LoggerRoot.__base_logger:
return LoggerRoot.__base_logger
# Note we can't use LOG_LEVEL_ENV_VAR defined in clearml.config.defs due to a circular dependency
if level is None and getenv("CLEARML_LOG_LEVEL"):
level = resolve_logging_level(getenv("CLEARML_LOG_LEVEL").strip())
if level is None:
print("Invalid value in environment variable CLEARML_LOG_LEVEL: %s" % getenv("CLEARML_LOG_LEVEL"))
clearml_logger = logging.getLogger("clearml")
if level is None:
level = clearml_logger.level
# avoid nested imports
from ..config import get_log_redirect_level
LoggerRoot.__base_logger = PickledLogger.wrapper(
clearml_logger,
func=cls.get_base_logger,
level=level,
stream=stream,
colored=colored,
)
LoggerRoot.__base_logger.setLevel(level)
redirect_level = get_log_redirect_level()
# Do not redirect to stderr if the target stream is already stderr
if redirect_level is not None and stream not in (None, sys.stderr):
# Adjust redirect level in case requested level is higher (e.g. logger is requested for CRITICAL
# and redirect is set for ERROR, in which case we redirect from CRITICAL)
redirect_level = max(level, redirect_level)
LoggerRoot.__base_logger.addHandler(ClearmlStreamHandler(redirect_level, sys.stderr, colored))
if level < redirect_level:
# Not all levels were redirected, remaining should be sent to requested stream
handler = ClearmlStreamHandler(level, stream, colored)
handler.addFilter(_LevelRangeFilter(min_level=level, max_level=redirect_level - 1))
LoggerRoot.__base_logger.addHandler(handler)
else:
LoggerRoot.__base_logger.addHandler(ClearmlStreamHandler(level, stream, colored))
LoggerRoot.__base_logger.propagate = False
return LoggerRoot.__base_logger
@classmethod
def flush(cls) -> None:
if LoggerRoot.__base_logger:
for h in LoggerRoot.__base_logger.handlers:
h.flush()
@staticmethod
def clear_logger_handlers() -> None:
# https://github.com/pytest-dev/pytest/issues/5502#issuecomment-647157873
loggers = [logging.getLogger()] + list(logging.Logger.manager.loggerDict.values())
for logger in loggers:
handlers = getattr(logger, "handlers", [])
for handler in handlers:
if isinstance(handler, ClearmlLoggerHandler):
logger.removeHandler(handler)
def add_options(parser: argparse.ArgumentParser) -> None:
"""Add logging options to an argparse.ArgumentParser object"""
level = logging.getLevelName(default_level)
parser.add_argument("--log-level", "-l", default=level, help="Log level (default is %s)" % level)
def apply_logging_args(args: argparse.Namespace) -> None:
"""Apply logging args from an argparse.ArgumentParser parsed args"""
global default_level
default_level = logging.getLevelName(args.log_level.upper())
def get_logger(
path: Optional[str] = None,
level: Optional[int] = None,
stream: Optional[Union[BytesIO, TextIO]] = None,
colored: bool = False,
) -> PickledLogger:
"""Get a python logging object named using the provided filename and preconfigured with a color-formatted
stream handler
"""
# noinspection PyBroadException
try:
path = path or os.path.abspath((inspect.stack()[1])[1])
except BaseException:
# if for some reason we could not find the calling file, use our own
path = os.path.abspath(__file__)
root_log = LoggerRoot.get_base_logger(stream=sys.stdout, colored=colored)
log = root_log.getChild(Path(path).stem)
if level is not None:
log.setLevel(level)
if stream:
ch = ClearmlStreamHandler(stream=stream, dont_set_formater=True)
if level is not None:
ch.setLevel(level)
log.addHandler(ch)
log.propagate = True
return PickledLogger.wrapper(log, func=get_logger, path=path, level=level, stream=stream, colored=colored)
def _add_file_handler(
logger: logging.Logger,
log_dir: Union[str, os.PathLike],
fh: logging.FileHandler,
formatter: Optional[logging.Formatter] = None,
) -> None:
"""Adds a file handler to a logger"""
Path(log_dir).mkdir(parents=True, exist_ok=True)
if not formatter:
log_format = "%(asctime)s %(name)s x_x[%(levelname)s] %(message)s"
formatter = logging.Formatter(log_format)
fh.setFormatter(formatter)
logger.addHandler(fh)
def add_rotating_file_handler(
logger: logging.Logger,
log_dir: Union[str, os.PathLike],
log_file_prefix: str,
max_bytes: int = 10 * 1024 * 1024,
backup_count: int = 20,
formatter: Optional[logging.Formatter] = None,
) -> None:
"""Create and add a rotating file handler to a logger"""
fh = ClearmlRotatingFileHandler(
str(Path(log_dir) / ("%s.log" % log_file_prefix)),
maxBytes=max_bytes,
backupCount=backup_count,
)
_add_file_handler(logger, log_dir, fh, formatter)
def add_time_rotating_file_handler(
logger: logging.Logger,
log_dir: Union[str, os.PathLike],
log_file_prefix: str,
when: str = "midnight",
formatter: Optional[logging.Formatter] = None,
) -> None:
"""
Create and add a time rotating file handler to a logger.
Possible values for when are 'midnight', weekdays ('W0'-'W6', where 0 is Monday), and 's', 'm', 'h' and 'd' for
seconds, minutes, hours and days respectively (case-insensitive)
"""
fh = ClearmlTimedRotatingFileHandler(str(Path(log_dir) / ("%s.log" % log_file_prefix)), when=when)
_add_file_handler(logger, log_dir, fh, formatter)
def get_null_logger(name: Optional[str] = None) -> PickledLogger:
"""Get a logger with a null handler"""
log = logging.getLogger(name if name else "null")
if not log.handlers:
# avoid nested imports
from ..config import config
log.addHandler(ClearmlNullHandler())
log.propagate = config.get("log.null_log_propagate", False)
return PickledLogger.wrapper(log, func=get_null_logger, name=name)
|
LoggerRoot
|
python
|
protocolbuffers__protobuf
|
python/google/protobuf/internal/well_known_types.py
|
{
"start": 11450,
"end": 19302
}
|
class ____(object):
"""Class for Duration message type."""
__slots__ = ()
def ToJsonString(self):
"""Converts Duration to string format.
Returns:
A string converted from self. The string format will contain
3, 6, or 9 fractional digits depending on the precision required to
represent the exact Duration value. For example: "1s", "1.010s",
"1.000000100s", "-3.100s"
"""
_CheckDurationValid(self.seconds, self.nanos)
if self.seconds < 0 or self.nanos < 0:
result = '-'
seconds = -self.seconds + int((0 - self.nanos) // 1e9)
nanos = (0 - self.nanos) % 1e9
else:
result = ''
seconds = self.seconds + int(self.nanos // 1e9)
nanos = self.nanos % 1e9
result += '%d' % seconds
if (nanos % 1e9) == 0:
# If there are 0 fractional digits, the fractional
# point '.' should be omitted when serializing.
return result + 's'
if (nanos % 1e6) == 0:
# Serialize 3 fractional digits.
return result + '.%03ds' % (nanos / 1e6)
if (nanos % 1e3) == 0:
# Serialize 6 fractional digits.
return result + '.%06ds' % (nanos / 1e3)
# Serialize 9 fractional digits.
return result + '.%09ds' % nanos
def FromJsonString(self, value):
"""Converts a string to Duration.
Args:
value: A string to be converted. The string must end with 's'. Any
fractional digits (or none) are accepted as long as they fit into
precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s"
Raises:
ValueError: On parsing problems.
"""
if not isinstance(value, str):
raise ValueError('Duration JSON value not a string: {!r}'.format(value))
if len(value) < 1 or value[-1] != 's':
raise ValueError('Duration must end with letter "s": {0}.'.format(value))
try:
pos = value.find('.')
if pos == -1:
seconds = int(value[:-1])
nanos = 0
else:
seconds = int(value[:pos])
if value[0] == '-':
nanos = int(round(float('-0{0}'.format(value[pos:-1])) * 1e9))
else:
nanos = int(round(float('0{0}'.format(value[pos:-1])) * 1e9))
_CheckDurationValid(seconds, nanos)
self.seconds = seconds
self.nanos = nanos
except ValueError as e:
raise ValueError("Couldn't parse duration: {0} : {1}.".format(value, e))
def ToNanoseconds(self):
"""Converts a Duration to nanoseconds."""
return self.seconds * _NANOS_PER_SECOND + self.nanos
def ToMicroseconds(self):
"""Converts a Duration to microseconds."""
micros = _RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND)
return self.seconds * _MICROS_PER_SECOND + micros
def ToMilliseconds(self):
"""Converts a Duration to milliseconds."""
millis = _RoundTowardZero(self.nanos, _NANOS_PER_MILLISECOND)
return self.seconds * _MILLIS_PER_SECOND + millis
def ToSeconds(self):
"""Converts a Duration to seconds."""
return self.seconds
def FromNanoseconds(self, nanos):
"""Converts nanoseconds to Duration."""
self._NormalizeDuration(
nanos // _NANOS_PER_SECOND, nanos % _NANOS_PER_SECOND
)
def FromMicroseconds(self, micros):
"""Converts microseconds to Duration."""
self._NormalizeDuration(
micros // _MICROS_PER_SECOND,
(micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND,
)
def FromMilliseconds(self, millis):
"""Converts milliseconds to Duration."""
self._NormalizeDuration(
millis // _MILLIS_PER_SECOND,
(millis % _MILLIS_PER_SECOND) * _NANOS_PER_MILLISECOND,
)
def FromSeconds(self, seconds):
"""Converts seconds to Duration."""
self.seconds = seconds
self.nanos = 0
def ToTimedelta(self) -> datetime.timedelta:
"""Converts Duration to timedelta."""
return datetime.timedelta(
seconds=self.seconds,
microseconds=_RoundTowardZero(self.nanos, _NANOS_PER_MICROSECOND),
)
def FromTimedelta(self, td):
"""Converts timedelta to Duration."""
if type(td).__name__ != 'timedelta' and not isinstance(
td, datetime.timedelta
):
raise TypeError(
'Fail to convert to Duration. Expected a timedelta object '
'got {0}'.format(type(td).__name__)
)
try:
self._NormalizeDuration(
td.seconds + td.days * _SECONDS_PER_DAY,
td.microseconds * _NANOS_PER_MICROSECOND,
)
except AttributeError as e:
raise AttributeError(
'Fail to convert to Duration. Expected a timedelta like '
'object got {0}: {1}'.format(type(td).__name__, e)
) from e
def _internal_assign(self, td):
self.FromTimedelta(td)
def _NormalizeDuration(self, seconds, nanos):
"""Set Duration by seconds and nanos."""
# Force nanos to be negative if the duration is negative.
if seconds < 0 and nanos > 0:
seconds += 1
nanos -= _NANOS_PER_SECOND
self.seconds = seconds
self.nanos = nanos
def __add__(self, value) -> Union[datetime.datetime, datetime.timedelta]:
if isinstance(value, Timestamp):
return self.ToTimedelta() + value.ToDatetime()
return self.ToTimedelta() + value
__radd__ = __add__
def __sub__(self, value) -> datetime.timedelta:
return self.ToTimedelta() - value
def __rsub__(self, value) -> Union[datetime.datetime, datetime.timedelta]:
return value - self.ToTimedelta()
def _CheckDurationValid(seconds, nanos):
if seconds < -_DURATION_SECONDS_MAX or seconds > _DURATION_SECONDS_MAX:
raise ValueError(
'Duration is not valid: Seconds {0} must be in range '
'[-315576000000, 315576000000].'.format(seconds)
)
if nanos <= -_NANOS_PER_SECOND or nanos >= _NANOS_PER_SECOND:
raise ValueError(
'Duration is not valid: Nanos {0} must be in range '
'[-999999999, 999999999].'.format(nanos)
)
if (nanos < 0 and seconds > 0) or (nanos > 0 and seconds < 0):
raise ValueError('Duration is not valid: Sign mismatch.')
def _RoundTowardZero(value, divider):
"""Truncates the remainder part after division."""
# For some languages, the sign of the remainder is implementation
# dependent if any of the operands is negative. Here we enforce
# "rounded toward zero" semantics. For example, for (-5) / 2 an
# implementation may give -3 as the result with the remainder being
# 1. This function ensures we always return -2 (closer to zero).
result = value // divider
remainder = value % divider
if result < 0 and remainder > 0:
return result + 1
else:
return result
def _SetStructValue(struct_value, value):
if value is None:
struct_value.null_value = 0
elif isinstance(value, bool):
# Note: this check must come before the number check because in Python
# True and False are also considered numbers.
struct_value.bool_value = value
elif isinstance(value, str):
struct_value.string_value = value
elif isinstance(value, (int, float)):
struct_value.number_value = value
elif isinstance(value, (dict, Struct)):
struct_value.struct_value.Clear()
struct_value.struct_value.update(value)
elif isinstance(value, (list, tuple, ListValue)):
struct_value.list_value.Clear()
struct_value.list_value.extend(value)
else:
raise ValueError('Unexpected type')
def _GetStructValue(struct_value):
which = struct_value.WhichOneof('kind')
if which == 'struct_value':
return struct_value.struct_value
elif which == 'null_value':
return None
elif which == 'number_value':
return struct_value.number_value
elif which == 'string_value':
return struct_value.string_value
elif which == 'bool_value':
return struct_value.bool_value
elif which == 'list_value':
return struct_value.list_value
elif which is None:
raise ValueError('Value not set')
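# Illustrative usage sketch (editor addition, not part of the original file):
# round-tripping a value through the JSON helpers defined above. This assumes
# the generated Duration message has these mixin methods attached, which is
# how protobuf registers its well-known types by default.
# from google.protobuf import duration_pb2
# d = duration_pb2.Duration()
# d.FromJsonString("1.500s")   # seconds=1, nanos=500000000
# d.ToJsonString()             # -> "1.500s"
# d.ToMilliseconds()           # -> 1500
# d.ToTimedelta()              # -> datetime.timedelta(seconds=1, microseconds=500000)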
|
Duration
|
python
|
mlflow__mlflow
|
mlflow/genai/judges/adapters/databricks_adapter.py
|
{
"start": 13671,
"end": 21457
}
|
class ____:
response: str
request_id: str | None
num_prompt_tokens: int | None
num_completion_tokens: int | None
def _parse_databricks_model_response(
res_json: dict[str, Any], headers: dict[str, Any]
) -> InvokeDatabricksModelOutput:
"""
Parse and validate the response from a Databricks model invocation.
Args:
res_json: The JSON response from the model
headers: The response headers
Returns:
InvokeDatabricksModelOutput with parsed response data
Raises:
MlflowException: If the response structure is invalid
"""
# Validate and extract choices
choices = res_json.get("choices", [])
if not choices:
raise MlflowException(
"Invalid response from Databricks model: missing 'choices' field",
error_code=INVALID_PARAMETER_VALUE,
)
first_choice = choices[0]
if "message" not in first_choice:
raise MlflowException(
"Invalid response from Databricks model: missing 'message' field",
error_code=INVALID_PARAMETER_VALUE,
)
content = first_choice.get("message", {}).get("content")
if content is None:
raise MlflowException(
"Invalid response from Databricks model: missing 'content' field",
error_code=INVALID_PARAMETER_VALUE,
)
# Handle reasoning response (list of content items)
if isinstance(content, list):
text_content = next(
(
item.get("text")
for item in content
if isinstance(item, dict) and item.get("type") == "text"
),
None,
)
if text_content is None:
raise MlflowException(
"Invalid reasoning response: no text content found in response list",
error_code=INVALID_PARAMETER_VALUE,
)
content = text_content
usage = res_json.get("usage", {})
return InvokeDatabricksModelOutput(
response=content,
request_id=headers.get("x-request-id"),
num_prompt_tokens=usage.get("prompt_tokens"),
num_completion_tokens=usage.get("completion_tokens"),
)
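# Illustrative sketch (editor addition, not part of the original adapter): a
# minimal, well-formed endpoint response as seen by the parser above. The
# field names mirror the OpenAI-style schema it reads; the values are made up.
# parsed = _parse_databricks_model_response(
#     {
#         "choices": [{"message": {"content": "yes"}}],
#         "usage": {"prompt_tokens": 12, "completion_tokens": 1},
#     },
#     {"x-request-id": "req-123"},
# )
# parsed.response == "yes" and parsed.request_id == "req-123"
# parsed.num_prompt_tokens == 12 and parsed.num_completion_tokens == 1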
def _invoke_databricks_serving_endpoint(
*,
model_name: str,
prompt: str,
num_retries: int,
response_format: type[pydantic.BaseModel] | None = None,
) -> InvokeDatabricksModelOutput:
from mlflow.utils.databricks_utils import get_databricks_host_creds
# B-Step62: Why not use mlflow deployment client?
host_creds = get_databricks_host_creds()
api_url = f"{host_creds.host}/serving-endpoints/{model_name}/invocations"
# Implement retry logic with exponential backoff
last_exception = None
for attempt in range(num_retries + 1):
try:
# Build request payload
payload = {
"messages": [
{
"role": "user",
"content": prompt,
}
],
}
# Add response_schema if provided
if response_format is not None:
payload["response_schema"] = response_format.model_json_schema()
res = requests.post(
url=api_url,
headers={"Authorization": f"Bearer {host_creds.token}"},
json=payload,
)
except (requests.RequestException, requests.ConnectionError) as e:
last_exception = e
if attempt < num_retries:
_logger.debug(
f"Request attempt {attempt + 1} failed with error: {e}", exc_info=True
)
time.sleep(2**attempt) # Exponential backoff
continue
else:
raise MlflowException(
f"Failed to invoke Databricks model after {num_retries + 1} attempts: {e}",
error_code=INVALID_PARAMETER_VALUE,
) from e
# Check HTTP status before parsing JSON
if res.status_code in [400, 401, 403, 404]:
# Don't retry on bad request, unauthorized, forbidden, or not found
raise MlflowException(
f"Databricks model invocation failed with status {res.status_code}: {res.text}",
error_code=INVALID_PARAMETER_VALUE,
)
if res.status_code >= 400:
# For other errors, raise exception and potentially retry
error_msg = (
f"Databricks model invocation failed with status {res.status_code}: {res.text}"
)
if attempt < num_retries:
# Log and retry for transient errors
_logger.debug(f"Attempt {attempt + 1} failed: {error_msg}", exc_info=True)
time.sleep(2**attempt) # Exponential backoff
continue
else:
raise MlflowException(error_msg, error_code=INVALID_PARAMETER_VALUE)
# Parse JSON response
try:
res_json = res.json()
except json.JSONDecodeError as e:
raise MlflowException(
f"Failed to parse JSON response from Databricks model: {e}",
error_code=INVALID_PARAMETER_VALUE,
) from e
# Parse and validate the response using helper function
return _parse_databricks_model_response(res_json, res.headers)
# This should not be reached, but just in case
if last_exception:
raise MlflowException(
f"Failed to invoke Databricks model: {last_exception}",
error_code=INVALID_PARAMETER_VALUE,
) from last_exception
def _record_judge_model_usage_success_databricks_telemetry(
*,
request_id: str | None,
model_provider: str,
endpoint_name: str,
num_prompt_tokens: int | None,
num_completion_tokens: int | None,
) -> None:
try:
from databricks.agents.telemetry import record_judge_model_usage_success
except ImportError:
_logger.debug(
"Failed to import databricks.agents.telemetry.record_judge_model_usage_success; "
"databricks-agents needs to be installed."
)
return
from mlflow.tracking.fluent import _get_experiment_id
from mlflow.utils.databricks_utils import get_job_id, get_job_run_id, get_workspace_id
record_judge_model_usage_success(
request_id=request_id,
experiment_id=_get_experiment_id(),
job_id=get_job_id(),
job_run_id=get_job_run_id(),
workspace_id=get_workspace_id(),
model_provider=model_provider,
endpoint_name=endpoint_name,
num_prompt_tokens=num_prompt_tokens,
num_completion_tokens=num_completion_tokens,
)
def _record_judge_model_usage_failure_databricks_telemetry(
*,
model_provider: str,
endpoint_name: str,
error_code: str,
error_message: str,
) -> None:
try:
from databricks.agents.telemetry import record_judge_model_usage_failure
except ImportError:
_logger.debug(
"Failed to import databricks.agents.telemetry.record_judge_model_usage_success; "
"databricks-agents needs to be installed."
)
return
from mlflow.tracking.fluent import _get_experiment_id
from mlflow.utils.databricks_utils import get_job_id, get_job_run_id, get_workspace_id
record_judge_model_usage_failure(
experiment_id=_get_experiment_id(),
job_id=get_job_id(),
job_run_id=get_job_run_id(),
workspace_id=get_workspace_id(),
model_provider=model_provider,
endpoint_name=endpoint_name,
error_code=error_code,
error_message=error_message,
)
@dataclass
|
InvokeDatabricksModelOutput
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/code_structure/tutorial001_py310/models.py
|
{
"start": 268,
"end": 602
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
team: Team | None = Relationship(back_populates="heroes")
|
Hero
|
python
|
explosion__spaCy
|
spacy/lang/bg/__init__.py
|
{
"start": 399,
"end": 805
}
|
class ____(BaseDefaults):
lex_attr_getters = dict(Language.Defaults.lex_attr_getters)
lex_attr_getters[LANG] = lambda text: "bg"
lex_attr_getters.update(LEX_ATTRS)
stop_words = STOP_WORDS
tokenizer_exceptions = update_exc(BASE_EXCEPTIONS, TOKENIZER_EXCEPTIONS)
suffixes = COMBINING_DIACRITICS_TOKENIZER_SUFFIXES
infixes = COMBINING_DIACRITICS_TOKENIZER_INFIXES
|
BulgarianDefaults
|
python
|
keras-team__keras
|
keras/src/metrics/accuracy_metrics.py
|
{
"start": 15166,
"end": 18274
}
|
class ____(reduction_metrics.MeanMetricWrapper):
"""Computes how often integer targets are in the top `K` predictions.
By default, the arguments expected by `update_state()` are:
- `y_true`: a tensor of shape `(batch_size)` representing indices of true
categories.
- `y_pred`: a tensor of shape `(batch_size, num_categories)` containing the
scores for each sample for all possible categories.
With `from_sorted_ids=True`, the arguments expected by `update_state` are:
- `y_true`: a tensor of shape `(batch_size)` representing indices or IDs of
true categories.
- `y_pred`: a tensor of shape `(batch_size, N)` containing the indices or
IDs of the top `N` categories sorted in order from highest score to
lowest score. `N` must be greater or equal to `k`.
The `from_sorted_ids=True` option can be more efficient when the set of
categories is very large and the model has an optimized way to retrieve the
top ones either without scoring or without maintaining the scores for all
the possible categories.
Args:
k: (Optional) Number of top elements to look at for computing accuracy.
Defaults to `5`.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_sorted_ids: (Optional) When `False`, the default, the tensor passed
in `y_pred` contains the unsorted scores of all possible categories.
When `True`, `y_pred` contains the indices or IDs for the top
categories.
Example:
>>> m = keras.metrics.SparseTopKCategoricalAccuracy(k=1)
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
>>> m.result()
0.5
>>> m.reset_state()
>>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
... sample_weight=[0.7, 0.3])
>>> m.result()
0.3
>>> m = keras.metrics.SparseTopKCategoricalAccuracy(k=1,
... from_sorted_ids=True)
>>> m.update_state([2, 1], [[1, 0, 3], [1, 2, 3]])
>>> m.result()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='sparse_categorical_crossentropy',
metrics=[keras.metrics.SparseTopKCategoricalAccuracy()])
```
"""
def __init__(
self,
k=5,
name="sparse_top_k_categorical_accuracy",
dtype=None,
from_sorted_ids=False,
):
super().__init__(
fn=sparse_top_k_categorical_accuracy,
name=name,
dtype=dtype,
k=k,
from_sorted_ids=from_sorted_ids,
)
self.k = k
self.from_sorted_ids = from_sorted_ids
# Metric should be maximized during optimization.
self._direction = "up"
def get_config(self):
config = {"name": self.name, "dtype": self.dtype, "k": self.k}
if self.from_sorted_ids:
config["from_sorted_ids"] = True
return config
|
SparseTopKCategoricalAccuracy
|
python
|
tensorflow__tensorflow
|
tensorflow/python/util/serialization_test.py
|
{
"start": 892,
"end": 1223
}
|
class ____(test.TestCase):
def test_serialize_shape(self):
round_trip = json.loads(json.dumps(
tensor_shape.TensorShape([None, 2, 3]),
default=serialization.get_json_type))
self.assertIs(round_trip[0], None)
self.assertEqual(round_trip[1], 2)
if __name__ == "__main__":
test.main()
|
SerializationTests
|
python
|
ansible__ansible
|
lib/ansible/plugins/action/dnf.py
|
{
"start": 392,
"end": 3664
}
|
class ____(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
# Carry-over concept from the package action plugin
if 'use' in self._task.args and 'use_backend' in self._task.args:
raise AnsibleActionFail("parameters are mutually exclusive: ('use', 'use_backend')")
module = self._task.args.get('use', self._task.args.get('use_backend', 'auto'))
if module in {'yum', 'auto'}:
try:
# if we delegate, we should use delegated host's facts
expr = "hostvars[delegate_to].ansible_facts.pkg_mgr" if self._task.delegate_to else "ansible_facts.pkg_mgr"
module = self._templar.resolve_variable_expression(expr, local_variables=dict(delegate_to=self._task.delegate_to))
except Exception:
pass # could not get it from template!
if module not in VALID_BACKENDS:
facts = self._execute_module(
module_name="ansible.legacy.setup", module_args=dict(filter="ansible_pkg_mgr", gather_subset="!all"),
task_vars=task_vars)
if facts.get("failed", False):
raise AnsibleActionFail(
f"Failed to fetch ansible_pkg_mgr to determine the dnf action backend: {facts.get('msg')}",
result=facts,
)
display.debug("Facts %s" % facts)
module = facts.get("ansible_facts", {}).get("ansible_pkg_mgr", "auto")
if (not self._task.delegate_to or self._task.delegate_facts) and module != 'auto':
result['ansible_facts'] = {'pkg_mgr': module}
if module not in VALID_BACKENDS:
result.update(
{
'failed': True,
'msg': ("Could not detect which major revision of dnf is in use, which is required to determine module backend.",
"You should manually specify use_backend to tell the module whether to use the dnf4 or dnf5 backend})"),
}
)
else:
if module in {"yum4", "dnf4"}:
module = "dnf"
# eliminate collisions with collections search while still allowing local override
module = 'ansible.legacy.' + module
if not self._shared_loader_obj.module_loader.has_plugin(module):
result.update({'failed': True, 'msg': "Could not find a dnf module backend for %s." % module})
else:
new_module_args = self._task.args.copy()
if 'use_backend' in new_module_args:
del new_module_args['use_backend']
if 'use' in new_module_args:
del new_module_args['use']
display.vvvv("Running %s as the backend for the dnf action plugin" % module)
result.update(self._execute_module(
module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val))
return result
|
ActionModule
|
python
|
scikit-learn__scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
{
"start": 2793,
"end": 55651
}
|
class ____:
def __init__(
self,
rng,
n_samples=200,
n_components=2,
n_features=2,
scale=50,
dtype=np.float64,
):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components).astype(dtype)
self.weights = self.weights.astype(dtype) / self.weights.sum()
self.means = rng.rand(n_components, n_features).astype(dtype) * scale
self.covariances = {
"spherical": 0.5 + rng.rand(n_components).astype(dtype),
"diag": (0.5 + rng.rand(n_components, n_features).astype(dtype)) ** 2,
"tied": make_spd_matrix(n_features, random_state=rng).astype(dtype),
"full": np.array(
[
make_spd_matrix(n_features, random_state=rng).astype(dtype) * 0.5
for _ in range(n_components)
]
),
}
self.precisions = {
"spherical": 1.0 / self.covariances["spherical"],
"diag": 1.0 / self.covariances["diag"],
"tied": linalg.inv(self.covariances["tied"]),
"full": np.array(
[linalg.inv(covariance) for covariance in self.covariances["full"]]
),
}
self.X = dict(
zip(
COVARIANCE_TYPE,
[
generate_data(
n_samples,
n_features,
self.weights,
self.means,
self.covariances,
covar_type,
dtype=dtype,
)
for covar_type in COVARIANCE_TYPE
],
)
)
self.Y = np.hstack(
[
np.full(int(np.round(w * n_samples)), k, dtype=int)
for k, w in enumerate(self.weights)
]
)
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = "full", "random"
gmm = GaussianMixture(
n_components=n_components,
tol=tol,
n_init=n_init,
max_iter=max_iter,
reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params,
).fit(X)
assert gmm.n_components == n_components
assert gmm.covariance_type == covariance_type
assert gmm.tol == tol
assert gmm.reg_covar == reg_covar
assert gmm.max_iter == max_iter
assert gmm.n_init == n_init
assert gmm.init_params == init_params
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X["full"]
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
msg = re.escape(
"The parameter 'weights' should have the shape of "
f"({n_components},), but got {weights_bad_shape.shape}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
msg = re.escape(
"The parameter 'weights' should be in the range [0, 1], but got"
f" max value {np.min(weights_bad_range):.5f}, "
f"min value {np.max(weights_bad_range):.5f}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
msg = re.escape(
"The parameter 'weights' should be normalized, "
f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixture(weights_init=weights, n_components=n_components)
g.fit(X)
assert_array_equal(weights, g.weights_init)
def test_check_means():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
X = rand_data.X["full"]
g = GaussianMixture(n_components=n_components)
# Check means bad shape
means_bad_shape = rng.rand(n_components + 1, n_features)
g.means_init = means_bad_shape
msg = "The parameter 'means' should have the shape of "
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check good means matrix
means = rand_data.means
g.means_init = means
g.fit(X)
assert_array_equal(means, g.means_init)
def test_check_precisions():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components, n_features = rand_data.n_components, rand_data.n_features
# Define the bad precisions for each covariance_type
precisions_bad_shape = {
"full": np.ones((n_components + 1, n_features, n_features)),
"tied": np.ones((n_features + 1, n_features + 1)),
"diag": np.ones((n_components + 1, n_features)),
"spherical": np.ones((n_components + 1)),
}
# Define not positive-definite precisions
precisions_not_pos = np.ones((n_components, n_features, n_features))
precisions_not_pos[0] = np.eye(n_features)
precisions_not_pos[0, 0, 0] = -1.0
precisions_not_positive = {
"full": precisions_not_pos,
"tied": precisions_not_pos[0],
"diag": np.full((n_components, n_features), -1.0),
"spherical": np.full(n_components, -1.0),
}
not_positive_errors = {
"full": "symmetric, positive-definite",
"tied": "symmetric, positive-definite",
"diag": "positive",
"spherical": "positive",
}
for covar_type in COVARIANCE_TYPE:
X = RandomData(rng).X[covar_type]
g = GaussianMixture(
n_components=n_components, covariance_type=covar_type, random_state=rng
)
# Check precisions with bad shapes
g.precisions_init = precisions_bad_shape[covar_type]
msg = f"The parameter '{covar_type} precision' should have the shape of"
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check not positive precisions
g.precisions_init = precisions_not_positive[covar_type]
msg = f"'{covar_type} precision' should be {not_positive_errors[covar_type]}"
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check the correct init of precisions_init
g.precisions_init = rand_data.precisions[covar_type]
g.fit(X)
assert_array_equal(rand_data.precisions[covar_type], g.precisions_init)
def test_suffstat_sk_full():
# compare the precision matrix computed from the
# EmpiricalCovariance.covariance fitted on X*sqrt(resp)
# with _sufficient_sk_full, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
# special case 1, assuming data is "centered"
X = rng.rand(n_samples, n_features)
resp = rng.rand(n_samples, 1)
X_resp = np.sqrt(resp) * X
nk = np.array([n_samples])
xk = np.zeros((1, n_features))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=True)
ecov.fit(X_resp)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, "full")
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
# special case 2, assuming resp are all ones
resp = np.ones((n_samples, 1))
nk = np.array([n_samples])
xk = X.mean(axis=0).reshape((1, -1))
covars_pred = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance(assume_centered=False)
ecov.fit(X)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm="frobenius"), 0)
assert_almost_equal(ecov.error_norm(covars_pred[0], norm="spectral"), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred, "full")
precs_pred = np.array([np.dot(prec, prec.T) for prec in precs_chol_pred])
precs_est = np.array([linalg.inv(cov) for cov in covars_pred])
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_tied():
# use equation Nk * Sk / N = S_tied
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_full = (
np.sum(nk[:, np.newaxis, np.newaxis] * covars_pred_full, 0) / n_samples
)
covars_pred_tied = _estimate_gaussian_covariances_tied(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
ecov.covariance_ = covars_pred_full
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="frobenius"), 0)
assert_almost_equal(ecov.error_norm(covars_pred_tied, norm="spectral"), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_tied, "tied")
precs_pred = np.dot(precs_chol_pred, precs_chol_pred.T)
precs_est = linalg.inv(covars_pred_tied)
assert_array_almost_equal(precs_est, precs_pred)
def test_suffstat_sk_diag():
# test against 'full' case
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 500, 2, 2
resp = rng.rand(n_samples, n_components)
resp = resp / resp.sum(axis=1)[:, np.newaxis]
X = rng.rand(n_samples, n_features)
nk = resp.sum(axis=0)
xk = np.dot(resp.T, X) / nk[:, np.newaxis]
covars_pred_full = _estimate_gaussian_covariances_full(resp, X, nk, xk, 0)
covars_pred_diag = _estimate_gaussian_covariances_diag(resp, X, nk, xk, 0)
ecov = EmpiricalCovariance()
for cov_full, cov_diag in zip(covars_pred_full, covars_pred_diag):
ecov.covariance_ = np.diag(np.diag(cov_full))
cov_diag = np.diag(cov_diag)
assert_almost_equal(ecov.error_norm(cov_diag, norm="frobenius"), 0)
assert_almost_equal(ecov.error_norm(cov_diag, norm="spectral"), 0)
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_diag, "diag")
assert_almost_equal(covars_pred_diag, 1.0 / precs_chol_pred**2)
def test_gaussian_suffstat_sk_spherical(global_dtype):
# computing spherical covariance equals the variance of one-dimension
# data after flattening, n_components=1
rng = np.random.RandomState(0)
n_samples, n_features = 500, 2
X = rng.rand(n_samples, n_features).astype(global_dtype)
X = X - X.mean()
resp = np.ones((n_samples, 1), dtype=global_dtype)
nk = np.array([n_samples], dtype=global_dtype)
xk = X.mean()
covars_pred_spherical = _estimate_gaussian_covariances_spherical(resp, X, nk, xk, 0)
covars_pred_spherical2 = np.dot(X.flatten().T, X.flatten()) / (
n_features * n_samples
)
assert_almost_equal(covars_pred_spherical, covars_pred_spherical2)
assert covars_pred_spherical.dtype == global_dtype
# check the precision computation
precs_chol_pred = _compute_precision_cholesky(covars_pred_spherical, "spherical")
assert_almost_equal(covars_pred_spherical, 1.0 / precs_chol_pred**2)
assert precs_chol_pred.dtype == global_dtype
def test_compute_log_det_cholesky(global_dtype):
n_features = 2
rand_data = RandomData(np.random.RandomState(0), dtype=global_dtype)
for covar_type in COVARIANCE_TYPE:
covariance = rand_data.covariances[covar_type]
if covar_type == "full":
predicted_det = np.array([linalg.det(cov) for cov in covariance])
elif covar_type == "tied":
predicted_det = linalg.det(covariance)
elif covar_type == "diag":
predicted_det = np.array([np.prod(cov) for cov in covariance])
elif covar_type == "spherical":
predicted_det = covariance**n_features
# We compute the cholesky decomposition of the covariance matrix
assert covariance.dtype == global_dtype
expected_det = _compute_log_det_cholesky(
_compute_precision_cholesky(covariance, covar_type),
covar_type,
n_features=n_features,
)
assert_array_almost_equal(expected_det, -0.5 * np.log(predicted_det))
assert expected_det.dtype == global_dtype
def _naive_lmvnpdf_diag(X, means, covars):
resp = np.empty((len(X), len(means)))
stds = np.sqrt(covars)
for i, (mean, std) in enumerate(zip(means, stds)):
resp[:, i] = stats.norm.logpdf(X, mean, std).sum(axis=1)
return resp
def test_gaussian_mixture_log_probabilities():
from sklearn.mixture._gaussian_mixture import _estimate_log_gaussian_prob
# test against with _naive_lmvnpdf_diag
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_samples = 500
n_features = rand_data.n_features
n_components = rand_data.n_components
means = rand_data.means
covars_diag = rng.rand(n_components, n_features)
X = rng.rand(n_samples, n_features)
log_prob_naive = _naive_lmvnpdf_diag(X, means, covars_diag)
# full covariances
precs_full = np.array([np.diag(1.0 / np.sqrt(x)) for x in covars_diag])
log_prob = _estimate_log_gaussian_prob(X, means, precs_full, "full")
assert_array_almost_equal(log_prob, log_prob_naive)
# diag covariances
precs_chol_diag = 1.0 / np.sqrt(covars_diag)
log_prob = _estimate_log_gaussian_prob(X, means, precs_chol_diag, "diag")
assert_array_almost_equal(log_prob, log_prob_naive)
# tied
covars_tied = np.array([x for x in covars_diag]).mean(axis=0)
precs_tied = np.diag(np.sqrt(1.0 / covars_tied))
log_prob_naive = _naive_lmvnpdf_diag(X, means, [covars_tied] * n_components)
log_prob = _estimate_log_gaussian_prob(X, means, precs_tied, "tied")
assert_array_almost_equal(log_prob, log_prob_naive)
# spherical
covars_spherical = covars_diag.mean(axis=1)
precs_spherical = 1.0 / np.sqrt(covars_diag.mean(axis=1))
log_prob_naive = _naive_lmvnpdf_diag(
X, means, [[k] * n_features for k in covars_spherical]
)
log_prob = _estimate_log_gaussian_prob(X, means, precs_spherical, "spherical")
assert_array_almost_equal(log_prob, log_prob_naive)
# skip tests on weighted_log_probabilities, log_weights
def test_gaussian_mixture_estimate_log_prob_resp():
# test whether responsibilities are normalized
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_samples = rand_data.n_samples
n_features = rand_data.n_features
n_components = rand_data.n_components
X = rng.rand(n_samples, n_features)
for covar_type in COVARIANCE_TYPE:
weights = rand_data.weights
means = rand_data.means
precisions = rand_data.precisions[covar_type]
g = GaussianMixture(
n_components=n_components,
random_state=rng,
weights_init=weights,
means_init=means,
precisions_init=precisions,
covariance_type=covar_type,
)
g.fit(X)
resp = g.predict_proba(X)
assert_array_almost_equal(resp.sum(axis=1), np.ones(n_samples))
assert_array_equal(g.weights_init, weights)
assert_array_equal(g.means_init, means)
assert_array_equal(g.precisions_init, precisions)
def test_gaussian_mixture_predict_predict_proba():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(
n_components=rand_data.n_components,
random_state=rng,
weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type,
)
        # Check that the NotFittedError message is raised if we don't call fit first
msg = (
"This GaussianMixture instance is not fitted yet. Call 'fit' "
"with appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=msg):
g.predict(X)
g.fit(X)
Y_pred = g.predict(X)
Y_pred_proba = g.predict_proba(X).argmax(axis=1)
assert_array_equal(Y_pred, Y_pred_proba)
assert adjusted_rand_score(Y, Y_pred) > 0.95
@pytest.mark.filterwarnings("ignore:.*did not converge.*")
@pytest.mark.parametrize(
"seed, max_iter, tol",
[
(0, 2, 1e-7), # strict non-convergence
(1, 2, 1e-1), # loose non-convergence
(3, 300, 1e-7), # strict convergence
(4, 300, 1e-1), # loose convergence
],
)
def test_gaussian_mixture_fit_predict(seed, max_iter, tol, global_dtype):
rng = np.random.RandomState(seed)
rand_data = RandomData(rng, dtype=global_dtype)
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
Y = rand_data.Y
g = GaussianMixture(
n_components=rand_data.n_components,
random_state=rng,
weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions[covar_type],
covariance_type=covar_type,
max_iter=max_iter,
tol=tol,
)
# check if fit_predict(X) is equivalent to fit(X).predict(X)
f = copy.deepcopy(g)
Y_pred1 = f.fit(X).predict(X)
Y_pred2 = g.fit_predict(X)
assert_array_equal(Y_pred1, Y_pred2)
assert adjusted_rand_score(Y, Y_pred2) > 0.95
assert g.means_.dtype == global_dtype
assert g.weights_.dtype == global_dtype
assert g.precisions_.dtype == global_dtype
def test_gaussian_mixture_fit_predict_n_init():
# Check that fit_predict is equivalent to fit.predict, when n_init > 1
X = np.random.RandomState(0).randn(1000, 5)
gm = GaussianMixture(n_components=5, n_init=5, random_state=0)
y_pred1 = gm.fit_predict(X)
y_pred2 = gm.predict(X)
assert_array_equal(y_pred1, y_pred2)
def test_gaussian_mixture_fit(global_dtype):
# recover the ground truth
rng = np.random.RandomState(0)
rand_data = RandomData(rng, dtype=global_dtype)
n_features = rand_data.n_features
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(
n_components=n_components,
n_init=20,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
)
g.fit(X)
# needs more data to pass the test with rtol=1e-7
assert_allclose(
np.sort(g.weights_), np.sort(rand_data.weights), rtol=0.1, atol=1e-2
)
arg_idx1 = g.means_[:, 0].argsort()
arg_idx2 = rand_data.means[:, 0].argsort()
assert_allclose(
g.means_[arg_idx1], rand_data.means[arg_idx2], rtol=0.1, atol=1e-2
)
if covar_type == "full":
prec_pred = g.precisions_
prec_test = rand_data.precisions["full"]
elif covar_type == "tied":
prec_pred = np.array([g.precisions_] * n_components)
prec_test = np.array([rand_data.precisions["tied"]] * n_components)
elif covar_type == "spherical":
prec_pred = np.array([np.eye(n_features) * c for c in g.precisions_])
prec_test = np.array(
[np.eye(n_features) * c for c in rand_data.precisions["spherical"]]
)
elif covar_type == "diag":
prec_pred = np.array([np.diag(d) for d in g.precisions_])
prec_test = np.array([np.diag(d) for d in rand_data.precisions["diag"]])
arg_idx1 = np.trace(prec_pred, axis1=1, axis2=2).argsort()
arg_idx2 = np.trace(prec_test, axis1=1, axis2=2).argsort()
for k, h in zip(arg_idx1, arg_idx2):
ecov = EmpiricalCovariance()
ecov.covariance_ = prec_test[h]
            # the accuracy depends on the amount of data and the randomness of rng
assert_allclose(ecov.error_norm(prec_pred[k]), 0, atol=0.15)
assert g.means_.dtype == global_dtype
assert g.covariances_.dtype == global_dtype
assert g.precisions_.dtype == global_dtype
def test_gaussian_mixture_fit_best_params():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
n_init = 10
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(
n_components=n_components,
n_init=1,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
)
ll = []
for _ in range(n_init):
g.fit(X)
ll.append(g.score(X))
ll = np.array(ll)
g_best = GaussianMixture(
n_components=n_components,
n_init=n_init,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
)
g_best.fit(X)
assert_almost_equal(ll.min(), g_best.score(X))
def test_gaussian_mixture_fit_convergence_warning():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=1)
n_components = rand_data.n_components
max_iter = 1
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(
n_components=n_components,
n_init=1,
max_iter=max_iter,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
)
msg = (
"Best performing initialization did not converge. "
"Try different init parameters, or increase max_iter, "
"tol, or check for degenerate data."
)
with pytest.warns(ConvergenceWarning, match=msg):
g.fit(X)
def test_multiple_init():
    # Test that multiple inits do not perform much worse than a single one
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
for cv_type in COVARIANCE_TYPE:
train1 = (
GaussianMixture(
n_components=n_components, covariance_type=cv_type, random_state=0
)
.fit(X)
.score(X)
)
train2 = (
GaussianMixture(
n_components=n_components,
covariance_type=cv_type,
random_state=0,
n_init=5,
)
.fit(X)
.score(X)
)
assert train2 >= train1
def test_gaussian_mixture_n_parameters():
# Test that the right number of parameters is estimated
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 5, 2
X = rng.randn(n_samples, n_features)
n_params = {"spherical": 13, "diag": 21, "tied": 26, "full": 41}
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components, covariance_type=cv_type, random_state=rng
).fit(X)
assert g._n_parameters() == n_params[cv_type]
def test_bic_1d_1component():
    # Test that all covariance_types return the same BIC score for
    # 1-dimensional, 1-component fits.
rng = np.random.RandomState(0)
n_samples, n_dim, n_components = 100, 1, 1
X = rng.randn(n_samples, n_dim)
bic_full = (
GaussianMixture(
n_components=n_components, covariance_type="full", random_state=rng
)
.fit(X)
.bic(X)
)
for covariance_type in ["tied", "diag", "spherical"]:
bic = (
GaussianMixture(
n_components=n_components,
covariance_type=covariance_type,
random_state=rng,
)
.fit(X)
.bic(X)
)
assert_almost_equal(bic_full, bic)
def test_gaussian_mixture_aic_bic():
# Test the aic and bic criteria
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 50, 3, 2
X = rng.randn(n_samples, n_features)
# standard gaussian entropy
sgh = 0.5 * (
fast_logdet(np.cov(X.T, bias=1)) + n_features * (1 + np.log(2 * np.pi))
)
for cv_type in COVARIANCE_TYPE:
g = GaussianMixture(
n_components=n_components,
covariance_type=cv_type,
random_state=rng,
max_iter=200,
)
g.fit(X)
aic = 2 * n_samples * sgh + 2 * g._n_parameters()
bic = 2 * n_samples * sgh + np.log(n_samples) * g._n_parameters()
bound = n_features / np.sqrt(n_samples)
assert (g.aic(X) - aic) / n_samples < bound
assert (g.bic(X) - bic) / n_samples < bound
def test_gaussian_mixture_verbose():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
g = GaussianMixture(
n_components=n_components,
n_init=1,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
verbose=1,
)
h = GaussianMixture(
n_components=n_components,
n_init=1,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
verbose=2,
)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
g.fit(X)
h.fit(X)
finally:
sys.stdout = old_stdout
@pytest.mark.filterwarnings("ignore:.*did not converge.*")
@pytest.mark.parametrize("seed", (0, 1, 2))
def test_warm_start(seed):
random_state = seed
rng = np.random.RandomState(random_state)
n_samples, n_features, n_components = 500, 2, 2
X = rng.rand(n_samples, n_features)
    # Assert that warm_start gives the same result for the same number of iterations
g = GaussianMixture(
n_components=n_components,
n_init=1,
max_iter=2,
reg_covar=0,
random_state=random_state,
warm_start=False,
)
h = GaussianMixture(
n_components=n_components,
n_init=1,
max_iter=1,
reg_covar=0,
random_state=random_state,
warm_start=True,
)
g.fit(X)
score1 = h.fit(X).score(X)
score2 = h.fit(X).score(X)
assert_almost_equal(g.weights_, h.weights_)
assert_almost_equal(g.means_, h.means_)
assert_almost_equal(g.precisions_, h.precisions_)
assert score2 > score1
# Assert that by using warm_start we can converge to a good solution
g = GaussianMixture(
n_components=n_components,
n_init=1,
max_iter=5,
reg_covar=0,
random_state=random_state,
warm_start=False,
tol=1e-6,
)
h = GaussianMixture(
n_components=n_components,
n_init=1,
max_iter=5,
reg_covar=0,
random_state=random_state,
warm_start=True,
tol=1e-6,
)
g.fit(X)
assert not g.converged_
h.fit(X)
    # depending on the data there is large variability in the number of
    # refits necessary to converge, due to the complete randomness of the data
for _ in range(1000):
h.fit(X)
if h.converged_:
break
assert h.converged_
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
def test_convergence_detected_with_warm_start():
# We check that convergence is detected when warm_start=True
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X["full"]
for max_iter in (1, 2, 50):
gmm = GaussianMixture(
n_components=n_components,
warm_start=True,
max_iter=max_iter,
random_state=rng,
)
for _ in range(100):
gmm.fit(X)
if gmm.converged_:
break
assert gmm.converged_
assert max_iter >= gmm.n_iter_
def test_score(global_dtype):
covar_type = "full"
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7, dtype=global_dtype)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
assert X.dtype == global_dtype
# Check the error message if we don't call fit
gmm1 = GaussianMixture(
n_components=n_components,
n_init=1,
max_iter=1,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
)
msg = (
"This GaussianMixture instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=msg):
gmm1.score(X)
# Check score value
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
gmm1.fit(X)
assert gmm1.means_.dtype == global_dtype
assert gmm1.covariances_.dtype == global_dtype
gmm_score = gmm1.score(X)
gmm_score_proba = gmm1.score_samples(X).mean()
assert_almost_equal(gmm_score, gmm_score_proba)
assert gmm_score_proba.dtype == global_dtype
    # Check that the score increases
gmm2 = GaussianMixture(
n_components=n_components,
n_init=1,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
).fit(X)
assert gmm2.score(X) > gmm1.score(X)
def test_score_samples():
covar_type = "full"
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
# Check the error message if we don't call fit
gmm = GaussianMixture(
n_components=n_components,
n_init=1,
reg_covar=0,
random_state=rng,
covariance_type=covar_type,
)
msg = (
"This GaussianMixture instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this estimator."
)
with pytest.raises(NotFittedError, match=msg):
gmm.score_samples(X)
gmm_score_samples = gmm.fit(X).score_samples(X)
assert gmm_score_samples.shape[0] == rand_data.n_samples
def test_monotonic_likelihood():
    # We check that each step of EM without regularization monotonically
    # improves the training set likelihood
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7)
n_components = rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(
n_components=n_components,
covariance_type=covar_type,
reg_covar=0,
warm_start=True,
max_iter=1,
random_state=rng,
tol=1e-7,
)
current_log_likelihood = -np.inf
with warnings.catch_warnings():
warnings.simplefilter("ignore", ConvergenceWarning)
# Do one training iteration at a time so we can make sure that the
# training log likelihood increases after each iteration.
for _ in range(600):
prev_log_likelihood = current_log_likelihood
current_log_likelihood = gmm.fit(X).score(X)
assert current_log_likelihood >= prev_log_likelihood
if gmm.converged_:
break
assert gmm.converged_
def test_regularisation():
    # We train the GaussianMixture on degenerate data by defining two clusters
    # with zero covariance.
rng = np.random.RandomState(0)
n_samples, n_features = 10, 5
X = np.vstack(
(np.ones((n_samples // 2, n_features)), np.zeros((n_samples // 2, n_features)))
)
for covar_type in COVARIANCE_TYPE:
gmm = GaussianMixture(
n_components=n_samples,
reg_covar=0,
covariance_type=covar_type,
random_state=rng,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
msg = re.escape(
"Fitting the mixture model failed because some components have"
" ill-defined empirical covariance (for instance caused by "
"singleton or collapsed samples). Try to decrease the number "
"of components, increase reg_covar, or scale the input data."
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
gmm.set_params(reg_covar=1e-6).fit(X)
@pytest.mark.parametrize("covar_type", COVARIANCE_TYPE)
def test_fitted_precision_covariance_consistency(covar_type, global_dtype):
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7, dtype=global_dtype)
n_components = rand_data.n_components
X = rand_data.X[covar_type]
gmm = GaussianMixture(
n_components=n_components,
covariance_type=covar_type,
random_state=rng,
n_init=5,
)
gmm.fit(X)
assert gmm.precisions_.dtype == global_dtype
assert gmm.covariances_.dtype == global_dtype
if covar_type == "full":
for prec, covar in zip(gmm.precisions_, gmm.covariances_):
assert_array_almost_equal(linalg.inv(prec), covar)
elif covar_type == "tied":
assert_array_almost_equal(linalg.inv(gmm.precisions_), gmm.covariances_)
else:
assert_array_almost_equal(gmm.precisions_, 1.0 / gmm.covariances_)
def test_sample():
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=7, n_components=3)
n_features, n_components = rand_data.n_features, rand_data.n_components
for covar_type in COVARIANCE_TYPE:
X = rand_data.X[covar_type]
gmm = GaussianMixture(
n_components=n_components, covariance_type=covar_type, random_state=rng
)
        # To sample, the GaussianMixture must be fitted first
msg = "This GaussianMixture instance is not fitted"
with pytest.raises(NotFittedError, match=msg):
gmm.sample(0)
gmm.fit(X)
msg = "Invalid value for 'n_samples'"
with pytest.raises(ValueError, match=msg):
gmm.sample(0)
# Just to make sure the class samples correctly
n_samples = 20000
X_s, y_s = gmm.sample(n_samples)
for k in range(n_components):
if covar_type == "full":
assert_array_almost_equal(
gmm.covariances_[k], np.cov(X_s[y_s == k].T), decimal=1
)
elif covar_type == "tied":
assert_array_almost_equal(
gmm.covariances_, np.cov(X_s[y_s == k].T), decimal=1
)
elif covar_type == "diag":
assert_array_almost_equal(
gmm.covariances_[k], np.diag(np.cov(X_s[y_s == k].T)), decimal=1
)
else:
assert_array_almost_equal(
gmm.covariances_[k],
np.var(X_s[y_s == k] - gmm.means_[k]),
decimal=1,
)
means_s = np.array([np.mean(X_s[y_s == k], 0) for k in range(n_components)])
assert_array_almost_equal(gmm.means_, means_s, decimal=1)
# Check shapes of sampled data, see
# https://github.com/scikit-learn/scikit-learn/issues/7701
assert X_s.shape == (n_samples, n_features)
for sample_size in range(1, 100):
X_s, _ = gmm.sample(sample_size)
assert X_s.shape == (sample_size, n_features)
@pytest.mark.filterwarnings("ignore::sklearn.exceptions.ConvergenceWarning")
def test_init():
    # We check that increasing n_init yields a solution that is at least as good
for random_state in range(15):
rand_data = RandomData(
np.random.RandomState(random_state), n_samples=50, scale=1
)
n_components = rand_data.n_components
X = rand_data.X["full"]
gmm1 = GaussianMixture(
n_components=n_components, n_init=1, max_iter=1, random_state=random_state
).fit(X)
gmm2 = GaussianMixture(
n_components=n_components, n_init=10, max_iter=1, random_state=random_state
).fit(X)
assert gmm2.lower_bound_ >= gmm1.lower_bound_
def test_gaussian_mixture_setting_best_params():
"""`GaussianMixture`'s best_parameters, `n_iter_` and `lower_bound_`
must be set appropriately in the case of divergence.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/18216
"""
rnd = np.random.RandomState(0)
n_samples = 30
X = rnd.uniform(size=(n_samples, 3))
# following initialization parameters were found to lead to divergence
means_init = np.array(
[
[0.670637869618158, 0.21038256107384043, 0.12892629765485303],
[0.09394051075844147, 0.5759464955561779, 0.929296197576212],
[0.5033230372781258, 0.9569852381759425, 0.08654043447295741],
[0.18578301420435747, 0.5531158970919143, 0.19388943970532435],
[0.4548589928173794, 0.35182513658825276, 0.568146063202464],
[0.609279894978321, 0.7929063819678847, 0.9620097270828052],
]
)
precisions_init = np.array(
[
999999.999604483,
999999.9990869573,
553.7603944542167,
204.78596008931834,
15.867423501783637,
85.4595728389735,
]
)
weights_init = [
0.03333333333333341,
0.03333333333333341,
0.06666666666666674,
0.06666666666666674,
0.7000000000000001,
0.10000000000000007,
]
gmm = GaussianMixture(
covariance_type="spherical",
reg_covar=0,
means_init=means_init,
weights_init=weights_init,
random_state=rnd,
n_components=len(weights_init),
precisions_init=precisions_init,
max_iter=1,
)
# ensure that no error is thrown during fit
gmm.fit(X)
# check that the fit did not converge
assert not gmm.converged_
# check that parameters are set for gmm
for attr in [
"weights_",
"means_",
"covariances_",
"precisions_cholesky_",
"n_iter_",
"lower_bound_",
"lower_bounds_",
]:
assert hasattr(gmm, attr)
@pytest.mark.parametrize(
"init_params", ["random", "random_from_data", "k-means++", "kmeans"]
)
def test_init_means_not_duplicated(init_params, global_random_seed):
    # Check that all initialisations provide non-duplicated starting means
rng = np.random.RandomState(global_random_seed)
rand_data = RandomData(rng, scale=5)
n_components = rand_data.n_components
X = rand_data.X["full"]
gmm = GaussianMixture(
n_components=n_components, init_params=init_params, random_state=rng, max_iter=0
)
gmm.fit(X)
means = gmm.means_
for i_mean, j_mean in itertools.combinations(means, r=2):
assert not np.allclose(i_mean, j_mean)
@pytest.mark.parametrize(
"init_params", ["random", "random_from_data", "k-means++", "kmeans"]
)
def test_means_for_all_inits(init_params, global_random_seed, global_dtype):
# Check fitted means properties for all initializations
rng = np.random.RandomState(global_random_seed)
rand_data = RandomData(rng, scale=5, dtype=global_dtype)
n_components = rand_data.n_components
X = rand_data.X["full"]
gmm = GaussianMixture(
n_components=n_components, init_params=init_params, random_state=rng
)
gmm.fit(X)
assert gmm.means_.shape == (n_components, X.shape[1])
assert np.all(X.min(axis=0) <= gmm.means_)
assert np.all(gmm.means_ <= X.max(axis=0))
assert gmm.converged_
assert gmm.means_.dtype == global_dtype
assert gmm.covariances_.dtype == global_dtype
assert gmm.weights_.dtype == global_dtype
def test_max_iter_zero():
    # Check that max_iter=0 returns the initialisation as expected:
    # pick arbitrary initial means and check they are returned unchanged
rng = np.random.RandomState(0)
rand_data = RandomData(rng, scale=5)
n_components = rand_data.n_components
X = rand_data.X["full"]
means_init = [[20, 30], [30, 25]]
gmm = GaussianMixture(
n_components=n_components,
random_state=rng,
means_init=means_init,
tol=1e-06,
max_iter=0,
)
gmm.fit(X)
assert_allclose(gmm.means_, means_init)
def test_gaussian_mixture_precisions_init_diag(global_dtype):
"""Check that we properly initialize `precision_cholesky_` when we manually
provide the precision matrix.
In this regard, we check the consistency between estimating the precision
matrix and providing the same precision matrix as initialization. It should
lead to the same results with the same number of iterations.
If the initialization is wrong then the number of iterations will increase.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/16944
"""
# generate a toy dataset
n_samples = 300
rng = np.random.RandomState(0)
shifted_gaussian = rng.randn(n_samples, 2) + np.array([20, 20])
C = np.array([[0.0, -0.7], [3.5, 0.7]])
stretched_gaussian = np.dot(rng.randn(n_samples, 2), C)
X = np.vstack([shifted_gaussian, stretched_gaussian]).astype(global_dtype)
# common parameters to check the consistency of precision initialization
n_components, covariance_type, reg_covar, random_state = 2, "diag", 1e-6, 0
# execute the manual initialization to compute the precision matrix:
# - run KMeans to have an initial guess
# - estimate the covariance
# - compute the precision matrix from the estimated covariance
resp = np.zeros((X.shape[0], n_components)).astype(global_dtype)
label = (
KMeans(n_clusters=n_components, n_init=1, random_state=random_state)
.fit(X)
.labels_
)
resp[np.arange(X.shape[0]), label] = 1
_, _, covariance = _estimate_gaussian_parameters(
X, resp, reg_covar=reg_covar, covariance_type=covariance_type
)
assert covariance.dtype == global_dtype
precisions_init = 1 / covariance
gm_with_init = GaussianMixture(
n_components=n_components,
covariance_type=covariance_type,
reg_covar=reg_covar,
precisions_init=precisions_init,
random_state=random_state,
).fit(X)
assert gm_with_init.means_.dtype == global_dtype
assert gm_with_init.covariances_.dtype == global_dtype
assert gm_with_init.precisions_cholesky_.dtype == global_dtype
gm_without_init = GaussianMixture(
n_components=n_components,
covariance_type=covariance_type,
reg_covar=reg_covar,
random_state=random_state,
).fit(X)
assert gm_without_init.means_.dtype == global_dtype
assert gm_without_init.covariances_.dtype == global_dtype
assert gm_without_init.precisions_cholesky_.dtype == global_dtype
assert gm_without_init.n_iter_ == gm_with_init.n_iter_
assert_allclose(
gm_with_init.precisions_cholesky_, gm_without_init.precisions_cholesky_
)
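# Illustrative sketch (not part of the original test suite): for the "diag"
# convention exercised above, the Cholesky factor of the precision is simply
# the element-wise 1/sqrt of the covariance, so providing precisions_init equal
# to 1/covariance reproduces the estimated precisions_cholesky_ exactly.
def _sketch_diag_precision_cholesky_relation():
    cov_diag = np.array([[0.5, 2.0], [1.0, 4.0]])  # (n_components, n_features)
    prec_chol = 1.0 / np.sqrt(cov_diag)
    # same relation asserted earlier for the "diag" case
    assert_allclose(cov_diag, 1.0 / prec_chol**2)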
def _generate_data(seed, n_samples, n_features, n_components, dtype=np.float64):
"""Randomly generate samples and responsibilities."""
rs = np.random.RandomState(seed)
X = rs.random_sample((n_samples, n_features)).astype(dtype)
resp = rs.random_sample((n_samples, n_components)).astype(dtype)
resp /= resp.sum(axis=1)[:, np.newaxis]
return X, resp
def _calculate_precisions(X, resp, covariance_type):
"""Calculate precision matrix of X and its Cholesky decomposition
for the given covariance type.
"""
reg_covar = 1e-6
weights, means, covariances = _estimate_gaussian_parameters(
X, resp, reg_covar, covariance_type
)
precisions_cholesky = _compute_precision_cholesky(covariances, covariance_type)
_, n_components = resp.shape
# Instantiate a `GaussianMixture` model in order to use its
# `_set_parameters` method to return the `precisions_` and
# `precisions_cholesky_` from matching the `covariance_type`
# provided.
gmm = GaussianMixture(n_components=n_components, covariance_type=covariance_type)
params = (weights, means, covariances, precisions_cholesky)
gmm._set_parameters(params)
return gmm.precisions_, gmm.precisions_cholesky_
@pytest.mark.parametrize("covariance_type", COVARIANCE_TYPE)
def test_gaussian_mixture_precisions_init(
covariance_type, global_random_seed, global_dtype
):
"""Non-regression test for #26415."""
X, resp = _generate_data(
seed=global_random_seed,
n_samples=100,
n_features=3,
n_components=4,
dtype=global_dtype,
)
precisions_init, desired_precisions_cholesky = _calculate_precisions(
X, resp, covariance_type
)
assert precisions_init.dtype == global_dtype
assert desired_precisions_cholesky.dtype == global_dtype
gmm = GaussianMixture(
covariance_type=covariance_type, precisions_init=precisions_init
)
gmm._initialize(X, resp)
actual_precisions_cholesky = gmm.precisions_cholesky_
assert_allclose(actual_precisions_cholesky, desired_precisions_cholesky)
def test_gaussian_mixture_single_component_stable():
"""
Non-regression test for #23032 ensuring 1-component GM works on only a
few samples.
"""
rng = np.random.RandomState(0)
X = rng.multivariate_normal(np.zeros(2), np.identity(2), size=3)
gm = GaussianMixture(n_components=1)
gm.fit(X).sample()
def test_gaussian_mixture_all_init_does_not_estimate_gaussian_parameters(
monkeypatch,
global_random_seed,
):
"""When all init parameters are provided, the Gaussian parameters
are not estimated.
Non-regression test for gh-26015.
"""
mock = Mock(side_effect=_estimate_gaussian_parameters)
monkeypatch.setattr(
sklearn.mixture._gaussian_mixture, "_estimate_gaussian_parameters", mock
)
rng = np.random.RandomState(global_random_seed)
rand_data = RandomData(rng)
gm = GaussianMixture(
n_components=rand_data.n_components,
weights_init=rand_data.weights,
means_init=rand_data.means,
precisions_init=rand_data.precisions["full"],
random_state=rng,
)
gm.fit(rand_data.X["full"])
    # The initial gaussian parameters are not estimated. They are estimated at
    # every m-step.
assert mock.call_count == gm.n_iter_
@pytest.mark.parametrize("init_params", ["random", "random_from_data"])
@pytest.mark.parametrize("covariance_type", ["full", "tied", "diag", "spherical"])
@pytest.mark.parametrize(
"array_namespace, device_, dtype",
yield_namespace_device_dtype_combinations(),
ids=_get_namespace_device_dtype_ids,
)
@pytest.mark.parametrize("use_gmm_array_constructor_arguments", [False, True])
def test_gaussian_mixture_array_api_compliance(
init_params,
covariance_type,
array_namespace,
device_,
dtype,
use_gmm_array_constructor_arguments,
):
"""Test that array api works in GaussianMixture.fit()."""
xp = _array_api_for_tests(array_namespace, device_)
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
X = rand_data.X[covariance_type]
X = X.astype(dtype)
if use_gmm_array_constructor_arguments:
additional_kwargs = {
"means_init": rand_data.means.astype(dtype),
"precisions_init": rand_data.precisions[covariance_type].astype(dtype),
"weights_init": rand_data.weights.astype(dtype),
}
else:
additional_kwargs = {}
gmm = GaussianMixture(
n_components=rand_data.n_components,
covariance_type=covariance_type,
random_state=0,
init_params=init_params,
**additional_kwargs,
)
gmm.fit(X)
X_xp = xp.asarray(X, device=device_)
with sklearn.config_context(array_api_dispatch=True):
gmm_xp = sklearn.clone(gmm)
for param_name, param_value in additional_kwargs.items():
arg_xp = xp.asarray(param_value, device=device_)
setattr(gmm_xp, param_name, arg_xp)
gmm_xp.fit(X_xp)
assert get_namespace(gmm_xp.means_)[0] == xp
assert get_namespace(gmm_xp.covariances_)[0] == xp
assert device(gmm_xp.means_) == device(X_xp)
assert device(gmm_xp.covariances_) == device(X_xp)
predict_xp = gmm_xp.predict(X_xp)
predict_proba_xp = gmm_xp.predict_proba(X_xp)
score_samples_xp = gmm_xp.score_samples(X_xp)
score_xp = gmm_xp.score(X_xp)
aic_xp = gmm_xp.aic(X_xp)
bic_xp = gmm_xp.bic(X_xp)
sample_X_xp, sample_y_xp = gmm_xp.sample(10)
results = [
predict_xp,
predict_proba_xp,
score_samples_xp,
sample_X_xp,
sample_y_xp,
]
for result in results:
assert get_namespace(result)[0] == xp
assert device(result) == device(X_xp)
for score in [score_xp, aic_xp, bic_xp]:
assert isinstance(score, float)
# Define specific rtol to make tests pass
default_rtol = 1e-4 if dtype == "float32" else 1e-7
increased_atol = 5e-4 if dtype == "float32" else 0
increased_rtol = 1e-3 if dtype == "float32" else 1e-7
# Check fitted attributes
assert_allclose(gmm.means_, _convert_to_numpy(gmm_xp.means_, xp=xp))
assert_allclose(gmm.weights_, _convert_to_numpy(gmm_xp.weights_, xp=xp))
assert_allclose(
gmm.covariances_,
_convert_to_numpy(gmm_xp.covariances_, xp=xp),
atol=increased_atol,
rtol=increased_rtol,
)
assert_allclose(
gmm.precisions_cholesky_,
_convert_to_numpy(gmm_xp.precisions_cholesky_, xp=xp),
atol=increased_atol,
rtol=increased_rtol,
)
assert_allclose(
gmm.precisions_,
_convert_to_numpy(gmm_xp.precisions_, xp=xp),
atol=increased_atol,
rtol=increased_rtol,
)
# Check methods
assert (
adjusted_rand_score(gmm.predict(X), _convert_to_numpy(predict_xp, xp=xp)) > 0.95
)
assert_allclose(
gmm.predict_proba(X),
_convert_to_numpy(predict_proba_xp, xp=xp),
rtol=increased_rtol,
atol=increased_atol,
)
assert_allclose(
gmm.score_samples(X),
_convert_to_numpy(score_samples_xp, xp=xp),
rtol=increased_rtol,
)
# comparing Python float so need explicit rtol when X has dtype float32
assert_allclose(gmm.score(X), score_xp, rtol=default_rtol)
assert_allclose(gmm.aic(X), aic_xp, rtol=default_rtol)
assert_allclose(gmm.bic(X), bic_xp, rtol=default_rtol)
sample_X, sample_y = gmm.sample(10)
# generated samples are float64 so need explicit rtol when X has dtype float32
assert_allclose(sample_X, _convert_to_numpy(sample_X_xp, xp=xp), rtol=default_rtol)
assert_allclose(sample_y, _convert_to_numpy(sample_y_xp, xp=xp))
@skip_if_array_api_compat_not_configured
@pytest.mark.parametrize("init_params", ["kmeans", "k-means++"])
@pytest.mark.parametrize(
"array_namespace, device_, dtype",
yield_namespace_device_dtype_combinations(),
ids=_get_namespace_device_dtype_ids,
)
def test_gaussian_mixture_raises_where_array_api_not_implemented(
init_params, array_namespace, device_, dtype
):
X, _ = make_blobs(
n_samples=100,
n_features=2,
centers=3,
)
gmm = GaussianMixture(
n_components=3, covariance_type="diag", init_params=init_params
)
with sklearn.config_context(array_api_dispatch=True):
with pytest.raises(
NotImplementedError,
match="Allowed `init_params`.+if 'array_api_dispatch' is enabled",
):
gmm.fit(X)
|
RandomData
|
python
|
docker__docker-py
|
tests/unit/utils_test.py
|
{
"start": 15925,
"end": 16629
}
|
class ____(unittest.TestCase):
longMessage = True
def test_convert_filters(self):
tests = [
({'dangling': True}, '{"dangling": ["true"]}'),
({'dangling': "true"}, '{"dangling": ["true"]}'),
({'exited': 0}, '{"exited": ["0"]}'),
({'exited': [0, 1]}, '{"exited": ["0", "1"]}'),
]
for filters, expected in tests:
assert convert_filters(filters) == expected
def test_decode_json_header(self):
obj = {'a': 'b', 'c': 1}
data = None
data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
decoded_data = decode_json_header(data)
assert obj == decoded_data
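# Minimal sketch (illustrative only, not the real docker.utils implementation):
# the behaviour the assertions above imply is that every filter value becomes a
# list of strings, with booleans lower-cased, and the mapping is serialized as
# JSON. Uses only the json import this module already relies on.
def _convert_filters_sketch(filters):
    result = {}
    for key, value in filters.items():
        if not isinstance(value, list):
            value = [value]
        result[key] = [
            str(v).lower() if isinstance(v, bool) else str(v) for v in value
        ]
    return json.dumps(result)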
|
UtilsTest
|
python
|
django-mptt__django-mptt
|
mptt/admin.py
|
{
"start": 1232,
"end": 4581
}
|
class ____(ModelAdmin):
"""
A basic admin class that displays tree items according to their position in
the tree. No extra editing functionality beyond what Django admin normally
offers.
"""
if IS_GRAPPELLI_INSTALLED:
change_list_template = "admin/grappelli_mptt_change_list.html"
else:
change_list_template = "admin/mptt_change_list.html"
form = MPTTAdminForm
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if (
issubclass(db_field.remote_field.model, MPTTModel)
and not isinstance(db_field, TreeForeignKey)
and db_field.name not in self.raw_id_fields
):
db = kwargs.get("using")
limit_choices_to = db_field.get_limit_choices_to()
defaults = {
"form_class": TreeNodeChoiceField,
"queryset": db_field.remote_field.model._default_manager.using(
db
).complex_filter(limit_choices_to),
"required": False,
}
defaults.update(kwargs)
kwargs = defaults
return super().formfield_for_foreignkey(db_field, request, **kwargs)
def get_ordering(self, request):
"""
Changes the default ordering for changelists to tree-order.
"""
mptt_opts = self.model._mptt_meta
return self.ordering or (mptt_opts.tree_id_attr, mptt_opts.left_attr)
def delete_selected_tree(self, modeladmin, request, queryset):
"""
Deletes multiple instances and makes sure the MPTT fields get
recalculated properly. (Because merely doing a bulk delete doesn't
trigger the post_delete hooks.)
"""
# If this is True, the confirmation page has been displayed
if request.POST.get("post"):
n = 0
with queryset.model._tree_manager.delay_mptt_updates():
for obj in queryset:
if self.has_delete_permission(request, obj):
obj_display = force_str(obj)
if django.VERSION < (5, 1):
self.log_deletion(request, obj, obj_display)
else:
LogEntry.objects.log_actions(
user_id=request.user.pk,
queryset=[obj],
action_flag=DELETION,
single_object=True,
)
obj.delete()
n += 1
self.message_user(
request, _("Successfully deleted %(count)d items.") % {"count": n}
)
# Return None to display the change list page again
return None
else:
# (ab)using the built-in action to display the confirmation page
return delete_selected(self, request, queryset)
def get_actions(self, request):
actions = super().get_actions(request)
if actions is not None and "delete_selected" in actions:
actions["delete_selected"] = (
self.delete_selected_tree,
"delete_selected",
_("Delete selected %(verbose_name_plural)s"),
)
return actions
|
MPTTModelAdmin
|
python
|
getsentry__sentry
|
src/sentry/conf/types/taskworker.py
|
{
"start": 665,
"end": 947
}
|
class ____(TypedDict):
"""The schedule definition for an individual task."""
task: str
schedule: timedelta | crontab
ScheduleConfigMap = Mapping[str, ScheduleConfig]
"""A collection of schedule configuration, usually defined in application configuration"""
|
ScheduleConfig
|
python
|
PrefectHQ__prefect
|
tests/test_tasks.py
|
{
"start": 95672,
"end": 99950
}
|
class ____:
async def test_downstream_does_not_run_if_upstream_fails(self):
@task
def fails():
raise ValueError("Fail task!")
@flow
def bar(y):
return y
@flow
def test_flow():
f = fails.submit()
b = bar(2, wait_for=[f], return_state=True)
return b
flow_state = test_flow(return_state=True)
subflow_state = await flow_state.result(raise_on_failure=False)
assert subflow_state.is_pending()
assert subflow_state.name == "NotReady"
def test_downstream_runs_if_upstream_succeeds(self):
@flow
def foo(x):
return x
@flow
def bar(y):
return y
@flow
def test_flow():
f = foo(1)
b = bar(2, wait_for=[f])
return b
assert test_flow() == 2
async def test_backend_task_inputs_includes_wait_for_tasks(self, prefect_client):
@task
def foo(x):
return x
@flow
def flow_foo(x):
return x
@flow
def test_flow():
a, b = foo.submit(1), foo.submit(2)
c = foo.submit(3)
d = flow_foo(c, wait_for=[a, b], return_state=True)
return (a, b, c, d)
a, b, c, d = test_flow()
d_subflow_run = await prefect_client.read_flow_run(d.state_details.flow_run_id)
d_virtual_task_run = await prefect_client.read_task_run(
d_subflow_run.parent_task_run_id
)
assert d_virtual_task_run.task_inputs["x"] == [
TaskRunResult(id=c.state_details.task_run_id)
], "Data passing inputs are preserved"
assert set(d_virtual_task_run.task_inputs["wait_for"]) == {
TaskRunResult(id=a.state_details.task_run_id),
TaskRunResult(id=b.state_details.task_run_id),
}, "'wait_for' included as a key with upstreams"
assert set(d_virtual_task_run.task_inputs.keys()) == {
"x",
"wait_for",
}, "No extra keys around"
async def test_subflows_run_concurrently_with_tasks(self):
@task
async def waiter_task(event, delay):
await sleep(delay)
if event.is_set():
pass
else:
raise RuntimeError("The event hasn't been set!")
@flow
async def setter_flow(event):
event.set()
return 42
@flow
async def test_flow():
e = Event()
waiter_task.submit(e, 1)
b = await setter_flow(e)
return b
assert (await test_flow()) == 42
async def test_subflows_waiting_for_tasks_can_deadlock(self):
@task
async def waiter_task(event, delay):
await sleep(delay)
if event.is_set():
pass
else:
raise RuntimeError("The event hasn't been set!")
@flow
async def setter_flow(event):
event.set()
return 42
@flow
async def test_flow():
e = Event()
f = waiter_task.submit(e, 1)
b = await setter_flow(e, wait_for=[f])
return b
flow_state = await test_flow(return_state=True)
assert flow_state.is_failed()
assert "UnfinishedRun" in flow_state.message
def test_using_wait_for_in_task_definition_raises_reserved(self):
with pytest.raises(
ReservedArgumentError, match="'wait_for' is a reserved argument name"
):
@flow
def foo(wait_for):
pass
async def test_downstream_runs_if_upstream_fails_with_allow_failure_annotation(
self,
):
@task
def fails():
raise ValueError("Fail task!")
@flow
def bar(y):
return y
@flow
def test_flow():
f = fails.submit()
b = bar(2, wait_for=[allow_failure(f)], return_state=True)
return b
flow_state = test_flow(return_state=True)
subflow_state = await flow_state.result(raise_on_failure=False)
assert await subflow_state.result() == 2
|
TestSubflowWaitForTasks
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/dotnet.py
|
{
"start": 14984,
"end": 19759
}
|
class ____(RegexLexer):
"""
For
`Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
source code.
"""
name = 'VB.net'
aliases = ['vb.net', 'vbnet']
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
'[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^\s*<.*?>', Name.Attribute),
(r'\s+', Text),
(r'\n', Text),
(r'rem\b.*?\n', Comment),
(r"'.*?\n", Comment),
(r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
r'#ExternalSource.*?\n|#End\s+ExternalSource|'
r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
Comment.Preproc),
(r'[(){}!#,.:]', Punctuation),
(r'Option\s+(Strict|Explicit|Compare)\s+'
r'(On|Off|Binary|Text)', Keyword.Declaration),
(words((
'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
'Widening', 'With', 'WithEvents', 'WriteOnly'),
prefix='(?<!\.)', suffix=r'\b'), Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
bygroups(Keyword, Text), 'funcname'),
(r'(?<!\.)(Class|Structure|Enum)(\s+)',
bygroups(Keyword, Text), 'classname'),
(r'(?<!\.)(Module|Namespace|Imports)(\s+)',
bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
r'UShort)\b', Keyword.Type),
(r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
(r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
Operator),
('"', String, 'string'),
(r'_\n', Text), # Line continuation (must be before Name)
(uni_name + '[%&@!#$]?', Name),
('#.*?#', Literal.Date),
(r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
(r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
],
'string': [
(r'""', String),
(r'"C?', String, '#pop'),
(r'[^"]+', String),
],
'dim': [
(uni_name, Name.Variable, '#pop'),
default('#pop'), # any other syntax
],
'funcname': [
(uni_name, Name.Function, '#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'namespace': [
(uni_name, Name.Namespace),
(r'\.', Name.Namespace),
default('#pop'),
],
'end': [
(r'\s+', Text),
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
Keyword, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
return 0.5
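# Hedged usage sketch (not part of the lexer itself; assumes this lexer is
# registered under the "vb.net" alias listed above and that a pygments
# distribution is importable in the running environment):
#
#     from pygments import highlight
#     from pygments.formatters import NullFormatter
#     from pygments.lexers import get_lexer_by_name
#
#     lexer = get_lexer_by_name("vb.net")
#     print(highlight("Dim x As Integer = 1\n", lexer, NullFormatter()))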
|
VbNetLexer
|
python
|
MongoEngine__mongoengine
|
mongoengine/fields.py
|
{
"start": 90071,
"end": 91789
}
|
class ____(BaseField):
"""
128-bit decimal-based floating-point field capable of emulating decimal
    rounding with exact precision. This field exposes decimal.Decimal values but
    stores them as `bson.Decimal128` behind the scenes; it is intended for
    monetary data, scientific computations, etc.
"""
DECIMAL_CONTEXT = create_decimal128_context()
def __init__(self, min_value=None, max_value=None, **kwargs):
self.min_value = min_value
self.max_value = max_value
super().__init__(**kwargs)
def to_mongo(self, value):
if value is None:
return None
if isinstance(value, Decimal128):
return value
if not isinstance(value, decimal.Decimal):
with decimal.localcontext(self.DECIMAL_CONTEXT) as ctx:
value = ctx.create_decimal(value)
return Decimal128(value)
def to_python(self, value):
if value is None:
return None
return self.to_mongo(value).to_decimal()
def validate(self, value):
if not isinstance(value, Decimal128):
try:
value = Decimal128(value)
except (TypeError, ValueError, decimal.InvalidOperation) as exc:
self.error("Could not convert value to Decimal128: %s" % exc)
if self.min_value is not None and value.to_decimal() < self.min_value:
self.error("Decimal value is too small")
if self.max_value is not None and value.to_decimal() > self.max_value:
self.error("Decimal value is too large")
def prepare_query_value(self, op, value):
return super().prepare_query_value(op, self.to_mongo(value))
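# Illustrative round-trip sketch (not used by the field itself): what to_mongo()
# persists and what to_python() hands back, using only the decimal/bson names
# already imported in this module.
def _decimal128_round_trip_sketch():
    with decimal.localcontext(create_decimal128_context()) as ctx:
        stored = Decimal128(ctx.create_decimal("19.99"))  # as stored in MongoDB
    exposed = stored.to_decimal()  # as exposed to Python code
    assert isinstance(exposed, decimal.Decimal)
    return stored, exposed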
|
Decimal128Field
|
python
|
astropy__astropy
|
astropy/io/fits/header.py
|
{
"start": 74210,
"end": 76018
}
|
class ____(_CardAccessor):
"""
A class used internally by the Header class for the Header.comments
attribute access.
This object can be used to display all the keyword comments in the Header,
or look up the comments on specific keywords. It allows all the same forms
of keyword lookup as the Header class itself, but returns comments instead
of values.
"""
def __iter__(self):
for card in self._header._cards:
yield card.comment
def __repr__(self):
"""Returns a simple list of all keywords and their comments."""
keyword_length = KEYWORD_LENGTH
for card in self._header._cards:
keyword_length = max(keyword_length, len(card.keyword))
return "\n".join(
"{:>{len}} {}".format(c.keyword, c.comment, len=keyword_length)
for c in self._header._cards
)
def __getitem__(self, item):
"""
Slices and filter strings return a new _HeaderComments containing the
returned cards. Otherwise the comment of a single card is returned.
"""
item = super().__getitem__(item)
if isinstance(item, _HeaderComments):
# The item key was a slice
return item
return item.comment
def __setitem__(self, item, comment):
"""
Set/update the comment on specified card or cards.
Slice/filter updates work similarly to how Header.__setitem__ works.
"""
if self._header._set_slice(item, comment, self):
return
# In this case, key/index errors should be raised; don't update
# comments of nonexistent cards
idx = self._header._cardindex(item)
value = self._header[idx]
self._header[idx] = (value, comment)
|
_HeaderComments
|
python
|
huggingface__transformers
|
src/transformers/models/internvl/modular_internvl.py
|
{
"start": 14839,
"end": 16414
}
|
class ____(InternVLVisionPreTrainedModel):
def __init__(self, config: InternVLVisionConfig) -> None:
super().__init__(config)
self.config = config
self.embeddings = InternVLVisionEmbeddings(config)
self.encoder = InternVLVisionEncoder(config)
self.layernorm = (
nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
) -> Union[tuple, InternVLVisionModelOutputWithPooling]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs = self.encoder(embedding_output)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
return InternVLVisionModelOutputWithPooling(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
|
InternVLVisionModel
|
python
|
pytorch__pytorch
|
torch/_inductor/ir.py
|
{
"start": 238205,
"end": 239025
}
|
class ____(ExternKernelOut):
def __init__(self, count: int, device: torch.device) -> None:
limits = torch.iinfo(torch.int64)
super().__init__(
layout=FixedLayout(
device=device,
dtype=torch.int64,
size=[count],
),
inputs=[],
constant_args=[limits.min, limits.max, [count]],
python_kernel_name="aten.randint.low_out",
# FIXME: Ideally we should only use at::_ops::randint_low_out::call here,
            # but the signature is different from at::randint_out. Again,
# we can simplify the code when only keeping an ABI-compatible version.
cpp_kernel_name="at::_ops::randint_low_out::call",
op_overload=aten.randint.low_out,
)
|
RandomSeeds
|
python
|
pypa__warehouse
|
tests/unit/admin/views/test_users.py
|
{
"start": 3953,
"end": 4084
}
|
class ____:
def test_validate(self):
form = views.UserForm()
        assert form.validate(), str(form.errors)
|
TestUserForm
|
python
|
ApeWorX__ape
|
tests/functional/test_test.py
|
{
"start": 14385,
"end": 15026
}
|
class ____:
"""
Note: Most isolation-based tests occur in `functional/test_fixtures.py`.
"""
@pytest.fixture
def registry(self):
return SnapshotRegistry()
def test_get_snapshot_id(self, registry):
actual = registry.get_snapshot_id(Scope.SESSION)
assert actual is None
def test_next_snapshots(self, registry):
actual = [x for x in registry.next_snapshots(Scope.SESSION)]
assert actual[0].scope is Scope.PACKAGE
assert actual[1].scope is Scope.MODULE
assert actual[2].scope is Scope.CLASS
assert actual[3].scope is Scope.FUNCTION
|
TestSnapshotRegistry
|
python
|
tensorflow__tensorflow
|
tensorflow/python/saved_model/model_utils/export_test.py
|
{
"start": 1422,
"end": 14261
}
|
class ____(test_util.TensorFlowTestCase):
def test_build_all_signature_defs_without_receiver_alternatives(self):
# Force the test to run in graph mode.
# This tests a deprecated v1 API that depends on graph-only functions such
# as build_tensor_info.
with ops.Graph().as_default():
receiver_tensor = array_ops.placeholder(dtypes.string)
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2":
export_output.ClassificationOutput(classes=output_2),
"head-3":
export_output.PredictOutput(outputs={"some_output_3": output_3}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
receiver_tensor, output_1),
"head-2":
signature_def_utils.classification_signature_def(
receiver_tensor, output_2, None),
"head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensor}, {"some_output_3": output_3})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_with_dict_alternatives(self):
# Force the test to run in graph mode.
# This tests a deprecated v1 API that depends on graph-only functions such
# as build_tensor_info.
with ops.Graph().as_default():
receiver_tensor = array_ops.placeholder(dtypes.string)
receiver_tensors_alternative_1 = {
"foo": array_ops.placeholder(dtypes.int64),
"bar": array_ops.sparse_placeholder(dtypes.float32)
}
unfed_input = array_ops.placeholder(dtypes.bool)
receiver_tensors_alternative_2 = {"unfed": unfed_input}
receiver_tensors_alternatives = {
"other": receiver_tensors_alternative_1,
"with_unfed_input": receiver_tensors_alternative_2
}
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
output_4 = unfed_input
output_5 = math_ops.logical_not(unfed_input)
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2":
export_output.ClassificationOutput(classes=output_2),
"head-3":
export_output.PredictOutput(outputs={"some_output_3": output_3}),
"head-4":
export_output.PredictOutput(outputs={"some_output_4": output_4}),
"head-5":
export_output.PredictOutput(outputs={"some_output_5": output_5}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, receiver_tensors_alternatives)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
receiver_tensor, output_1),
"head-2":
signature_def_utils.classification_signature_def(
receiver_tensor, output_2, None),
"head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensor}, {"some_output_3": output_3}),
"other:head-3":
signature_def_utils.predict_signature_def(
receiver_tensors_alternative_1, {"some_output_3": output_3}),
# Note that the alternatives 'other:serving_default' and
# 'other:head-2' are invalid, because regression and classification
# signatures must take a single string input. Here we verify that
# these invalid signatures are not included in the export_utils.
# Similarly, we verify that 'head-4' and 'head-5', which depend on an
# input that is not being fed as a receiver tensor, are also omitted.
# All the three heads are present when that input is fed, however:
"with_unfed_input:head-3":
signature_def_utils.predict_signature_def(
receiver_tensors_alternative_2, {"some_output_3": output_3}),
"with_unfed_input:head-4":
signature_def_utils.predict_signature_def(
receiver_tensors_alternative_2, {"some_output_4": output_4}),
"with_unfed_input:head-5":
signature_def_utils.predict_signature_def(
receiver_tensors_alternative_2, {"some_output_5": output_5})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_with_single_alternatives(self):
# Force the test to run in graph mode.
# This tests a deprecated v1 API that depends on graph-only functions such
# as build_tensor_info.
with ops.Graph().as_default():
receiver_tensor = array_ops.placeholder(dtypes.string)
receiver_tensors_alternative_1 = array_ops.placeholder(dtypes.int64)
receiver_tensors_alternative_2 = array_ops.sparse_placeholder(
dtypes.float32)
# Note we are passing single Tensors as values of
# receiver_tensors_alternatives, where normally that is a dict.
# In this case a dict will be created using the default receiver tensor
# name "input".
receiver_tensors_alternatives = {
"other1": receiver_tensors_alternative_1,
"other2": receiver_tensors_alternative_2
}
output_1 = constant_op.constant([1.])
output_2 = constant_op.constant(["2"])
output_3 = constant_op.constant(["3"])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.RegressionOutput(value=output_1),
"head-2":
export_output.ClassificationOutput(classes=output_2),
"head-3":
export_output.PredictOutput(outputs={"some_output_3": output_3}),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, receiver_tensors_alternatives)
expected_signature_defs = {
"serving_default":
signature_def_utils.regression_signature_def(
receiver_tensor, output_1),
"head-2":
signature_def_utils.classification_signature_def(
receiver_tensor, output_2, None),
"head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensor}, {"some_output_3": output_3}),
"other1:head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensors_alternative_1},
{"some_output_3": output_3}),
"other2:head-3":
signature_def_utils.predict_signature_def(
{"input": receiver_tensors_alternative_2},
{"some_output_3": output_3})
# Note that the alternatives 'other:serving_default' and
# 'other:head-2' are invalid, because regression and classification
# signatures must take a single string input. Here we verify that
# these invalid signatures are not included in the export_utils.
}
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_build_all_signature_defs_export_outputs_required(self):
receiver_tensor = constant_op.constant(["11"])
with self.assertRaises(ValueError) as e:
export_utils.build_all_signature_defs(receiver_tensor, None)
self.assertTrue(
str(e.exception).startswith("`export_outputs` must be a dict"))
def test_get_timestamped_export_dir(self):
export_dir_base = tempfile.mkdtemp() + "export/"
export_dir_1 = export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_2 = export_utils.get_timestamped_export_dir(
export_dir_base)
time.sleep(2)
export_dir_3 = export_utils.get_timestamped_export_dir(
export_dir_base)
# Export directories should be named using a timestamp that is seconds
# since epoch. Such a timestamp is 10 digits long.
time_1 = os.path.basename(export_dir_1)
self.assertEqual(10, len(time_1))
time_2 = os.path.basename(export_dir_2)
self.assertEqual(10, len(time_2))
time_3 = os.path.basename(export_dir_3)
self.assertEqual(10, len(time_3))
self.assertLess(int(time_1), int(time_2))
self.assertLess(int(time_2), int(time_3))
def test_get_temp_export_dir(self):
export_dir = os.path.join("tmp", "export", "1576013284")
tmp_export_dir = export_utils.get_temp_export_dir(export_dir)
self.assertEqual(tmp_export_dir,
os.path.join(b"tmp", b"export", b"temp-1576013284"))
export_dir = os.path.join(b"tmp", b"export", b"1576013284")
tmp_export_dir = export_utils.get_temp_export_dir(export_dir)
self.assertEqual(tmp_export_dir,
os.path.join(b"tmp", b"export", b"temp-1576013284"))
def test_build_all_signature_defs_serving_only(self):
# Force the test to run in graph mode.
# This tests a deprecated v1 API that depends on graph-only functions such
# as build_tensor_info.
with ops.Graph().as_default():
receiver_tensor = {"input": array_ops.placeholder(dtypes.string)}
output_1 = constant_op.constant([1.])
export_outputs = {
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_output.PredictOutput(outputs=output_1),
"train":
export_output.TrainOutput(loss=output_1),
}
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs)
expected_signature_defs = {
"serving_default":
signature_def_utils.predict_signature_def(receiver_tensor,
{"output": output_1})
}
self.assertDictEqual(expected_signature_defs, signature_defs)
signature_defs = export_utils.build_all_signature_defs(
receiver_tensor, export_outputs, serving_only=False)
expected_signature_defs.update({
"train":
signature_def_utils.supervised_train_signature_def(
receiver_tensor, loss={"loss": output_1})
})
self.assertDictEqual(expected_signature_defs, signature_defs)
def test_export_outputs_for_mode(self):
predictions = {"predictions": constant_op.constant([1.])}
loss = {"loss": constant_op.constant([2.])}
metrics = {
"metrics": (constant_op.constant([3.]), constant_op.constant([4.]))}
expected_metrics = {
"metrics/value": metrics["metrics"][0],
"metrics/update_op": metrics["metrics"][1]
}
def _build_export_output(mode):
return export_utils.export_outputs_for_mode(
mode, None, predictions, loss, metrics)
ret = _build_export_output(KerasModeKeys.TRAIN)
self.assertIn(signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_TRAIN_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.TrainOutput)
self.assertEqual(export_out.predictions, predictions)
self.assertEqual(export_out.loss, loss)
self.assertEqual(export_out.metrics, expected_metrics)
ret = _build_export_output(KerasModeKeys.TEST)
self.assertIn(signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_EVAL_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.EvalOutput)
self.assertEqual(export_out.predictions, predictions)
self.assertEqual(export_out.loss, loss)
self.assertEqual(export_out.metrics, expected_metrics)
ret = _build_export_output(KerasModeKeys.PREDICT)
self.assertIn(signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY, ret)
export_out = ret[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
self.assertIsInstance(export_out, export_output.PredictOutput)
self.assertEqual(export_out.outputs, predictions)
classes = constant_op.constant(["class5"])
ret = export_utils.export_outputs_for_mode(
KerasModeKeys.PREDICT,
{"classify": export_output.ClassificationOutput(
classes=classes)})
self.assertIn("classify", ret)
export_out = ret["classify"]
self.assertIsInstance(export_out, export_output.ClassificationOutput)
self.assertEqual(export_out.classes, classes)
if __name__ == "__main__":
test.main()
|
ExportTest
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v3/views.py
|
{
"start": 4875,
"end": 10366
}
|
class ____(
APIv3Settings,
NestedViewSetMixin,
ProjectQuerySetMixin,
FlexFieldsMixin,
ProjectImportMixin,
UpdateChangeReasonMixin,
CreateModelMixin,
UpdateMixin,
UpdateModelMixin,
ReadOnlyModelViewSet,
):
model = Project
lookup_field = "slug"
lookup_url_kwarg = "project_slug"
filterset_class = ProjectFilter
permit_list_expands = [
"active_versions",
"organization",
"permissions",
"teams",
]
def get_permissions(self):
# Create and list are actions that act on the current user.
if self.action in ("create", "list"):
permission_classes = [IsAuthenticated]
# Actions that change the state of the project require admin permissions on the project.
elif self.action in ("update", "partial_update", "destroy", "sync_versions"):
permission_classes = [IsAuthenticated & IsProjectAdmin]
# Any other action is read-only.
else:
permission_classes = [ReadOnlyPermission]
return [permission() for permission in permission_classes]
def get_view_name(self):
# Avoid "Base" in BrowseableAPI view's title
if self.name:
return self.name
return f"Projects {self.suffix}"
def get_serializer_class(self):
"""
Return correct serializer depending on the action.
For GET it returns a serializer with many fields and on PUT/PATCH/POST,
it returns a serializer to validate just a few fields.
"""
if self.action in ("list", "retrieve"):
return ProjectSerializer
if self.action == "create":
return ProjectCreateSerializer
if self.action in ("update", "partial_update"):
return ProjectUpdateSerializer
# Default serializer so that sync_versions works with the BrowseableAPI
return ProjectSerializer
def get_queryset(self):
if self.action == "list":
# When listing, return all the projects where the user is admin.
queryset = self.admin_projects(self.request.user)
else:
queryset = super().get_queryset()
# This could be a class attribute and managed on the ``ProjectQuerySetMixin`` in
# case we want to extend the ``prefetch_related`` to other views as
# well.
return queryset.select_related(
"main_language_project",
).prefetch_related(
"tags",
"users",
# Prefetch superprojects to avoid N+1 queries when serializing the project.
Prefetch(
"superprojects",
ProjectRelationship.objects.all().select_related("parent"),
to_attr="_superprojects",
),
# Prefetch the canonical domain to avoid N+1 queries when using the resolver.
Prefetch(
"domains",
Domain.objects.filter(canonical=True),
to_attr="_canonical_domains",
),
)
def create(self, request, *args, **kwargs):
"""
Import Project.
Override to use a different serializer in the response,
since it's a different format than the one used for the request.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
# Use a serializer that fully renders a Project,
# instead of the one used for the request.
serializer = ProjectSerializer(instance=serializer.instance)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def perform_create(self, serializer):
"""
Import Project.
Trigger our internal mechanism to import a project after it's saved in
the database.
"""
project = super().perform_create(serializer)
self.finish_import_project(self.request, project)
@action(detail=True, methods=["get"])
def superproject(self, request, project_slug):
"""Return the superproject of a ``Project``."""
superproject = self._get_superproject()
if not superproject:
return Response(status=status.HTTP_404_NOT_FOUND)
data = ProjectSerializer(superproject).data
return Response(data)
def _get_superproject(self):
"""Get the superproject of the project, taking into consideration the user permissions."""
project = self.get_object()
return self.get_queryset().filter(subprojects__child=project).first()
@action(detail=True, methods=["post"], url_path="sync-versions")
def sync_versions(self, request, project_slug):
"""
Kick off a task to sync versions for a project.
POST to this endpoint to trigger a task that syncs versions for the project.
This will be used in a button in the frontend,
but also can be used to trigger a sync from the API.
"""
project = self.get_object()
triggered = trigger_sync_versions(project)
data = {}
if triggered:
data.update({"triggered": True})
code = status.HTTP_202_ACCEPTED
else:
data.update({"triggered": False})
code = status.HTTP_400_BAD_REQUEST
return Response(data=data, status=code)
|
ProjectsViewSetBase
|
python
|
spyder-ide__spyder
|
spyder/plugins/preferences/plugin.py
|
{
"start": 1569,
"end": 14338
}
|
class ____(SpyderPluginV2):
"""
Spyder preferences plugin.
This class manages all the preference pages and tabs for all internal
and external plugins, as well enabling other plugins to add configurations
to other sections.
"""
NAME = 'preferences'
CONF_SECTION = 'preferences'
OPTIONAL = [Plugins.MainMenu, Plugins.Toolbar]
CONF_FILE = False
CONTAINER_CLASS = PreferencesContainer
CAN_BE_DISABLED = False
NEW_API = 'new'
def __init__(self, parent, configuration=None):
super().__init__(parent, configuration)
self.config_pages = {}
self.config_tabs = {}
self._config_pages_ordered = False
# ---- Public API
# -------------------------------------------------------------------------
def register_plugin_preferences(self, plugin: SpyderPluginV2) -> None:
if plugin.CONF_WIDGET_CLASS is not None:
Widget = plugin.CONF_WIDGET_CLASS
self.config_pages[plugin.NAME] = (self.NEW_API, Widget, plugin)
plugin_conf_version = plugin.CONF_VERSION or CONF_VERSION
plugin_conf_version = parse(plugin_conf_version)
# Check if the plugin adds new configuration options to other
# sections
if plugin.ADDITIONAL_CONF_OPTIONS is not None:
for conf_section in plugin.ADDITIONAL_CONF_OPTIONS:
conf_keys = plugin.ADDITIONAL_CONF_OPTIONS[conf_section]
for conf_key in conf_keys:
new_value = conf_keys[conf_key]
self.check_version_and_merge(
conf_section, conf_key, new_value,
plugin_conf_version, plugin)
# Check if the plugin declares any additional configuration tabs
if plugin.ADDITIONAL_CONF_TABS is not None:
for plugin_name in plugin.ADDITIONAL_CONF_TABS:
tabs_to_add = plugin.ADDITIONAL_CONF_TABS[plugin_name]
plugin_tabs = self.config_tabs.get(plugin_name, [])
plugin_tabs += tabs_to_add
self.config_tabs[plugin_name] = plugin_tabs
def deregister_plugin_preferences(self, plugin: SpyderPluginV2) -> None:
"""Remove a plugin preference page and additional configuration tabs."""
name = getattr(plugin, "NAME", None) or getattr(
plugin, "CONF_SECTION", None
)
# Remove configuration page for the plugin
self.config_pages.pop(name)
# Remove additional configuration tabs that the plugin did introduce
if isinstance(plugin, SpyderPluginV2):
for plugin_name in (plugin.ADDITIONAL_CONF_TABS or []):
tabs = plugin.ADDITIONAL_CONF_TABS[plugin_name]
for tab in tabs:
self.config_tabs[plugin_name].remove(tab)
def check_version_and_merge(
self,
conf_section: str,
conf_key: str,
new_value: BasicType,
current_version: Version,
plugin
):
"""Add a versioned additional option to a configuration section."""
current_value = self.get_conf(
conf_key, section=conf_section, default=None)
section_additional = self.get_conf('additional_configuration',
section=conf_section,
default={})
plugin_additional = section_additional.get(plugin.NAME, {})
if conf_key in plugin_additional:
conf_key_info = plugin_additional[conf_key]
prev_default = conf_key_info['default']
prev_version = parse(conf_key_info['version'])
allow_replacement = current_version > prev_version
allow_deletions = current_version.major > prev_version.major
new_value = self.merge_defaults(prev_default, new_value,
allow_replacement, allow_deletions)
new_default = new_value
if current_value != NoDefault:
new_value = self.merge_configurations(current_value, new_value)
self.set_conf(
conf_key, new_value, section=conf_section)
conf_key_info['version'] = str(current_version)
conf_key_info['default'] = new_default
plugin_additional[conf_key] = conf_key_info
section_additional[plugin.NAME] = plugin_additional
self.set_conf(
'additional_configuration', section_additional,
section=conf_section)
else:
plugin_additional[conf_key] = {
'version': str(current_version),
'default': new_value
}
section_additional[plugin.NAME] = plugin_additional
self.set_conf(
'additional_configuration', section_additional,
section=conf_section)
if current_value != NoDefault:
new_value = self.merge_configurations(current_value, new_value)
self.set_conf(
conf_key, new_value, section=conf_section)
def merge_defaults(
self,
prev_default: BasicType,
new_default: BasicType,
allow_replacement: bool = False,
allow_deletions: bool = False
) -> BasicType:
"""Compare and merge two versioned values."""
prev_type = type(prev_default)
new_type = type(new_default)
if prev_type is dict and new_type is dict:
# Merge two dicts case
for new_key in new_default:
if new_key in prev_default:
current_subvalue = prev_default[new_key]
new_subvalue = new_default[new_key]
prev_default[new_key] = self.merge_defaults(
current_subvalue, new_subvalue,
allow_replacement, allow_deletions)
else:
# Additions are allowed every time
prev_default[new_key] = new_default[new_key]
if allow_deletions:
for old_key in list(prev_default.keys()):
if old_key not in new_default:
prev_default.pop(old_key)
return prev_default
elif prev_default != new_default:
if allow_replacement:
return new_default
else:
return prev_default
else:
return prev_default
def merge_configurations(
self,
current_value: BasicType,
new_value: BasicType
) -> BasicType:
"""
Recursively match and merge a new configuration value into a
previous one.
"""
current_type = type(current_value)
new_type = type(new_value)
iterable_types = {list, tuple}
base_types = {int, float, bool, complex, str, bytes}
if current_type is dict and new_type is dict:
# Merge two dicts case
for new_key in new_value:
if new_key in current_value:
current_subvalue = current_value[new_key]
new_subvalue = new_value[new_key]
current_value[new_key] = self.merge_configurations(
current_subvalue, new_subvalue)
else:
current_value[new_key] = new_value[new_key]
return current_value
elif current_type in iterable_types and new_type in iterable_types:
# Merge two lists/tuples case
return current_type(list(current_value) + list(new_value))
elif (current_type == new_type and
current_type in base_types and new_type in base_types):
# Replace the values directly
return new_value
elif current_type in iterable_types and new_type in base_types:
# Add a value to a list or tuple
return current_type((list(current_value) + [new_value]))
elif current_value is None:
# Assigns the new value if it doesn't exist
return new_value
else:
logger.warning(f'The value {current_value} cannot be replaced '
f'by {new_value}')
return current_value
def open_dialog(self):
container = self.get_container()
self.before_long_process('')
if not running_under_pytest():
self._reorder_config_pages()
container.create_dialog(
self.config_pages, self.config_tabs, self.get_main()
)
self.after_long_process()
@Slot()
def reset(self):
answer = QMessageBox.warning(
self.main,
_("Warning"),
_("Spyder will restart and reset to default settings:"
"<br><br>"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No
)
if answer == QMessageBox.Yes:
os.environ['SPYDER_RESET'] = 'True'
self.sig_restart_requested.emit()
# ---- SpyderPluginV2 API
# -------------------------------------------------------------------------
@staticmethod
def get_name() -> str:
return _('Preferences')
@staticmethod
def get_description() -> str:
return _("Manage Spyder's preferences.")
@classmethod
def get_icon(cls) -> QIcon:
return cls.create_icon('configure')
def on_initialize(self):
container = self.get_container()
container.sig_show_preferences_requested.connect(self.open_dialog)
container.sig_reset_preferences_requested.connect(self.reset)
@on_plugin_available(plugin=Plugins.MainMenu)
def on_main_menu_available(self):
container = self.get_container()
main_menu = self.get_plugin(Plugins.MainMenu)
main_menu.add_item_to_application_menu(
container.show_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Preferences,
before=PreferencesActions.Reset,
)
main_menu.add_item_to_application_menu(
container.reset_action,
menu_id=ApplicationMenus.Tools,
section=ToolsMenuSections.Preferences,
)
@on_plugin_available(plugin=Plugins.Toolbar)
def on_toolbar_available(self):
container = self.get_container()
toolbar = self.get_plugin(Plugins.Toolbar)
toolbar.add_item_to_application_toolbar(
container.show_action,
toolbar_id=ApplicationToolbars.Main,
section=MainToolbarSections.ApplicationSection,
before=PythonpathActions.Manager
)
@on_plugin_teardown(plugin=Plugins.MainMenu)
def on_main_menu_teardown(self):
main_menu = self.get_plugin(Plugins.MainMenu)
main_menu.remove_item_from_application_menu(
PreferencesActions.Show,
menu_id=ApplicationMenus.Tools,
)
main_menu.remove_item_from_application_menu(
PreferencesActions.Reset,
menu_id=ApplicationMenus.Tools,
)
@on_plugin_teardown(plugin=Plugins.Toolbar)
def on_toolbar_teardown(self):
toolbar = self.get_plugin(Plugins.Toolbar)
toolbar.remove_item_from_application_toolbar(
PreferencesActions.Show,
toolbar_id=ApplicationToolbars.Main
)
def on_close(self, cancelable=False):
container = self.get_container()
if container.is_preferences_open():
container.close_preferences()
return True
# ---- Private API
# -------------------------------------------------------------------------
def _reorder_config_pages(self):
if self._config_pages_ordered:
return
plugins_page = [PreferencesAdapter.NAME]
# Order pages alphabetically by plugin name
pages = []
for k, v in self.config_pages.items():
pages.append((k, v[2].get_name()))
collator = Collator()
pages.sort(key=lambda p: collator.sort_key(p[1]))
# Get pages from the previous list without including the most important
# ones and the plugins page because they'll be added in a different
# order.
other_pages = [
page[0] for page in pages
if page[0] not in (MOST_IMPORTANT_PAGES + plugins_page)
]
# Show most important pages first and the Plugins page last
ordering = MOST_IMPORTANT_PAGES + other_pages + plugins_page
self.config_pages = {k: self.config_pages[k] for k in ordering}
self._config_pages_ordered = True
|
Preferences
|
python
|
django-extensions__django-extensions
|
django_extensions/mongodb/models.py
|
{
"start": 360,
"end": 654
}
|
class ____(Document):
"""
TimeStampedModel
An abstract base class model that provides self-managed "created" and
"modified" fields.
"""
created = CreationDateTimeField()
modified = ModificationDateTimeField()
class Meta:
abstract = True
|
TimeStampedModel
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py
|
{
"start": 4082,
"end": 4529
}
|
class ____(FBMarketingIncrementalStream):
"""doc: https://developers.facebook.com/docs/marketing-api/reference/adgroup"""
entity_prefix = "ad"
status_field = "effective_status"
valid_statuses = [status.value for status in ValidAdStatuses]
def list_objects(self, params: Mapping[str, Any], account_id: str) -> Iterable:
return self._api.get_account(account_id=account_id).get_ads(params=params, fields=self.fields())
|
Ads
|
python
|
davidhalter__jedi
|
test/completion/classes.py
|
{
"start": 5308,
"end": 5546
}
|
class ____():
def b(self):
class B():
def b(self):
return []
return B().b()
#? list()
A().b()
# -----------------
# ducktyping
# -----------------
def meth(self):
return self.a, self.b
|
A
|
python
|
dagster-io__dagster
|
examples/with_openai/with_openai/assets.py
|
{
"start": 1809,
"end": 3617
}
|
class ____(Config):
model: str
question: str
@asset(
compute_kind="OpenAI",
ins={
"search_index": AssetIn(partition_mapping=AllPartitionMapping()),
},
)
def completion(
context: AssetExecutionContext,
openai: OpenAIResource,
config: OpenAIConfig,
search_index: dict[str, Any],
):
merged_index: Any = None
# allow_dangerous_deserialization set to True since we created the search index ourselves
# in the search_index asset
for index in search_index.values():
curr = FAISS.deserialize_from_bytes(
index, OpenAIEmbeddings(), allow_dangerous_deserialization=True
)
if not merged_index:
merged_index = curr
else:
merged_index.merge_from(
FAISS.deserialize_from_bytes(
index, OpenAIEmbeddings(), allow_dangerous_deserialization=True
)
)
with openai.get_client(context) as client:
prompt = stuff_prompt.PROMPT
model = ChatOpenAI(client=client.chat.completions, model=config.model, temperature=0)
summaries = " ".join(
[
SUMMARY_TEMPLATE.format(content=doc.page_content, source=doc.metadata["source"])
for doc in merged_index.similarity_search(config.question, k=4)
]
)
context.log.info(summaries)
output_parser = StrOutputParser()
chain = prompt | model | output_parser
context.log.info(chain.invoke({"summaries": summaries, "question": config.question}))
search_index_job = define_asset_job(
"search_index_job",
selection="*search_index",
partitions_def=docs_partitions_def,
)
question_job = define_asset_job(
name="question_job",
selection="completion",
)
|
OpenAIConfig
|
python
|
numba__numba
|
numba/core/types/containers.py
|
{
"start": 7536,
"end": 7849
}
|
class ____(Type):
def __init__(self, types):
self.types = tuple(sorted(set(types), key=lambda x: x.name))
name = "Union[{}]".format(",".join(map(str, self.types)))
super(UnionType, self).__init__(name=name)
def get_type_tag(self, typ):
return self.types.index(typ)
|
UnionType
|
python
|
xlwings__xlwings
|
xlwings/expansion.py
|
{
"start": 101,
"end": 494
}
|
class ____:
def register(self, *aliases):
for alias in aliases:
expanders[alias] = self
def expand(self, rng):
"""
Expands a range
Arguments
---------
rng: Range
The reference range
Returns
-------
Range object: The expanded range
"""
raise NotImplementedError()
|
Expander
|
python
|
numpy__numpy
|
numpy/lib/tests/test_type_check.py
|
{
"start": 7847,
"end": 8808
}
|
class ____:
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isfinite(z) == 1
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((1.,)) / 0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((-1.,)) / 0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((0.,)) / 0.) == 0)
def test_integer(self):
assert_all(np.isfinite(1) == 1)
def test_complex(self):
assert_all(np.isfinite(1 + 1j) == 1)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array(1 + 1j) / 0.) == 0)
|
TestIsfinite
|
python
|
boto__boto3
|
tests/unit/resources/test_model.py
|
{
"start": 8620,
"end": 14421
}
|
class ____(BaseTestCase):
def test_multiple(self):
# This tests a bunch of different renames working together
model = ResourceModel(
'test',
{
'identifiers': [{'name': 'Foo'}],
'actions': {'Foo': {}},
'has': {
'Foo': {
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'data',
'path': 'FrobId',
}
],
}
}
},
'hasMany': {'Foo': {}},
'waiters': {'Foo': {}},
},
{'Frob': {}},
)
shape = (
DenormalizedStructureBuilder()
.with_members(
{
'Foo': {
'type': 'string',
},
'Bar': {'type': 'string'},
}
)
.build_model()
)
model.load_rename_map(shape)
assert model.identifiers[0].name == 'foo'
assert model.actions[0].name == 'foo_action'
assert model.references[0].name == 'foo_reference'
assert model.collections[0].name == 'foo_collection'
assert model.waiters[0].name == 'wait_until_foo'
# If an identifier and an attribute share the same name, then
# the attribute is essentially hidden.
assert 'foo_attribute' not in model.get_attributes(shape)
# Other attributes need to be there, though
assert 'bar' in model.get_attributes(shape)
# The rest of the tests below ensure the correct order of precedence
# for the various categories of attributes/properties/methods on the
# resource model.
def test_meta_beats_identifier(self):
model = ResourceModel('test', {'identifiers': [{'name': 'Meta'}]}, {})
model.load_rename_map()
assert model.identifiers[0].name == 'meta_identifier'
def test_load_beats_identifier(self):
model = ResourceModel(
'test',
{
'identifiers': [{'name': 'Load'}],
'load': {'request': {'operation': 'GetFrobs'}},
},
{},
)
model.load_rename_map()
assert model.load
assert model.identifiers[0].name == 'load_identifier'
def test_identifier_beats_action(self):
model = ResourceModel(
'test',
{
'identifiers': [{'name': 'foo'}],
'actions': {'Foo': {'request': {'operation': 'GetFoo'}}},
},
{},
)
model.load_rename_map()
assert model.identifiers[0].name == 'foo'
assert model.actions[0].name == 'foo_action'
def test_action_beats_reference(self):
model = ResourceModel(
'test',
{
'actions': {'Foo': {'request': {'operation': 'GetFoo'}}},
'has': {
'Foo': {
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'data',
'path': 'FrobId',
}
],
}
}
},
},
{'Frob': {}},
)
model.load_rename_map()
assert model.actions[0].name == 'foo'
assert model.references[0].name == 'foo_reference'
def test_reference_beats_collection(self):
model = ResourceModel(
'test',
{
'has': {
'Foo': {
'resource': {
'type': 'Frob',
'identifiers': [
{
'target': 'Id',
'source': 'data',
'path': 'FrobId',
}
],
}
}
},
'hasMany': {'Foo': {'resource': {'type': 'Frob'}}},
},
{'Frob': {}},
)
model.load_rename_map()
assert model.references[0].name == 'foo'
assert model.collections[0].name == 'foo_collection'
def test_collection_beats_waiter(self):
model = ResourceModel(
'test',
{
'hasMany': {'WaitUntilFoo': {'resource': {'type': 'Frob'}}},
'waiters': {'Foo': {}},
},
{'Frob': {}},
)
model.load_rename_map()
assert model.collections[0].name == 'wait_until_foo'
assert model.waiters[0].name == 'wait_until_foo_waiter'
def test_waiter_beats_attribute(self):
model = ResourceModel('test', {'waiters': {'Foo': {}}}, {'Frob': {}})
shape = (
DenormalizedStructureBuilder()
.with_members(
{
'WaitUntilFoo': {
'type': 'string',
}
}
)
.build_model()
)
model.load_rename_map(shape)
assert model.waiters[0].name == 'wait_until_foo'
assert 'wait_until_foo_attribute' in model.get_attributes(shape)
|
TestRenaming
|
python
|
doocs__leetcode
|
solution/1200-1299/1248.Count Number of Nice Subarrays/Solution.py
|
{
"start": 0,
"end": 250
}
|
class ____:
def numberOfSubarrays(self, nums: List[int], k: int) -> int:
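# Count subarrays with exactly k odd numbers: keep a running count of odd
# elements (t) and, for each prefix, add how many earlier prefixes had t - k odds.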
cnt = Counter({0: 1})
ans = t = 0
for v in nums:
t += v & 1
ans += cnt[t - k]
cnt[t] += 1
return ans
|
Solution
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/exceptions.py
|
{
"start": 232,
"end": 316
}
|
class ____(Warning):
"""Base warning used by this module."""
pass
|
HTTPWarning
|
python
|
joke2k__faker
|
faker/providers/internet/es_CL/__init__.py
|
{
"start": 134,
"end": 899
}
|
class ____(InternetProvider):
safe_email_tlds = ("com", "net", "cl", "cl")
tlds = ("com", "com", "com", "net", "org", "cl", "cl", "cl")
replacements = (
("à", "a"),
("â", "a"),
("ã", "a"),
("á", "a"),
("ç", "c"),
("é", "e"),
("ê", "e"),
("í", "i"),
("ô", "o"),
("ö", "o"),
("õ", "o"),
("ó", "o"),
("ú", "u"),
)
@lowercase
@slugify_unicode
def domain_word(self) -> str:
company: str = self.generator.format("company")
company_elements: List[str] = company.split(" ")
# select 2 items as companies include prefix
name_items = company_elements[:2]
return self._to_ascii("".join(name_items))
|
Provider
|
python
|
huggingface__transformers
|
src/transformers/models/siglip/modeling_siglip.py
|
{
"start": 11752,
"end": 14260
}
|
class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
|
SiglipAttention
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_query.py
|
{
"start": 246559,
"end": 254495
}
|
class ____(QueryTest):
run_setup_mappers = "each"
@contextlib.contextmanager
def _assert_bind_args(self, session, expect_mapped_bind=True):
get_bind = mock.Mock(side_effect=session.get_bind)
with mock.patch.object(session, "get_bind", get_bind):
yield
for call_ in get_bind.mock_calls:
if expect_mapped_bind:
eq_(
call_,
mock.call(
clause=mock.ANY, mapper=inspect(self.classes.User)
),
)
else:
eq_(call_, mock.call(clause=mock.ANY))
def test_single_entity_q(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(User).all()
def test_aliased_entity_q(self):
User = self.classes.User
u = aliased(User)
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(u).all()
def test_sql_expr_entity_q(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(User.id).all()
def test_sql_expr_subquery_from_entity(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
subq = session.query(User.id).subquery()
session.query(subq).all()
@testing.requires.boolean_col_expressions
def test_sql_expr_exists_from_entity(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
subq = session.query(User.id).exists()
session.query(subq).all()
def test_sql_expr_cte_from_entity(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
cte = session.query(User.id).cte()
subq = session.query(cte).subquery()
session.query(subq).all()
def test_sql_expr_bundle_cte_from_entity(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
cte = session.query(User.id, User.name).cte()
subq = session.query(cte).subquery()
bundle = Bundle(subq.c.id, subq.c.name)
session.query(bundle).all()
def test_count(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(User).count()
def test_single_col(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(User.name).all()
def test_single_col_from_subq(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
subq = session.query(User.id, User.name).subquery()
session.query(subq.c.name).all()
def test_aggregate_fn(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(func.max(User.name)).all()
def test_case(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(case((User.name == "x", "C"), else_="W")).all()
def test_cast(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(cast(User.name, String())).all()
def test_type_coerce(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(type_coerce(User.name, String())).all()
def test_binary_op(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(User.name + "x").all()
@testing.requires.boolean_col_expressions
def test_boolean_op(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(User.name == "x").all()
def test_bulk_update_no_sync(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).update(
{"name": "foob"}, synchronize_session=False
)
def test_bulk_delete_no_sync(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).delete(
synchronize_session=False
)
def test_bulk_update_fetch_sync(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).update(
{"name": "foob"}, synchronize_session="fetch"
)
def test_bulk_delete_fetch_sync(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session):
session.query(User).filter(User.id == 15).delete(
synchronize_session="fetch"
)
def test_column_property(self):
User = self.classes.User
mapper = inspect(User)
mapper.add_property(
"score",
column_property(func.coalesce(self.tables.users.c.name, None)),
)
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=True):
session.query(func.max(User.score)).scalar()
def test_plain_table(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=False):
session.query(inspect(User).local_table).all()
def _test_plain_table_from_self(self):
User = self.classes.User
# TODO: this test is dumb
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=False):
session.query(inspect(User).local_table).from_self().all()
def test_plain_table_count(self):
User = self.classes.User
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=False):
session.query(inspect(User).local_table).count()
def test_plain_table_select_from(self):
User = self.classes.User
table = inspect(User).local_table
session = fixture_session()
with self._assert_bind_args(session, expect_mapped_bind=False):
session.query(table).select_from(table).all()
@testing.requires.nested_aggregates
def test_column_property_select(self):
User = self.classes.User
Address = self.classes.Address
mapper = inspect(User)
mapper.add_property(
"score",
column_property(
select(func.sum(Address.id))
.where(Address.user_id == User.id)
.scalar_subquery()
),
)
session = fixture_session()
with self._assert_bind_args(session):
session.query(func.max(User.score)).scalar()
|
SessionBindTest
|
python
|
python-excel__xlrd
|
xlrd/biffh.py
|
{
"start": 417,
"end": 16650
}
|
class ____(object):
"""
Parent of almost all other classes in the package. Defines a common
:meth:`dump` method for debugging.
"""
_repr_these = []
def dump(self, f=None, header=None, footer=None, indent=0):
"""
:param f: open file object, to which the dump is written
:param header: text to write before the dump
:param footer: text to write after the dump
:param indent: number of leading spaces (for recursive calls)
"""
if f is None:
f = sys.stderr
if hasattr(self, "__slots__"):
alist = []
for attr in self.__slots__:
alist.append((attr, getattr(self, attr)))
else:
alist = self.__dict__.items()
alist = sorted(alist)
pad = " " * indent
if header is not None: print(header, file=f)
list_type = type([])
dict_type = type({})
for attr, value in alist:
if getattr(value, 'dump', None) and attr != 'book':
value.dump(f,
header="%s%s (%s object):" % (pad, attr, value.__class__.__name__),
indent=indent+4)
elif (attr not in self._repr_these and
(isinstance(value, list_type) or isinstance(value, dict_type))):
print("%s%s: %s, len = %d" % (pad, attr, type(value), len(value)), file=f)
else:
fprintf(f, "%s%s: %r\n", pad, attr, value)
if footer is not None: print(footer, file=f)
FUN, FDT, FNU, FGE, FTX = range(5) # unknown, date, number, general, text
DATEFORMAT = FDT
NUMBERFORMAT = FNU
(
XL_CELL_EMPTY,
XL_CELL_TEXT,
XL_CELL_NUMBER,
XL_CELL_DATE,
XL_CELL_BOOLEAN,
XL_CELL_ERROR,
XL_CELL_BLANK, # for use in debugging, gathering stats, etc
) = range(7)
biff_text_from_num = {
0: "(not BIFF)",
20: "2.0",
21: "2.1",
30: "3",
40: "4S",
45: "4W",
50: "5",
70: "7",
80: "8",
85: "8X",
}
#: This dictionary can be used to produce a text version of the internal codes
#: that Excel uses for error cells.
error_text_from_code = {
0x00: '#NULL!', # Intersection of two cell ranges is empty
0x07: '#DIV/0!', # Division by zero
0x0F: '#VALUE!', # Wrong type of operand
0x17: '#REF!', # Illegal or deleted cell reference
0x1D: '#NAME?', # Wrong function or range name
0x24: '#NUM!', # Value range overflow
0x2A: '#N/A', # Argument or function not available
}
BIFF_FIRST_UNICODE = 80
XL_WORKBOOK_GLOBALS = WBKBLOBAL = 0x5
XL_WORKBOOK_GLOBALS_4W = 0x100
XL_WORKSHEET = WRKSHEET = 0x10
XL_BOUNDSHEET_WORKSHEET = 0x00
XL_BOUNDSHEET_CHART = 0x02
XL_BOUNDSHEET_VB_MODULE = 0x06
# XL_RK2 = 0x7e
XL_ARRAY = 0x0221
XL_ARRAY2 = 0x0021
XL_BLANK = 0x0201
XL_BLANK_B2 = 0x01
XL_BOF = 0x809
XL_BOOLERR = 0x205
XL_BOOLERR_B2 = 0x5
XL_BOUNDSHEET = 0x85
XL_BUILTINFMTCOUNT = 0x56
XL_CF = 0x01B1
XL_CODEPAGE = 0x42
XL_COLINFO = 0x7D
XL_COLUMNDEFAULT = 0x20 # BIFF2 only
XL_COLWIDTH = 0x24 # BIFF2 only
XL_CONDFMT = 0x01B0
XL_CONTINUE = 0x3c
XL_COUNTRY = 0x8C
XL_DATEMODE = 0x22
XL_DEFAULTROWHEIGHT = 0x0225
XL_DEFCOLWIDTH = 0x55
XL_DIMENSION = 0x200
XL_DIMENSION2 = 0x0
XL_EFONT = 0x45
XL_EOF = 0x0a
XL_EXTERNNAME = 0x23
XL_EXTERNSHEET = 0x17
XL_EXTSST = 0xff
XL_FEAT11 = 0x872
XL_FILEPASS = 0x2f
XL_FONT = 0x31
XL_FONT_B3B4 = 0x231
XL_FORMAT = 0x41e
XL_FORMAT2 = 0x1E # BIFF2, BIFF3
XL_FORMULA = 0x6
XL_FORMULA3 = 0x206
XL_FORMULA4 = 0x406
XL_GCW = 0xab
XL_HLINK = 0x01B8
XL_QUICKTIP = 0x0800
XL_HORIZONTALPAGEBREAKS = 0x1b
XL_INDEX = 0x20b
XL_INTEGER = 0x2 # BIFF2 only
XL_IXFE = 0x44 # BIFF2 only
XL_LABEL = 0x204
XL_LABEL_B2 = 0x04
XL_LABELRANGES = 0x15f
XL_LABELSST = 0xfd
XL_LEFTMARGIN = 0x26
XL_TOPMARGIN = 0x28
XL_RIGHTMARGIN = 0x27
XL_BOTTOMMARGIN = 0x29
XL_HEADER = 0x14
XL_FOOTER = 0x15
XL_HCENTER = 0x83
XL_VCENTER = 0x84
XL_MERGEDCELLS = 0xE5
XL_MSO_DRAWING = 0x00EC
XL_MSO_DRAWING_GROUP = 0x00EB
XL_MSO_DRAWING_SELECTION = 0x00ED
XL_MULRK = 0xbd
XL_MULBLANK = 0xbe
XL_NAME = 0x18
XL_NOTE = 0x1c
XL_NUMBER = 0x203
XL_NUMBER_B2 = 0x3
XL_OBJ = 0x5D
XL_PAGESETUP = 0xA1
XL_PALETTE = 0x92
XL_PANE = 0x41
XL_PRINTGRIDLINES = 0x2B
XL_PRINTHEADERS = 0x2A
XL_RK = 0x27e
XL_ROW = 0x208
XL_ROW_B2 = 0x08
XL_RSTRING = 0xd6
XL_SCL = 0x00A0
XL_SHEETHDR = 0x8F # BIFF4W only
XL_SHEETPR = 0x81
XL_SHEETSOFFSET = 0x8E # BIFF4W only
XL_SHRFMLA = 0x04bc
XL_SST = 0xfc
XL_STANDARDWIDTH = 0x99
XL_STRING = 0x207
XL_STRING_B2 = 0x7
XL_STYLE = 0x293
XL_SUPBOOK = 0x1AE # aka EXTERNALBOOK in OOo docs
XL_TABLEOP = 0x236
XL_TABLEOP2 = 0x37
XL_TABLEOP_B2 = 0x36
XL_TXO = 0x1b6
XL_UNCALCED = 0x5e
XL_UNKNOWN = 0xffff
XL_VERTICALPAGEBREAKS = 0x1a
XL_WINDOW2 = 0x023E
XL_WINDOW2_B2 = 0x003E
XL_WRITEACCESS = 0x5C
XL_WSBOOL = XL_SHEETPR
XL_XF = 0xe0
XL_XF2 = 0x0043 # BIFF2 version of XF record
XL_XF3 = 0x0243 # BIFF3 version of XF record
XL_XF4 = 0x0443 # BIFF4 version of XF record
boflen = {0x0809: 8, 0x0409: 6, 0x0209: 6, 0x0009: 4}
bofcodes = (0x0809, 0x0409, 0x0209, 0x0009)
XL_FORMULA_OPCODES = (0x0006, 0x0406, 0x0206)
_cell_opcode_list = [
XL_BOOLERR,
XL_FORMULA,
XL_FORMULA3,
XL_FORMULA4,
XL_LABEL,
XL_LABELSST,
XL_MULRK,
XL_NUMBER,
XL_RK,
XL_RSTRING,
]
_cell_opcode_dict = {}
for _cell_opcode in _cell_opcode_list:
_cell_opcode_dict[_cell_opcode] = 1
def is_cell_opcode(c):
return c in _cell_opcode_dict
def upkbits(tgt_obj, src, manifest, local_setattr=setattr):
for n, mask, attr in manifest:
local_setattr(tgt_obj, attr, (src & mask) >> n)
def upkbitsL(tgt_obj, src, manifest, local_setattr=setattr, local_int=int):
for n, mask, attr in manifest:
local_setattr(tgt_obj, attr, local_int((src & mask) >> n))
def unpack_string(data, pos, encoding, lenlen=1):
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
pos += lenlen
return unicode(data[pos:pos+nchars], encoding)
def unpack_string_update_pos(data, pos, encoding, lenlen=1, known_len=None):
if known_len is not None:
# On a NAME record, the length byte is detached from the front of the string.
nchars = known_len
else:
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
pos += lenlen
newpos = pos + nchars
return (unicode(data[pos:newpos], encoding), newpos)
def unpack_unicode(data, pos, lenlen=2):
"Return unicode_strg"
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
if not nchars:
# Ambiguous whether 0-length string should have an "options" byte.
# Avoid crash if missing.
return UNICODE_LITERAL("")
pos += lenlen
options = BYTES_ORD(data[pos])
pos += 1
# phonetic = options & 0x04
# richtext = options & 0x08
if options & 0x08:
# rt = unpack('<H', data[pos:pos+2])[0] # unused
pos += 2
if options & 0x04:
# sz = unpack('<i', data[pos:pos+4])[0] # unused
pos += 4
if options & 0x01:
# Uncompressed UTF-16-LE
rawstrg = data[pos:pos+2*nchars]
# if DEBUG: print "nchars=%d pos=%d rawstrg=%r" % (nchars, pos, rawstrg)
strg = unicode(rawstrg, 'utf_16_le')
# pos += 2*nchars
else:
# Note: this is COMPRESSED (not ASCII!) encoding!!!
# Merely returning the raw bytes would work OK 99.99% of the time
# if the local codepage was cp1252 -- however this would rapidly go pear-shaped
# for other codepages so we grit our Anglocentric teeth and return Unicode :-)
strg = unicode(data[pos:pos+nchars], "latin_1")
# pos += nchars
# if richtext:
# pos += 4 * rt
# if phonetic:
# pos += sz
# return (strg, pos)
return strg
def unpack_unicode_update_pos(data, pos, lenlen=2, known_len=None):
"Return (unicode_strg, updated value of pos)"
if known_len is not None:
# On a NAME record, the length byte is detached from the front of the string.
nchars = known_len
else:
nchars = unpack('<' + 'BH'[lenlen-1], data[pos:pos+lenlen])[0]
pos += lenlen
if not nchars and not data[pos:]:
# Zero-length string with no options byte
return (UNICODE_LITERAL(""), pos)
options = BYTES_ORD(data[pos])
pos += 1
phonetic = options & 0x04
richtext = options & 0x08
if richtext:
rt = unpack('<H', data[pos:pos+2])[0]
pos += 2
if phonetic:
sz = unpack('<i', data[pos:pos+4])[0]
pos += 4
if options & 0x01:
# Uncompressed UTF-16-LE
strg = unicode(data[pos:pos+2*nchars], 'utf_16_le')
pos += 2*nchars
else:
# Note: this is COMPRESSED (not ASCII!) encoding!!!
strg = unicode(data[pos:pos+nchars], "latin_1")
pos += nchars
if richtext:
pos += 4 * rt
if phonetic:
pos += sz
return (strg, pos)
def unpack_cell_range_address_list_update_pos(output_list, data, pos, biff_version, addr_size=6):
# output_list is updated in situ
assert addr_size in (6, 8)
# Used to assert size == 6 if not BIFF8, but pyWLWriter writes
# BIFF8-only MERGEDCELLS records in a BIFF5 file!
n, = unpack("<H", data[pos:pos+2])
pos += 2
if n:
if addr_size == 6:
fmt = "<HHBB"
else:
fmt = "<HHHH"
for _unused in xrange(n):
ra, rb, ca, cb = unpack(fmt, data[pos:pos+addr_size])
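# BIFF stores inclusive row/col bounds; append them in xlrd's half-open
# convention (upper bounds exclusive), hence the +1 on rb and cb.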
output_list.append((ra, rb+1, ca, cb+1))
pos += addr_size
return pos
_brecstrg = """\
0000 DIMENSIONS_B2
0001 BLANK_B2
0002 INTEGER_B2_ONLY
0003 NUMBER_B2
0004 LABEL_B2
0005 BOOLERR_B2
0006 FORMULA
0007 STRING_B2
0008 ROW_B2
0009 BOF_B2
000A EOF
000B INDEX_B2_ONLY
000C CALCCOUNT
000D CALCMODE
000E PRECISION
000F REFMODE
0010 DELTA
0011 ITERATION
0012 PROTECT
0013 PASSWORD
0014 HEADER
0015 FOOTER
0016 EXTERNCOUNT
0017 EXTERNSHEET
0018 NAME_B2,5+
0019 WINDOWPROTECT
001A VERTICALPAGEBREAKS
001B HORIZONTALPAGEBREAKS
001C NOTE
001D SELECTION
001E FORMAT_B2-3
001F BUILTINFMTCOUNT_B2
0020 COLUMNDEFAULT_B2_ONLY
0021 ARRAY_B2_ONLY
0022 DATEMODE
0023 EXTERNNAME
0024 COLWIDTH_B2_ONLY
0025 DEFAULTROWHEIGHT_B2_ONLY
0026 LEFTMARGIN
0027 RIGHTMARGIN
0028 TOPMARGIN
0029 BOTTOMMARGIN
002A PRINTHEADERS
002B PRINTGRIDLINES
002F FILEPASS
0031 FONT
0032 FONT2_B2_ONLY
0036 TABLEOP_B2
0037 TABLEOP2_B2
003C CONTINUE
003D WINDOW1
003E WINDOW2_B2
0040 BACKUP
0041 PANE
0042 CODEPAGE
0043 XF_B2
0044 IXFE_B2_ONLY
0045 EFONT_B2_ONLY
004D PLS
0051 DCONREF
0055 DEFCOLWIDTH
0056 BUILTINFMTCOUNT_B3-4
0059 XCT
005A CRN
005B FILESHARING
005C WRITEACCESS
005D OBJECT
005E UNCALCED
005F SAVERECALC
0063 OBJECTPROTECT
007D COLINFO
007E RK2_mythical_?
0080 GUTS
0081 WSBOOL
0082 GRIDSET
0083 HCENTER
0084 VCENTER
0085 BOUNDSHEET
0086 WRITEPROT
008C COUNTRY
008D HIDEOBJ
008E SHEETSOFFSET
008F SHEETHDR
0090 SORT
0092 PALETTE
0099 STANDARDWIDTH
009B FILTERMODE
009C FNGROUPCOUNT
009D AUTOFILTERINFO
009E AUTOFILTER
00A0 SCL
00A1 SETUP
00AB GCW
00BD MULRK
00BE MULBLANK
00C1 MMS
00D6 RSTRING
00D7 DBCELL
00DA BOOKBOOL
00DD SCENPROTECT
00E0 XF
00E1 INTERFACEHDR
00E2 INTERFACEEND
00E5 MERGEDCELLS
00E9 BITMAP
00EB MSO_DRAWING_GROUP
00EC MSO_DRAWING
00ED MSO_DRAWING_SELECTION
00EF PHONETIC
00FC SST
00FD LABELSST
00FF EXTSST
013D TABID
015F LABELRANGES
0160 USESELFS
0161 DSF
01AE SUPBOOK
01AF PROTECTIONREV4
01B0 CONDFMT
01B1 CF
01B2 DVAL
01B6 TXO
01B7 REFRESHALL
01B8 HLINK
01BC PASSWORDREV4
01BE DV
01C0 XL9FILE
01C1 RECALCID
0200 DIMENSIONS
0201 BLANK
0203 NUMBER
0204 LABEL
0205 BOOLERR
0206 FORMULA_B3
0207 STRING
0208 ROW
0209 BOF
020B INDEX_B3+
0218 NAME
0221 ARRAY
0223 EXTERNNAME_B3-4
0225 DEFAULTROWHEIGHT
0231 FONT_B3B4
0236 TABLEOP
023E WINDOW2
0243 XF_B3
027E RK
0293 STYLE
0406 FORMULA_B4
0409 BOF
041E FORMAT
0443 XF_B4
04BC SHRFMLA
0800 QUICKTIP
0809 BOF
0862 SHEETLAYOUT
0867 SHEETPROTECTION
0868 RANGEPROTECTION
"""
biff_rec_name_dict = {}
for _buff in _brecstrg.splitlines():
_numh, _name = _buff.split()
biff_rec_name_dict[int(_numh, 16)] = _name
del _buff, _name, _brecstrg
def hex_char_dump(strg, ofs, dlen, base=0, fout=sys.stdout, unnumbered=False):
endpos = min(ofs + dlen, len(strg))
pos = ofs
numbered = not unnumbered
num_prefix = ''
while pos < endpos:
endsub = min(pos + 16, endpos)
substrg = strg[pos:endsub]
lensub = endsub - pos
if lensub <= 0 or lensub != len(substrg):
fprintf(
sys.stdout,
'??? hex_char_dump: ofs=%d dlen=%d base=%d -> endpos=%d pos=%d endsub=%d substrg=%r\n',
ofs, dlen, base, endpos, pos, endsub, substrg)
break
hexd = ''.join("%02x " % BYTES_ORD(c) for c in substrg)
chard = ''
for c in substrg:
c = chr(BYTES_ORD(c))
if c == '\0':
c = '~'
elif not (' ' <= c <= '~'):
c = '?'
chard += c
if numbered:
num_prefix = "%5d: " % (base+pos-ofs)
fprintf(fout, "%s %-48s %s\n", num_prefix, hexd, chard)
pos = endsub
def biff_dump(mem, stream_offset, stream_len, base=0, fout=sys.stdout, unnumbered=False):
pos = stream_offset
stream_end = stream_offset + stream_len
adj = base - stream_offset
dummies = 0
numbered = not unnumbered
num_prefix = ''
while stream_end - pos >= 4:
rc, length = unpack('<HH', mem[pos:pos+4])
if rc == 0 and length == 0:
if mem[pos:] == b'\0' * (stream_end - pos):
dummies = stream_end - pos
savpos = pos
pos = stream_end
break
if dummies:
dummies += 4
else:
savpos = pos
dummies = 4
pos += 4
else:
if dummies:
if numbered:
num_prefix = "%5d: " % (adj + savpos)
fprintf(fout, "%s---- %d zero bytes skipped ----\n", num_prefix, dummies)
dummies = 0
recname = biff_rec_name_dict.get(rc, '<UNKNOWN>')
if numbered:
num_prefix = "%5d: " % (adj + pos)
fprintf(fout, "%s%04x %s len = %04x (%d)\n", num_prefix, rc, recname, length, length)
pos += 4
hex_char_dump(mem, pos, length, adj+pos, fout, unnumbered)
pos += length
if dummies:
if numbered:
num_prefix = "%5d: " % (adj + savpos)
fprintf(fout, "%s---- %d zero bytes skipped ----\n", num_prefix, dummies)
if pos < stream_end:
if numbered:
num_prefix = "%5d: " % (adj + pos)
fprintf(fout, "%s---- Misc bytes at end ----\n", num_prefix)
hex_char_dump(mem, pos, stream_end-pos, adj + pos, fout, unnumbered)
elif pos > stream_end:
fprintf(fout, "Last dumped record has length (%d) that is too large\n", length)
def biff_count_records(mem, stream_offset, stream_len, fout=sys.stdout):
pos = stream_offset
stream_end = stream_offset + stream_len
tally = {}
while stream_end - pos >= 4:
rc, length = unpack('<HH', mem[pos:pos+4])
if rc == 0 and length == 0:
if mem[pos:] == b'\0' * (stream_end - pos):
break
recname = "<Dummy (zero)>"
else:
recname = biff_rec_name_dict.get(rc, None)
if recname is None:
recname = "Unknown_0x%04X" % rc
if recname in tally:
tally[recname] += 1
else:
tally[recname] = 1
pos += length + 4
slist = sorted(tally.items())
for recname, count in slist:
print("%8d %s" % (count, recname), file=fout)
encoding_from_codepage = {
1200 : 'utf_16_le',
10000: 'mac_roman',
10006: 'mac_greek', # guess
10007: 'mac_cyrillic', # guess
10029: 'mac_latin2', # guess
10079: 'mac_iceland', # guess
10081: 'mac_turkish', # guess
32768: 'mac_roman',
32769: 'cp1252',
}
# some more guessing, for Indic scripts
# codepage 57000 range:
# 2 Devanagari [0]
# 3 Bengali [1]
# 4 Tamil [5]
# 5 Telegu [6]
# 6 Assamese [1] c.f. Bengali
# 7 Oriya [4]
# 8 Kannada [7]
# 9 Malayalam [8]
# 10 Gujarati [3]
# 11 Gurmukhi [2]
|
BaseObject
|
python
|
openai__openai-python
|
tests/test_transform.py
|
{
"start": 2907,
"end": 3003
}
|
class ____(TypedDict):
foo: Annotated[Union[Bar4, List[Baz4]], PropertyInfo(alias="FOO")]
|
Foo5
|