| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
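Each row below pairs a Python class body whose name is masked as `____` (the `source` cell) with the masked name (the `target` cell); `class_span` records the offsets of the class within the file at `path` in `repo`. As a minimal sketch of how a row might be consumed, assuming the rows are served through the `datasets` library and that the span offsets are character positions; the dataset identifier below is a placeholder, not the real one:

```python
from datasets import load_dataset

# Hypothetical dataset path; substitute the actual identifier.
ds = load_dataset("org/masked-class-names", split="train")

row = ds[0]
span = row["class_span"]  # e.g. {"start": 14237, "end": 14478}; assumed character offsets
print(row["language"], row["repo"], row["path"], span["start"], span["end"])

# Restore the masked class name to reconstruct the original snippet.
reconstructed = row["source"].replace("____", row["target"], 1)
print(reconstructed.splitlines()[0])
```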
python | openai__openai-python | src/openai/resources/audio/translations.py | {"start": 14237, "end": 14478} |
class ____:
    def __init__(self, translations: Translations) -> None:
        self._translations = translations

        self.create = to_streamed_response_wrapper(
            translations.create,
        )
| TranslationsWithStreamingResponse |
python | pydantic__pydantic | tests/test_plugin_loader.py | {"start": 149, "end": 339} |
class ____:
    def __init__(self, name, value, group):
        self.name = name
        self.value = value
        self.group = group

    def load(self):
        return self.value
| EntryPoint |
python | django__django | django/contrib/gis/geos/libgeos.py | {"start": 3585, "end": 3867} |
class ____(Structure):
    pass


# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)

lgeos = SimpleLazyObject(load_geos)
| GEOSContextHandle_t |
python | realpython__materials | django-diary/source_code_step_6/entries/views.py | {"start": 653, "end": 918} |
class ____(SuccessMessageMixin, UpdateView):
    model = Entry
    fields = ["title", "content"]
    success_message = "Your entry was updated!"

    def get_success_url(self):
        return reverse_lazy("entry-detail", kwargs={"pk": self.object.pk})
| EntryUpdateView |
python | allegroai__clearml | clearml/backend_api/services/v2_23/models.py | {"start": 93045, "end": 93913} |
class ____(Request):
    """
    Gets model information
    :param model: Model id
    :type model: str
    """

    _service = "models"
    _action = "get_by_id"
    _version = "2.23"
    _schema = {
        "definitions": {},
        "properties": {"model": {"description": "Model id", "type": "string"}},
        "required": ["model"],
        "type": "object",
    }

    def __init__(self, model: str, **kwargs: Any) -> None:
        super(GetByIdRequest, self).__init__(**kwargs)
        self.model = model

    @schema_property("model")
    def model(self) -> str:
        return self._property_model

    @model.setter
    def model(self, value: str) -> None:
        if value is None:
            self._property_model = None
            return
        self.assert_isinstance(value, "model", six.string_types)
        self._property_model = value
| GetByIdRequest |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {"start": 24220, "end": 24467} |
class ____(AOTOutput):
    """The final offset from the functionalized RNG calls, backward only"""

    def expr(self) -> str:
        return "__philox_updated_backward_offset"


@dataclasses.dataclass(frozen=True)
| PhiloxUpdatedBackwardOffsetAOTOutput |
python | Textualize__textual | src/textual/_animator.py | {"start": 5296, "end": 7272} |
class ____:
    def __init__(self, animator: Animator, obj: object) -> None:
        self._animator = animator
        self._obj = obj

    def __call__(
        self,
        attribute: str,
        value: str | float | Animatable,
        *,
        final_value: object = ...,
        duration: float | None = None,
        speed: float | None = None,
        delay: float = 0.0,
        easing: EasingFunction | str = DEFAULT_EASING,
        on_complete: CallbackType | None = None,
        level: AnimationLevel = "full",
    ) -> None:
        """Animate an attribute.

        Args:
            attribute: Name of the attribute to animate.
            value: The value to animate to.
            final_value: The final value of the animation. Defaults to `value` if not set.
            duration: The duration (in seconds) of the animation.
            speed: The speed of the animation.
            delay: A delay (in seconds) before the animation starts.
            easing: An easing method.
            on_complete: A callable to invoke when the animation is finished.
            level: Minimum level required for the animation to take place (inclusive).
        """
        start_value = getattr(self._obj, attribute)
        if isinstance(value, str) and hasattr(start_value, "parse"):
            # Color and Scalar have a parse method
            # I'm exploiting a coincidence here, but I think this should be a first-class concept
            # TODO: add a `Parsable` protocol
            value = start_value.parse(value)
        easing_function = EASING[easing] if isinstance(easing, str) else easing
        return self._animator.animate(
            self._obj,
            attribute=attribute,
            value=value,
            final_value=final_value,
            duration=duration,
            speed=speed,
            delay=delay,
            easing=easing_function,
            on_complete=on_complete,
            level=level,
        )
| BoundAnimator |
python | getsentry__sentry | src/sentry/api/bases/organization.py | {"start": 12097, "end": 12842} |
class ____(TypedDict):
    start: datetime
    end: datetime
    project_id: list[int]
    project_objects: list[Project]
    organization_id: int
    environment: NotRequired[list[str]]
    environment_objects: NotRequired[list[Environment]]


def _validate_fetched_projects(
    filtered_projects: Sequence[Project],
    slugs: set[str] | None,
    ids: set[int] | None,
) -> None:
    """
    Validates that user has access to the specific projects they are requesting.
    """
    missing_project_ids = ids and ids != {p.id for p in filtered_projects}
    missing_project_slugs = slugs and slugs != {p.slug for p in filtered_projects}

    if missing_project_ids or missing_project_slugs:
        raise PermissionDenied
| FilterParamsDateNotNull |
python | getsentry__sentry | tests/sentry/backup/test_rpc.py | {"start": 1462, "end": 12559} |
class ____(TestCase):
    """
    Ensure that retries don't duplicate writes.
    """

    def test_good_local_retry_idempotent(self) -> None:
        # If the response gets lost on the way to the caller, it will try again. Make sure it is
        # clever enough to not try to write the data twice if it's already been committed.
        import_uuid = str(uuid4().hex)

        option_count = Option.objects.count()
        import_chunk_count = RegionImportChunk.objects.count()

        def verify_option_write() -> RegionImportChunk:
            result = import_export_service.import_by_model(
                import_model_name="sentry.option",
                scope=RpcImportScope.Global,
                flags=RpcImportFlags(import_uuid=import_uuid),
                filter_by=[],
                pk_map=RpcPrimaryKeyMap(),
                json_data="""
                [
                    {
                        "model": "sentry.option",
                        "pk": 5,
                        "fields": {
                            "key": "foo",
                            "last_updated": "2023-06-22T00:00:00.000Z",
                            "last_updated_by": "unknown",
                            "value": "bar"
                        }
                    }
                ]
                """,
                min_ordinal=1,
            )
            assert isinstance(result, RpcImportOk)
            assert result.min_ordinal == 1
            assert result.max_ordinal == 1
            assert result.min_source_pk == 5
            assert result.max_source_pk == 5
            assert result.min_inserted_pk == result.max_inserted_pk

            mapping = result.mapped_pks.from_rpc().mapping[str(OPTION_MODEL_NAME)]
            assert len(mapping) == 1
            assert mapping.get(5, None) is not None

            assert Option.objects.count() == option_count + 1
            assert RegionImportChunk.objects.count() == import_chunk_count + 1

            import_chunk = RegionImportChunk.objects.get(import_uuid=import_uuid)
            assert import_chunk.min_ordinal == 1
            assert import_chunk.max_ordinal == 1
            assert import_chunk.min_source_pk == 5
            assert import_chunk.max_source_pk == 5
            assert import_chunk.min_inserted_pk == import_chunk.max_inserted_pk
            assert len(import_chunk.inserted_map) == 1
            assert len(import_chunk.existing_map) == 0
            assert len(import_chunk.overwrite_map) == 0

            existing_import_chunk = get_existing_import_chunk(
                OPTION_MODEL_NAME,
                ImportFlags(import_uuid=import_uuid),
                RegionImportChunk,
                1,
            )
            assert existing_import_chunk is not None
            mapping = existing_import_chunk.mapped_pks.from_rpc().mapping[str(OPTION_MODEL_NAME)]
            assert len(mapping) == 1
            assert mapping.get(5, None) is not None

            return import_chunk

        # Doing the write twice should produce identical results from the sender's point of view,
        # and should not result in multiple `RegionImportChunk`s being written.
        assert verify_option_write() == verify_option_write()

    def test_good_remote_retry_idempotent(self) -> None:
        # If the response gets lost on the way to the caller, it will try again. Make sure it is
        # clever enough to not try to write the data twice if it's already been committed.
        import_uuid = str(uuid4().hex)

        with assume_test_silo_mode(SiloMode.CONTROL):
            control_option_count = ControlOption.objects.count()
            import_chunk_count = ControlImportChunk.objects.count()

        def verify_control_option_write() -> ControlImportChunk:
            result = import_export_service.import_by_model(
                import_model_name="sentry.controloption",
                scope=RpcImportScope.Global,
                flags=RpcImportFlags(import_uuid=import_uuid),
                filter_by=[],
                pk_map=RpcPrimaryKeyMap(),
                json_data="""
                [
                    {
                        "model": "sentry.controloption",
                        "pk": 7,
                        "fields": {
                            "key": "foo",
                            "last_updated": "2023-06-22T00:00:00.000Z",
                            "last_updated_by": "unknown",
                            "value": "bar"
                        }
                    }
                ]
                """,
                min_ordinal=1,
            )
            assert isinstance(result, RpcImportOk)
            assert result.min_ordinal == 1
            assert result.max_ordinal == 1
            assert result.min_source_pk == 7
            assert result.max_source_pk == 7
            assert result.min_inserted_pk == result.max_inserted_pk
            assert len(result.mapped_pks.from_rpc().mapping[str(CONTROL_OPTION_MODEL_NAME)]) == 1

            with assume_test_silo_mode(SiloMode.CONTROL):
                assert ControlOption.objects.count() == control_option_count + 1
                assert ControlImportChunk.objects.count() == import_chunk_count + 1

                import_chunk = ControlImportChunk.objects.get(import_uuid=import_uuid)
                assert import_chunk.min_ordinal == 1
                assert import_chunk.max_ordinal == 1
                assert import_chunk.min_source_pk == 7
                assert import_chunk.max_source_pk == 7
                assert import_chunk.min_inserted_pk == import_chunk.max_inserted_pk
                assert len(import_chunk.inserted_map) == 1
                assert len(import_chunk.existing_map) == 0
                assert len(import_chunk.overwrite_map) == 0

                existing_import_chunk = get_existing_import_chunk(
                    CONTROL_OPTION_MODEL_NAME,
                    ImportFlags(import_uuid=import_uuid),
                    ControlImportChunk,
                    1,
                )
                assert existing_import_chunk is not None
                mapping = existing_import_chunk.mapped_pks.from_rpc().mapping[
                    str(CONTROL_OPTION_MODEL_NAME)
                ]
                assert len(mapping) == 1
                assert mapping.get(7, None) is not None

                return import_chunk

        # Doing the write twice should produce identical results from the sender's point of view,
        # and should not result in multiple `ControlImportChunk`s being written.
        assert verify_control_option_write() == verify_control_option_write()

    # This is a bit of a hacky way in which to "simulate" a race that occurs between when we first
    # try to detect the duplicate chunk and when we try to send our actual write.
    def test_good_handles_racing_imports(self) -> None:
        mock_call_count = 0

        # First call returns `None`, but then, by the time we get around to trying to commit the
        # atomic transaction, another mocked concurrent process has written the same chunk. We
        # should handle this gracefully by going and getting that chunk instead.
        def wrapped_get_existing_import_chunk(
            model_name: NormalizedModelName,
            flags: ImportFlags,
            import_chunk_type: type[models.base.Model],
            min_ordinal: int,
        ) -> RpcImportOk | None:
            nonlocal mock_call_count
            mock_call_count += 1
            if mock_call_count > 1:
                return get_existing_import_chunk(model_name, flags, import_chunk_type, min_ordinal)
            return None

        with patch(
            "sentry.backup.services.import_export.impl.get_existing_import_chunk",
            MagicMock(side_effect=wrapped_get_existing_import_chunk),
        ) as get_existing_import_chunk_mock:
            import_uuid = str(uuid4().hex)

            with assume_test_silo_mode(SiloMode.CONTROL):
                import_chunk_count = ControlImportChunk.objects.count()
                ControlImportChunk.objects.create(
                    import_uuid=import_uuid,
                    model="sentry.controloption",
                    min_ordinal=1,
                    max_ordinal=1,
                    min_source_pk=9,
                    max_source_pk=9,
                    min_inserted_pk=123,
                    max_inserted_pk=123,
                    inserted_map={9: 123},
                )

            result = import_export_service.import_by_model(
                import_model_name="sentry.controloption",
                scope=RpcImportScope.Global,
                flags=RpcImportFlags(import_uuid=import_uuid),
                filter_by=[],
                pk_map=RpcPrimaryKeyMap(),
                json_data="""
                [
                    {
                        "model": "sentry.controloption",
                        "pk": 9,
                        "fields": {
                            "key": "foo",
                            "last_updated": "2023-06-22T00:00:00.000Z",
                            "last_updated_by": "unknown",
                            "value": "bar"
                        }
                    }
                ]
                """,
                min_ordinal=1,
            )

            assert get_existing_import_chunk_mock.call_count == 2

            assert isinstance(result, RpcImportOk)
            assert result.min_ordinal == 1
            assert result.max_ordinal == 1
            assert result.min_source_pk == 9
            assert result.max_source_pk == 9
            assert result.min_inserted_pk == result.max_inserted_pk
            assert len(result.mapped_pks.from_rpc().mapping[str(CONTROL_OPTION_MODEL_NAME)]) == 1

            with assume_test_silo_mode(SiloMode.CONTROL):
                import_chunk = ControlImportChunk.objects.get(import_uuid=import_uuid)
                assert import_chunk.min_ordinal == 1
                assert import_chunk.max_ordinal == 1
                assert import_chunk.min_source_pk == 9
                assert import_chunk.max_source_pk == 9
                assert import_chunk.min_inserted_pk == import_chunk.max_inserted_pk
                assert len(import_chunk.inserted_map) == 1
                assert len(import_chunk.existing_map) == 0
                assert len(import_chunk.overwrite_map) == 0

                existing_import_chunk = get_existing_import_chunk(
                    CONTROL_OPTION_MODEL_NAME,
                    ImportFlags(import_uuid=import_uuid),
                    ControlImportChunk,
                    1,
                )
                assert existing_import_chunk is not None
                mapping = existing_import_chunk.mapped_pks.from_rpc().mapping[
                    str(CONTROL_OPTION_MODEL_NAME)
                ]
                assert len(mapping) == 1
                assert mapping.get(9, None) is not None

                assert ControlImportChunk.objects.count() == import_chunk_count + 1


@no_silo_test
| RpcImportRetryTests |
python | instagram__MonkeyType | demo/models.py | {"start": 2967, "end": 3224} |
class ____(Generic[T]):
    type: EventType

    def __init__(self, repo: RepoInterface) -> None:
        self.repo = repo

    def add(self, event: T) -> None:
        pass

    def aggregate(self) -> List[AggregatedItem]:
        return []
| AggregatorInterface |
python | networkx__networkx | networkx/algorithms/tree/tests/test_mst.py | {"start": 17454, "end": 28157} |
class ____:
    """
    Uses the same graph as the above class but with an added edge of twice the weight.
    """

    def setup_method(self):
        # New graph
        edges = [
            (0, 1, 5),
            (0, 1, 10),
            (1, 2, 4),
            (1, 2, 8),
            (1, 4, 6),
            (1, 4, 12),
            (2, 3, 5),
            (2, 3, 10),
            (2, 4, 7),
            (2, 4, 14),
            (3, 4, 3),
            (3, 4, 6),
        ]
        self.G = nx.MultiGraph()
        self.G.add_weighted_edges_from(edges)
        # There are 128 trees. I'd rather not list all 128 here, and computing them
        # on such a small graph actually doesn't take that long.
        from itertools import combinations

        self.spanning_trees = []
        for e in combinations(self.G.edges, 4):
            tree = self.G.edge_subgraph(e)
            if nx.is_tree(tree):
                self.spanning_trees.append(sorted(tree.edges(keys=True, data=True)))

    def test_minimum_spanning_tree_iterator_multigraph(self):
        """
        Tests that the spanning trees are correctly returned in increasing order
        """
        tree_index = 0
        last_weight = 0
        for tree in nx.SpanningTreeIterator(self.G):
            actual = sorted(tree.edges(keys=True, data=True))
            weight = sum([e[3]["weight"] for e in actual])
            assert actual in self.spanning_trees
            assert weight >= last_weight
            tree_index += 1

    def test_maximum_spanning_tree_iterator_multigraph(self):
        """
        Tests that the spanning trees are correctly returned in decreasing order
        """
        tree_index = 127
        # Maximum weight tree is 46
        last_weight = 50
        for tree in nx.SpanningTreeIterator(self.G, minimum=False):
            actual = sorted(tree.edges(keys=True, data=True))
            weight = sum([e[3]["weight"] for e in actual])
            assert actual in self.spanning_trees
            assert weight <= last_weight
            tree_index -= 1


def test_random_spanning_tree_multiplicative_small():
    """
    Using a fixed seed, sample one tree for repeatability.
    """
    from math import exp

    pytest.importorskip("scipy")

    gamma = {
        (0, 1): -0.6383,
        (0, 2): -0.6827,
        (0, 5): 0,
        (1, 2): -1.0781,
        (1, 4): 0,
        (2, 3): 0,
        (5, 3): -0.2820,
        (5, 4): -0.3327,
        (4, 3): -0.9927,
    }
    # The undirected support of gamma
    G = nx.Graph()
    for u, v in gamma:
        G.add_edge(u, v, lambda_key=exp(gamma[(u, v)]))

    solution_edges = [(2, 3), (3, 4), (0, 5), (5, 4), (4, 1)]
    solution = nx.Graph()
    solution.add_edges_from(solution_edges)

    sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=42)
    assert nx.utils.edges_equal(solution.edges, sampled_tree.edges)


@pytest.mark.slow
def test_random_spanning_tree_multiplicative_large():
    """
    Sample many trees from the distribution created in the last test
    """
    from math import exp
    from random import Random

    pytest.importorskip("numpy")
    stats = pytest.importorskip("scipy.stats")

    gamma = {
        (0, 1): -0.6383,
        (0, 2): -0.6827,
        (0, 5): 0,
        (1, 2): -1.0781,
        (1, 4): 0,
        (2, 3): 0,
        (5, 3): -0.2820,
        (5, 4): -0.3327,
        (4, 3): -0.9927,
    }
    # The undirected support of gamma
    G = nx.Graph()
    for u, v in gamma:
        G.add_edge(u, v, lambda_key=exp(gamma[(u, v)]))

    # Find the multiplicative weight for each tree.
    total_weight = 0
    tree_expected = {}
    for t in nx.SpanningTreeIterator(G):
        # Find the multiplicative weight of the spanning tree
        weight = 1
        for u, v, d in t.edges(data="lambda_key"):
            weight *= d
        tree_expected[t] = weight
        total_weight += weight

    # Assert that every tree has an entry in the expected distribution
    assert len(tree_expected) == 75

    # Set the sample size and then calculate the expected number of times we
    # expect to see each tree. This test uses a near minimum sample size where
    # the most unlikely tree has an expected frequency of 5.15.
    # (Minimum required is 5)
    #
    # Here we also initialize the tree_actual dict so that we know the keys
    # match between the two. We will later take advantage of the fact that since
    # python 3.7 dict order is guaranteed so the expected and actual data will
    # have the same order.
    sample_size = 1200
    tree_actual = {}
    for t in tree_expected:
        tree_expected[t] = (tree_expected[t] / total_weight) * sample_size
        tree_actual[t] = 0

    # Sample the spanning trees
    #
    # Assert that they are actually trees and record which of the 75 trees we
    # have sampled.
    #
    # For repeatability, we want to take advantage of the decorators in NetworkX
    # to randomly sample the same sample each time. However, if we pass in a
    # constant seed to sample_spanning_tree we will get the same tree each time.
    # Instead, we can create our own random number generator with a fixed seed
    # and pass those into sample_spanning_tree.
    rng = Random(37)
    for _ in range(sample_size):
        sampled_tree = nx.random_spanning_tree(G, "lambda_key", seed=rng)
        assert nx.is_tree(sampled_tree)

        for t in tree_expected:
            if nx.utils.edges_equal(t.edges, sampled_tree.edges):
                tree_actual[t] += 1
                break

    # Conduct a Chi squared test to see if the actual distribution matches the
    # expected one at an alpha = 0.05 significance level.
    #
    # H_0: The distribution of trees in tree_actual matches the normalized product
    # of the edge weights in the tree.
    #
    # H_a: The distribution of trees in tree_actual follows some other
    # distribution of spanning trees.
    _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))

    # Assert that p is greater than the significance level so that we do not
    # reject the null hypothesis
    assert not p < 0.05


def test_random_spanning_tree_additive_small():
    """
    Sample a single spanning tree from the additive method.
    """
    pytest.importorskip("scipy")

    edges = {
        (0, 1): 1,
        (0, 2): 1,
        (0, 5): 3,
        (1, 2): 2,
        (1, 4): 3,
        (2, 3): 3,
        (5, 3): 4,
        (5, 4): 5,
        (4, 3): 4,
    }
    # Build the graph
    G = nx.Graph()
    for u, v in edges:
        G.add_edge(u, v, weight=edges[(u, v)])

    solution_edges = [(0, 2), (1, 2), (2, 3), (3, 4), (3, 5)]
    solution = nx.Graph()
    solution.add_edges_from(solution_edges)

    sampled_tree = nx.random_spanning_tree(
        G, weight="weight", multiplicative=False, seed=37
    )
    assert nx.utils.edges_equal(solution.edges, sampled_tree.edges)


@pytest.mark.slow
def test_random_spanning_tree_additive_large():
    """
    Sample many spanning trees from the additive method.
    """
    from random import Random

    pytest.importorskip("numpy")
    stats = pytest.importorskip("scipy.stats")

    edges = {
        (0, 1): 1,
        (0, 2): 1,
        (0, 5): 3,
        (1, 2): 2,
        (1, 4): 3,
        (2, 3): 3,
        (5, 3): 4,
        (5, 4): 5,
        (4, 3): 4,
    }
    # Build the graph
    G = nx.Graph()
    for u, v in edges:
        G.add_edge(u, v, weight=edges[(u, v)])

    # Find the additive weight for each tree.
    total_weight = 0
    tree_expected = {}
    for t in nx.SpanningTreeIterator(G):
        # Find the additive weight of the spanning tree
        weight = 0
        for u, v, d in t.edges(data="weight"):
            weight += d
        tree_expected[t] = weight
        total_weight += weight

    # Assert that every tree has an entry in the expected distribution
    assert len(tree_expected) == 75

    # Set the sample size and then calculate the expected number of times we
    # expect to see each tree. This test uses a near minimum sample size where
    # the most unlikely tree has an expected frequency of 5.07.
    # (Minimum required is 5)
    #
    # Here we also initialize the tree_actual dict so that we know the keys
    # match between the two. We will later take advantage of the fact that since
    # python 3.7 dict order is guaranteed so the expected and actual data will
    # have the same order.
    sample_size = 500
    tree_actual = {}
    for t in tree_expected:
        tree_expected[t] = (tree_expected[t] / total_weight) * sample_size
        tree_actual[t] = 0

    # Sample the spanning trees
    #
    # Assert that they are actually trees and record which of the 75 trees we
    # have sampled.
    #
    # For repeatability, we want to take advantage of the decorators in NetworkX
    # to randomly sample the same sample each time. However, if we pass in a
    # constant seed to sample_spanning_tree we will get the same tree each time.
    # Instead, we can create our own random number generator with a fixed seed
    # and pass those into sample_spanning_tree.
    rng = Random(37)
    for _ in range(sample_size):
        sampled_tree = nx.random_spanning_tree(
            G, "weight", multiplicative=False, seed=rng
        )
        assert nx.is_tree(sampled_tree)

        for t in tree_expected:
            if nx.utils.edges_equal(t.edges, sampled_tree.edges):
                tree_actual[t] += 1
                break

    # Conduct a Chi squared test to see if the actual distribution matches the
    # expected one at an alpha = 0.05 significance level.
    #
    # H_0: The distribution of trees in tree_actual matches the normalized sum
    # of the edge weights in the tree.
    #
    # H_a: The distribution of trees in tree_actual follows some other
    # distribution of spanning trees.
    _, p = stats.chisquare(list(tree_actual.values()), list(tree_expected.values()))

    # Assert that p is greater than the significance level so that we do not
    # reject the null hypothesis
    assert not p < 0.05


def test_random_spanning_tree_empty_graph():
    G = nx.Graph()
    rst = nx.tree.random_spanning_tree(G)
    assert len(rst.nodes) == 0
    assert len(rst.edges) == 0


def test_random_spanning_tree_single_node_graph():
    G = nx.Graph()
    G.add_node(0)
    rst = nx.tree.random_spanning_tree(G)
    assert len(rst.nodes) == 1
    assert len(rst.edges) == 0


def test_random_spanning_tree_single_node_loop():
    G = nx.Graph()
    G.add_node(0)
    G.add_edge(0, 0)
    rst = nx.tree.random_spanning_tree(G)
    assert len(rst.nodes) == 1
    assert len(rst.edges) == 0
| TestSpanningTreeMultiGraphIterator |
python | getsentry__sentry | src/sentry/sentry_metrics/consumers/indexer/slicing_router.py | {"start": 3063, "end": 5762} |
class ____(MessageRouter):
    """
    Router which works based on the settings defined for slicing.
    """

    def __init__(
        self,
        sliceable: Sliceable,
    ) -> None:
        self.__sliceable = sliceable
        self.__slice_to_producer: MutableMapping[int, MessageRoute] = {}

        _validate_slicing_config()
        _validate_slicing_consumer_config(self.__sliceable)

        for (
            current_sliceable,
            current_slice_id,
        ), configuration in settings.SLICED_KAFKA_TOPICS.items():
            producer_config = kafka_config.get_kafka_producer_cluster_options(
                configuration["cluster"]
            )
            producer_config["client.id"] = (
                f"sentry.sentry_metrics.slicing_router.{current_sliceable}.{current_slice_id}"
            )
            producer = get_confluent_producer(producer_config)
            self.__slice_to_producer[current_slice_id] = MessageRoute(
                producer=producer,
                topic=Topic(configuration["topic"]),
            )

        # All logical partitions should be routed to a slice ID that's present in the slice
        # ID to producer message route mapping
        assert set(settings.SENTRY_SLICING_CONFIG[sliceable].values()).issubset(
            self.__slice_to_producer.keys()
        ), f"Unknown slice ID in SENTRY_SLICING_CONFIG for {sliceable}"

    def get_all_producers(self) -> Sequence[Producer]:
        return [route.producer for route in self.__slice_to_producer.values()]

    def get_route_for_message(self, message: Message[RoutingPayload]) -> MessageRoute:
        """
        Get route for the message. The message will be routed based on the org_id
        present in the message payload header and how it maps to a specific
        slice.
        """
        org_id = message.payload.routing_header.get("org_id", None)
        if org_id is None:
            raise MissingOrgInRoutingHeader("org_id is missing from the routing header")
        else:
            slice_id = map_logical_partition_to_slice(
                self.__sliceable, map_org_id_to_logical_partition(org_id)
            )
            producer = self.__slice_to_producer[slice_id]
            return producer


def get_slicing_router(config: MetricsIngestConfiguration) -> SlicingRouter | None:
    if config.is_output_sliced:
        if config.use_case_id == UseCaseKey.PERFORMANCE:
            sliceable: Final = "generic_metrics"
        else:
            raise SlicingConfigurationException(
                f"Slicing not supported for {config.use_case_id}"
            )
        return SlicingRouter(sliceable=sliceable)
    else:
        return None
| SlicingRouter |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {"start": 15103, "end": 25409} |
class ____:
    # @pytest.mark.parametrize('dt', 'fdgFDG')
    # XXX: quietly remove float128 and complex256
    @pytest.mark.parametrize('dt', ['float32', 'float64', 'complex64', 'complex128'])
    @pytest.mark.parametrize('pairing, analog',
                             [('nearest', False),
                              ('keep_odd', False),
                              ('minimal', False),
                              ('minimal', True)])
    def test_dtypes(self, dt, pairing, analog, xp):
        dtype = getattr(xp, dt)
        # the poles have to be complex
        cdtype = (1j*xp.empty(0, dtype=dtype)).dtype
        z = xp.asarray([-1, -1], dtype=dtype)
        p = xp.asarray([0.57149 + 0.29360j, 0.57149 - 0.29360j], dtype=cdtype)
        k = xp.asarray(1, dtype=dtype)
        sos = zpk2sos(z, p, k, pairing=pairing, analog=analog)
        # octave & MATLAB
        sos2 = xp.asarray([[1, 2, 1, 1, -1.14298, 0.41280]], dtype=dtype)
        assert_array_almost_equal(sos, sos2, decimal=4)

    def test_basic(self, xp):
        for pairing in ('nearest', 'keep_odd'):
            #
            # Cases that match octave
            #
            z = xp.asarray([-1.0, -1.0])
            p = xp.asarray([0.57149 + 0.29360j, 0.57149 - 0.29360j])
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = xp.asarray([[1, 2, 1, 1, -1.14298, 0.41280]])  # octave & MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = xp.asarray([1j, -1j])
            p = xp.asarray([0.9, -0.9, 0.7j, -0.7j])
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 0, 1, 1, 0, +0.49],
                    [1, 0, 0, 1, 0, -0.81]]  # octave
            sos2 = xp.asarray(sos2)
            # sos2 = [[0, 0, 1, 1, -0.9, 0],
            #         [1, 0, 1, 1, 0.9, 0]]  # MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = xp.asarray([])
            p = xp.asarray([0.8, -0.5+0.25j, -0.5-0.25j])
            k = 1.
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1., 0., 0., 1., 1., 0.3125],
                    [1., 0., 0., 1., -0.8, 0.]]  # octave, MATLAB fails
            sos2 = xp.asarray(sos2)
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = xp.asarray([1., 1., 0.9j, -0.9j])
            p = xp.asarray([0.99+0.01j, 0.99-0.01j, 0.1+0.9j, 0.1-0.9j])
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1, 0, 0.81, 1, -0.2, 0.82],
                    [1, -2, 1, 1, -1.98, 0.9802]]  # octave
            sos2 = xp.asarray(sos2)
            # sos2 = [[1, -2, 1, 1, -0.2, 0.82],
            #         [1, 0, 0.81, 1, -1.98, 0.9802]]  # MATLAB
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = xp.asarray([0.9+0.1j, 0.9-0.1j, -0.9])
            p = xp.asarray([0.75+0.25j, 0.75-0.25j, 0.9])
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            if pairing == 'keep_odd':
                sos2 = [[1, -1.8, 0.82, 1, -1.5, 0.625],
                        [1, 0.9, 0, 1, -0.9, 0]]  # octave; MATLAB fails
                sos2 = xp.asarray(sos2)
                assert_array_almost_equal(sos, sos2, decimal=4)
            else:  # pairing == 'nearest'
                sos2 = [[1, 0.9, 0, 1, -1.5, 0.625],
                        [1, -1.8, 0.82, 1, -0.9, 0]]  # our algorithm
                sos2 = xp.asarray(sos2)
                assert_array_almost_equal(sos, sos2, decimal=4)

            #
            # Cases that differ from octave:
            #
            z = [-0.3090 + 0.9511j, -0.3090 - 0.9511j, 0.8090 + 0.5878j,
                 +0.8090 - 0.5878j, -1.0000 + 0.0000j]
            p = [-0.3026 + 0.9312j, -0.3026 - 0.9312j, 0.7922 + 0.5755j,
                 +0.7922 - 0.5755j, -0.9791 + 0.0000j]
            z = xp.asarray(z)
            p = xp.asarray(p)
            k = 1
            sos = zpk2sos(z, p, k, pairing=pairing)
            # sos2 = [[1, 0.618, 1, 1, 0.6052, 0.95870],
            #         [1, -1.618, 1, 1, -1.5844, 0.95878],
            #         [1, 1, 0, 1, 0.9791, 0]]  # octave, MATLAB fails
            sos2 = [[1, 1, 0, 1, +0.97915, 0],
                    [1, 0.61803, 1, 1, +0.60515, 0.95873],
                    [1, -1.61803, 1, 1, -1.58430, 0.95873]]
            sos2 = xp.asarray(sos2)
            assert_array_almost_equal(sos, sos2, decimal=4)

            z = [-1 - 1.4142j, -1 + 1.4142j,
                 -0.625 - 1.0533j, -0.625 + 1.0533j]
            p = [-0.2 - 0.6782j, -0.2 + 0.6782j,
                 -0.1 - 0.5385j, -0.1 + 0.5385j]
            z = xp.asarray(z)
            p = xp.asarray(p)
            k = 4
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[4, 8, 12, 1, 0.2, 0.3],
                    [1, 1.25, 1.5, 1, 0.4, 0.5]]  # MATLAB
            sos2 = xp.asarray(sos2, dtype=xp.float64)
            # sos2 = [[4, 8, 12, 1, 0.4, 0.5],
            #         [1, 1.25, 1.5, 1, 0.2, 0.3]]  # octave
            xp_assert_close(sos, sos2, rtol=1e-4, atol=1e-4)

            z = xp.asarray([])
            p = xp.asarray([0.2, -0.5+0.25j, -0.5-0.25j])
            k = 1.
            sos = zpk2sos(z, p, k, pairing=pairing)
            sos2 = [[1., 0., 0., 1., -0.2, 0.],
                    [1., 0., 0., 1., 1., 0.3125]]
            sos2 = xp.asarray(sos2)
            # sos2 = [[1., 0., 0., 1., 1., 0.3125],
            #         [1., 0., 0., 1., -0.2, 0]]  # octave, MATLAB fails
            assert_array_almost_equal(sos, sos2, decimal=4)

            # The next two examples are adapted from Leland B. Jackson,
            # "Digital Filters and Signal Processing (1995) p.400:
            # http://books.google.com/books?id=VZ8uabI1pNMC&lpg=PA400&ots=gRD9pi8Jua&dq=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&pg=PA400#v=onepage&q=Pole%2Fzero%20pairing%20for%20minimum%20roundoff%20noise%20in%20BSF.&f=false
            deg2rad = xp.pi / 180.
            k = 1.

            # first example
            thetas = xp.asarray([22.5, 45, 77.5])
            mags = xp.asarray([0.8, 0.6, 0.9])
            z = xp.exp(1j * deg2rad * thetas)
            z = xp.concat((z, xp.conj(z)))
            p = xp.exp(1j * deg2rad * thetas) * mags
            p = xp.concat((p, xp.conj(p)))
            sos = zpk2sos(z, p, k)
            # sos2 = [[1, -0.43288, 1, 1, -0.38959, 0.81],  # octave,
            #         [1, -1.41421, 1, 1, -0.84853, 0.36],  # MATLAB fails
            #         [1, -1.84776, 1, 1, -1.47821, 0.64]]
            # Note that pole-zero pairing matches, but ordering is different
            sos2 = [[1, -1.41421, 1, 1, -0.84853, 0.36],
                    [1, -1.84776, 1, 1, -1.47821, 0.64],
                    [1, -0.43288, 1, 1, -0.38959, 0.81]]
            sos2 = xp.asarray(sos2)
            assert_array_almost_equal(sos, sos2, decimal=4)

            # second example
            thetas = xp.asarray([85., 10.])
            z = xp.exp(1j * deg2rad * thetas)
            z = xp.concat((z, xp.conj(z), xp.asarray([1.0, -1.0])))
            sos = zpk2sos(z, p, k)
            # sos2 = [[1, -0.17431, 1, 1, -0.38959, 0.81],  # octave "wrong",
            #         [1, -1.96962, 1, 1, -0.84853, 0.36],  # MATLAB fails
            #         [1, 0, -1, 1, -1.47821, 0.64000]]
            # Our pole-zero pairing matches the text, Octave does not
            sos2 = [[1, 0, -1, 1, -0.84853, 0.36],
                    [1, -1.96962, 1, 1, -1.47821, 0.64],
                    [1, -0.17431, 1, 1, -0.38959, 0.81]]
            sos2 = xp.asarray(sos2)
            assert_array_almost_equal(sos, sos2, decimal=4)

    # these examples are taken from the doc string, and show the
    # effect of the 'pairing' argument
    @pytest.mark.parametrize('pairing, sos',
                             [('nearest',
                               np.array([[1., 1., 0.5, 1., -0.75, 0.],
                                         [1., 1., 0., 1., -1.6, 0.65]])),
                              ('keep_odd',
                               np.array([[1., 1., 0, 1., -0.75, 0.],
                                         [1., 1., 0.5, 1., -1.6, 0.65]])),
                              ('minimal',
                               np.array([[0., 1., 1., 0., 1., -0.75],
                                         [1., 1., 0.5, 1., -1.6, 0.65]]))])
    def test_pairing(self, pairing, sos, xp):
        sos = xp.asarray(sos)
        z1 = xp.asarray([-1, -0.5-0.5j, -0.5+0.5j])
        p1 = xp.asarray([0.75, 0.8+0.1j, 0.8-0.1j])
        sos2 = zpk2sos(z1, p1, 1, pairing=pairing)
        assert_array_almost_equal(sos, sos2, decimal=4)

    @pytest.mark.parametrize('p, sos_dt',
                             [([-1, 1, -0.1, 0.1],
                               [[0., 0., 1., 1., 0., -0.01],
                                [0., 0., 1., 1., 0., -1]]),
                              ([-0.7071+0.7071j, -0.7071-0.7071j, -0.1j, 0.1j],
                               [[0., 0., 1., 1., 0., 0.01],
                                [0., 0., 1., 1., 1.4142, 1.]])])
    def test_analog(self, p, sos_dt, xp):
        # test `analog` argument
        # for discrete time, poles closest to unit circle should appear last
        # for cont. time, poles closest to imaginary axis should appear last
        z, p = xp.asarray([]), xp.asarray(p)
        sos_dt = xp.asarray(sos_dt)
        sos2_dt = zpk2sos(z, p, 1, pairing='minimal', analog=False)
        sos2_ct = zpk2sos(z, p, 1, pairing='minimal', analog=True)
        assert_array_almost_equal(sos_dt, sos2_dt, decimal=4)
        assert_array_almost_equal(xp.flip(sos_dt, axis=0), sos2_ct, decimal=4)

    def test_bad_args(self):
        with pytest.raises(ValueError, match=r'pairing must be one of'):
            zpk2sos([1], [2], 1, pairing='no_such_pairing')

        with pytest.raises(ValueError, match=r'.*pairing must be "minimal"'):
            zpk2sos([1], [2], 1, pairing='keep_odd', analog=True)

        with pytest.raises(ValueError,
                           match=r'.*must have len\(p\)>=len\(z\)'):
            zpk2sos([1, 1], [2], 1, analog=True)

        with pytest.raises(ValueError, match=r'k must be real'):
            zpk2sos([1], [2], k=1j)


@make_xp_test_case(freqs)
| TestZpk2Sos |
python | getsentry__sentry | src/sentry/notifications/platform/templates/sample.py | {"start": 12321, "end": 14267} |
class ____(NotificationTemplate[TeamUpdateData]):
    category = NotificationCategory.DEBUG
    example_data = TeamUpdateData(
        team_name="Engineering",
        update_type="Weekly Standup Reminder",
        message="Don't forget about our weekly standup meeting tomorrow at 10 AM. Please prepare your updates on current sprint progress.",
        author="jane.smith@acme.com",
        timestamp="2024-01-15 16:45:00 UTC",
    )

    def render(self, data: TeamUpdateData) -> NotificationRenderedTemplate:
        return NotificationRenderedTemplate(
            subject=f"Team Update: {data.update_type}",
            body=[
                ParagraphBlock(
                    type=NotificationBodyFormattingBlockType.PARAGRAPH,
                    blocks=[
                        PlainTextBlock(
                            type=NotificationBodyTextBlockType.PLAIN_TEXT,
                            text=f"Team {data.team_name} has posted a {data.update_type} update. ",
                        )
                    ],
                ),
                ParagraphBlock(
                    type=NotificationBodyFormattingBlockType.PARAGRAPH,
                    blocks=[
                        PlainTextBlock(
                            type=NotificationBodyTextBlockType.PLAIN_TEXT,
                            text=f"Message: {data.message} ",
                        )
                    ],
                ),
                ParagraphBlock(
                    type=NotificationBodyFormattingBlockType.PARAGRAPH,
                    blocks=[
                        PlainTextBlock(
                            type=NotificationBodyTextBlockType.PLAIN_TEXT,
                            text=f"Posted by {data.author} at {data.timestamp}.",
                        )
                    ],
                ),
            ],
            footer="This is an informational update from your team.",
        )
| TeamUpdateNotificationTemplate |
python | mitmproxy__pdoc | test/testdata/flavors_google.py | {"start": 6148, "end": 12587} |
class ____(object):
    """The summary line for a class docstring should fit on one line.

    If the class has public attributes, they may be documented here
    in an ``Attributes`` section and follow the same formatting as a
    function's ``Args`` section. Alternatively, attributes may be documented
    inline with the attribute's declaration (see __init__ method below).

    Properties created with the ``@property`` decorator should be documented
    in the property's getter method.

    Attributes:
        attr1 (str): Description of `attr1`.
        attr2 (:obj:`int`, optional): Description of `attr2`.
    """

    def __init__(self, param1, param2, param3):
        """Example of docstring on the __init__ method.

        The __init__ method may be documented in either the class level
        docstring, or as a docstring on the __init__ method itself.
        Either form is acceptable, but the two should not be mixed. Choose one
        convention to document the __init__ method and be consistent with it.

        Note:
            Do not include the `self` parameter in the ``Args`` section.

        Args:
            param1 (str): Description of `param1`.
            param2 (:obj:`int`, optional): Description of `param2`. Multiple
                lines are supported.
            param3 (:obj:`list` of :obj:`str`): Description of `param3`.
        """
        self.attr1 = param1
        self.attr2 = param2
        self.attr3 = param3  #: Doc comment *inline* with attribute

        #: list of str: Doc comment *before* attribute, with type specified
        self.attr4 = ['attr4']

        self.attr5 = None
        """str: Docstring *after* attribute, with type specified."""

    @property
    def readonly_property(self):
        """str: Properties should be documented in their getter method."""
        return 'readonly_property'

    @property
    def readwrite_property(self):
        """:obj:`list` of :obj:`str`: Properties with both a getter and setter
        should only be documented in their getter method.

        If the setter method contains notable behavior, it should be
        mentioned here.
        """
        return ['readwrite_property']

    @readwrite_property.setter
    def readwrite_property(self, value):
        value

    def example_method(self, param1, param2):
        """Class methods are similar to regular functions.

        Note:
            Do not include the `self` parameter in the ``Args`` section.

        Args:
            param1: The first parameter.
            param2: The second parameter.

        Returns:
            True if successful, False otherwise.
        """
        return True

    def __special__(self):
        """By default special members with docstrings are not included.

        Special members are any methods or attributes that start with and
        end with a double underscore. Any special member with a docstring
        will be included in the output, if
        ``napoleon_include_special_with_doc`` is set to True.

        This behavior can be enabled by changing the following setting in
        Sphinx's conf.py::

            napoleon_include_special_with_doc = True
        """
        pass

    def __special_without_docstring__(self):
        pass

    def _private(self):
        """By default private members are not included.

        Private members are any methods or attributes that start with an
        underscore and are *not* special. By default they are not included
        in the output.

        This behavior can be changed such that private members *are* included
        by changing the following setting in Sphinx's conf.py::

            napoleon_include_private_with_doc = True
        """
        pass

    def _private_without_docstring(self):
        pass


def fetch_smalltable_rows(table_handle: Any,
                          keys: Sequence[str],
                          require_all_keys: bool = False,
                          ) -> Mapping[bytes, Tuple[str]]:
    """Fetches rows from a Smalltable.

    Retrieves rows pertaining to the given keys from the Table instance
    represented by table_handle. String keys will be UTF-8 encoded.

    Args:
        table_handle: An open smalltable.Table instance.
        keys: A sequence of strings representing the key of each table
            row to fetch. String keys will be UTF-8 encoded.
        require_all_keys: Optional; If require_all_keys is True only
            rows with values set for all keys will be returned.

    Returns:
        A dict mapping keys to the corresponding table row data
        fetched. Each row is represented as a tuple of strings. For
        example:

        {b'Serak': ('Rigel VII', 'Preparer'),
         b'Zim': ('Irk', 'Invader'),
         b'Lrrr': ('Omicron Persei 8', 'Emperor')}

        Returned keys are always bytes. If a key from the keys argument is
        missing from the dictionary, then that row was not found in the
        table (and require_all_keys must have been False).

    Raises:
        IOError: An error occurred accessing the smalltable.
    """
    raise NotImplementedError


def fetch_smalltable_rows2(table_handle: Any,
                           keys: Sequence[str],
                           require_all_keys: bool = False,
                           ) -> Mapping[bytes, Tuple[str]]:
    """Fetches rows from a Smalltable.

    Retrieves rows pertaining to the given keys from the Table instance
    represented by table_handle. String keys will be UTF-8 encoded.

    Args:
        table_handle:
            An open smalltable.Table instance.
        keys:
            A sequence of strings representing the key of each table row to
            fetch. String keys will be UTF-8 encoded.
        require_all_keys:
            Optional; If require_all_keys is True only rows with values set
            for all keys will be returned.

    Returns:
        A dict mapping keys to the corresponding table row data
        fetched. Each row is represented as a tuple of strings. For
        example:

        {b'Serak': ('Rigel VII', 'Preparer'),
         b'Zim': ('Irk', 'Invader'),
         b'Lrrr': ('Omicron Persei 8', 'Emperor')}

        Returned keys are always bytes. If a key from the keys argument is
        missing from the dictionary, then that row was not found in the
        table (and require_all_keys must have been False).

    Raises:
        IOError: An error occurred accessing the smalltable.
    """
    raise NotImplementedError
| ExampleClass |
python | huggingface__transformers | src/transformers/models/minimax/modeling_minimax.py | {"start": 23895, "end": 24853} |
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.top_k = config.num_experts_per_tok
        self.jitter_noise = config.router_jitter_noise
        self.gate = MiniMaxTopKRouter(config)
        self.experts = MiniMaxExperts(config)

    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        if self.training and self.jitter_noise > 0:
            hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
        hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
        top_k_weights, top_k_index = self.gate(hidden_states)
        hidden_states = self.experts(hidden_states, top_k_index, top_k_weights)
        hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
        return hidden_states
| MiniMaxSparseMoeBlock |
python | realpython__materials | python-dict-attribute/config_v1.py | {"start": 0, "end": 349} |
class ____:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    update = __init__

    def __str__(self):
        return str(self.__dict__)


config = Config(theme="light", font_size=12, language="English")
print(config)

user = {"theme": "dark", "font_size": 14, "language": "Spanish"}
config.update(**user)
print(config)
| Config |
python | django-haystack__django-haystack | haystack/inputs.py | {"start": 1191, "end": 1734} |
class ____(BaseInput):
    """
    An input type for making exact matches.
    """

    input_type_name = "exact"

    def prepare(self, query_obj):
        query_string = super().prepare(query_obj)

        if self.kwargs.get("clean", False):
            # We need to clean each part of the exact match.
            exact_bits = [
                Clean(bit).prepare(query_obj) for bit in query_string.split(" ") if bit
            ]
            query_string = " ".join(exact_bits)

        return query_obj.build_exact_query(query_string)
| Exact |
python | huggingface__transformers | src/transformers/models/prophetnet/modeling_prophetnet.py | {"start": 43590, "end": 44895} |
class ____(GradientCheckpointingLayer):
    """
    Encoder block for Prophetnet
    """

    def __init__(self, config: ProphetNetConfig):
        super().__init__()

        # 1st residual block
        self.self_attn = ProphetNetAttention(config, config.num_encoder_attention_heads)
        self.self_attn_layer_norm = LayerNorm(config.hidden_size)

        # 2nd residual block
        self.feed_forward = ProphetNetFeedForward(config, config.encoder_ffn_dim)
        self.feed_forward_layer_norm = LayerNorm(config.hidden_size)

    def forward(
        self,
        hidden_states,
        attention_mask,
        output_attentions: bool = False,
    ):
        # 1st residual block
        attention_output, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = self.self_attn_layer_norm(attention_output + hidden_states)

        # 2nd residual block
        feed_forward_output = self.feed_forward(hidden_states)
        hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states)

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (attn_weights,)
        return outputs
| ProphetNetEncoderLayer |
python | astropy__astropy | astropy/coordinates/representation/cylindrical.py | {"start": 454, "end": 5303} |
class ____(BaseRepresentation):
    """
    Representation of points in 3D cylindrical coordinates.

    Parameters
    ----------
    rho : `~astropy.units.Quantity`
        The distance from the z axis to the point(s).
    phi : `~astropy.units.Quantity` or str
        The azimuth of the point(s), in angular units, which will be wrapped
        to an angle between 0 and 360 degrees. This can also be instances of
        `~astropy.coordinates.Angle`.
    z : `~astropy.units.Quantity`
        The z coordinate(s) of the point(s)
    differentials : dict, `~astropy.coordinates.CylindricalDifferential`, optional
        Any differential classes that should be associated with this
        representation. The input must either be a single
        `~astropy.coordinates.CylindricalDifferential` instance, or a dictionary of differential
        instances with keys set to a string representation of the SI unit with
        which the differential (derivative) is taken. For example, for a
        velocity differential on a positional representation, the key would be
        ``'s'`` for seconds, indicating that the derivative is a time
        derivative.
    copy : bool, optional
        If `True` (default), arrays will be copied. If `False`, arrays will
        be references, though possibly broadcast to ensure matching shapes.
    """

    attr_classes = {"rho": u.Quantity, "phi": Angle, "z": u.Quantity}

    def __init__(self, rho, phi=None, z=None, differentials=None, copy=True):
        super().__init__(rho, phi, z, copy=copy, differentials=differentials)

        if not self._rho.unit.is_equivalent(self._z.unit):
            raise u.UnitsError("rho and z should have matching physical types")

    @property
    def rho(self):
        """
        The distance of the point(s) from the z-axis.
        """
        return self._rho

    @property
    def phi(self):
        """
        The azimuth of the point(s).
        """
        return self._phi

    @property
    def z(self):
        """
        The height of the point(s).
        """
        return self._z

    def unit_vectors(self):
        sinphi, cosphi = np.sin(self.phi), np.cos(self.phi)
        l = np.broadcast_to(1.0, self.shape)
        return {
            "rho": CartesianRepresentation(cosphi, sinphi, 0, copy=COPY_IF_NEEDED),
            "phi": CartesianRepresentation(-sinphi, cosphi, 0, copy=COPY_IF_NEEDED),
            "z": CartesianRepresentation(0, 0, l, unit=u.one, copy=COPY_IF_NEEDED),
        }

    def scale_factors(self):
        rho = self.rho / u.radian
        l = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
        return {"rho": l, "phi": rho, "z": l}

    @classmethod
    def from_cartesian(cls, cart):
        """
        Converts 3D rectangular cartesian coordinates to cylindrical polar
        coordinates.
        """
        rho = np.hypot(cart.x, cart.y)
        phi = np.arctan2(cart.y, cart.x)
        z = cart.z
        return cls(rho=rho, phi=phi, z=z, copy=False)

    def to_cartesian(self):
        """
        Converts cylindrical polar coordinates to 3D rectangular cartesian
        coordinates.
        """
        x = self.rho * np.cos(self.phi)
        y = self.rho * np.sin(self.phi)
        z = self.z
        return CartesianRepresentation(x=x, y=y, z=z, copy=False)

    def _scale_operation(self, op, *args):
        if any(
            differential.base_representation is not self.__class__
            for differential in self.differentials.values()
        ):
            return super()._scale_operation(op, *args)

        phi_op, _, rho_op = _spherical_op_funcs(op, *args)
        z_op = lambda x: op(x, *args)

        result = self.__class__(
            rho_op(self.rho), phi_op(self.phi), z_op(self.z), copy=COPY_IF_NEEDED
        )
        for key, differential in self.differentials.items():
            new_comps = (
                op(getattr(differential, comp))
                for op, comp in zip(
                    (rho_op, operator.pos, z_op), differential.components
                )
            )
            result.differentials[key] = differential.__class__(*new_comps, copy=False)
        return result

    def represent_as(self, other_class, differential_class=None):
        if isinstance(other_class, type):
            if issubclass(other_class, PhysicsSphericalRepresentation):
                diffs = self._re_represent_differentials(
                    other_class, differential_class
                )
                r = np.hypot(self.rho, self.z)
                return other_class(
                    r=r,
                    theta=np.arctan2(self.rho, self.z),
                    phi=self.phi,
                    differentials=diffs,
                )
        return super().represent_as(other_class, differential_class)
| CylindricalRepresentation |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {"start": 24450, "end": 26125} |
class ____(Action):
    """Base class for Actions that operate on Deployments and need to infer them from
    events"""

    source: Literal["selected", "inferred"] = Field(
        "selected",
        description=(
            "Whether this Action applies to a specific selected "
            "deployment (given by `deployment_id`), or to a deployment that is "
            "inferred from the triggering event. If the source is 'inferred', "
            "the `deployment_id` may not be set. If the source is 'selected', the "
            "`deployment_id` must be set."
        ),
    )
    deployment_id: Optional[UUID] = Field(
        None, description="The identifier of the deployment"
    )

    @model_validator(mode="after")
    def selected_deployment_requires_id(self) -> Self:
        wants_selected_deployment = self.source == "selected"
        has_deployment_id = bool(self.deployment_id)
        if wants_selected_deployment != has_deployment_id:
            raise ValueError(
                "deployment_id is "
                + ("not allowed" if has_deployment_id else "required")
            )
        return self

    async def deployment_id_to_use(self, triggered_action: "TriggeredAction") -> UUID:
        if self.source == "selected":
            assert self.deployment_id
            return self.deployment_id

        event = triggered_action.triggering_event
        if not event:
            raise ActionFailed("No event to infer the deployment")

        assert event
        if id := _id_of_first_resource_of_kind(event, "prefect.deployment"):
            return id

        raise ActionFailed("No deployment could be inferred")
| DeploymentAction |
python | tox-dev__tox | src/tox/tox_env/python/package.py | {"start": 1290, "end": 5183} |
class ____(Python, PackageToxEnv, ABC):
    def __init__(self, create_args: ToxEnvCreateArgs) -> None:
        self._wheel_build_envs: dict[str, PythonPackageToxEnv] = {}
        super().__init__(create_args)

    def _setup_env(self) -> None:
        """Setup the tox environment."""
        super()._setup_env()
        self._install(self.requires(), PythonPackageToxEnv.__name__, "requires")
        self._install(self.conf["deps"], PythonPackageToxEnv.__name__, "deps")

    @abstractmethod
    def requires(self) -> tuple[Requirement, ...] | PythonDeps:
        raise NotImplementedError

    def register_run_env(self, run_env: RunToxEnv) -> Generator[tuple[str, str], PackageToxEnv, None]:
        yield from super().register_run_env(run_env)
        if run_env.conf["package"] != "skip" and "deps" not in self.conf:
            self.conf.add_config(
                keys="deps",
                of_type=list[Requirement],
                default=[],
                desc="Name of the python dependencies as specified by PEP-440",
            )
        if (
            not isinstance(run_env, Python)
            or run_env.conf["package"] not in {"wheel", "editable"}
            or "wheel_build_env" in run_env.conf
        ):
            return

        def default_wheel_tag(conf: Config, env_name: str | None) -> str:  # noqa: ARG001
            # https://www.python.org/dev/peps/pep-0427/#file-name-convention
            # when building wheels we need to ensure that the built package is compatible with the target env
            # compatibility is documented within https://www.python.org/dev/peps/pep-0427/#file-name-convention
            # a wheel tag example: {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl
            # python only code are often compatible at major level (unless universal wheel in which case both 2/3)
            # c-extension codes are trickier, but as of today both poetry/setuptools uses pypa/wheels logic
            # https://github.com/pypa/wheel/blob/master/src/wheel/bdist_wheel.py#L234-L280
            try:
                run_py = cast("Python", run_env).base_python
            except NoInterpreter:
                run_py = None
            if run_py is None:
                base = ",".join(run_env.conf["base_python"])
                msg = f"could not resolve base python with {base}"
                raise Skip(msg)
            default_pkg_py = self.base_python
            if (
                default_pkg_py.version_no_dot == run_py.version_no_dot
                and default_pkg_py.impl_lower == run_py.impl_lower
            ):
                return self.conf.name
            return f"{self.conf.name}-{run_py.impl_lower}{run_py.version_no_dot}"

        run_env.conf.add_config(
            keys=["wheel_build_env"],
            of_type=str,
            default=default_wheel_tag,
            desc="wheel tag to use for building applications",
        )
        pkg_env = run_env.conf["wheel_build_env"]
        result = yield pkg_env, run_env.conf["package_tox_env_type"]
        self._wheel_build_envs[pkg_env] = cast("PythonPackageToxEnv", result)

    def child_pkg_envs(self, run_conf: EnvConfigSet) -> Iterator[PackageToxEnv]:
        if run_conf["package"] == "wheel":
            try:
                conf = run_conf["wheel_build_env"]
            except Skip:
                # the __getitem__ method might raise Skip if the interpreter is not available
                return
            env = self._wheel_build_envs.get(conf)
            if env is not None and env.name != self.name:
                yield env

    def _teardown(self) -> None:
        for env in self._wheel_build_envs.values():
            if env is not self:
                with env.display_context(self._has_display_suspended):
                    env.teardown()
        super()._teardown()
| PythonPackageToxEnv |
python | huggingface__transformers | src/transformers/models/ministral/modeling_ministral.py | {"start": 22089, "end": 22494} |
class ____(GenericForQuestionAnswering, MinistralPreTrainedModel):
    base_model_prefix = "transformer"  # For BC, where `transformer` was used instead of `model`


__all__ = [
    "MinistralPreTrainedModel",
    "MinistralModel",
    "MinistralForCausalLM",
    "MinistralForSequenceClassification",
    "MinistralForTokenClassification",
    "MinistralForQuestionAnswering",
]
| MinistralForQuestionAnswering |
python | lepture__authlib | authlib/integrations/httpx_client/oauth2_client.py | {"start": 808, "end": 1549} |
class ____(Auth, TokenAuth):
    """Sign requests for OAuth 2.0, currently only bearer token is supported."""

    requires_request_body = True

    def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]:
        try:
            url, headers, body = self.prepare(
                str(request.url), request.headers, request.content
            )
            headers["Content-Length"] = str(len(body))
            yield build_request(
                url=url, headers=headers, body=body, initial_request=request
            )
        except KeyError as error:
            description = f"Unsupported token_type: {str(error)}"
            raise UnsupportedTokenTypeError(description=description) from error
| OAuth2Auth |
python | getsentry__sentry | src/sentry/integrations/discord/types.py | {"start": 24, "end": 464} |
class ____(Enum):
    # https://discord.com/developers/docs/topics/permissions#permissions
    VIEW_CHANNEL = 1 << 10
    SEND_MESSAGES = 1 << 11
    SEND_TTS_MESSAGES = 1 << 12
    EMBED_LINKS = 1 << 14
    ATTACH_FILES = 1 << 15
    MANAGE_THREADS = 1 << 34
    CREATE_PUBLIC_THREADS = 1 << 35
    CREATE_PRIVATE_THREADS = 1 << 36
    SEND_MESSAGES_IN_THREADS = 1 << 38
    MANAGE_GUILD = 1 << 5
    ADMINISTRATOR = 1 << 3
| DiscordPermissions |
python | dagster-io__dagster | python_modules/libraries/dagster-k8s/dagster_k8s/component.py | {"start": 421, "end": 1999} |
class ____(Component, Resolvable):
    """Component that creates assets backed by kubernetes pod execution via Dagster Pipes."""

    name: str
    assets: Sequence[ResolvedAssetSpec]
    image: Optional[str] = None
    command: Optional[Union[str, Sequence[str]]] = None
    namespace: Optional[str] = None
    env: Optional[Mapping[str, str]] = None
    base_pod_meta: Optional[Mapping[str, Any]] = None
    base_pod_spec: Optional[Mapping[str, Any]] = None

    def __post_init__(self):
        # validate that we can build a pod for the given args
        # i.e. image or base_pod_self.image
        build_pod_body(
            pod_name=self.name,
            image=self.image,
            command=self.command,
            env_vars=self.env or {},
            base_pod_meta=self.base_pod_meta,
            base_pod_spec=self.base_pod_spec,
        )

    @cached_property
    def client(self):
        return PipesK8sClient()

    def build_defs(self, context: ComponentLoadContext):
        return Definitions(assets=[self.build_asset()])

    def build_asset(self) -> AssetsDefinition:
        @multi_asset(name=self.name, specs=self.assets)
        def _asset(context: AssetExecutionContext):
            return self.client.run(
                context=context,
                image=self.image,
                command=self.command,
                namespace=self.namespace,
                env=self.env,
                base_pod_meta=self.base_pod_meta,
                base_pod_spec=self.base_pod_spec,
            ).get_results()

        return _asset
| PipesK8sComponent |
python | pyodide__pyodide | src/py/pyodide/webloop.py | {"start": 35124, "end": 36631} |
class ____(asyncio.DefaultEventLoopPolicy):
    """
    A simple event loop policy for managing :py:class:`WebLoop`-based event loops.
    """

    def __init__(self):
        self._default_loop = None

    def get_event_loop(self):
        """Get the current event loop"""
        if self._default_loop:
            return self._default_loop
        return self.new_event_loop()

    def new_event_loop(self) -> WebLoop:
        """Create a new event loop"""
        self._default_loop = WebLoop()
        return self._default_loop

    def set_event_loop(self, loop: Any) -> None:
        """Set the current event loop"""
        self._default_loop = loop


_orig_run = asyncio.run


@wraps(_orig_run)
def _run(main, *, debug=None, loop_factory=None):
    from pyodide_js._api import config

    if loop_factory is None and config.enableRunUntilComplete:
        loop = asyncio.events._get_running_loop()
        if isinstance(loop, WebLoop):
            return loop.run_until_complete(main)
    return _orig_run(main, debug=debug, loop_factory=loop_factory)


_orig_sleep = time.sleep


@wraps(_orig_sleep)
def _sleep(t):
    if can_run_sync():
        run_sync(sleep(t))
    else:
        _orig_sleep(t)


def _initialize_event_loop():
    if not IN_PYODIDE:
        return
    asyncio.run = _run
    time.sleep = _sleep
    policy = WebLoopPolicy()
    asyncio.set_event_loop_policy(policy)
    policy.get_event_loop()


__all__ = ["WebLoop", "WebLoopPolicy", "PyodideFuture", "PyodideTask"]
| WebLoopPolicy |
python | pytorch__pytorch | test/mobile/model_test/android_api_module.py | {"start": 69, "end": 3776} |
class ____(torch.jit.ScriptModule):
    @torch.jit.script_method
    def forward(self, input):
        return None

    @torch.jit.script_method
    def eqBool(self, input: bool) -> bool:
        return input

    @torch.jit.script_method
    def eqInt(self, input: int) -> int:
        return input

    @torch.jit.script_method
    def eqFloat(self, input: float) -> float:
        return input

    @torch.jit.script_method
    def eqStr(self, input: str) -> str:
        return input

    @torch.jit.script_method
    def eqTensor(self, input: Tensor) -> Tensor:
        return input

    @torch.jit.script_method
    def eqDictStrKeyIntValue(self, input: dict[str, int]) -> dict[str, int]:
        return input

    @torch.jit.script_method
    def eqDictIntKeyIntValue(self, input: dict[int, int]) -> dict[int, int]:
        return input

    @torch.jit.script_method
    def eqDictFloatKeyIntValue(self, input: dict[float, int]) -> dict[float, int]:
        return input

    @torch.jit.script_method
    def listIntSumReturnTuple(self, input: list[int]) -> tuple[list[int], int]:
        sum = 0
        for x in input:
            sum += x
        return (input, sum)

    @torch.jit.script_method
    def listBoolConjunction(self, input: list[bool]) -> bool:
        res = True
        for x in input:
            res = res and x
        return res

    @torch.jit.script_method
    def listBoolDisjunction(self, input: list[bool]) -> bool:
        res = False
        for x in input:
            res = res or x
        return res

    @torch.jit.script_method
    def tupleIntSumReturnTuple(
        self, input: tuple[int, int, int]
    ) -> tuple[tuple[int, int, int], int]:
        sum = 0
        for x in input:
            sum += x
        return (input, sum)

    @torch.jit.script_method
    def optionalIntIsNone(self, input: Optional[int]) -> bool:
        return input is None

    @torch.jit.script_method
    def intEq0None(self, input: int) -> Optional[int]:
        if input == 0:
            return None
        return input

    @torch.jit.script_method
    def str3Concat(self, input: str) -> str:
        return input + input + input

    @torch.jit.script_method
    def newEmptyShapeWithItem(self, input):
        return torch.tensor([int(input.item())])[0]

    @torch.jit.script_method
    def testAliasWithOffset(self) -> list[Tensor]:
        x = torch.tensor([100, 200])
        a = [x[0], x[1]]
        return a

    @torch.jit.script_method
    def testNonContiguous(self):
        x = torch.tensor([100, 200, 300])[::2]
        assert not x.is_contiguous()
        assert x[0] == 100
        assert x[1] == 300
        return x

    @torch.jit.script_method
    def conv2d(self, x: Tensor, w: Tensor, toChannelsLast: bool) -> Tensor:
        r = torch.nn.functional.conv2d(x, w)
        if toChannelsLast:
            r = r.contiguous(memory_format=torch.channels_last)
        else:
            r = r.contiguous()
        return r

    @torch.jit.script_method
    def conv3d(self, x: Tensor, w: Tensor, toChannelsLast: bool) -> Tensor:
        r = torch.nn.functional.conv3d(x, w)
        if toChannelsLast:
            r = r.contiguous(memory_format=torch.channels_last_3d)
        else:
            r = r.contiguous()
        return r

    @torch.jit.script_method
    def contiguous(self, x: Tensor) -> Tensor:
        return x.contiguous()

    @torch.jit.script_method
    def contiguousChannelsLast(self, x: Tensor) -> Tensor:
        return x.contiguous(memory_format=torch.channels_last)

    @torch.jit.script_method
    def contiguousChannelsLast3d(self, x: Tensor) -> Tensor:
        return x.contiguous(memory_format=torch.channels_last_3d)
| AndroidAPIModule |
python
|
huggingface__transformers
|
src/transformers/models/csm/modeling_csm.py
|
{
"start": 30020,
"end": 30598
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.embed_audio_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.hidden_size)
self.register_buffer(
"audio_tokens_offsets", torch.arange(config.num_codebooks) * config.vocab_size, persistent=False
)
def forward(self, input_ids):
input_embeds = self.embed_audio_tokens(input_ids + self.audio_tokens_offsets)
input_embeds = input_embeds.sum(dim=2)
return input_embeds
@auto_docstring
|
CsmBackboneModelEmbeddings
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_privacy_urls.py
|
{
"start": 9237,
"end": 9450
}
|
class ____(ProjectMixin):
def test_private_urls(self):
from readthedocs.projects.urls.private import urlpatterns
self._test_url(urlpatterns)
# ## Public Project Testing ###
|
PrivateProjectMixin
|
python
|
wandb__wandb
|
wandb/sdk/lib/auth/wbnetrc.py
|
{
"start": 216,
"end": 5408
}
|
class ____(Exception):
"""Could not write to the netrc file."""
def read_netrc_auth(*, host: str) -> str | None:
"""Read a W&B API key from the .netrc file.
Args:
host: The W&B server URL.
Returns:
An API key for the host, or None if there's no .netrc file
or if it doesn't contain credentials for the specified host.
Raises:
AuthenticationError: If an API key is found but is not in
a valid format.
"""
if not (auth := read_netrc_auth_with_source(host=host)):
return None
assert isinstance(auth.auth, AuthApiKey)
return auth.auth.api_key
def read_netrc_auth_with_source(*, host: str) -> AuthWithSource | None:
"""Read a W&B API key from the .netrc file.
Args:
host: The W&B server URL.
Returns:
An API key for the host, or None if there's no .netrc file
or it doesn't contain credentials for the specified host.
Also returns the file in which the API key was found.
Raises:
AuthenticationError: If an API key is found but is not in
a valid format.
"""
path = _get_netrc_file_path()
try:
netrc_file = netrc.netrc(path)
except FileNotFoundError:
return None
except (netrc.NetrcParseError, OSError) as e:
if isinstance(e, netrc.NetrcParseError) and e.lineno is not None:
term.termwarn(
f"Failed to read netrc file at {path},"
+ f" error on line {e.lineno}: {e.msg}"
)
else:
term.termwarn(f"Failed to read netrc file at {path}: {e}")
return None
if not (netloc := urlsplit(host).netloc):
return None
if not (creds := netrc_file.authenticators(netloc)):
return None
_, _, password = creds
if not password:
term.termwarn(f"Found entry for machine {netloc!r} with no API key at {path}")
return None
return AuthWithSource(
auth=AuthApiKey(host=host, api_key=password),
source=str(path),
)
def write_netrc_auth(*, host: str, api_key: str) -> None:
"""Store an API key in the .netrc file.
Args:
host: The W&B server URL.
api_key: A valid API key to write.
Raises:
WriteNetrcError: If there's a problem writing to the .netrc file.
"""
if not (netloc := urlsplit(host).netloc):
raise ValueError(f"Invalid host URL: {host!r}")
_update_netrc(
_get_netrc_file_path(),
machine=netloc,
password=api_key,
)
def _update_netrc(
path: pathlib.Path,
*,
machine: str,
password: str,
) -> None:
# Avoid accidentally breaking the user's .netrc file
# given invalid or malicious input.
#
# The .netrc file format allows using quotes in the same way
# as in sh syntax; the built-in netrc library also uses shlex.
machine = shlex.quote(machine)
password = shlex.quote(password)
machine_line = f"machine {machine}"
orig_lines = []
try:
orig_lines = path.read_text().splitlines()
except FileNotFoundError:
term.termlog("No netrc file found, creating one.")
path.touch(mode=0o600) # user readable and writable
except OSError as e:
# Include the original error message because the stack trace
# will not be shown to the user.
raise WriteNetrcError(f"Unable to read {path}: {e}") from e
new_lines: list[str] = []
# Copy over the original lines, minus the machine section we're updating.
skip = 0
for line in orig_lines:
if machine_line in line:
skip = 2
elif skip > 0:
skip -= 1
else:
new_lines.append(line)
new_lines.extend(
[
f"machine {machine}",
" login user",
f" password {password}",
"", # End with a blank line, by convention.
]
)
term.termlog(f"Appending key for {machine} to your netrc file: {path}")
try:
_write_text(path, "\n".join(new_lines))
except OSError as e:
# Include the original error message because the stack trace
# will not be shown to the user.
raise WriteNetrcError(f"Unable to write {path}: {e}") from e
def _write_text(path: pathlib.Path, text: str) -> None:
"""Call pathlib.Path.write_text().
Patched in tests.
"""
path.write_text(text)
def _get_netrc_file_path() -> pathlib.Path:
"""Returns the path to the .netrc file.
The file at the path may or may not exist.
"""
# The environment variable takes priority.
if netrc_file := os.environ.get("NETRC"):
return pathlib.Path(netrc_file).expanduser()
# If a netrc file exists in a standard location, use it.
unix_netrc = pathlib.Path("~/.netrc").expanduser()
if unix_netrc.exists():
return unix_netrc
windows_netrc = pathlib.Path("~/_netrc").expanduser()
if windows_netrc.exists():
return windows_netrc
# Otherwise, use the conventional file based on the platform.
if platform.system() != "Windows":
return unix_netrc
else:
return windows_netrc
|
WriteNetrcError
|
python
|
pytorch__pytorch
|
test/inductor/test_fuzzer.py
|
{
"start": 1283,
"end": 7706
}
|
class ____(TestCase):
def test_sampling_method_toggle(self):
toggle = SamplingMethod.dispatch(SamplingMethod.TOGGLE)
self.assertEqual(toggle("", bool, False), True)
self.assertEqual(toggle("", bool, True), False)
self.assertEqual(toggle("", Literal["foo", "bar"], "foo"), "bar")
self.assertEqual(toggle("", Literal["foo", "bar"], "bar"), "foo")
self.assertTrue("bar" in toggle("", list[Literal["foo", "bar"]], ["foo"]))
self.assertTrue("foo" in toggle("", list[Literal["foo", "bar"]], ["bar"]))
def test_sampling_method_random(self):
random = SamplingMethod.dispatch(SamplingMethod.RANDOM)
samp = [random("", bool, False) for i in range(1000)]
self.assertTrue(not all(samp))
@unittest.skipIf(not HAS_GPU, "requires gpu")
def test_config_fuzzer_inductor_gpu(self):
fuzzer = ConfigFuzzer(inductor_config, create_simple_test_model_gpu, seed=30)
self.assertIsNotNone(fuzzer.default)
fuzzer.reproduce([{"max_fusion_size": 1}])
def test_config_fuzzer_inductor_cpu(self):
fuzzer = ConfigFuzzer(inductor_config, create_simple_test_model_cpu, seed=100)
self.assertIsNotNone(fuzzer.default)
fuzzer.reproduce([{"max_fusion_size": 1}])
def test_config_fuzzer_bisector_exception(self):
key_1 = {"e_bool": False, "e_optional": None}
class MyException(Exception):
pass
def create_key_1():
def myfn():
if not fake_config.e_bool and fake_config.e_optional is None:
raise MyException("hi")
return True
return myfn
fuzzer = ConfigFuzzer(fake_config, create_key_1, seed=100, default={})
results = fuzzer.bisect(num_attempts=2, p=1.0)
self.assertEqual(len(results), 2)
for res in results:
self.assertEqual(res, key_1)
def test_config_fuzzer_bisector_boolean(self):
key_1 = {"e_bool": False, "e_optional": None}
def create_key_1():
def myfn():
if not fake_config.e_bool and fake_config.e_optional is None:
return False
return True
return myfn
fuzzer = ConfigFuzzer(fake_config, create_key_1, seed=100, default={})
num_attempts = 2
results = fuzzer.bisect(num_attempts=num_attempts, p=1.0)
self.assertEqual(len(results), num_attempts)
for res in results:
self.assertEqual(res, key_1)
def test_config_fuzzer_n_tuple(self):
key_1 = {"e_bool": False, "e_optional": None}
def create_key_1():
def myfn():
if not fake_config.e_bool and fake_config.e_optional is None:
return False
return True
return myfn
fuzzer = ConfigFuzzer(fake_config, create_key_1, seed=100, default={})
max_combo = 100
results = fuzzer.fuzz_n_tuple(2, max_combinations=max_combo)
self.assertEqual(results.num_ran(), max_combo)
self.assertEqual(results.lookup(tuple(key_1.keys())), Status.FAILED_RUN_RETURN)
def test_config_fuzzer_inductor_bisect(self):
# these values just chosen randomly, change to different ones if necessary
key_1 = {"split_reductions": False, "compute_all_bounds": True}
def create_key_1():
def myfn():
if (
not inductor_config.split_reductions
and inductor_config.compute_all_bounds
):
return False
return True
return myfn
fuzzer = ConfigFuzzer(inductor_config, create_key_1, seed=100)
num_attempts = 2
results = fuzzer.bisect(num_attempts=num_attempts, p=1.0)
self.assertEqual(len(results), num_attempts)
for res in results:
self.assertEqual(res, key_1)
new_results = fuzzer.reproduce(results)
self.assertEqual(len(new_results), 1)
self.assertEqual(
set(key_1.keys()),
{j for i in new_results.keys() for j in i} # noqa: SIM118
- set(MODULE_DEFAULTS["torch._inductor.config"].keys()),
)
@unittest.skipIf(not IS_LINUX, "PerfCounters are only supported on Linux")
@unittest.skip(
"Need default values for dynamo flags - https://github.com/pytorch/pytorch/issues/164062"
)
def test_config_fuzzer_dynamo_bisect(self):
# these values just chosen randomly, change to different ones if necessary
key_1 = {"dead_code_elimination": False, "specialize_int": True}
def create_key_1():
def myfn():
if (
not dynamo_config.dead_code_elimination
and dynamo_config.specialize_int
):
return False
return True
return myfn
fuzzer = ConfigFuzzer(dynamo_config, create_key_1, seed=10)
num_attempts = 2
results = fuzzer.bisect(num_attempts=num_attempts, p=1.0)
self.assertEqual(len(results), num_attempts)
for res in results:
self.assertEqual(res, key_1)
new_results = fuzzer.reproduce(results)
self.assertEqual(len(new_results), 1)
self.assertEqual(
set(key_1.keys()),
{j for i in new_results for j in i} # noqa: SIM118
- set(MODULE_DEFAULTS["torch._dynamo.config"].keys()),
)
@patch("torch.compile")
def test_fuzzer_inductor_calling_compile(self, compile):
def create_key_1():
def myfn():
return True
return myfn
fuzzer = ConfigFuzzer(inductor_config, create_key_1, seed=100)
num_attempts = 3
fuzzer.bisect(num_attempts=num_attempts, p=0.5)
self.assertEqual(compile.call_count, num_attempts)
def test_fuzzer_running_test(self):
def create_key_1():
def myfn():
return True
return myfn
fuzzer = ConfigFuzzer(inductor_config, create_key_1, seed=100)
fuzzer.test_config = MagicMock(return_value=Status.PASSED)
num_attempts = 20
fuzzer.bisect(num_attempts=num_attempts, p=0.5)
self.assertEqual(fuzzer.test_config.call_count, num_attempts)
if __name__ == "__main__":
run_tests()
|
TestConfigFuzzer
|
python
|
neetcode-gh__leetcode
|
python/0076-minimum-window-substring.py
|
{
"start": 0,
"end": 965
}
|
class ____:
def minWindow(self, s: str, t: str) -> str:
if len(s) < len(t):
return ""
countT, window = {}, {}
for c in t:
countT[c] = 1 + countT.get(c, 0)
have, need = 0, len(countT)
res, resLen = [-1, -1], float("infinity")
l = 0
for r in range(len(s)):
c = s[r]
window[c] = 1 + window.get(c, 0)
if c in countT and window[c] == countT[c]:
have += 1
while have == need:
# update our result
if (r - l + 1) < resLen:
res = [l, r]
resLen = r - l + 1
# pop from the left of our window
window[s[l]] -= 1
if s[l] in countT and window[s[l]] < countT[s[l]]:
have -= 1
l += 1
l, r = res
return s[l : r + 1] if resLen != float("infinity") else ""
|
Solution
|
python
|
geekcomputers__Python
|
BlackJack_game/blackjack_simulate.py
|
{
"start": 5378,
"end": 7678
}
|
class ____:
def __init__(self, name, role, chips_amount=None, color="END"):
"""
:param name: User name
:param role: dealer or player
:param chips_amount: Casino tokens equal money
"""
self.name = name
self.prompt = "{role} >> ({name}) : ".format(role=role, name=self.name)
self.chips = Chips(chips_amount)
self.color = color
self.hand = []
self.point = 0
def __repr__(self):
return str(self.__dict__)
def obtain_card(self, deck, face=True):
card = deck.deliver()
card.is_face = face
self.hand.append(card)
def drop_card(self):
self.hand.clear()
self.point = 0
def show_card(self):
print("\t ** Here is my card **")
for card in self.hand:
card.show()
def unveil_card(self):
for card in self.hand:
card.is_face = True
self.show_card()
def calculate_point(self):
def _extract_rank():
raw_ranks = [card.rank for card in self.hand]
cook_ranks = [10 if rank > 10 else rank for rank in raw_ranks]
return cook_ranks
def _sum_up(ranks):
rank_one = sum(ranks)
rank_eleven = sum([11 if rank == 1 else rank for rank in ranks])
# Over or has 2 Ace
if (ranks[::-1] == ranks) and (1 in ranks):
return 11 + len(ranks) - 1
if rank_eleven <= BLACK_JACK:
return rank_eleven
return rank_one
points = _extract_rank()
self.point = _sum_up(points)
def is_point(self, opt, point):
self.calculate_point()
compare_fmt = "{user_point} {opt} {point}".format(
user_point=self.point, opt=opt, point=point
)
return eval(compare_fmt)
def speak(self, content="", end_char="\n"):
print("")
print(
COLOR.get(self.color) + self.prompt + COLOR.get("END") + content,
end=end_char,
)
def showing(self):
self.speak()
self.show_card()
def unveiling(self):
self.calculate_point()
points_fmt = "My point is: {}".format(str(self.point))
self.speak(points_fmt)
self.unveil_card()
|
User
|
python
|
Netflix__metaflow
|
test/unit/configs/test_config_plain.py
|
{
"start": 234,
"end": 2040
}
|
class ____:
"""Test Config with plain=True option."""
def test_flow_completes(self, config_plain_run):
"""Test that the flow completes successfully."""
assert config_plain_run.successful
assert config_plain_run.finished
def test_plain_string_without_parser(self, config_plain_run):
"""Test plain Config without parser returns raw string."""
end_task = config_plain_run["end"].task
# Verify it's a string
assert end_task["plain_str_type"].data == "str"
# Verify the value is the raw string (not parsed JSON)
assert end_task["plain_str_value"].data == '{"raw": "string", "number": 123}'
def test_plain_list_with_parser(self, config_plain_run):
"""Test plain Config with parser returning list (non-dict)."""
end_task = config_plain_run["end"].task
# Verify it's a list
assert end_task["plain_list_type"].data == "list"
# Verify the list contents
assert end_task["plain_list_value"].data == [
"apple",
"banana",
"cherry",
"date",
]
assert end_task["plain_list_length"].data == 4
assert end_task["plain_list_first"].data == "apple"
def test_plain_tuple_with_parser(self, config_plain_run):
"""Test plain Config with parser returning tuple (non-dict)."""
end_task = config_plain_run["end"].task
# Verify it's a tuple type
assert end_task["plain_tuple_type"].data == "tuple"
# Verify tuple contents
assert end_task["plain_tuple_value"].data == ("test_tuple", 42, True)
assert end_task["tuple_name"].data == "test_tuple"
assert end_task["tuple_count"].data == 42
assert end_task["tuple_enabled"].data == True
|
TestConfigPlain
|
python
|
readthedocs__readthedocs.org
|
readthedocs/projects/views/private.py
|
{
"start": 39097,
"end": 39255
}
|
class ____(AutomationRuleMixin, DeleteViewWithMessage):
success_message = _("Automation rule deleted")
http_method_names = ["post"]
|
AutomationRuleDelete
|
python
|
readthedocs__readthedocs.org
|
readthedocs/api/v3/permissions.py
|
{
"start": 1269,
"end": 1553
}
|
class ____(BasePermission):
"""Grant permission if user has admin rights on the Project."""
def has_permission(self, request, view):
project = view._get_parent_project()
if view.has_admin_permission(request.user, project):
return True
|
IsProjectAdmin
|
python
|
doocs__leetcode
|
solution/2100-2199/2192.All Ancestors of a Node in a Directed Acyclic Graph/Solution.py
|
{
"start": 0,
"end": 589
}
|
class ____:
def getAncestors(self, n: int, edges: List[List[int]]) -> List[List[int]]:
def bfs(s: int):
q = deque([s])
vis = {s}
while q:
i = q.popleft()
for j in g[i]:
if j not in vis:
vis.add(j)
q.append(j)
ans[j].append(s)
g = defaultdict(list)
for u, v in edges:
g[u].append(v)
ans = [[] for _ in range(n)]
for i in range(n):
bfs(i)
return ans
|
Solution
|
python
|
google__pytype
|
pytype/pytd/slots_test.py
|
{
"start": 75,
"end": 821
}
|
class ____(unittest.TestCase):
"""Test the operator mappings in slots.py."""
def test_reverse_name_mapping(self):
for operator in (
"add",
"and",
"div",
"divmod",
"floordiv",
"lshift",
"matmul",
"mod",
"mul",
"or",
"pow",
"rshift",
"sub",
"truediv",
"xor",
):
normal = f"__{operator}__"
reverse = f"__r{operator}__"
self.assertEqual(slots.REVERSE_NAME_MAPPING[normal], reverse)
def test_symbol_mapping(self):
for operator, symbol in [("__add__", "+"), ("__invert__", "~")]:
self.assertEqual(slots.SYMBOL_MAPPING[operator], symbol)
if __name__ == "__main__":
unittest.main()
|
TestPytd
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py
|
{
"start": 450,
"end": 585
}
|
class ____:
def f(self):
print("f")
def defined_outside(self):
super(MyClass, self).f() # CANNOT use super()
|
BaseClass
|
python
|
celery__celery
|
t/unit/worker/test_loops.py
|
{
"start": 925,
"end": 4481
}
|
class ____:
def __init__(self, app, heartbeat=None, on_task_message=None,
transport_driver_type=None):
hub = Hub()
(
self.obj,
self.connection,
self.consumer,
self.blueprint,
self.hub,
self.qos,
self.heartbeat,
self.clock,
) = self.args = [Mock(name='obj'),
Mock(name='connection'),
Mock(name='consumer'),
Mock(name='blueprint'),
hub,
Mock(name='qos'),
heartbeat,
Mock(name='clock')]
self.connection.supports_heartbeats = True
self.connection.get_heartbeat_interval.side_effect = (
lambda: self.heartbeat
)
self.consumer.callbacks = []
self.obj.strategies = {}
self.connection.connection_errors = (socket.error,)
if transport_driver_type:
self.connection.transport.driver_type = transport_driver_type
self.hub.readers = {}
self.hub.timer = Mock(name='hub.timer')
self.hub.timer._queue = [Mock()]
self.hub.fire_timers = Mock(name='hub.fire_timers')
self.hub.fire_timers.return_value = 1.7
self.hub.poller = Mock(name='hub.poller')
self.hub.close = Mock(name='hub.close()') # asynloop calls hub.close
self.Hub = self.hub
self.blueprint.state = RUN
# need this for create_task_handler
self._consumer = _consumer = Consumer(
Mock(), timer=Mock(), controller=Mock(), app=app)
_consumer.on_task_message = on_task_message or []
self.obj.create_task_handler = _consumer.create_task_handler
self.on_unknown_message = self.obj.on_unknown_message = Mock(
name='on_unknown_message',
)
_consumer.on_unknown_message = self.on_unknown_message
self.on_unknown_task = self.obj.on_unknown_task = Mock(
name='on_unknown_task',
)
_consumer.on_unknown_task = self.on_unknown_task
self.on_invalid_task = self.obj.on_invalid_task = Mock(
name='on_invalid_task',
)
_consumer.on_invalid_task = self.on_invalid_task
self.on_decode_error = self.obj.on_decode_error = Mock(
name='on_decode_error',
)
_consumer.on_decode_error = self.on_decode_error
_consumer.strategies = self.obj.strategies
def timeout_then_error(self, mock):
def first(*args, **kwargs):
mock.side_effect = socket.error()
raise socket.timeout()
mock.side_effect = first
def close_then_error(self, mock=None, mod=0, exc=None):
mock = Mock() if mock is None else mock
def first(*args, **kwargs):
if not mod or mock.call_count > mod:
self.close()
raise (socket.error() if exc is None else exc)
mock.side_effect = first
return mock
def close(self, *args, **kwargs):
self.blueprint.state = CLOSE
def closer(self, mock=None, mod=0):
mock = Mock() if mock is None else mock
def closing(*args, **kwargs):
if not mod or mock.call_count >= mod:
self.close()
mock.side_effect = closing
return mock
def get_task_callback(*args, **kwargs):
x = X(*args, **kwargs)
x.blueprint.state = CLOSE
asynloop(*x.args)
return x, x.consumer.on_message
|
X
|
python
|
huggingface__transformers
|
src/transformers/models/fuyu/modeling_fuyu.py
|
{
"start": 11021,
"end": 17485
}
|
class ____(FuyuPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
"^language_model.model": "model.language_model",
"^vision_embed_tokens": "model.vision_embed_tokens",
"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: FuyuConfig):
super().__init__(config)
self.model = FuyuModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
        # [batch_size, num_total_patches, patch_size x patch_size x num_channels]
image_patches: Optional[torch.Tensor] = None,
image_patches_indices: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
logits_to_keep: Optional[int] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
        image_patches (`torch.FloatTensor` of shape `(batch_size, num_total_patches, patch_size x patch_size x num_channels)`, *optional*):
Image patches to be used as continuous embeddings. The patches are flattened and then projected to the
hidden size of the model.
image_patches_indices (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Tensor of indices of the image patches in the input_ids tensor.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.text_config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.text_config.vocab_size]`.
Examples:
```python
>>> from transformers import FuyuProcessor, FuyuForCausalLM
>>> from PIL import Image
>>> import requests
>>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")
>>> model = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")
>>> url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> prompt = "Generate a coco-style caption.\n"
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> outputs = model(**inputs)
>>> generated_ids = model.generate(**inputs, max_new_tokens=7)
>>> generation_text = processor.batch_decode(generated_ids[:, -7:], skip_special_tokens=True)
>>> print(generation_text[0])
A blue bus parked on the side of a road.
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.model(
input_ids=input_ids,
image_patches=image_patches,
image_patches_indices=image_patches_indices,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
use_cache=use_cache,
return_dict=True,
# don't pass kwargs because Persimmon-backbone doesn't accept FA2 kwargs yet, TODO: raushan
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
image_patches=None,
image_patches_indices=None,
cache_position=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
image_patches=image_patches,
image_patches_indices=image_patches_indices,
cache_position=cache_position,
**kwargs,
)
if cache_position[0] != 0:
# set image_patches and image_patches_indices to `None` for decoding stage
model_inputs["image_patches_indices"] = None
model_inputs["image_patches"] = None
return model_inputs
__all__ = ["FuyuForCausalLM", "FuyuPreTrainedModel", "FuyuModel"]
|
FuyuForCausalLM
|
python
|
astropy__astropy
|
astropy/cosmology/_src/tests/flrw/test_wpwazpcdm.py
|
{
"start": 3893,
"end": 7890
}
|
class ____(
FLRWTest, ParameterwpTestMixin, ParameterwaTestMixin, ParameterzpTestMixin
):
"""Test :class:`astropy.cosmology.wpwaCDM`."""
def setup_class(self):
"""Setup for testing."""
super().setup_class(self)
self.cls = wpwaCDM
self.cls_kwargs.update(wp=-0.9, wa=0.2, zp=0.5)
# ===============================================================
# Method & Attribute Tests
def test_clone_change_param(self, cosmo):
"""Test method ``.clone()`` changing a(many) Parameter(s)."""
super().test_clone_change_param(cosmo)
# `w` params
c = cosmo.clone(wp=0.1, wa=0.2, zp=14)
assert c.wp == 0.1
assert c.wa == 0.2
assert c.zp == 14
for n, v in filter_keys_from_items(c.parameters, ("wp", "wa", "zp")):
v_expect = getattr(cosmo, n)
assert_quantity_allclose(v, v_expect, atol=1e-4 * getattr(v, "unit", 1))
# @pytest.mark.parametrize("z", valid_zs) # TODO! recompute comparisons below
def test_w(self, cosmo):
"""Test :meth:`astropy.cosmology.wpwaCDM.w`."""
# super().test_w(cosmo, z)
assert u.allclose(cosmo.w(0.5), -0.9)
assert u.allclose(
cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
[-0.94848485, -0.93333333, -0.9, -0.84666667, -0.82380952, -0.78266667],
)
def test_repr(self, cosmo_cls, cosmo):
"""Test method ``.__repr__()``."""
assert repr(cosmo) == (
"wpwaCDM(name='ABCMeta', H0=<Quantity 70. km / (Mpc s)>, Om0=0.27,"
" Ode0=0.73, Tcmb0=<Quantity 3. K>, Neff=3.04,"
" m_nu=<Quantity [0., 0., 0.] eV>, Ob0=0.03, wp=-0.9, wa=0.2,"
" zp=<Quantity 0.5 redshift>)"
)
# ===============================================================
# Usage Tests
@pytest.mark.skipif(not HAS_SCIPY, reason="scipy required for this test.")
@pytest.mark.parametrize(
("args", "kwargs", "expected"),
[
( # no relativistic species
(75.0, 0.3, 0.6),
{},
[2954.68975298, 4599.83254834, 5643.04013201, 6373.36147627] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25, 0.5),
{"zp": 0.4, "Tcmb0": 3.0, "Neff": 3, "m_nu": 0 * u.eV},
[2919.00656215, 4558.0218123, 5615.73412391, 6366.10224229] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25, 0.5),
{"zp": 1.0, "Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
[2629.48489827, 3874.13392319, 4614.31562397, 5116.51184842] * u.Mpc,
),
# FLAT: these match the tests in TestFlatwpwaCDM, except Ode0 is set manually.
( # no relativistic species
(75.0, 0.3, 0.7),
{},
[3030.70481348, 4745.82435272, 5828.73710847, 6582.60454542] * u.Mpc,
),
( # massless neutrinos
(75.0, 0.25, 0.75),
{"zp": 0.4, "Tcmb0": 3.0, "Neff": 3, "m_nu": 0 * u.eV},
[3113.62199365, 4943.28425668, 6114.45491003, 6934.07461377] * u.Mpc,
),
( # massive neutrinos
(75.0, 0.25, 0.2458794183661), # to make Ok0 = 0, Otot0 = 1
{"zp": 1.0, "Tcmb0": 3.0, "Neff": 4, "m_nu": 5 * u.eV},
[2517.08634022, 3694.21111754, 4402.17802962, 4886.65787948] * u.Mpc,
),
],
)
def test_comoving_distance_example(self, cosmo_cls, args, kwargs, expected):
"""Test :meth:`astropy.cosmology.LambdaCDM.comoving_distance`.
These do not come from external codes -- they are just internal checks to make
sure nothing changes if we muck with the distance calculators.
"""
super().test_comoving_distance_example(
cosmo_cls, args, {**COMOVING_DISTANCE_EXAMPLE_KWARGS, **kwargs}, expected
)
|
TestwpwaCDM
|
python
|
python-excel__xlwt
|
xlwt/antlr.py
|
{
"start": 36257,
"end": 46976
}
|
class ____(TokenStream):
## class members
NO_CHAR = 0
EOF_CHAR = '' ### EOF shall be the empty string.
def __init__(self, *argv, **kwargs):
super(CharScanner, self).__init__()
self.saveConsumedInput = True
self.tokenClass = None
self.caseSensitive = True
self.caseSensitiveLiterals = True
self.literals = None
self.tabsize = 8
self._returnToken = None
self.commitToPath = False
self.traceDepth = 0
self.text = StringBuffer()
self.hashString = hash(self)
self.setTokenObjectClass(CommonToken)
self.setInput(*argv)
def __iter__(self):
return CharScannerIterator(self)
def setInput(self,*argv):
## case 1:
## if there's no arg we default to read from
## standard input
if not argv:
import sys
self.setInput(sys.stdin)
return
## get 1st argument
arg1 = argv[0]
## case 2:
## if arg1 is a string, we assume it's a file name
## and open a stream using 2nd argument as open
## mode. If there's no 2nd argument we fall back to
## mode '+rb'.
if is_string_type(arg1):
f = open(arg1,"rb")
self.setInput(f)
self.setFilename(arg1)
return
## case 3:
## if arg1 is a file we wrap it by a char buffer (
## some additional checks?? No, can't do this in
## general).
if isinstance(arg1,file):
self.setInput(CharBuffer(arg1))
return
## case 4:
## if arg1 is of type SharedLexerInputState we use
## argument as is.
if isinstance(arg1,LexerSharedInputState):
self.inputState = arg1
return
## case 5:
## check whether argument type is of type input
## buffer. If so create a SharedLexerInputState and
## go ahead.
if isinstance(arg1,InputBuffer):
self.setInput(LexerSharedInputState(arg1))
return
## case 6:
## check whether argument type has a method read(int)
## If so create CharBuffer ...
try:
if arg1.read:
rd = Reader(arg1)
cb = CharBuffer(rd)
ss = LexerSharedInputState(cb)
self.inputState = ss
return
except:
pass
## case 7:
## raise wrong argument exception
raise TypeError(argv)
def setTabSize(self,size) :
self.tabsize = size
def getTabSize(self) :
return self.tabsize
def setCaseSensitive(self,t) :
self.caseSensitive = t
def setCommitToPath(self,commit) :
self.commitToPath = commit
def setFilename(self,f) :
self.inputState.filename = f
def setLine(self,line) :
self.inputState.line = line
def setText(self,s) :
self.resetText()
self.text.append(s)
def getCaseSensitive(self) :
return self.caseSensitive
def getCaseSensitiveLiterals(self) :
return self.caseSensitiveLiterals
def getColumn(self) :
return self.inputState.column
def setColumn(self,c) :
self.inputState.column = c
def getCommitToPath(self) :
return self.commitToPath
def getFilename(self) :
return self.inputState.filename
def getInputBuffer(self) :
return self.inputState.input
def getInputState(self) :
return self.inputState
def setInputState(self,state) :
assert isinstance(state,LexerSharedInputState)
self.inputState = state
def getLine(self) :
return self.inputState.line
def getText(self) :
return str(self.text)
def getTokenObject(self) :
return self._returnToken
def LA(self,i) :
c = self.inputState.input.LA(i)
if not self.caseSensitive:
### E0006
c = c.__class__.lower(c)
return c
def makeToken(self,type) :
try:
## dynamically load a class
assert self.tokenClass
tok = self.tokenClass()
tok.setType(type)
tok.setColumn(self.inputState.tokenStartColumn)
tok.setLine(self.inputState.tokenStartLine)
return tok
except:
self.panic("unable to create new token")
return Token.badToken
def mark(self) :
return self.inputState.input.mark()
def _match_bitset(self,b) :
if b.member(self.LA(1)):
self.consume()
else:
raise MismatchedCharException(self.LA(1), b, False, self)
def _match_string(self,s) :
for c in s:
if self.LA(1) == c:
self.consume()
else:
raise MismatchedCharException(self.LA(1), c, False, self)
def match(self,item):
if is_string_type(item):
return self._match_string(item)
else:
return self._match_bitset(item)
def matchNot(self,c) :
if self.LA(1) != c:
self.consume()
else:
raise MismatchedCharException(self.LA(1), c, True, self)
def matchRange(self,c1,c2) :
if self.LA(1) < c1 or self.LA(1) > c2 :
raise MismatchedCharException(self.LA(1), c1, c2, False, self)
else:
self.consume()
def newline(self) :
self.inputState.line += 1
self.inputState.column = 1
def tab(self) :
c = self.getColumn()
        nc = ( ((c-1)//self.tabsize) + 1) * self.tabsize + 1
self.setColumn(nc)
def panic(self,s='') :
print("CharScanner: panic: " + s)
sys.exit(1)
def reportError(self,s) :
if not self.getFilename():
print("error: " + str(s))
else:
print(self.getFilename() + ": error: " + str(s))
def reportWarning(self,s) :
if not self.getFilename():
print("warning: " + str(s))
else:
print(self.getFilename() + ": warning: " + str(s))
def resetText(self) :
self.text.setLength(0)
self.inputState.tokenStartColumn = self.inputState.column
self.inputState.tokenStartLine = self.inputState.line
def rewind(self,pos) :
self.inputState.input.rewind(pos)
def setTokenObjectClass(self,cl):
self.tokenClass = cl
def testForLiteral(self,token):
if not token:
return
assert isinstance(token,Token)
_type = token.getType()
## special tokens can't be literals
if _type in [SKIP,INVALID_TYPE,EOF_TYPE,NULL_TREE_LOOKAHEAD] :
return
_text = token.getText()
if not _text:
return
assert is_string_type(_text)
_type = self.testLiteralsTable(_text,_type)
token.setType(_type)
return _type
def testLiteralsTable(self,*args):
if is_string_type(args[0]):
s = args[0]
i = args[1]
else:
s = self.text.getString()
i = args[0]
## check whether integer has been given
if not isinstance(i,int):
assert isinstance(i,int)
## check whether we have a dict
assert isinstance(self.literals,dict)
try:
## E0010
if not self.caseSensitiveLiterals:
s = s.__class__.lower(s)
i = self.literals[s]
except:
pass
return i
def toLower(self,c):
        return c.__class__.lower(c)
def traceIndent(self):
print(' ' * self.traceDepth)
def traceIn(self,rname):
self.traceDepth += 1
self.traceIndent()
print("> lexer %s c== %s" % (rname,self.LA(1)))
def traceOut(self,rname):
self.traceIndent()
print("< lexer %s c== %s" % (rname,self.LA(1)))
self.traceDepth -= 1
def uponEOF(self):
pass
def append(self,c):
if self.saveConsumedInput :
self.text.append(c)
def commit(self):
self.inputState.input.commit()
def consume(self):
if not self.inputState.guessing:
c = self.LA(1)
if self.caseSensitive:
self.append(c)
else:
# use input.LA(), not LA(), to get original case
# CharScanner.LA() would toLower it.
c = self.inputState.input.LA(1)
self.append(c)
if c and c in "\t":
self.tab()
else:
self.inputState.column += 1
self.inputState.input.consume()
## Consume chars until one matches the given char
def consumeUntil_char(self,c):
while self.LA(1) != EOF_CHAR and self.LA(1) != c:
self.consume()
## Consume chars until one matches the given set
def consumeUntil_bitset(self,bitset):
        while self.LA(1) != EOF_CHAR and not bitset.member(self.LA(1)):
self.consume()
### If symbol seen is EOF then generate and set token, otherwise
### throw exception.
def default(self,la1):
if not la1 :
self.uponEOF()
self._returnToken = self.makeToken(EOF_TYPE)
else:
self.raise_NoViableAlt(la1)
def filterdefault(self,la1,*args):
if not la1:
self.uponEOF()
self._returnToken = self.makeToken(EOF_TYPE)
return
if not args:
self.consume()
raise TryAgain()
else:
### apply filter object
self.commit();
try:
func=args[0]
func(*args[1:])
except RecognitionException as e:
## catastrophic failure
self.reportError(e);
self.consume();
raise TryAgain()
def raise_NoViableAlt(self,la1=None):
if not la1: la1 = self.LA(1)
fname = self.getFilename()
line = self.getLine()
col = self.getColumn()
raise NoViableAltForCharException(la1,fname,line,col)
def set_return_token(self,_create,_token,_ttype,_offset):
if _create and not _token and (not _ttype == SKIP):
string = self.text.getString(_offset)
_token = self.makeToken(_ttype)
_token.setText(string)
self._returnToken = _token
return _token
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
### CharScannerIterator ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
|
CharScanner
|
python
|
doocs__leetcode
|
solution/2900-2999/2924.Find Champion II/Solution.py
|
{
"start": 0,
"end": 221
}
|
class ____:
def findChampion(self, n: int, edges: List[List[int]]) -> int:
indeg = [0] * n
for _, v in edges:
indeg[v] += 1
return -1 if indeg.count(0) != 1 else indeg.index(0)
|
Solution
|
python
|
spack__spack
|
lib/spack/spack/version/version_types.py
|
{
"start": 7111,
"end": 7750
}
|
class ____(VersionType):
"""Base type for versions that represents a single (non-range or list) version."""
def _stringify_version(versions: VersionTuple, separators: Tuple[str, ...]) -> str:
"""Create a string representation from version components."""
release, prerelease = versions
components = [f"{rel}{sep}" for rel, sep in zip(release, separators)]
if prerelease[0] != FINAL:
components.append(PRERELEASE_TO_STRING[prerelease[0]])
if len(prerelease) > 1:
components.append(separators[len(release)])
components.append(str(prerelease[1]))
return "".join(components)
|
ConcreteVersion
|
python
|
astropy__astropy
|
astropy/time/tests/test_ut1.py
|
{
"start": 1331,
"end": 2558
}
|
class ____:
def setup_class(cls):
# Need auto_download so that IERS_B won't be loaded and cause tests to
# fail.
iers_conf.auto_download = True
def teardown_class(cls):
# This setting is to be consistent with astropy/conftest.py
iers_conf.auto_download = False
def test_utc_to_ut1(self):
"Test conversion of UTC to UT1, making sure to include a leap second"
t = Time(
[
"2012-06-30 12:00:00",
"2012-06-30 23:59:59",
"2012-06-30 23:59:60",
"2012-07-01 00:00:00",
"2012-07-01 12:00:00",
],
scale="utc",
)
t_ut1_jd = t.ut1.jd
t_comp = np.array(
[
2456108.9999932079,
2456109.4999816339,
2456109.4999932083,
2456109.5000047823,
2456110.0000047833,
]
)
assert allclose_jd(t_ut1_jd, t_comp)
t_back = t.ut1.utc
assert allclose_jd(t.jd, t_back.jd)
tnow = Time.now()
tnow.ut1
def test_ut1_iers_auto(self):
do_ut1_prediction_tst(iers.IERS_Auto)
|
TestTimeUT1Remote
|
python
|
eventlet__eventlet
|
tests/hub_test.py
|
{
"start": 6391,
"end": 7976
}
|
class ____(tests.LimitedTestCase):
def test_exceptionpreservation(self):
# events for controlling execution order
gt1event = eventlet.Event()
gt2event = eventlet.Event()
def test_gt1():
try:
raise KeyError()
except KeyError:
gt1event.send('exception')
gt2event.wait()
assert sys.exc_info()[0] is KeyError
gt1event.send('test passed')
def test_gt2():
gt1event.wait()
gt1event.reset()
assert sys.exc_info()[0] is None
try:
raise ValueError()
except ValueError:
gt2event.send('exception')
gt1event.wait()
assert sys.exc_info()[0] is ValueError
g1 = eventlet.spawn(test_gt1)
g2 = eventlet.spawn(test_gt2)
try:
g1.wait()
g2.wait()
finally:
g1.kill()
g2.kill()
def test_exceptionleaks(self):
# tests expected behaviour with all versions of greenlet
def test_gt(sem):
try:
raise KeyError()
except KeyError:
sem.release()
hubs.get_hub().switch()
# semaphores for controlling execution order
sem = eventlet.Semaphore()
sem.acquire()
g = eventlet.spawn(test_gt, sem)
try:
sem.acquire()
assert sys.exc_info()[0] is None
finally:
g.kill()
|
TestExceptionInGreenthread
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py
|
{
"start": 5898,
"end": 6421
}
|
class ____(BaseModel):
class Config:
extra = Extra.allow
type: Literal["CustomRecordFilter"]
class_name: str = Field(
...,
description="Fully-qualified name of the class that will be implementing the custom record filter strategy. The format is `source_<name>.<package>.<class_name>`.",
examples=["source_railz.components.MyCustomCustomRecordFilter"],
title="Class Name",
)
parameters: Optional[Dict[str, Any]] = Field(None, alias="$parameters")
|
CustomRecordFilter
|
python
|
google__jax
|
jax/experimental/jax2tf/tests/flax_models/gnn.py
|
{
"start": 1678,
"end": 4135
}
|
class ____(nn.Module):
"""A complete Graph Network model defined with Jraph."""
latent_size: int
num_mlp_layers: int
message_passing_steps: int
output_globals_size: int
dropout_rate: float = 0
skip_connections: bool = True
use_edge_model: bool = True
layer_norm: bool = True
deterministic: bool = True
@nn.compact
def __call__(self, graphs: jraph.GraphsTuple) -> jraph.GraphsTuple:
# We will first linearly project the original features as 'embeddings'.
embedder = jraph.GraphMapFeatures(
embed_node_fn=nn.Dense(self.latent_size),
embed_edge_fn=nn.Dense(self.latent_size),
embed_global_fn=nn.Dense(self.latent_size))
processed_graphs = embedder(graphs)
# Now, we will apply a Graph Network once for each message-passing round.
mlp_feature_sizes = [self.latent_size] * self.num_mlp_layers
for _ in range(self.message_passing_steps):
if self.use_edge_model:
update_edge_fn = jraph.concatenated_args(
MLP(mlp_feature_sizes,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic))
else:
update_edge_fn = None
update_node_fn = jraph.concatenated_args(
MLP(mlp_feature_sizes,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic))
update_global_fn = jraph.concatenated_args(
MLP(mlp_feature_sizes,
dropout_rate=self.dropout_rate,
deterministic=self.deterministic))
graph_net = jraph.GraphNetwork(
update_node_fn=update_node_fn,
update_edge_fn=update_edge_fn,
update_global_fn=update_global_fn)
if self.skip_connections:
processed_graphs = add_graphs_tuples(
graph_net(processed_graphs), processed_graphs)
else:
processed_graphs = graph_net(processed_graphs)
if self.layer_norm:
processed_graphs = processed_graphs._replace(
nodes=nn.LayerNorm()(processed_graphs.nodes),
edges=nn.LayerNorm()(processed_graphs.edges),
globals=nn.LayerNorm()(processed_graphs.globals),
)
# Since our graph-level predictions will be at globals, we will
# decode to get the required output logits.
decoder = jraph.GraphMapFeatures(
embed_global_fn=nn.Dense(self.output_globals_size))
processed_graphs = decoder(processed_graphs)
return processed_graphs
|
GraphNet
|
python
|
openai__openai-python
|
src/openai/types/beta/realtime/transcription_session_update.py
|
{
"start": 801,
"end": 963
}
|
class ____(BaseModel):
expires_at: Optional[SessionClientSecretExpiresAt] = None
"""Configuration for the ephemeral token expiration."""
|
SessionClientSecret
|
python
|
pytorch__pytorch
|
test/dynamo/test_deviceguard.py
|
{
"start": 1707,
"end": 3092
}
|
class ____(torch._dynamo.test_case.TestCase):
"""
Unit tests for the DeviceGuard class using a CudaInterface.
"""
def setUp(self):
super().setUp()
self.device_interface = CudaInterface
@unittest.skipIf(not TEST_MULTIGPU, "need multiple GPU")
def test_device_guard(self):
current_device = torch.cuda.current_device()
device_guard = DeviceGuard(self.device_interface, 1)
with device_guard as _:
self.assertEqual(torch.cuda.current_device(), 1)
self.assertEqual(device_guard.prev_idx, 0)
self.assertEqual(device_guard.idx, 1)
self.assertEqual(torch.cuda.current_device(), current_device)
self.assertEqual(device_guard.prev_idx, 0)
self.assertEqual(device_guard.idx, 1)
def test_device_guard_no_index(self):
current_device = torch.cuda.current_device()
device_guard = DeviceGuard(self.device_interface, None)
with device_guard as _:
self.assertEqual(torch.cuda.current_device(), current_device)
self.assertEqual(device_guard.prev_idx, -1)
self.assertEqual(device_guard.idx, None)
self.assertEqual(device_guard.prev_idx, -1)
self.assertEqual(device_guard.idx, None)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
|
TestCUDADeviceGuard
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_select.py
|
{
"start": 1689,
"end": 17885
}
|
class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def test_old_bracket_style_fail(self):
with expect_raises_message(
exc.ArgumentError,
r"Column expression, FROM clause, or other columns clause .*"
r".*Did you mean to say",
):
select([table1.c.myid])
def test_new_calling_style(self):
stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid)
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable, myothertable "
"WHERE mytable.myid = myothertable.otherid",
)
@testing.combinations(
(
lambda tbl: select().select_from(tbl).where(tbl.c.id == 123),
"SELECT FROM tbl WHERE tbl.id = :id_1",
),
(lambda tbl: select().where(true()), "SELECT WHERE 1 = 1"),
(
lambda tbl: select()
.select_from(tbl)
.where(tbl.c.id == 123)
.exists(),
"EXISTS (SELECT FROM tbl WHERE tbl.id = :id_1)",
),
)
def test_select_no_columns(self, stmt, expected):
"""test #9440"""
tbl = table("tbl", column("id"))
stmt = testing.resolve_lambda(stmt, tbl=tbl)
self.assert_compile(stmt, expected)
def test_new_calling_style_clauseelement_thing_that_has_iter(self):
class Thing:
def __clause_element__(self):
return table1
def __iter__(self):
return iter(["a", "b", "c"])
stmt = select(Thing())
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable",
)
def test_new_calling_style_inspectable_ce_thing_that_has_iter(self):
class Thing:
def __iter__(self):
return iter(["a", "b", "c"])
class InspectedThing:
def __clause_element__(self):
return table1
from sqlalchemy.inspection import _inspects
@_inspects(Thing)
def _ce(thing):
return InspectedThing()
stmt = select(Thing())
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, "
"mytable.description FROM mytable",
)
def test_join_nofrom_implicit_left_side_explicit_onclause(self):
stmt = select(table1).join(table2, table1.c.myid == table2.c.otherid)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_nofrom_implicit_left_side_explicit_onclause_3level(self):
stmt = (
select(parent)
.join(child, child.c.parent_id == parent.c.id)
.join(grandchild, grandchild.c.child_id == child.c.id)
)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON child.parent_id = parent.id "
"JOIN grandchild ON grandchild.child_id = child.id",
)
def test_join_nofrom_explicit_left_side_explicit_onclause(self):
stmt = select(table1).join_from(
table1, table2, table1.c.myid == table2.c.otherid
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
@testing.variation(
"jointype",
["child_grandchild", "parent_grandchild", "grandchild_alone"],
)
def test_join_from_multiple_explicit_left_side_implicit_onclause(
self, jointype
):
"""test #12931
when join_from() is indicated, favor the explicit "left" side given
over the "left side of hte join" for creating onclause.
when join() is indicated, use the normal behavior of assuming
right side of the previous join is the new left side.
"""
if jointype.child_grandchild:
stmt = (
select(parent)
.join_from(parent, child)
.join_from(child, grandchild_w_parent)
)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN "
"child ON parent.id = child.parent_id "
"JOIN grandchildwparent "
"ON child.id = grandchildwparent.child_id",
)
elif jointype.parent_grandchild:
stmt = (
select(parent)
.join_from(parent, child)
.join_from(parent, grandchild_w_parent)
)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent "
"JOIN child ON parent.id = child.parent_id "
"JOIN grandchildwparent "
"ON parent.id = grandchildwparent.parent_id",
)
elif jointype.grandchild_alone:
stmt = (
select(parent)
.join_from(parent, child)
.join(grandchild_w_parent)
)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent "
"JOIN child ON parent.id = child.parent_id "
"JOIN grandchildwparent "
"ON child.id = grandchildwparent.child_id",
)
else:
jointype.fail()
def test_outerjoin_nofrom_explicit_left_side_explicit_onclause(self):
stmt = select(table1).outerjoin_from(
table1, table2, table1.c.myid == table2.c.otherid
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable LEFT OUTER JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_nofrom_implicit_left_side_implicit_onclause(self):
stmt = select(parent).join(child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_join_nofrom_implicit_left_side_implicit_onclause_3level(self):
stmt = select(parent).join(child).join(grandchild)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id "
"JOIN grandchild ON child.id = grandchild.child_id",
)
def test_join_nofrom_explicit_left_side_implicit_onclause(self):
stmt = select(parent).join_from(parent, child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_join_froms_implicit_left_side_explicit_onclause(self):
stmt = (
select(table1)
.select_from(table1)
.join(table2, table1.c.myid == table2.c.otherid)
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_froms_explicit_left_side_explicit_onclause(self):
stmt = (
select(table1)
.select_from(table1)
.join_from(table1, table2, table1.c.myid == table2.c.otherid)
)
self.assert_compile(
stmt,
"SELECT mytable.myid, mytable.name, mytable.description "
"FROM mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid",
)
def test_join_froms_implicit_left_side_implicit_onclause(self):
stmt = select(parent).select_from(parent).join(child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_join_froms_explicit_left_side_implicit_onclause(self):
stmt = select(parent).select_from(parent).join_from(parent, child)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id",
)
def test_join_implicit_left_side_wo_cols_onelevel(self):
"""test issue #6503"""
stmt = select(parent).join(child).with_only_columns(child.c.id)
self.assert_compile(
stmt,
"SELECT child.id FROM parent "
"JOIN child ON parent.id = child.parent_id",
)
def test_join_implicit_left_side_wo_cols_onelevel_union(self):
"""test issue #6698, regression from #6503.
this issue didn't affect Core but testing it here anyway."""
stmt = select(parent).join(child).with_only_columns(child.c.id)
stmt = stmt.union(select(child.c.id))
self.assert_compile(
stmt,
"SELECT child.id FROM parent "
"JOIN child ON parent.id = child.parent_id "
"UNION "
"SELECT child.id FROM child",
)
def test_join_implicit_left_side_wo_cols_twolevel(self):
"""test issue #6503"""
stmt = (
select(parent)
.join(child)
.with_only_columns(child.c.id)
.join(grandchild)
.with_only_columns(grandchild.c.id)
)
self.assert_compile(
stmt,
"SELECT grandchild.id FROM parent "
"JOIN child ON parent.id = child.parent_id "
"JOIN grandchild ON child.id = grandchild.child_id",
)
def test_join_implicit_left_side_wo_cols_twolevel_union(self):
"""test issue #6698, regression from #6503.
this issue didn't affect Core but testing it here anyway."""
stmt = (
select(parent)
.join(child)
.with_only_columns(child.c.id)
.join(grandchild)
.with_only_columns(grandchild.c.id)
)
stmt = union(stmt, select(grandchild.c.id))
self.assert_compile(
stmt,
"SELECT grandchild.id FROM parent "
"JOIN child ON parent.id = child.parent_id "
"JOIN grandchild ON child.id = grandchild.child_id "
"UNION "
"SELECT grandchild.id FROM grandchild",
)
def test_right_nested_inner_join(self):
inner = child.join(grandchild)
stmt = select(parent).outerjoin_from(parent, inner)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent "
"LEFT OUTER JOIN "
"(child JOIN grandchild ON child.id = grandchild.child_id) "
"ON parent.id = child.parent_id",
)
def test_joins_w_filter_by(self):
stmt = (
select(parent)
.filter_by(data="p1")
.join(child)
.filter_by(data="c1")
.join_from(table1, table2, table1.c.myid == table2.c.otherid)
.filter_by(otherid=5)
)
self.assert_compile(
stmt,
"SELECT parent.id, parent.data FROM parent JOIN child "
"ON parent.id = child.parent_id, mytable JOIN myothertable "
"ON mytable.myid = myothertable.otherid "
"WHERE parent.data = :data_1 AND child.data = :data_2 "
"AND myothertable.otherid = :otherid_1",
checkparams={"data_1": "p1", "data_2": "c1", "otherid_1": 5},
)
def test_filter_by_from_col(self):
stmt = select(table1.c.myid).filter_by(name="foo")
self.assert_compile(
stmt,
"SELECT mytable.myid FROM mytable WHERE mytable.name = :name_1",
)
def test_filter_by_from_func(self):
"""test #6414"""
stmt = select(func.count(table1.c.myid)).filter_by(name="foo")
self.assert_compile(
stmt,
"SELECT count(mytable.myid) AS count_1 "
"FROM mytable WHERE mytable.name = :name_1",
)
def test_filter_by_from_func_not_the_first_arg(self):
"""test #6414"""
stmt = select(func.bar(True, table1.c.myid)).filter_by(name="foo")
self.assert_compile(
stmt,
"SELECT bar(:bar_2, mytable.myid) AS bar_1 "
"FROM mytable WHERE mytable.name = :name_1",
)
def test_filter_by_from_cast(self):
"""test #6414"""
stmt = select(cast(table1.c.myid, Integer)).filter_by(name="foo")
self.assert_compile(
stmt,
"SELECT CAST(mytable.myid AS INTEGER) AS myid "
"FROM mytable WHERE mytable.name = :name_1",
)
def test_filter_by_from_binary(self):
"""test #6414"""
stmt = select(table1.c.myid == 5).filter_by(name="foo")
self.assert_compile(
stmt,
"SELECT mytable.myid = :myid_1 AS anon_1 "
"FROM mytable WHERE mytable.name = :name_1",
)
def test_filter_by_from_label(self):
"""test #6414"""
stmt = select(table1.c.myid.label("some_id")).filter_by(name="foo")
self.assert_compile(
stmt,
"SELECT mytable.myid AS some_id "
"FROM mytable WHERE mytable.name = :name_1",
)
def test_filter_by_no_property_from_table(self):
assert_raises_message(
exc.InvalidRequestError,
'Entity namespace for "mytable" has no property "foo"',
select(table1).filter_by,
foo="bar",
)
def test_filter_by_no_property_from_col(self):
assert_raises_message(
exc.InvalidRequestError,
'Entity namespace for "mytable.myid" has no property "foo"',
select(table1.c.myid).filter_by,
foo="bar",
)
def test_select_tuple_outer(self):
stmt = select(tuple_(table1.c.myid, table1.c.name))
assert_raises_message(
exc.CompileError,
r"Most backends don't support SELECTing from a tuple\(\) object. "
"If this is an ORM query, consider using the Bundle object.",
stmt.compile,
)
def test_select_tuple_subquery(self):
subq = select(
table1.c.name, tuple_(table1.c.myid, table1.c.name)
).subquery()
stmt = select(subq.c.name)
# if we aren't fetching it, then render it
self.assert_compile(
stmt,
"SELECT anon_1.name FROM (SELECT mytable.name AS name, "
"(mytable.myid, mytable.name) AS anon_2 FROM mytable) AS anon_1",
)
@testing.combinations(
("union_all", "UNION ALL"),
("union", "UNION"),
("intersect_all", "INTERSECT ALL"),
("intersect", "INTERSECT"),
("except_all", "EXCEPT ALL"),
("except_", "EXCEPT"),
)
def test_select_multiple_compound_elements(self, methname, joiner):
stmt = select(literal(1))
meth = getattr(stmt, methname)
stmt = meth(select(literal(2)), select(literal(3)))
self.assert_compile(
stmt,
"SELECT :param_1 AS anon_1"
" %(joiner)s SELECT :param_2 AS anon_2"
" %(joiner)s SELECT :param_3 AS anon_3" % {"joiner": joiner},
)
@testing.combinations(
lambda stmt: stmt.with_statement_hint("some hint"),
lambda stmt: stmt.with_hint(table("x"), "some hint"),
lambda stmt: stmt.where(column("q") == 5),
lambda stmt: stmt.having(column("q") == 5),
lambda stmt: stmt.order_by(column("q")),
lambda stmt: stmt.group_by(column("q")),
# TODO: continue
)
def test_methods_generative(self, testcase):
s1 = select(1)
s2 = testing.resolve_lambda(testcase, stmt=s1)
assert s1 is not s2
|
SelectTest
|
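A minimal standalone sketch of the Core-level filter_by() behavior exercised in the tests above, assuming SQLAlchemy 1.4+ where select().filter_by() is available; the table and column names mirror the test fixtures:

from sqlalchemy import column, select, table

t = table("mytable", column("myid"), column("name"))
stmt = select(t.c.myid).filter_by(name="foo")
# filter_by resolves keyword names against the entity namespace of the
# statement, producing:
#   SELECT mytable.myid FROM mytable WHERE mytable.name = :name_1
print(stmt)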
python
|
python-poetry__poetry
|
src/poetry/mixology/incompatibility_cause.py
|
{
"start": 278,
"end": 338
}
|
class ____(IncompatibilityCauseError):
pass
|
RootCauseError
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/math_ops/segment_reduction_ops_test.py
|
{
"start": 54453,
"end": 56887
}
|
class ____(test.Benchmark):
outer_dim_options = [2**x for x in range(9, 14, 2)]
ratio_options = [2**x for x in range(1, 6, 2)]
inner_dim_options = [2**x for x in range(9, 14, 2)]
  # randomly generated sizes with less alignment
inner_dim_options += [
1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584
]
dtype_options = [np.float32, np.float64]
options = (outer_dim_options, ratio_options, inner_dim_options, dtype_options)
# pylint: disable=g-long-lambda
op_functors = [lambda vc, vs, seg_ids:
("sorted", math_ops.segment_sum(vc, vs)),
lambda vc, vs, seg_ids:
("unsorted",
math_ops.unsorted_segment_sum(vc, vs, seg_ids[-1]+1))]
# pylint: enable=g-long-lambda
repeat = 10
def _npTypeToStr(self, t):
if t == np.float32:
return "fp32"
if t == np.float64:
return "fp64"
def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):
output_outer_dim = int(outer_dim / ratio)
const = np.random.randint(5, size=(outer_dim, inner_dim))
seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))
vs = variables.Variable(seg_ids.astype(np.int32))
with ops.device("/gpu:0"):
vc = variables.Variable(const.astype(dtype))
name, op = op_functor(vc, vs, seg_ids)
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
r = self.run_op_benchmark(
sess,
op,
min_iters=self.repeat,
name="_".join(
map(str,
[name, outer_dim, ratio, inner_dim,
self._npTypeToStr(dtype)])))
return name, r["wall_time"]
def benchmarkSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[0]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
def benchmarkUnsortedSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[1]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
if __name__ == "__main__":
test.main()
|
SegmentReductionOpBenchmark
|
python
|
astropy__astropy
|
astropy/io/votable/exceptions.py
|
{
"start": 23458,
"end": 23855
}
|
class ____(VOTableSpecWarning):
"""
The given element was not supported inside of the given element
until the specified VOTable version, however the version declared
in the file is for an earlier version. These attributes may not
be written out to the file.
"""
message_template = "'{}' inside '{}' added in VOTable {}"
default_args = ("child", "parent", "X.X")
|
W26
|
python
|
getsentry__sentry
|
src/sentry/monitors/models.py
|
{
"start": 21442,
"end": 22613
}
|
class ____(BaseManager["MonitorEnvironment"]):
"""
A manager that consolidates logic for monitor environment updates
"""
def ensure_environment(
self, project: Project, monitor: Monitor, environment_name: str | None
) -> MonitorEnvironment:
from sentry.monitors.rate_limit import update_monitor_quota
if not environment_name:
environment_name = "production"
if not Environment.is_valid_name(environment_name):
raise MonitorEnvironmentValidationFailed("Environment name too long")
# TODO: assume these objects exist once backfill is completed
environment = Environment.get_or_create(project=project, name=environment_name)
monitor_env, created = MonitorEnvironment.objects.get_or_create(
monitor=monitor,
environment_id=environment.id,
defaults={"status": MonitorStatus.ACTIVE, "is_muted": is_monitor_muted(monitor)},
)
# recompute per-project monitor check-in rate limit quota
if created:
update_monitor_quota(monitor_env)
return monitor_env
@region_silo_model
|
MonitorEnvironmentManager
|
python
|
scipy__scipy
|
scipy/optimize/_shgo.py
|
{
"start": 61425,
"end": 61586
}
|
class ____:
def __init__(self, v):
self.v = v
self.x_l = None
self.lres = None
self.f_min = None
self.lbounds = []
|
LMap
|
python
|
bokeh__bokeh
|
src/bokeh/models/sources.py
|
{
"start": 30381,
"end": 31824
}
|
class ____(ColumnDataSource):
''' Base class for web column data sources that can update from data
URLs.
.. note::
This base class is typically not useful to instantiate on its own.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
adapter = Nullable(Instance(CustomJS), help="""
A JavaScript callback to adapt raw JSON responses to Bokeh ``ColumnDataSource``
format.
If provided, this callback is executes immediately after the JSON data is
received, but before appending or replacing data in the data source. The
``CustomJS`` callback will receive the ``AjaxDataSource`` as ``cb_obj`` and
will receive the raw JSON response as ``cb_data.response``. The callback
code should return a ``data`` object suitable for a Bokeh ``ColumnDataSource``
(i.e. a mapping of string column names to arrays of data).
""")
max_size = Nullable(Int, help="""
Maximum size of the data columns. If a new fetch would result in columns
larger than ``max_size``, then earlier data is dropped to make room.
""")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``), or to
replace existing data entirely.
""")
data_url = Required(String, help="""
    A URL to fetch data from.
""")
|
WebDataSource
|
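As the adapter docstring above suggests, AjaxDataSource is the usual concrete subclass of this base; a minimal sketch, assuming a standard Bokeh install (the endpoint URL is illustrative):

from bokeh.models import AjaxDataSource

source = AjaxDataSource(
    data_url="http://localhost:5000/data",  # illustrative endpoint
    polling_interval=1000,                  # ms between fetches
    mode="append",                          # keep at most max_size rows
    max_size=300,
)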
python
|
pytorch__pytorch
|
torch/_dynamo/variables/misc.py
|
{
"start": 42949,
"end": 44982
}
|
class ____(UserDefinedObjectVariable):
"""
Represents a torch._C._ImperativeEngine instance.
"""
def __init__(
self,
value,
value_type=None,
**kwargs,
) -> None:
super().__init__(value=value, value_type=value_type, **kwargs)
def call_method(
self,
tx: "InstructionTranslator",
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
if name == "queue_callback":
if torch._dynamo.compiled_autograd.in_compiled_autograd_region:
assert tx.one_graph or tx.error_on_graph_break, (
"queue_callback() is only supported when Compiled Autograd is enabled with fullgraph=True"
)
# queue_callback is a method-wrapper, no need to insert a guard.
fn_vt = VariableTracker.build(
tx,
torch._dynamo.external_utils.FakeCompiledAutogradEngine.queue_callback,
)
return fn_vt.call_function(
tx,
(tx.output.side_effects.get_ca_final_callbacks_var(), *args),
kwargs,
)
else:
unimplemented(
gb_type="Unsupported torch._C._ImperativeEngine.queue_callback()",
context=f"call_method {self} {name}",
explanation="queue_callback() is only supported when "
"Compiled Autograd is enabled with fullgraph=True.",
hints=[],
)
else:
unimplemented(
gb_type="Unsupported torch._C._ImperativeEngine method",
context=f"call_method {self} {name}",
explanation="Dynamo only supports the `queue_callback` method "
f"on a torch._C._ImperativeEngine instance, but found: `{name}`.",
hints=[],
)
|
AutogradEngineVariable
|
python
|
allegroai__clearml
|
examples/frameworks/jsonargparse/jsonargparse_command.py
|
{
"start": 56,
"end": 347
}
|
class ____:
def __init__(self, prize: int = 100):
self.prize = prize
def person(self, name: str):
return "{} won {}!".format(name, self.prize)
if __name__ == "__main__":
Task.init(project_name="examples", task_name="jsonargparse command")
print(CLI(Main))
|
Main
|
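A hedged note on how CLI(Main) above is typically invoked; the exact flag syntax depends on the installed jsonargparse version, so the shell lines below are illustrative:

# __init__ parameters become top-level options and public methods become
# subcommands, so an invocation looks something like:
#   python jsonargparse_command.py --prize 500 person --name Ada
# which would print: Ada won 500!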
python
|
wandb__wandb
|
tools/cloud_tool.py
|
{
"start": 1912,
"end": 3978
}
|
class ____:
"""A simple CLI for managing GKE clusters.
It is assumed that the user has installed the Google Cloud SDK with
the required components (gke-gcloud-auth-plugin and kubectl) and has
authenticated with the Google Cloud Platform.
"""
def __init__(
self,
config: GKEConfig,
verbose: bool = False,
log_level: int = logging.INFO,
) -> None:
self.config = config
self.logger = Logger(__name__.lower(), verbose, log_level)
self.logger.print(f"Initialized {__name__} CLI")
self.logger.print(self.config)
self.update_components()
@staticmethod
def update_components() -> None:
subprocess.run(["gcloud", "--quiet", "components", "update"])
@staticmethod
def install_components() -> None:
for component in ["gke-gcloud-auth-plugin", "kubectl"]:
subprocess.run(["gcloud", "--quiet", "components", "install", component])
def create_cluster(self) -> None:
subprocess.run(
[
"gcloud",
"container",
"clusters",
"create",
self.config.cluster_name,
"--num-nodes",
str(self.config.num_nodes),
"--machine-type",
self.config.machine_type,
"--disk-size",
self.config.disk_size,
"--disk-type",
self.config.disk_type,
"--accelerator",
f"type={self.config.accelerator_type},count={self.config.accelerator_count}",
]
)
def get_cluster_credentials(self) -> None:
subprocess.run(
[
"gcloud",
"container",
"clusters",
"get-credentials",
self.config.cluster_name,
]
)
def delete_cluster(self) -> None:
subprocess.run(
["gcloud", "container", "clusters", "delete", self.config.cluster_name]
)
|
GKE
|
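A hypothetical driver for the CLI above; GKEConfig is defined elsewhere in the tool, and every field value here is illustrative rather than taken from the source:

config = GKEConfig(
    cluster_name="wandb-test",
    num_nodes=2,
    machine_type="n1-standard-4",
    disk_size="100",
    disk_type="pd-ssd",
    accelerator_type="nvidia-tesla-t4",
    accelerator_count=1,
)
gke = GKE(config, verbose=True)
gke.create_cluster()            # gcloud container clusters create ...
gke.get_cluster_credentials()   # writes kubeconfig credentials for kubectl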
python
|
falconry__falcon
|
examples/recipes/msgspec_media_validation.py
|
{
"start": 73,
"end": 397
}
|
class ____:
def process_resource(
self, req: Request, resp: Response, resource: object, params: dict
) -> None:
if schema := getattr(resource, f'{req.method}_SCHEMA', None):
param = schema.__name__.lower()
params[param] = msgspec.convert(req.get_media(), schema)
|
MsgspecMiddleware
|
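A minimal sketch of the convention the middleware above relies on: a resource exposes a `<METHOD>_SCHEMA` attribute, and the converted object is injected under the schema's lowercased class name (the Note resource here is hypothetical):

import msgspec


class Note(msgspec.Struct):
    text: str


class NotesResource:
    POST_SCHEMA = Note  # picked up via getattr(resource, f'{req.method}_SCHEMA')

    def on_post(self, req, resp, note: Note) -> None:
        # 'note' is populated by the middleware from the request media
        resp.media = {'text': note.text}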
python
|
walkccc__LeetCode
|
solutions/1233. Remove Sub-Folders from the Filesystem/1233.py
|
{
"start": 0,
"end": 285
}
|
class ____:
def removeSubfolders(self, folder: list[str]) -> list[str]:
ans = []
prev = ""
folder.sort()
for f in folder:
if len(prev) > 0 and f.startswith(prev) and f[len(prev)] == '/':
continue
ans.append(f)
prev = f
return ans
|
Solution
|
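A quick driver for the solution above; the folder list is illustrative:

folders = ["/a", "/a/b", "/c/d", "/c/d/e", "/c/f"]
# After sorting, a path that extends the last kept prefix followed by '/'
# is a sub-folder and is skipped.
print(Solution().removeSubfolders(folders))  # ['/a', '/c/d', '/c/f']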
python
|
rapidsai__cudf
|
python/cudf/cudf/core/groupby/groupby.py
|
{
"start": 111657,
"end": 114906
}
|
class ____(GroupBy):
obj: Series
def agg(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
result = super().agg(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
# downcast the result to a Series:
if result._num_columns:
if result.shape[1] == 1 and not is_list_like(func):
return result.iloc[:, 0]
# drop the first level if we have a multiindex
if result._data.nlevels > 1:
result.columns = result._data.to_pandas_index.droplevel(0)
return result
aggregate = agg
def apply(self, func, *args, **kwargs):
result = super().apply(func, *args, **kwargs)
# apply Series name to result
result.name = self.obj.name
return result
@property
def dtype(self) -> pd.Series:
raise NotImplementedError("dtype is currently not implemented.")
def hist(
self,
by=None,
ax=None,
grid: bool = True,
xlabelsize: int | None = None,
xrot: float | None = None,
ylabelsize: int | None = None,
yrot: float | None = None,
figsize: tuple[float, float] | None = None,
bins: int | Sequence[int] = 10,
backend: str | None = None,
legend: bool = False,
**kwargs,
):
raise NotImplementedError("hist is currently not implemented.")
@property
def is_monotonic_increasing(self) -> Series:
"""
Return whether each group's values are monotonically increasing.
Currently not implemented
"""
raise NotImplementedError(
"is_monotonic_increasing is currently not implemented."
)
@property
def is_monotonic_decreasing(self) -> Series:
"""
Return whether each group's values are monotonically decreasing.
Currently not implemented
"""
raise NotImplementedError(
"is_monotonic_decreasing is currently not implemented."
)
def nlargest(
self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
) -> Series:
"""
Return the largest n elements.
Currently not implemented
"""
raise NotImplementedError("nlargest is currently not implemented.")
def nsmallest(
self, n: int = 5, keep: Literal["first", "last", "all"] = "first"
) -> Series:
"""
Return the smallest n elements.
Currently not implemented
"""
raise NotImplementedError("nsmallest is currently not implemented.")
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
) -> Series | DataFrame:
raise NotImplementedError("value_counts is currently not implemented.")
def corr(
self,
other: Series,
method: str = "pearson",
min_periods: int | None = None,
) -> Series:
raise NotImplementedError("corr is currently not implemented.")
SeriesGroupBy.__doc__ = groupby_doc_template.format(ret="")
# TODO: should we define this as a dataclass instead?
|
SeriesGroupBy
|
python
|
numpy__numpy
|
numpy/_core/tests/test_multiarray.py
|
{
"start": 193639,
"end": 199983
}
|
class ____:
sizes = [(), (3,), (3, 2), (2, 3),
(3, 3), (2, 3, 4), (4, 3, 2),
(1, 2, 3, 4), (2, 3, 4, 1),
(3, 4, 1, 2), (4, 1, 2, 3),
(64,), (128,), (256,)]
@pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
for axis in list(range(-len(size), len(size))) + [None]]
for size in sizes]))
@pytest.mark.parametrize('method', [np.argmax, np.argmin])
def test_np_argmin_argmax_keepdims(self, size, axis, method):
arr = np.random.normal(size=size)
# contiguous arrays
if axis is None:
new_shape = [1 for _ in range(len(size))]
else:
new_shape = list(size)
new_shape[axis] = 1
new_shape = tuple(new_shape)
_res_orig = method(arr, axis=axis)
res_orig = _res_orig.reshape(new_shape)
res = method(arr, axis=axis, keepdims=True)
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(res.shape, dtype=res.dtype)
res1 = method(arr, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
wrong_shape[axis] = 2
else:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
# non-contiguous arrays
if axis is None:
new_shape = [1 for _ in range(len(size))]
else:
new_shape = list(size)[::-1]
new_shape[axis] = 1
new_shape = tuple(new_shape)
_res_orig = method(arr.T, axis=axis)
res_orig = _res_orig.reshape(new_shape)
res = method(arr.T, axis=axis, keepdims=True)
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(new_shape[::-1], dtype=res.dtype)
outarray = outarray.T
res1 = method(arr.T, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
# one dimension lesser for non-zero sized
# array should raise an error
with pytest.raises(ValueError):
method(arr[0], axis=axis,
out=outarray, keepdims=True)
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
wrong_shape[axis] = 2
else:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
@pytest.mark.parametrize('method', ['max', 'min'])
def test_all(self, method):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
arg_method = getattr(a, 'arg' + method)
val_method = getattr(a, method)
for i in range(a.ndim):
a_maxmin = val_method(i)
aarg_maxmin = arg_method(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(a_maxmin == aarg_maxmin.choose(
*a.transpose(i, *axes))))
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_output_shape(self, method):
# see also gh-616
a = np.ones((10, 5))
arg_method = getattr(a, method)
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
# these could be relaxed possibly (used to allow even the previous)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, arg_method, -1, out)
out = np.ones(10, dtype=np.int_)
arg_method(-1, out=out)
assert_equal(out, arg_method(-1))
@pytest.mark.parametrize('ndim', [0, 1])
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_ret_is_out(self, ndim, method):
a = np.ones((4,) + (256,) * ndim)
arg_method = getattr(a, method)
out = np.empty((256,) * ndim, dtype=np.intp)
ret = arg_method(axis=0, out=out)
assert ret is out
@pytest.mark.parametrize('np_array, method, idx, val',
[(np.zeros, 'argmax', 5942, "as"),
(np.ones, 'argmin', 6001, "0")])
def test_unicode(self, np_array, method, idx, val):
d = np_array(6031, dtype='<U9')
arg_method = getattr(d, method)
d[idx] = val
assert_equal(arg_method(), idx)
@pytest.mark.parametrize('arr_method, np_method',
[('argmax', np.argmax),
('argmin', np.argmin)])
def test_np_vs_ndarray(self, arr_method, np_method):
# make sure both ndarray.argmax/argmin and
# numpy.argmax/argmin support out/axis args
a = np.random.normal(size=(2, 3))
arg_method = getattr(a, arr_method)
# check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(arg_method(1, out1), np_method(a, 1, out2))
assert_equal(out1, out2)
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(arg_method(out=out1, axis=0),
np_method(a, out=out2, axis=0))
assert_equal(out1, out2)
@pytest.mark.leaks_references(reason="replaces None with NULL.")
@pytest.mark.parametrize('method, vals',
[('argmax', (10, 30)),
('argmin', (30, 10))])
def test_object_with_NULLs(self, method, vals):
# See gh-6032
a = np.empty(4, dtype='O')
arg_method = getattr(a, method)
ctypes.memset(a.ctypes.data, 0, a.nbytes)
assert_equal(arg_method(), 0)
a[3] = vals[0]
assert_equal(arg_method(), 3)
a[1] = vals[1]
assert_equal(arg_method(), 1)
|
TestArgmaxArgminCommon
|
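A small illustration of the keepdims contract the tests above verify (np.argmax/np.argmin accept keepdims since NumPy 1.22):

import numpy as np

a = np.arange(6).reshape(2, 3)
print(np.argmax(a, axis=1).shape)                 # (2,)
print(np.argmax(a, axis=1, keepdims=True).shape)  # (2, 1) -- reduced axis kept as size 1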
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-oci-data-science/tests/test_oci_data_science_utils.py
|
{
"start": 7943,
"end": 9359
}
|
class ____:
"""Unit tests for _get_response_token_counts function."""
def test_with_usage(self):
"""Ensures token counts are extracted correctly when usage is present."""
raw_response = {
"usage": {
"prompt_tokens": 10,
"completion_tokens": 5,
"total_tokens": 15,
}
}
expected_result = {
"prompt_tokens": 10,
"completion_tokens": 5,
"total_tokens": 15,
}
result = _get_response_token_counts(raw_response)
assert result == expected_result
def test_without_usage(self):
"""Ensures function returns empty dict when usage is missing."""
raw_response = {}
expected_result = {}
result = _get_response_token_counts(raw_response)
assert result == expected_result
def test_missing_token_counts(self):
"""Ensures missing token counts default to zero."""
raw_response = {"usage": {}}
result = _get_response_token_counts(raw_response)
assert result == {}
raw_response = {"usage": {"prompt_tokens": 10}}
expected_result = {
"prompt_tokens": 10,
"completion_tokens": 0,
"total_tokens": 0,
}
result = _get_response_token_counts(raw_response)
assert result == expected_result
|
TestGetResponseTokenCounts
|
python
|
wandb__wandb
|
wandb/automations/scopes.py
|
{
"start": 1680,
"end": 2290
}
|
class ____(_BaseScope, ProjectScopeFields):
"""An automation scope defined by a specific `Project`."""
scope_type: Literal[ScopeType.PROJECT] = ScopeType.PROJECT
# for type annotations
AutomationScope: TypeAlias = Annotated[
Union[_ArtifactSequenceScope, _ArtifactPortfolioScope, ProjectScope],
BeforeValidator(parse_scope),
Field(discriminator="typename__"),
]
# for runtime type checks
AutomationScopeTypes: tuple[type, ...] = get_args(AutomationScope.__origin__) # type: ignore[attr-defined]
__all__ = [
"ScopeType",
"ArtifactCollectionScope",
"ProjectScope",
]
|
ProjectScope
|
python
|
huggingface__transformers
|
tests/models/video_llava/test_modeling_video_llava.py
|
{
"start": 1478,
"end": 6671
}
|
class ____:
def __init__(
self,
parent,
ignore_index=-100,
image_token_index=0,
video_token_index=1,
projector_hidden_act="gelu",
seq_length=3,
num_frames=2,
vision_feature_select_strategy="default",
vision_feature_layer=-1,
text_config={
"model_type": "llama",
"seq_length": 13,
"is_training": True,
"use_input_mask": True,
"use_token_type_ids": False,
"use_labels": True,
"vocab_size": 99,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"attention_probs_dropout_prob": 0.1,
"max_position_embeddings": 2048, # we need it high because videos are 8 frames
"type_vocab_size": 16,
"type_sequence_label_size": 2,
"initializer_range": 0.02,
"num_labels": 3,
"num_choices": 4,
"pad_token_id": 3,
},
is_training=True,
vision_config={
"model_type": "clip_vision_model",
"batch_size": 12,
"image_size": 8,
"patch_size": 6,
"num_channels": 3,
"is_training": True,
"hidden_size": 32,
"projection_dim": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 37,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
):
self.parent = parent
self.ignore_index = ignore_index
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.projector_hidden_act = projector_hidden_act
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.text_config = text_config
self.vision_config = vision_config
self.num_frames = num_frames
self.pad_token_id = text_config["pad_token_id"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.is_training = is_training
self.batch_size = 5
self.num_channels = 3
self.image_size = 224
self.num_image_tokens = (vision_config["image_size"] // vision_config["patch_size"]) ** 2
self.num_video_tokens = (self.num_image_tokens + 1) * self.num_frames
self.seq_length = seq_length + self.num_image_tokens + self.num_video_tokens
def get_config(self):
return VideoLlavaConfig(
text_config=self.text_config,
vision_config=self.vision_config,
ignore_index=self.ignore_index,
image_token_index=self.image_token_index,
video_token_index=self.video_token_index,
projector_hidden_act=self.projector_hidden_act,
vision_feature_select_strategy=self.vision_feature_select_strategy,
vision_feature_layer=self.vision_feature_layer,
image_seq_length=self.num_image_tokens,
video_seq_length=self.num_video_tokens,
)
def prepare_config_and_inputs(self):
pixel_values_videos = floats_tensor(
[
self.batch_size,
self.num_frames,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
pixel_values_images = floats_tensor(
[
self.batch_size,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values_images, pixel_values_videos
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values_images, pixel_values_videos = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 1) + 1
attention_mask = input_ids.ne(1).to(torch_device)
input_ids[(input_ids == config.image_token_index) | (input_ids == config.video_token_index)] = (
self.pad_token_id
)
input_ids[:, : self.num_image_tokens] = config.image_token_index
input_ids[:, self.num_image_tokens : self.num_video_tokens + self.num_image_tokens] = config.video_token_index
inputs_dict = {
"pixel_values_videos": pixel_values_videos,
"pixel_values_images": pixel_values_images,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
|
VideoLlavaVisionText2TextModelTester
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/system/distribution.py
|
{
"start": 1181,
"end": 23197
}
|
class ____:
"""has-a various distro file parsers (os-release, etc) and logic for finding the right one."""
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
# keep names in sync with Conditionals page of docs
OSDIST_LIST = (
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/centos-release', 'name': 'CentOS'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/os-release', 'name': 'Amazon'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'Archlinux'},
{'path': '/etc/os-release', 'name': 'SUSE'},
{'path': '/etc/SuSE-release', 'name': 'SUSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/etc/os-release', 'name': 'Flatcar'},
{'path': '/etc/os-release', 'name': 'NA'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT',
'SMGL': 'Source Mage GNU/Linux',
}
# We can't include this in SEARCH_STRING because a name match on its keys
# causes a fallback to using the first whitespace separated item from the file content
    # as the name. For os-release, that is in the form 'NAME=Arch'
OS_RELEASE_ALIAS = {
'Archlinux': 'Arch Linux'
}
STRIP_QUOTES = r'\'\"\\'
def __init__(self, module):
self.module = module
def _get_file_content(self, path):
return get_file_content(path)
def _get_dist_file_content(self, path, allow_empty=False):
# can't find that dist file, or it is incorrectly empty
if not _file_exists(path, allow_empty=allow_empty):
return False, None
data = self._get_file_content(path)
return True, data
def _parse_dist_file(self, name, dist_file_content, path, collected_facts):
dist_file_dict = {}
dist_file_content = dist_file_content.strip(DistributionFiles.STRIP_QUOTES)
if name in self.SEARCH_STRING:
# look for the distribution string in the data and replace according to RELEASE_NAME_MAP
# only the distribution name is set, the version is assumed to be correct from distro.linux_distribution()
if self.SEARCH_STRING[name] in dist_file_content:
# this sets distribution=RedHat if 'Red Hat' shows up in data
dist_file_dict['distribution'] = name
dist_file_dict['distribution_file_search_string'] = self.SEARCH_STRING[name]
else:
# this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
dist_file_dict['distribution'] = dist_file_content.split()[0]
return True, dist_file_dict
if name in self.OS_RELEASE_ALIAS:
if self.OS_RELEASE_ALIAS[name] in dist_file_content:
dist_file_dict['distribution'] = name
return True, dist_file_dict
return False, dist_file_dict
# call a dedicated function for parsing the file content
# TODO: replace with a map or a class
try:
            # FIXME: most of these don't actually look at the dist file contents, but at random other stuff
distfunc_name = 'parse_distribution_file_' + name
distfunc = getattr(self, distfunc_name)
parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
return parsed, dist_file_dict
except AttributeError as exc:
self.module.debug('exc: %s' % exc)
            # this should never happen, but if it does, fail quietly rather than with a traceback
return False, dist_file_dict
return True, dist_file_dict
# to debug multiple matching release files, one can use:
# self.facts['distribution_debug'].append({path + ' ' + name:
# (parsed,
# self.facts['distribution'],
# self.facts['distribution_version'],
# self.facts['distribution_release'],
# )})
def _guess_distribution(self):
# try to find out which linux distribution this is
dist = (get_distribution(), get_distribution_version(), get_distribution_codename())
distribution_guess = {
'distribution': dist[0] or 'NA',
'distribution_version': dist[1] or 'NA',
# distribution_release can be the empty string
'distribution_release': 'NA' if dist[2] is None else dist[2]
}
distribution_guess['distribution_major_version'] = distribution_guess['distribution_version'].split('.')[0] or 'NA'
return distribution_guess
def process_dist_files(self):
# Try to handle the exceptions now ...
# self.facts['distribution_debug'] = []
dist_file_facts = {}
dist_guess = self._guess_distribution()
dist_file_facts.update(dist_guess)
for ddict in self.OSDIST_LIST:
name = ddict['name']
path = ddict['path']
allow_empty = ddict.get('allowempty', False)
has_dist_file, dist_file_content = self._get_dist_file_content(path, allow_empty=allow_empty)
            # an empty dist file is acceptable when allow_empty is set. For example, ArchLinux
            # has an empty /etc/arch-release alongside an /etc/os-release with a different name
if has_dist_file and allow_empty:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
dist_file_facts['distribution_file_variety'] = name
break
if not has_dist_file:
# keep looking
continue
parsed_dist_file, parsed_dist_file_facts = self._parse_dist_file(name, dist_file_content, path, dist_file_facts)
# finally found the right os dist file and were able to parse it
if parsed_dist_file:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
# distribution and file_variety are the same here, but distribution
# will be changed/mapped to a more specific name.
# ie, dist=Fedora, file_variety=RedHat
dist_file_facts['distribution_file_variety'] = name
dist_file_facts['distribution_file_parsed'] = parsed_dist_file
dist_file_facts.update(parsed_dist_file_facts)
break
return dist_file_facts
# FIXME: split distro file parsing into its own module or class
def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
slackware_facts = {}
if 'Slackware' not in data:
return False, slackware_facts # TODO: remove
slackware_facts['distribution'] = name
version = re.findall(r'\w+[.]\w+\+?', data)
if version:
slackware_facts['distribution_version'] = version[0]
return True, slackware_facts
def parse_distribution_file_Amazon(self, name, data, path, collected_facts):
amazon_facts = {}
if 'Amazon' not in data:
return False, amazon_facts
amazon_facts['distribution'] = 'Amazon'
if path == '/etc/os-release':
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
distribution_version = version.group(1)
amazon_facts['distribution_version'] = distribution_version
version_data = distribution_version.split(".")
if len(version_data) > 1:
major, minor = version_data
else:
major, minor = version_data[0], 'NA'
amazon_facts['distribution_major_version'] = major
amazon_facts['distribution_minor_version'] = minor
else:
version = [n for n in data.split() if n.isdigit()]
version = version[0] if version else 'NA'
amazon_facts['distribution_version'] = version
return True, amazon_facts
def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
openwrt_facts = {}
if 'OpenWrt' not in data:
return False, openwrt_facts # TODO: remove
openwrt_facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
openwrt_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
openwrt_facts['distribution_release'] = release.groups()[0]
return True, openwrt_facts
def parse_distribution_file_Alpine(self, name, data, path, collected_facts):
alpine_facts = {}
alpine_facts['distribution'] = 'Alpine'
alpine_facts['distribution_version'] = data
return True, alpine_facts
def parse_distribution_file_SUSE(self, name, data, path, collected_facts):
suse_facts = {}
if 'suse' not in data.lower():
return False, suse_facts # TODO: remove if tested without this
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
suse_facts['distribution'] = distribution.group(1).strip('"')
                # example patterns are 13.04, 13.0, 13
distribution_version = re.search(r'^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
suse_facts['distribution_version'] = distribution_version.group(1)
suse_facts['distribution_major_version'] = distribution_version.group(1).split('.')[0]
if 'open' in data.lower():
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release:
suse_facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                    # SLES doesn't have funny release names
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release.group(1):
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
suse_facts['distribution_release'] = release
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).splitlines()[0]
suse_facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
suse_facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
suse_facts['distribution'] = "SLES"
elif "Desktop" in data:
suse_facts['distribution'] = "SLED"
for line in lines:
                    release = re.search('PATCHLEVEL = ([0-9]+)', line)  # SLES doesn't have funny release names
if release:
suse_facts['distribution_release'] = release.group(1)
suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)
# Check VARIANT_ID first for SLES4SAP or SL-Micro
variant_id_match = re.search(r'^VARIANT_ID="?([^"\n]*)"?', data, re.MULTILINE)
if variant_id_match:
variant_id = variant_id_match.group(1)
if variant_id in ('server-sap', 'sles-sap'):
suse_facts['distribution'] = 'SLES_SAP'
elif variant_id == 'transactional':
suse_facts['distribution'] = 'SL-Micro'
else:
# Fallback for older SLES 15 using baseproduct symlink
if os.path.islink('/etc/products.d/baseproduct'):
resolved = os.path.realpath('/etc/products.d/baseproduct')
if resolved.endswith('SLES_SAP.prod'):
suse_facts['distribution'] = 'SLES_SAP'
elif resolved.endswith('SL-Micro.prod'):
suse_facts['distribution'] = 'SL-Micro'
return True, suse_facts
def parse_distribution_file_Debian(self, name, data, path, collected_facts):
debian_facts = {}
if any(distro in data for distro in ('Debian', 'Raspbian')):
debian_facts['distribution'] = 'Debian'
release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
# Last resort: try to find release from tzdata as either lsb is missing or this is very old debian
if collected_facts['distribution_release'] == 'NA' and 'Debian' in data:
dpkg_cmd = self.module.get_bin_path('dpkg')
if dpkg_cmd:
cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
rc, out, err = self.module.run_command(cmd)
if rc == 0:
debian_facts['distribution_release'] = out.strip()
debian_version_path = '/etc/debian_version'
distdata = get_file_lines(debian_version_path)
for line in distdata:
m = re.search(r'(\d+)\.(\d+)', line.strip())
if m:
debian_facts['distribution_minor_version'] = m.groups()[1]
elif 'Ubuntu' in data:
debian_facts['distribution'] = 'Ubuntu'
# nothing else to do, Ubuntu gets correct info from python functions
elif 'SteamOS' in data:
debian_facts['distribution'] = 'SteamOS'
# nothing else to do, SteamOS gets correct info from python functions
elif path in ('/etc/lsb-release', '/etc/os-release') and ('Kali' in data or 'Parrot' in data):
if 'Kali' in data:
# Kali does not provide /etc/lsb-release anymore
debian_facts['distribution'] = 'Kali'
elif 'Parrot' in data:
debian_facts['distribution'] = 'Parrot'
release = re.search('DISTRIB_RELEASE=(.*)', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif 'Devuan' in data:
debian_facts['distribution'] = 'Devuan'
release = re.search(r"PRETTY_NAME=\"?[^(\"]+ \(?([^) \"]+)\)?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1)
elif 'Cumulus' in data:
debian_facts['distribution'] = 'Cumulus Linux'
version = re.search(r"VERSION_ID=(.*)", data)
if version:
major, _minor, _dummy_ver = version.group(1).split(".")
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = major
release = re.search(r'VERSION="(.*)"', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif "Mint" in data:
debian_facts['distribution'] = 'Linux Mint'
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
elif 'UOS' in data or 'Uos' in data or 'uos' in data:
debian_facts['distribution'] = 'Uos'
release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
elif 'Deepin' in data or 'deepin' in data:
debian_facts['distribution'] = 'Deepin'
release = re.search(r"VERSION_CODENAME=\"?([^\"]+)\"?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
elif 'LMDE' in data:
debian_facts['distribution'] = 'Linux Mint Debian Edition'
else:
return False, debian_facts
return True, debian_facts
def parse_distribution_file_Mandriva(self, name, data, path, collected_facts):
mandriva_facts = {}
if 'Mandriva' in data:
mandriva_facts['distribution'] = 'Mandriva'
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
mandriva_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
mandriva_facts['distribution_release'] = release.groups()[0]
mandriva_facts['distribution'] = name
else:
return False, mandriva_facts
return True, mandriva_facts
def parse_distribution_file_NA(self, name, data, path, collected_facts):
na_facts = {}
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution and name == 'NA':
na_facts['distribution'] = distribution.group(1).strip('"')
version = re.search("^VERSION=(.*)", line)
if version and collected_facts['distribution_version'] == 'NA':
na_facts['distribution_version'] = version.group(1).strip('"')
return True, na_facts
def parse_distribution_file_Coreos(self, name, data, path, collected_facts):
coreos_facts = {}
# FIXME: pass in ro copy of facts for this kind of thing
distro = get_distribution()
if distro.lower() == 'coreos':
if not data:
# include fix from #15230, #15228
# TODO: verify this is ok for above bugs
return False, coreos_facts
release = re.search("^GROUP=(.*)", data)
if release:
coreos_facts['distribution_release'] = release.group(1).strip('"')
else:
return False, coreos_facts # TODO: remove if tested without this
return True, coreos_facts
def parse_distribution_file_Flatcar(self, name, data, path, collected_facts):
flatcar_facts = {}
distro = get_distribution()
if distro.lower() != 'flatcar':
return False, flatcar_facts
if not data:
return False, flatcar_facts
version = re.search("VERSION=(.*)", data)
if version:
flatcar_facts['distribution_major_version'] = version.group(1).strip('"').split('.')[0]
flatcar_facts['distribution_version'] = version.group(1).strip('"')
return True, flatcar_facts
def parse_distribution_file_ClearLinux(self, name, data, path, collected_facts):
clear_facts = {}
if "clearlinux" not in name.lower():
return False, clear_facts
pname = re.search('NAME="(.*)"', data)
if pname:
if 'Clear Linux' not in pname.groups()[0]:
return False, clear_facts
clear_facts['distribution'] = pname.groups()[0]
version = re.search('VERSION_ID=(.*)', data)
if version:
clear_facts['distribution_major_version'] = version.groups()[0]
clear_facts['distribution_version'] = version.groups()[0]
release = re.search('ID=(.*)', data)
if release:
clear_facts['distribution_release'] = release.groups()[0]
return True, clear_facts
def parse_distribution_file_CentOS(self, name, data, path, collected_facts):
centos_facts = {}
if 'CentOS Stream' in data:
centos_facts['distribution_release'] = 'Stream'
return True, centos_facts
if "TencentOS Server" in data:
centos_facts['distribution'] = 'TencentOS'
return True, centos_facts
return False, centos_facts
|
DistributionFiles
|
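A hypothetical walk-through of the lookup order implemented above; `module` stands in for an AnsibleModule-like object providing debug(), get_bin_path(), and run_command():

dist_files = DistributionFiles(module)
facts = dist_files.process_dist_files()
# OSDIST_LIST is scanned in order; the first file that exists (and either
# matches a SEARCH_STRING/alias or parses via parse_distribution_file_<Name>)
# wins and fills distribution, distribution_version, and related keys.
print(facts.get('distribution'), facts.get('distribution_version'))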
python
|
huggingface__transformers
|
src/transformers/models/seed_oss/modeling_seed_oss.py
|
{
"start": 22412,
"end": 22515
}
|
class ____(GenericForTokenClassification, SeedOssPreTrainedModel):
pass
|
SeedOssForTokenClassification
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/waiters/test_opensearch_serverless.py
|
{
"start": 1534,
"end": 2987
}
|
class ____(TestOpenSearchServerlessCustomWaitersBase):
WAITER_NAME = "collection_available"
@pytest.fixture
def mock_getter(self):
with mock.patch.object(self.client, "batch_get_collection") as getter:
yield getter
@pytest.mark.parametrize("state", OpenSearchServerlessCollectionActiveSensor.SUCCESS_STATES)
def test_model_customization_job_complete(self, state, mock_getter):
mock_getter.return_value = {"collectionDetails": [{"status": state}]}
OpenSearchServerlessHook().get_waiter(self.WAITER_NAME).wait(collection_id="collection_id")
@pytest.mark.parametrize("state", OpenSearchServerlessCollectionActiveSensor.FAILURE_STATES)
def test_model_customization_job_failed(self, state, mock_getter):
mock_getter.return_value = {"collectionDetails": [{"status": state}]}
with pytest.raises(botocore.exceptions.WaiterError):
OpenSearchServerlessHook().get_waiter(self.WAITER_NAME).wait(collection_id="collection_id")
def test_model_customization_job_wait(self, mock_getter):
wait = {"collectionDetails": [{"status": "CREATING"}]}
success = {"collectionDetails": [{"status": "ACTIVE"}]}
mock_getter.side_effect = [wait, wait, success]
OpenSearchServerlessHook().get_waiter(self.WAITER_NAME).wait(
collection_id="collection_id", WaiterConfig={"Delay": 0.01, "MaxAttempts": 3}
)
|
TestCollectionAvailableWaiter
|
python
|
pdm-project__pdm
|
src/pdm/cli/commands/publish/package.py
|
{
"start": 1040,
"end": 8267
}
|
class ____:
"""A distribution file for upload.
XXX: currently only supports sdist and wheel.
"""
filename: str
metadata: email.message.Message
comment: str | None
py_version: str | None
filetype: str
def __post_init__(self) -> None:
self.base_filename = os.path.basename(self.filename)
self.gpg_signature: tuple[str, bytes] | None = None
def get_hashes(self) -> dict[str, str]:
hashers = {"sha256_digest": hashlib.sha256()}
try:
hashers["md5_digest"] = hashlib.md5()
except ValueError:
pass
try:
hashers["blake2_256_digest"] = hashlib.blake2b(digest_size=256 // 8) # type: ignore[assignment]
except (TypeError, ValueError):
pass
with open(self.filename, "rb") as f:
for chunk in iter(lambda: f.read(8192), b""):
for hasher in hashers.values():
hasher.update(chunk)
return {k: v.hexdigest() for k, v in hashers.items()}
@classmethod
def from_filename(cls, filename: str, comment: str | None) -> PackageFile:
filetype = ""
for ext, dtype in DIST_EXTENSIONS.items():
if filename.endswith(ext):
filetype = dtype
break
else:
raise PdmUsageError(f"Unknown distribution file type: {filename}")
if filetype == "bdist_wheel":
metadata = cls.read_metadata_from_wheel(filename)
match = wheel_file_re.match(os.path.basename(filename))
if match is None:
py_ver = "any"
else:
py_ver = match.group("pyver")
elif filename.endswith(".zip"):
metadata = cls.read_metadata_from_zip(filename)
py_ver = "source"
else:
metadata = cls.read_metadata_from_tar(filename)
py_ver = "source"
return cls(filename, metadata, comment, py_ver, filetype)
@staticmethod
def read_metadata_from_tar(filename: str) -> email.message.Message:
import tarfile
from unearth.preparer import has_leading_dir, split_leading_dir
if filename.endswith(".gz"):
mode = "r:gz"
elif filename.endswith(".bz2"):
mode = "r:bz2"
else:
logger.warning(f"Can't determine the compression mode for {filename}")
mode = "r:*"
with tarfile.open(filename, mode) as tar: # type: ignore[call-overload]
members = tar.getmembers()
has_leading = has_leading_dir(m.name for m in members)
for m in members:
fn = split_leading_dir(m.name)[1] if has_leading else m.name
if fn == "PKG-INFO":
return parse_metadata(cast(IO[bytes], tar.extractfile(m)))
raise ProjectError(f"No PKG-INFO found in {filename}")
@staticmethod
def read_metadata_from_zip(filename: str) -> email.message.Message:
import zipfile
from unearth.preparer import has_leading_dir, split_leading_dir
with zipfile.ZipFile(filename, allowZip64=True) as zip:
filenames = zip.namelist()
has_leading = has_leading_dir(filenames)
for name in filenames:
fn = split_leading_dir(name)[1] if has_leading else name
if fn == "PKG-INFO":
return parse_metadata(zip.open(name))
raise ProjectError(f"No PKG-INFO found in {filename}")
@staticmethod
def read_metadata_from_wheel(filename: str) -> email.message.Message:
import zipfile
with zipfile.ZipFile(filename, allowZip64=True) as zip:
for fn in zip.namelist():
if fn.replace("\\", "/").endswith(".dist-info/METADATA"):
return parse_metadata(zip.open(fn))
raise ProjectError(f"No egg-info is found in {filename}")
def add_gpg_signature(self, filename: str, signature_name: str) -> None:
if self.gpg_signature is not None:
raise PdmUsageError("GPG signature already added")
with open(filename, "rb") as f:
self.gpg_signature = (signature_name, f.read())
def sign(self, identity: str | None) -> None:
logger.info("Signing %s with gpg", self.base_filename)
gpg_args = ["gpg", "--detach-sign"]
if identity is not None:
gpg_args.extend(["--local-user", identity])
gpg_args.extend(["-a", self.filename])
self._run_gpg(gpg_args)
self.add_gpg_signature(self.filename + ".asc", self.base_filename + ".asc")
@staticmethod
def _run_gpg(gpg_args: list[str]) -> None:
try:
subprocess.run(gpg_args, check=True)
return
except FileNotFoundError:
logger.warning("gpg executable not available. Attempting fallback to gpg2.")
gpg_args[0] = "gpg2"
try:
subprocess.run(gpg_args, check=True)
except FileNotFoundError:
raise PdmUsageError(
"'gpg' or 'gpg2' executables not available.\n"
"Try installing one of these or specifying an executable "
"with the --sign-with flag."
) from None
@property
def metadata_dict(self) -> dict[str, Any]:
meta = self.metadata
data = {
# identify release
"name": meta["Name"],
"version": meta["Version"],
# file content
"filetype": self.filetype,
"pyversion": self.py_version,
# additional meta-data
"metadata_version": meta["Metadata-Version"],
"summary": meta["Summary"],
"home_page": meta["Home-page"],
"author": meta["Author"],
"author_email": meta["Author-email"],
"maintainer": meta["Maintainer"],
"maintainer_email": meta["Maintainer-email"],
"license": meta["License"],
"description": meta.get_payload(),
"keywords": meta["Keywords"],
"platform": meta.get_all("Platform") or (),
"classifiers": meta.get_all("Classifier") or [],
"download_url": meta["Download-URL"],
"supported_platform": meta.get_all("Supported-Platform") or (),
"comment": self.comment,
# Metadata 1.2
"project_urls": meta.get_all("Project-URL") or (),
"provides_dist": meta.get_all("Provides-Dist") or (),
"obsoletes_dist": meta.get_all("Obsoletes-Dist") or (),
"requires_dist": meta.get_all("Requires-Dist") or (),
"requires_external": meta.get_all("Requires-External") or (),
"requires_python": meta.get_all("Requires-Python") or (),
# Metadata 2.1
"provides_extras": meta.get_all("Provides-Extra") or (),
"description_content_type": meta.get("Description-Content-Type"),
# Metadata 2.2
"dynamic": meta.get_all("Dynamic") or (),
# Hashes
**self.get_hashes(),
}
if self.gpg_signature is not None:
data["gpg_signature"] = self.gpg_signature
return data
|
PackageFile
|
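A hypothetical usage sketch of the class above; the wheel filename is illustrative:

pkg = PackageFile.from_filename("demo-0.1.0-py3-none-any.whl", comment=None)
print(pkg.filetype)              # 'bdist_wheel'
print(pkg.py_version)            # 'py3', parsed from the wheel filename
print(sorted(pkg.get_hashes()))  # sha256 always; md5/blake2 when the runtime allows them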
python
|
PyCQA__pyflakes
|
pyflakes/test/test_api.py
|
{
"start": 25234,
"end": 25706
}
|
class ____(IntegrationTests):
"""
Tests of the pyflakes main function.
"""
def runPyflakes(self, paths, stdin=None):
try:
with SysStreamCapturing(stdin) as capture:
main(args=paths)
except SystemExit as e:
self.assertIsInstance(e.code, bool)
rv = int(e.code)
return (capture.output, capture.error, rv)
else:
raise RuntimeError('SystemExit not raised')
|
TestMain
|
python
|
huggingface__transformers
|
src/transformers/models/lfm2/modeling_lfm2.py
|
{
"start": 19452,
"end": 23540
}
|
class ____(nn.Module):
def __init__(
self,
config: Lfm2Config,
layer_idx: int,
):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.L_cache = config.conv_L_cache
self.bias = config.conv_bias
self.conv = nn.Conv1d(
in_channels=config.hidden_size,
out_channels=config.hidden_size,
kernel_size=self.L_cache,
groups=config.hidden_size,
bias=self.bias,
padding=self.L_cache - 1,
)
self.in_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=self.bias)
self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=self.bias)
def cuda_kernels_forward(
self,
x: torch.Tensor,
past_key_values: Optional[Lfm2HybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
x = apply_mask_to_padding_states(x, attention_mask)
BCx = self.in_proj(x).transpose(-1, -2)
B, C, x = BCx.chunk(3, dim=-2)
Bx = B * x
conv_weights = self.conv.weight.view(self.conv.weight.size(0), self.conv.weight.size(2))
if past_key_values is not None and cache_position[0] > 0:
conv_out = causal_conv1d_update(
Bx.squeeze(-1),
past_key_values.conv_cache[self.layer_idx],
conv_weights,
self.conv.bias,
None,
)
conv_out = conv_out.unsqueeze(-1)
else:
if past_key_values is not None:
conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0))
past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
conv_out = causal_conv1d_fn(Bx, conv_weights, self.conv.bias, activation=None)
y = C * conv_out
y = self.out_proj(y.transpose(-1, -2).contiguous())
return y
def slow_forward(
self,
x: torch.Tensor,
past_key_values: Optional[Lfm2HybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
seqlen = x.shape[1]
x = apply_mask_to_padding_states(x, attention_mask)
BCx = self.in_proj(x).transpose(-1, -2)
B, C, x = BCx.chunk(3, dim=-2)
Bx = B * x
if past_key_values is not None and cache_position[0] > 0:
conv_state = past_key_values.conv_cache[self.layer_idx]
cache_position = cache_position.clamp(0, self.L_cache - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
conv_state[:, :, cache_position] = Bx.to(device=conv_state.device, dtype=conv_state.dtype)
past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
conv_out = torch.sum(conv_state.to(Bx.device) * self.conv.weight[:, 0, :], dim=-1)
if self.bias:
conv_out += self.conv.bias
conv_out = conv_out.unsqueeze(-1)
else:
if past_key_values is not None:
conv_state = nn.functional.pad(Bx, (self.L_cache - Bx.shape[-1], 0))
past_key_values.conv_cache[self.layer_idx].copy_(conv_state)
conv_out = self.conv(Bx)[..., :seqlen]
y = C * conv_out
y = y.transpose(-1, -2).contiguous()
y = self.out_proj(y)
return y
def forward(
self,
hidden_states: torch.Tensor,
past_key_values: Optional[Lfm2HybridConvCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
):
if is_fast_path_available and "cuda" in hidden_states.device.type and not is_torchdynamo_compiling():
return self.cuda_kernels_forward(hidden_states, past_key_values, cache_position, attention_mask)
return self.slow_forward(hidden_states, past_key_values, cache_position, attention_mask)
|
Lfm2ShortConv
|
python
|
aimacode__aima-python
|
probability4e.py
|
{
"start": 814,
"end": 2730
}
|
class ____:
"""A discrete probability distribution. You name the random variable
in the constructor, then assign and query probability of values.
>>> P = ProbDist('Flip'); P['H'], P['T'] = 0.25, 0.75; P['H']
0.25
>>> P = ProbDist('X', {'lo': 125, 'med': 375, 'hi': 500})
>>> P['lo'], P['med'], P['hi']
(0.125, 0.375, 0.5)
"""
def __init__(self, varname='?', freqs=None):
"""If freqs is given, it is a dictionary of values - frequency pairs,
then ProbDist is normalized."""
self.prob = {}
self.varname = varname
self.values = []
if freqs:
for (v, p) in freqs.items():
self[v] = p
self.normalize()
def __getitem__(self, val):
"""Given a value, return P(value)."""
try:
return self.prob[val]
except KeyError:
return 0
def __setitem__(self, val, p):
"""Set P(val) = p."""
if val not in self.values:
self.values.append(val)
self.prob[val] = p
def normalize(self):
"""Make sure the probabilities of all values sum to 1.
Returns the normalized distribution.
Raises a ZeroDivisionError if the sum of the values is 0."""
total = sum(self.prob.values())
if not np.isclose(total, 1.0):
for val in self.prob:
self.prob[val] /= total
return self
def show_approx(self, numfmt='{:.3g}'):
"""Show the probabilities rounded and sorted by key, for the
sake of portable doctests."""
return ', '.join([('{}: ' + numfmt).format(v, p)
for (v, p) in sorted(self.prob.items())])
def __repr__(self):
return "P({})".format(self.varname)
# ______________________________________________________________________________
# 12.3 Inference Using Full Joint Distributions
|
ProbDist
|
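One more usage sketch beyond the doctests above, showing normalization of raw frequencies and the default-zero lookup:

P = ProbDist('Weather', {'sun': 6, 'rain': 1, 'cloud': 3})
print(P.show_approx())  # cloud: 0.3, rain: 0.1, sun: 0.6
print(P['snow'])        # values never assigned report probability 0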
python
|
tornadoweb__tornado
|
tornado/test/testing_test.py
|
{
"start": 3608,
"end": 6024
}
|
class ____(unittest.TestCase):
# These tests verify that tests that return non-None values (without being decorated with
# @gen_test) raise errors instead of incorrectly succeeding. These tests should be removed or
# updated when the _callTestMethod method is removed from AsyncTestCase (the same checks will
# still happen, but they'll be performed in the stdlib as DeprecationWarnings)
def test_undecorated_generator(self):
class Test(AsyncTestCase):
def test_gen(self):
yield
test = Test("test_gen")
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
@unittest.skipIf(
platform.python_implementation() == "PyPy",
"pypy destructor warnings cannot be silenced",
)
@unittest.skipIf(
# This check actually exists in 3.11 but it changed in 3.12 in a way that breaks
# this test.
sys.version_info >= (3, 12),
"py312 has its own check for test case returns",
)
def test_undecorated_coroutine(self):
class Test(AsyncTestCase):
async def test_coro(self):
pass
test = Test("test_coro")
result = unittest.TestResult()
# Silence "RuntimeWarning: coroutine 'test_coro' was never awaited".
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("should be decorated", result.errors[0][1])
def test_undecorated_generator_with_skip(self):
class Test(AsyncTestCase):
@unittest.skip("don't run this")
def test_gen(self):
yield
test = Test("test_gen")
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.skipped), 1)
def test_other_return(self):
class Test(AsyncTestCase):
def test_other_return(self):
return 42
test = Test("test_other_return")
result = unittest.TestResult()
test.run(result)
self.assertEqual(len(result.errors), 1)
self.assertIn("Return value from test method ignored", result.errors[0][1])
|
AsyncTestCaseReturnAssertionsTest
|
python
|
apache__airflow
|
helm-tests/tests/helm_tests/security/test_security_context.py
|
{
"start": 4379,
"end": 21613
}
|
class ____:
"""Tests security context."""
# Test securityContext setting for Pods and Containers
def test_check_default_setting(self):
docs = render_chart(
values={
"securityContext": {"runAsUser": 6000, "fsGroup": 60},
"webserver": {"defaultUser": {"enabled": True}},
"flower": {"enabled": True},
"statsd": {"enabled": False},
"airflowVersion": "2.2.0",
"executor": "CeleryKubernetesExecutor",
},
show_only=[
"templates/flower/flower-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
],
)
for doc in docs:
assert jmespath.search("spec.template.spec.securityContext.runAsUser", doc) == 6000
assert jmespath.search("spec.template.spec.securityContext.fsGroup", doc) == 60
# Test priority:
# <local>.securityContext > securityContext > uid + gid
def test_check_local_setting(self):
component_contexts = {"securityContext": {"runAsUser": 9000, "fsGroup": 90}}
docs = render_chart(
values={
"uid": 3000,
"gid": 30,
"securityContext": {"runAsUser": 6000, "fsGroup": 60},
"webserver": {"defaultUser": {"enabled": True}, **component_contexts},
"workers": {**component_contexts},
"flower": {"enabled": True, **component_contexts},
"scheduler": {**component_contexts},
"createUserJob": {**component_contexts},
"migrateDatabaseJob": {**component_contexts},
"triggerer": {**component_contexts},
"redis": {**component_contexts},
"statsd": {"enabled": True, **component_contexts},
"airflowVersion": "2.2.0",
"executor": "CeleryKubernetesExecutor",
},
show_only=[
"templates/flower/flower-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/redis/redis-statefulset.yaml",
],
)
for doc in docs:
assert jmespath.search("spec.template.spec.securityContext.runAsUser", doc) == 9000
assert jmespath.search("spec.template.spec.securityContext.fsGroup", doc) == 90
    # Test that a component-level securityContext takes priority over uid (redis, statsd)
def test_check_local_uid(self):
component_contexts = {"uid": 3000, "securityContext": {"runAsUser": 7000}}
docs = render_chart(
values={
"redis": {**component_contexts},
"statsd": {"enabled": True, **component_contexts},
},
show_only=[
"templates/statsd/statsd-deployment.yaml",
"templates/redis/redis-statefulset.yaml",
],
)
for doc in docs:
assert jmespath.search("spec.template.spec.securityContext.runAsUser", doc) == 7000
    # Test that securityContext takes priority over uid under dags.gitSync
def test_gitsync_sidecar_and_init_container(self):
docs = render_chart(
values={
"dags": {"gitSync": {"enabled": True, "uid": 9000, "securityContext": {"runAsUser": 8000}}},
"airflowVersion": "1.10.15",
},
show_only=[
"templates/workers/worker-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
],
)
for doc in docs:
assert "git-sync" in [c["name"] for c in jmespath.search("spec.template.spec.containers", doc)]
assert "git-sync-init" in [
c["name"] for c in jmespath.search("spec.template.spec.initContainers", doc)
]
assert (
jmespath.search(
"spec.template.spec.initContainers[?name=='git-sync-init'].securityContext.runAsUser | [0]",
doc,
)
== 8000
)
assert (
jmespath.search(
"spec.template.spec.containers[?name=='git-sync'].securityContext.runAsUser | [0]",
doc,
)
== 8000
)
    # Test global securityContexts for pods and containers
def test_global_security_context(self):
ctx_value_pod = {"runAsUser": 7000}
ctx_value_container = {"allowPrivilegeEscalation": False}
docs = render_chart(
values={
"securityContexts": {"containers": ctx_value_container, "pod": ctx_value_pod},
"cleanup": {"enabled": True},
"flower": {"enabled": True},
"pgbouncer": {"enabled": True},
},
show_only=[
"templates/cleanup/cleanup-cronjob.yaml",
"templates/flower/flower-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/pgbouncer/pgbouncer-deployment.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/redis/redis-statefulset.yaml",
],
)
assert ctx_value_container == jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].securityContext", docs[0]
)
assert ctx_value_pod == jmespath.search(
"spec.jobTemplate.spec.template.spec.securityContext", docs[0]
)
for doc in docs[1:-3]:
assert ctx_value_container == jmespath.search(
"spec.template.spec.containers[0].securityContext", doc
)
assert ctx_value_pod == jmespath.search("spec.template.spec.securityContext", doc)
        # Global security context is not propagated to pgbouncer, redis and statsd, so we test the default values
default_ctx_value_container = {"allowPrivilegeEscalation": False, "capabilities": {"drop": ["ALL"]}}
default_ctx_value_pod_pgbouncer = {"runAsUser": 65534}
default_ctx_value_pod_statsd = {"runAsUser": 65534}
default_ctx_value_pod_redis = {"runAsUser": 0}
for doc in docs[-3:]:
assert default_ctx_value_container == jmespath.search(
"spec.template.spec.containers[0].securityContext", doc
)
# Test pgbouncer metrics-exporter container
assert default_ctx_value_container == jmespath.search(
"spec.template.spec.containers[1].securityContext", docs[-3]
)
assert default_ctx_value_pod_pgbouncer == jmespath.search(
"spec.template.spec.securityContext", docs[-3]
)
assert default_ctx_value_pod_statsd == jmespath.search("spec.template.spec.securityContext", docs[-2])
assert default_ctx_value_pod_redis == jmespath.search("spec.template.spec.securityContext", docs[-1])
# Test securityContexts for main containers
def test_main_container_setting(self):
ctx_value = {"allowPrivilegeEscalation": False}
security_context = {"securityContexts": {"container": ctx_value}}
docs = render_chart(
values={
"cleanup": {"enabled": True, **security_context},
"scheduler": {**security_context},
"webserver": {**security_context},
"workers": {**security_context},
"flower": {"enabled": True, **security_context},
"statsd": {**security_context},
"createUserJob": {**security_context},
"migrateDatabaseJob": {**security_context},
"triggerer": {**security_context},
"pgbouncer": {"enabled": True, **security_context},
"redis": {**security_context},
},
show_only=[
"templates/cleanup/cleanup-cronjob.yaml",
"templates/flower/flower-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/pgbouncer/pgbouncer-deployment.yaml",
"templates/redis/redis-statefulset.yaml",
],
)
assert ctx_value == jmespath.search(
"spec.jobTemplate.spec.template.spec.containers[0].securityContext", docs[0]
)
for doc in docs[1:]:
assert ctx_value == jmespath.search("spec.template.spec.containers[0].securityContext", doc)
    # Test securityContexts for the log-groomer sidecar container
def test_log_groomer_sidecar_container_setting(self):
ctx_value = {"allowPrivilegeEscalation": False}
spec = {"logGroomerSidecar": {"securityContexts": {"container": ctx_value}}}
docs = render_chart(
values={
"scheduler": {**spec},
"workers": {**spec},
"dagProcessor": {**spec},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/dag-processor/dag-processor-deployment.yaml",
],
)
for doc in docs:
assert ctx_value == jmespath.search("spec.template.spec.containers[1].securityContext", doc)
    # Test securityContexts for the pgbouncer metrics-exporter sidecar container
def test_metrics_explorer_container_setting(self):
ctx_value = {"allowPrivilegeEscalation": False}
docs = render_chart(
values={
"pgbouncer": {
"enabled": True,
"metricsExporterSidecar": {"securityContexts": {"container": ctx_value}},
},
},
show_only=["templates/pgbouncer/pgbouncer-deployment.yaml"],
)
assert ctx_value == jmespath.search("spec.template.spec.containers[1].securityContext", docs[0])
# Test securityContexts for worker-kerberos main container
def test_worker_kerberos_container_setting(self):
ctx_value = {"allowPrivilegeEscalation": False}
docs = render_chart(
values={
"workers": {
"kerberosSidecar": {"enabled": True, "securityContexts": {"container": ctx_value}}
},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert ctx_value == jmespath.search("spec.template.spec.containers[2].securityContext", docs[0])
# Test securityContexts for the wait-for-migrations init containers
def test_wait_for_migrations_init_container_setting(self):
ctx_value = {"allowPrivilegeEscalation": False}
spec = {
"waitForMigrations": {
"enabled": True,
"securityContexts": {"container": ctx_value},
}
}
docs = render_chart(
values={
"scheduler": {**spec},
"webserver": {**spec},
"triggerer": {**spec},
"workers": {"waitForMigrations": {"securityContexts": {"container": ctx_value}}},
},
show_only=[
"templates/scheduler/scheduler-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/workers/worker-deployment.yaml",
],
)
for doc in docs:
assert ctx_value == jmespath.search("spec.template.spec.initContainers[0].securityContext", doc)
# Test securityContexts for volume-permissions init container
def test_volume_permissions_init_container_setting(self):
ctx_value = {"allowPrivilegeEscalation": False}
docs = render_chart(
values={
"workers": {
"persistence": {
"enabled": True,
"fixPermissions": True,
"securityContexts": {"container": ctx_value},
}
}
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert ctx_value == jmespath.search("spec.template.spec.initContainers[0].securityContext", docs[0])
# Test securityContexts for main pods
def test_main_pod_setting(self):
ctx_value = {"runAsUser": 7000}
security_context = {"securityContexts": {"pod": ctx_value}}
docs = render_chart(
values={
"cleanup": {"enabled": True, **security_context},
"scheduler": {**security_context},
"webserver": {**security_context},
"workers": {**security_context},
"flower": {"enabled": True, **security_context},
"statsd": {**security_context},
"createUserJob": {**security_context},
"migrateDatabaseJob": {**security_context},
"triggerer": {**security_context},
"pgbouncer": {"enabled": True, **security_context},
"redis": {**security_context},
},
show_only=[
"templates/cleanup/cleanup-cronjob.yaml",
"templates/flower/flower-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/pgbouncer/pgbouncer-deployment.yaml",
"templates/redis/redis-statefulset.yaml",
],
)
assert ctx_value == jmespath.search("spec.jobTemplate.spec.template.spec.securityContext", docs[0])
for doc in docs[1:]:
assert ctx_value == jmespath.search("spec.template.spec.securityContext", doc)
    # Test the legacy securityContext setting for main pods
def test_main_pod_setting_legacy_security(self):
ctx_value = {"runAsUser": 7000}
security_context = {"securityContext": ctx_value}
docs = render_chart(
values={
"cleanup": {"enabled": True, **security_context},
"scheduler": {**security_context},
"webserver": {**security_context},
"workers": {**security_context},
"flower": {"enabled": True, **security_context},
"statsd": {**security_context},
"createUserJob": {**security_context},
"migrateDatabaseJob": {**security_context},
"triggerer": {**security_context},
"redis": {**security_context},
},
show_only=[
"templates/cleanup/cleanup-cronjob.yaml",
"templates/flower/flower-deployment.yaml",
"templates/scheduler/scheduler-deployment.yaml",
"templates/webserver/webserver-deployment.yaml",
"templates/workers/worker-deployment.yaml",
"templates/statsd/statsd-deployment.yaml",
"templates/jobs/create-user-job.yaml",
"templates/jobs/migrate-database-job.yaml",
"templates/triggerer/triggerer-deployment.yaml",
"templates/redis/redis-statefulset.yaml",
],
)
assert ctx_value == jmespath.search("spec.jobTemplate.spec.template.spec.securityContext", docs[0])
for doc in docs[1:]:
assert ctx_value == jmespath.search("spec.template.spec.securityContext", doc)
|
TestSecurityContext
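The assertions in this record all follow one pattern: render the chart, then query the resulting manifest dicts with jmespath. A minimal self-contained sketch of that pattern (the manifest dict here is a hand-built stand-in, not real chart output):
import jmespath

doc = {
    "spec": {
        "template": {
            "spec": {"securityContext": {"runAsUser": 6000, "fsGroup": 60}}
        }
    }
}
# jmespath.search walks the nested dict with a dotted path expression
assert jmespath.search("spec.template.spec.securityContext.runAsUser", doc) == 6000
assert jmespath.search("spec.template.spec.securityContext.fsGroup", doc) == 60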
|
python
|
realpython__materials
|
python-inherit-list-userlist/custom_list2.py
|
{
"start": 35,
"end": 433
}
|
class ____(UserList):
def join(self, separator=" "):
return separator.join(str(item) for item in self)
def map(self, action):
return type(self)(action(item) for item in self.data)
def filter(self, predicate):
return type(self)(item for item in self if predicate(item))
def for_each(self, func):
for item in self.data:
func(item)
|
CustomList
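A short usage example for the UserList subclass above; because map and filter return type(self)(...), chaining keeps producing CustomList instances:
numbers = CustomList([1, 2, 3, 4])
print(numbers.join(", "))                    # 1, 2, 3, 4
print(numbers.map(lambda n: n ** 2))         # [1, 4, 9, 16]
print(numbers.filter(lambda n: n % 2 == 0))  # [2, 4]
numbers.for_each(print)                      # prints each item on its own line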
|
python
|
openai__gym
|
gym/error.py
|
{
"start": 4029,
"end": 4136
}
|
class ____(Error):
"""Error message when an invalid frame is captured."""
# Wrapper errors
|
InvalidFrame
|
python
|
Pylons__pyramid
|
src/pyramid/security.py
|
{
"start": 15732,
"end": 16724
}
|
class ____(ACLPermitsResult, Allowed):
"""
An instance of ``ACLAllowed`` is a specialization of
:class:`pyramid.security.Allowed` that represents that a security check
made explicitly against ACL was allowed. It evaluates equal to all
boolean true types. It also has the following attributes: ``acl``,
``ace``, ``permission``, ``principals``, and ``context``. These
attributes indicate the security values involved in the request. Its
``__str__`` method prints a summary of these attributes for debugging
purposes. The same summary is available as the ``msg`` attribute.
"""
for attr in (
'ALL_PERMISSIONS',
'DENY_ALL',
'ACLAllowed',
'ACLDenied',
'AllPermissionsList',
'Allow',
'Authenticated',
'Deny',
'Everyone',
):
deprecated(
attr,
'"pyramid.security.{attr}" is deprecated in Pyramid 2.0. Adjust your '
'import to "pyramid.authorization.{attr}"'.format(attr=attr),
)
|
ACLAllowed
|
python
|
huggingface__transformers
|
tests/repo_utils/test_check_copies.py
|
{
"start": 3488,
"end": 4017
}
|
class ____:
attr_1 = 1
attr_2 = 2
def __init__(self, a=1, b=2):
self.a = a
self.b = b
# Ignore copy
def only_in_roberta_to_be_ignored(self, c):
return 3
# Copied from transformers.models.dummy_gpt2.modeling_dummy_gpt2.GPT2DummyModel.forward
def forward(self, c):
return 1
def existing_common(self, c):
return 4
# Ignore copy
def existing_diff_to_be_ignored(self, c):
return 6
"""
MOCK_DUMMY_BERT_CODE_NO_MATCH = """
|
RobertaBertDummyModel
|
python
|
realpython__materials
|
python-textual/vertical_scroll.py
|
{
"start": 127,
"end": 410
}
|
class ____(App):
CSS_PATH = "vertical_layout.tcss"
def compose(self):
with VerticalScroll():
for i in range(NUM_BOXES):
yield Static(f"Static {i + 1}")
if __name__ == "__main__":
app = VerticalScrollApp()
app.run()
|
VerticalScrollApp
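The record omits the file header; a plausible reconstruction, assuming the module uses the standard Textual imports and defines NUM_BOXES as a module constant (the exact count is a guess):
from textual.app import App
from textual.containers import VerticalScroll
from textual.widgets import Static

NUM_BOXES = 10  # assumed value; the original constant sits outside this span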
|
python
|
mlflow__mlflow
|
mlflow/utils/file_utils.py
|
{
"start": 1856,
"end": 7526
}
|
class ____:
def __init__(self, desc, total, step, **kwargs) -> None:
self.desc = desc
self.total = total
self.step = step
self.pbar = None
self.progress = 0
self.kwargs = kwargs
def set_pbar(self):
if MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR.get():
try:
from tqdm.auto import tqdm
self.pbar = tqdm(total=self.total, desc=self.desc, **self.kwargs)
except ImportError:
pass
@classmethod
def chunks(cls, file_size, desc, chunk_size):
bar = cls(
desc,
total=file_size,
step=chunk_size,
unit="iB",
unit_scale=True,
unit_divisor=1024,
miniters=1,
)
if file_size >= _PROGRESS_BAR_DISPLAY_THRESHOLD:
bar.set_pbar()
return bar
@classmethod
def files(cls, desc, total):
bar = cls(desc, total=total, step=1)
bar.set_pbar()
return bar
def update(self):
if self.pbar:
update_step = min(self.total - self.progress, self.step)
self.pbar.update(update_step)
self.pbar.refresh()
self.progress += update_step
def __enter__(self):
return self
def __exit__(self, *args):
if self.pbar:
self.pbar.close()
def is_directory(name):
return os.path.isdir(name)
def is_file(name):
return os.path.isfile(name)
def exists(name):
return os.path.exists(name)
def list_all(root, filter_func=lambda x: True, full_path=False):
"""List all entities directly under 'dir_name' that satisfy 'filter_func'
Args:
root: Name of directory to start search.
filter_func: function or lambda that takes path.
full_path: If True will return results as full path including `root`.
Returns:
list of all files or directories that satisfy the criteria.
"""
if not is_directory(root):
raise Exception(f"Invalid parent directory '{root}'")
matches = [x for x in os.listdir(root) if filter_func(os.path.join(root, x))]
return [os.path.join(root, m) for m in matches] if full_path else matches
def list_subdirs(dir_name, full_path=False):
"""
Equivalent to UNIX command:
``find $dir_name -depth 1 -type d``
Args:
dir_name: Name of directory to start search.
full_path: If True will return results as full path including `root`.
Returns:
list of all directories directly under 'dir_name'.
"""
return list_all(dir_name, os.path.isdir, full_path)
def list_files(dir_name, full_path=False):
"""
Equivalent to UNIX command:
``find $dir_name -depth 1 -type f``
Args:
dir_name: Name of directory to start search.
full_path: If True will return results as full path including `root`.
Returns:
list of all files directly under 'dir_name'.
"""
return list_all(dir_name, os.path.isfile, full_path)
def find(root, name, full_path=False):
"""Search for a file in a root directory. Equivalent to:
``find $root -name "$name" -depth 1``
Args:
root: Name of root directory for find.
name: Name of file or directory to find directly under root directory.
full_path: If True will return results as full path including `root`.
Returns:
list of matching files or directories.
"""
path_name = os.path.join(root, name)
return list_all(root, lambda x: x == path_name, full_path)
def mkdir(root, name=None):
"""Make directory with name "root/name", or just "root" if name is None.
Args:
root: Name of parent directory.
name: Optional name of leaf directory.
Returns:
Path to created directory.
"""
target = os.path.join(root, name) if name is not None else root
try:
os.makedirs(target, exist_ok=True)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(target):
raise e
return target
def make_containing_dirs(path):
"""
    Create the base directory for a given file path if it does not exist, including any
    missing parent directories.
"""
dir_name = os.path.dirname(path)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def read_parquet_as_pandas_df(data_parquet_path: str):
"""Deserialize and load the specified parquet file as a Pandas DataFrame.
Args:
data_parquet_path: String, path object (implementing os.PathLike[str]),
or file-like object implementing a binary read() function. The string
could be a URL. Valid URL schemes include http, ftp, s3, gs, and file.
For file URLs, a host is expected. A local file could
be: file://localhost/path/to/table.parquet. A file URL can also be a path to a
directory that contains multiple partitioned parquet files. Pyarrow
            supports paths to directories as well as file URLs. A directory
path could be: file://localhost/path/to/tables or s3://bucket/partition_dir.
Returns:
pandas dataframe
"""
import pandas as pd
return pd.read_parquet(data_parquet_path, engine="pyarrow")
def write_pandas_df_as_parquet(df, data_parquet_path: str):
"""Write a DataFrame to the binary parquet format.
Args:
df: pandas data frame.
data_parquet_path: String, path object (implementing os.PathLike[str]),
or file-like object implementing a binary write() function.
"""
df.to_parquet(data_parquet_path, engine="pyarrow")
|
ArtifactProgressBar
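A sketch of how the chunks() constructor above might wrap a chunked file read; the file name is illustrative, and the bar stays silent unless tqdm is installed, the enabling flag is set, and the file size crosses the display threshold:
import os

chunk_size = 1024 * 1024
file_size = os.path.getsize("artifact.bin")  # hypothetical local artifact
with ArtifactProgressBar.chunks(file_size, "Downloading artifact.bin", chunk_size) as bar:
    with open("artifact.bin", "rb") as f:
        while f.read(chunk_size):
            bar.update()  # advances by min(remaining, chunk_size)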
|
python
|
sympy__sympy
|
sympy/functions/special/error_functions.py
|
{
"start": 14013,
"end": 19347
}
|
class ____(DefinedFunction):
r"""
Imaginary error function.
Explanation
===========
The function erfi is defined as:
.. math ::
\mathrm{erfi}(x) = \frac{2}{\sqrt{\pi}} \int_0^x e^{t^2} \mathrm{d}t
Examples
========
>>> from sympy import I, oo, erfi
>>> from sympy.abc import z
Several special values are known:
>>> erfi(0)
0
>>> erfi(oo)
oo
>>> erfi(-oo)
-oo
>>> erfi(I*oo)
I
>>> erfi(-I*oo)
-I
In general one can pull out factors of -1 and $I$ from the argument:
>>> erfi(-z)
-erfi(z)
>>> from sympy import conjugate
>>> conjugate(erfi(z))
erfi(conjugate(z))
Differentiation with respect to $z$ is supported:
>>> from sympy import diff
>>> diff(erfi(z), z)
2*exp(z**2)/sqrt(pi)
We can numerically evaluate the imaginary error function to arbitrary
precision on the whole complex plane:
>>> erfi(2).evalf(30)
18.5648024145755525987042919132
>>> erfi(-2*I).evalf(30)
-0.995322265018952734162069256367*I
See Also
========
erf: Gaussian error function.
erfc: Complementary error function.
erf2: Two-argument error function.
erfinv: Inverse error function.
erfcinv: Inverse Complementary error function.
erf2inv: Inverse two-argument error function.
References
==========
.. [1] https://en.wikipedia.org/wiki/Error_function
.. [2] https://mathworld.wolfram.com/Erfi.html
.. [3] https://functions.wolfram.com/GammaBetaErf/Erfi
"""
unbranched = True
def fdiff(self, argindex=1):
if argindex == 1:
return 2*exp(self.args[0]**2)/sqrt(pi)
else:
raise ArgumentIndexError(self, argindex)
@classmethod
def eval(cls, z):
if z.is_Number:
if z is S.NaN:
return S.NaN
elif z.is_zero:
return S.Zero
elif z is S.Infinity:
return S.Infinity
if z.is_zero:
return S.Zero
# Try to pull out factors of -1
if z.could_extract_minus_sign():
return -cls(-z)
# Try to pull out factors of I
nz = z.extract_multiplicatively(I)
if nz is not None:
if nz is S.Infinity:
return I
if isinstance(nz, erfinv):
return I*nz.args[0]
if isinstance(nz, erfcinv):
return I*(S.One - nz.args[0])
# Only happens with unevaluated erf2inv
if isinstance(nz, erf2inv) and nz.args[0].is_zero:
return I*nz.args[1]
@staticmethod
@cacheit
def taylor_term(n, x, *previous_terms):
if n < 0 or n % 2 == 0:
return S.Zero
else:
x = sympify(x)
k = floor((n - 1)/S(2))
if len(previous_terms) > 2:
return previous_terms[-2] * x**2 * (n - 2)/(n*k)
else:
return 2 * x**n/(n*factorial(k)*sqrt(pi))
def _eval_conjugate(self):
return self.func(self.args[0].conjugate())
def _eval_is_extended_real(self):
return self.args[0].is_extended_real
def _eval_is_zero(self):
return self.args[0].is_zero
def _eval_rewrite_as_tractable(self, z, limitvar=None, **kwargs):
return self.rewrite(erf).rewrite("tractable", deep=True, limitvar=limitvar)
def _eval_rewrite_as_erf(self, z, **kwargs):
return -I*erf(I*z)
def _eval_rewrite_as_erfc(self, z, **kwargs):
return I*erfc(I*z) - I
def _eval_rewrite_as_fresnels(self, z, **kwargs):
arg = (S.One + I)*z/sqrt(pi)
return (S.One - I)*(fresnelc(arg) - I*fresnels(arg))
def _eval_rewrite_as_fresnelc(self, z, **kwargs):
arg = (S.One + I)*z/sqrt(pi)
return (S.One - I)*(fresnelc(arg) - I*fresnels(arg))
def _eval_rewrite_as_meijerg(self, z, **kwargs):
return z/sqrt(pi)*meijerg([S.Half], [], [0], [Rational(-1, 2)], -z**2)
def _eval_rewrite_as_hyper(self, z, **kwargs):
return 2*z/sqrt(pi)*hyper([S.Half], [3*S.Half], z**2)
def _eval_rewrite_as_uppergamma(self, z, **kwargs):
from sympy.functions.special.gamma_functions import uppergamma
return sqrt(-z**2)/z*(uppergamma(S.Half, -z**2)/sqrt(pi) - S.One)
def _eval_rewrite_as_expint(self, z, **kwargs):
return sqrt(-z**2)/z - z*expint(S.Half, -z**2)/sqrt(pi)
def _eval_expand_func(self, **hints):
return self.rewrite(erf)
as_real_imag = real_to_real_as_real_imag
def _eval_as_leading_term(self, x, logx, cdir):
arg = self.args[0].as_leading_term(x, logx=logx, cdir=cdir)
arg0 = arg.subs(x, 0)
if x in arg.free_symbols and arg0.is_zero:
return 2*arg/sqrt(pi)
elif arg0.is_finite:
return self.func(arg0)
return self.func(arg)
def _eval_aseries(self, n, args0, x, logx):
from sympy.series.order import Order
point = args0[0]
if point is S.Infinity:
z = self.args[0]
s = [factorial2(2*k - 1) / (2**k * z**(2*k + 1))
for k in range(n)] + [Order(1/z**n, x)]
return -I + (exp(z**2)/sqrt(pi)) * Add(*s)
return super(erfi, self)._eval_aseries(n, args0, x, logx)
|
erfi
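The _eval_aseries branch above assembles the standard large-argument expansion; spelled out (matching the code's factorial2 terms, with the Order term folded into the sum exactly as the code does):

.. math ::
    \mathrm{erfi}(z) \sim -i + \frac{e^{z^2}}{\sqrt{\pi}}
    \left( \sum_{k=0}^{n-1} \frac{(2k-1)!!}{2^k z^{2k+1}} + O\!\left(z^{-n}\right) \right),
    \qquad z \to \infty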
|
python
|
getsentry__sentry
|
src/sentry/data_export/base.py
|
{
"start": 393,
"end": 595
}
|
class ____(str, Enum):
Early = "EARLY" # The download is being prepared
Valid = "VALID" # The download is ready for the user
Expired = "EXPIRED" # The download has been deleted
|
ExportStatus
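Because the enum above subclasses str, members compare equal to their raw database values, which is presumably why it is defined this way:
status = ExportStatus.Valid
assert status == "VALID"                                # str subclass: compares to the raw value
assert ExportStatus("EXPIRED") is ExportStatus.Expired  # value lookup returns the singleton member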
|
python
|
gevent__gevent
|
src/greentest/3.10/test_asyncore.py
|
{
"start": 14746,
"end": 14973
}
|
class ____(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
|
BaseClient
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/ctx_manager.py
|
{
"start": 27480,
"end": 30117
}
|
class ____(ContextWrappingVariable):
"""represents whether torch function overrides are enabled or not"""
_guards_singleton = Guard(GlobalStateSource(), GuardBuilder.TORCH_FUNCTION_STATE) # type: ignore[arg-type]
@staticmethod
def create(
tx: "InstructionTranslator", **kwargs: Any
) -> "TorchFunctionDisableVariable":
var = TorchFunctionDisableVariable(
target_values=[],
initial_values=[],
**kwargs,
)
return var
def __init__(
self,
target_values: Sized,
initial_values: Optional[Sized] = None,
only_subclass: bool = True,
**kwargs: Any,
) -> None:
assert len(target_values) == 0
assert initial_values is not None and len(initial_values) == 0
from ..symbolic_convert import InstructionTranslator
tx = InstructionTranslator.current_tx()
self.only_subclass = only_subclass
self.initial_torch_function_subclass_enabled = (
tx.symbolic_torch_function_state.torch_function_subclass_enabled
)
self.initial_torch_function_mode_enabled = (
tx.symbolic_torch_function_state.torch_function_mode_enabled
)
super().__init__(
target_values=target_values, initial_values=initial_values, **kwargs
)
install_guard(self._guards_singleton)
def set_cleanup_hook(
self,
tx: "InstructionTranslator",
cleanup_fn: Optional[Callable[..., Any]] = None,
) -> None:
if cleanup_fn is None:
def cleanup_fn() -> None:
tx.symbolic_torch_function_state.torch_function_subclass_enabled = (
self.initial_torch_function_subclass_enabled
)
if not self.only_subclass:
tx.symbolic_torch_function_state.torch_function_mode_enabled = (
self.initial_torch_function_subclass_enabled
)
self.cleanup_fn = cleanup_fn
tx.output.add_cleanup_hook(self.cleanup)
def _call_func(self, tx: "InstructionTranslator", values: Sized) -> None:
assert len(values) == 0
tx.symbolic_torch_function_state.torch_function_subclass_enabled = False
if not self.only_subclass:
tx.symbolic_torch_function_state.torch_function_mode_enabled = False
def module_name(self) -> str:
return "torch._C"
def fn_name(self) -> str:
if self.only_subclass:
return "DisableTorchFunctionSubclass"
return "DisableTorchFunction"
|
TorchFunctionDisableVariable
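fn_name() above points at the real eager-mode context managers this variable traces; a minimal sketch of the common __torch_function__ pattern they appear in (standard PyTorch API in recent versions; the subclass is illustrative):
import torch

class LoggingTensor(torch.Tensor):  # illustrative subclass
    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        # Re-dispatch to the plain implementation without re-entering this override
        with torch._C.DisableTorchFunctionSubclass():
            return func(*args, **(kwargs or {}))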
|
python
|
pydantic__pydantic
|
pydantic-core/tests/serializers/test_list_tuple.py
|
{
"start": 8194,
"end": 19309
}
|
class ____(ImplicitContains):
    __contains__ = None  # Explicitly disallow the `x in RemovedContains()` membership check
@pytest.mark.parametrize(
'include,exclude,expected',
[
({1, 3}, None, ['b', 'd']),
({1, 3, 5}, {5}, ['b', 'd']),
({2: None, 3: None, 5: None}.keys(), {5}, ['c', 'd']),
(ExplicitContains(), set(), ['c', 'f']),
(ExplicitContains(), {5}, ['c']),
({2, 3}, ExplicitContains(), ['d']),
([1, 2, 3], [2, 3], ['b']),
],
)
def test_filter_runtime_more(include, exclude, expected):
v = SchemaSerializer(core_schema.list_schema(core_schema.any_schema()))
assert v.to_python(list('abcdefgh'), include=include, exclude=exclude) == expected
@pytest.mark.parametrize(
'schema_func,seq_f', [(core_schema.list_schema, as_list), (core_schema.tuple_variable_schema, as_tuple)]
)
@pytest.mark.parametrize(
'include,exclude',
[
(ImplicitContains(), None),
(RemovedContains(), None),
(1, None),
(None, ImplicitContains()),
(None, RemovedContains()),
(None, 1),
],
)
def test_include_error_call_time(schema_func, seq_f, include, exclude):
kind = 'include' if include is not None else 'exclude'
v = SchemaSerializer(schema_func(core_schema.any_schema()))
with pytest.raises(TypeError, match=f'`{kind}` argument must be a set or dict.'):
v.to_python(seq_f(0, 1, 2, 3), include=include, exclude=exclude)
def test_tuple_fallback():
v = SchemaSerializer(core_schema.tuple_variable_schema(core_schema.any_schema()))
with pytest.warns(
UserWarning,
match=r"Expected `tuple\[any, ...\]` - serialized value may not be as expected \[input_value='apple', input_type=str\]",
):
assert v.to_python('apple') == 'apple'
with pytest.warns(UserWarning) as warning_info:
assert v.to_json([1, 2, 3]) == b'[1,2,3]'
assert (
'Expected `tuple[any, ...]` - serialized value may not be as expected [input_value=[1, 2, 3], input_type=list]'
in warning_info.list[0].message.args[0]
)
with pytest.warns(
UserWarning,
match=r"Expected `tuple\[any, ...\]` - serialized value may not be as expected \[input_value=b'apple', input_type=bytes\]",
):
assert v.to_json(b'apple') == b'"apple"'
assert v.to_python((1, 2, 3)) == (1, 2, 3)
    # even though we're in the fallback state, non-JSON types should still be converted to JSON here
with pytest.warns(
UserWarning,
match=r'Expected `tuple\[any, ...\]` - serialized value may not be as expected \[input_value=\[1, 2, 3\], input_type=list\]',
):
assert v.to_python([1, 2, 3], mode='json') == [1, 2, 3]
@pytest.mark.parametrize(
'params',
[
dict(include=None, exclude=None, expected=['0', '1', '2', '3']),
dict(include={0, 1}, exclude=None, expected=['0', '1']),
dict(include={0: ..., 1: ...}, exclude=None, expected=['0', '1']),
dict(include={0: True, 1: True}, exclude=None, expected=['0', '1']),
dict(include={0: {1}, 1: {1}}, exclude=None, expected=['0', '1']),
dict(include=None, exclude={0, 1}, expected=['2', '3']),
dict(include=None, exclude={0: ..., 1: ...}, expected=['2', '3']),
dict(include={0, 1}, exclude={1, 2}, expected=['0']),
dict(include=None, exclude={3: {1}}, expected=['0', '1', '2', '3']),
dict(include={0, 1}, exclude={3: {1}}, expected=['0', '1']),
dict(include={0, 1}, exclude={1: {1}}, expected=['0', '1']),
dict(include={0, 1}, exclude={1: ...}, expected=['0']),
dict(include={1}, exclude={1}, expected=[]),
dict(include={0}, exclude={1}, expected=['0']),
dict(include={'__all__'}, exclude={1}, expected=['0', '2', '3']),
dict(include=None, exclude={1}, expected=['0', '2', '3']),
dict(include=None, exclude={'__all__'}, expected=[]),
],
)
def test_filter_args(params):
s = SchemaSerializer(core_schema.list_schema())
include, exclude, expected = params['include'], params['exclude'], params['expected']
value = ['0', '1', '2', '3']
assert s.to_python(value, include=include, exclude=exclude) == expected
assert s.to_python(value, mode='json', include=include, exclude=exclude) == expected
assert json.loads(s.to_json(value, include=include, exclude=exclude)) == expected
@pytest.mark.parametrize(
'params',
[
dict(include=None, exclude=None, expected=[[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]),
dict(include=None, exclude={1: {0}}, expected=[[0], [1], [0, 1, 2], [0, 1, 2, 3]]),
dict(include=None, exclude={1: {0}, 2: ...}, expected=[[0], [1], [0, 1, 2, 3]]),
dict(include=None, exclude={1: {0}, 2: True}, expected=[[0], [1], [0, 1, 2, 3]]),
dict(include={1: {0}}, exclude=None, expected=[[0]]),
],
)
def test_filter_args_nested(params):
s = SchemaSerializer(core_schema.list_schema(core_schema.list_schema()))
include, exclude, expected = params['include'], params['exclude'], params['expected']
value = [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]
assert s.to_python(value, include=include, exclude=exclude) == expected
assert s.to_python(value, mode='json', include=include, exclude=exclude) == expected
assert json.loads(s.to_json(value, include=include, exclude=exclude)) == expected
def test_filter_list_of_dicts():
s = SchemaSerializer(core_schema.list_schema(core_schema.dict_schema()))
v = [{'a': 1, 'b': 2}, {'a': 3, 'b': 4}]
assert s.to_python(v) == v
assert s.to_python(v, exclude={0: {'a'}}) == [{'b': 2}, {'a': 3, 'b': 4}]
assert s.to_python(v, exclude={0: {'__all__'}}) == [{}, {'a': 3, 'b': 4}]
assert s.to_python(v, exclude={'__all__': {'a'}}) == [{'b': 2}, {'b': 4}]
assert s.to_json(v) == b'[{"a":1,"b":2},{"a":3,"b":4}]'
assert s.to_json(v, exclude={0: {'a'}}) == b'[{"b":2},{"a":3,"b":4}]'
assert s.to_json(v, exclude={0: {'__all__'}}) == b'[{},{"a":3,"b":4}]'
assert s.to_json(v, exclude={'__all__': {'a'}}) == b'[{"b":2},{"b":4}]'
assert s.to_python(v, include={0: {'a'}, 1: None}) == [{'a': 1}, {'a': 3, 'b': 4}]
assert s.to_python(v, include={'__all__': {'a'}}) == [{'a': 1}, {'a': 3}]
assert s.to_json(v, include={0: {'a'}, 1: None}) == b'[{"a":1},{"a":3,"b":4}]'
assert s.to_json(v, include={'__all__': {'a'}}) == b'[{"a":1},{"a":3}]'
def test_positional_tuple():
s = SchemaSerializer({'type': 'tuple', 'items_schema': [{'type': 'int'}, {'type': 'bytes'}, {'type': 'float'}]})
assert s.to_python((1, b'2', 3.0)) == (1, b'2', 3.0)
with pytest.warns(UserWarning, match='Unexpected extra items present in tuple'):
assert s.to_python((1, b'2', 3.0, 123)) == (1, b'2', 3.0, 123)
assert s.to_python((1, b'2')) == (1, b'2')
assert s.to_python((1, b'2', 3.0), mode='json') == [1, '2', 3.0]
with pytest.warns(UserWarning, match='Unexpected extra items present in tuple'):
assert s.to_python((1, b'2', 3.0, 123), mode='json') == [1, '2', 3.0, 123]
assert s.to_python((1, b'2'), mode='json') == [1, '2']
assert s.to_json((1, b'2', 3.0)) == b'[1,"2",3.0]'
with pytest.warns(UserWarning, match='Unexpected extra items present in tuple'):
assert s.to_json((1, b'2', 3.0, 123)) == b'[1,"2",3.0,123]'
assert s.to_json((1, b'2')) == b'[1,"2"]'
def test_function_positional_tuple():
def f(prefix, value, _info):
return f'{prefix}{value}'
s = SchemaSerializer(
{
'type': 'tuple',
'items_schema': [
core_schema.any_schema(
serialization=core_schema.plain_serializer_function_ser_schema(partial(f, 'a'), info_arg=True)
),
core_schema.any_schema(
serialization=core_schema.plain_serializer_function_ser_schema(partial(f, 'b'), info_arg=True)
),
core_schema.any_schema(
serialization=core_schema.plain_serializer_function_ser_schema(partial(f, 'extra'), info_arg=True)
),
],
'variadic_item_index': 2,
}
)
assert s.to_python((1,)) == ('a1',)
assert s.to_python((1, 2)) == ('a1', 'b2')
assert s.to_python((1, 2, 3)) == ('a1', 'b2', 'extra3')
assert s.to_python((1,), mode='json') == ['a1']
assert s.to_python((1, 2), mode='json') == ['a1', 'b2']
assert s.to_python((1, 2, 3), mode='json') == ['a1', 'b2', 'extra3']
assert s.to_json((1,)) == b'["a1"]'
assert s.to_json((1, 2)) == b'["a1","b2"]'
assert s.to_json((1, 2, 3)) == b'["a1","b2","extra3"]'
def test_list_dict_key():
s = SchemaSerializer(core_schema.dict_schema(core_schema.list_schema(), core_schema.int_schema()))
with pytest.warns(UserWarning, match=r'Expected `list\[any\]`.+ input_type=str'):
assert s.to_python({'xx': 1}) == {'xx': 1}
def test_tuple_var_dict_key():
s = SchemaSerializer(core_schema.dict_schema(core_schema.tuple_variable_schema(), core_schema.int_schema()))
with pytest.warns(UserWarning, match=r'Expected `tuple\[any, ...\]`.+input_type=str'):
assert s.to_python({'xx': 1}) == {'xx': 1}
assert s.to_python({(1, 2): 1}) == {(1, 2): 1}
assert s.to_python({(1, 2): 1}, mode='json') == {'1,2': 1}
assert s.to_json({(1, 2): 1}) == b'{"1,2":1}'
def test_tuple_pos_dict_key():
s = SchemaSerializer(
core_schema.dict_schema(
core_schema.tuple_positional_schema(
[core_schema.int_schema(), core_schema.str_schema()], extras_schema=core_schema.int_schema()
),
core_schema.int_schema(),
)
)
assert s.to_python({(1, 'a'): 1}) == {(1, 'a'): 1}
assert s.to_python({(1, 'a', 2): 1}) == {(1, 'a', 2): 1}
assert s.to_python({(1, 'a'): 1}, mode='json') == {'1,a': 1}
assert s.to_python({(1, 'a', 2): 1}, mode='json') == {'1,a,2': 1}
assert s.to_json({(1, 'a'): 1}) == b'{"1,a":1}'
assert s.to_json({(1, 'a', 2): 1}) == b'{"1,a,2":1}'
def test_tuple_wrong_size_union():
# See https://github.com/pydantic/pydantic/issues/8677
f = core_schema.float_schema()
s = SchemaSerializer(
core_schema.union_schema([core_schema.tuple_schema([f, f]), core_schema.tuple_schema([f, f, f])])
)
assert s.to_python((1.0, 2.0)) == (1.0, 2.0)
assert s.to_python((1.0, 2.0, 3.0)) == (1.0, 2.0, 3.0)
with pytest.warns(UserWarning, match='Unexpected extra items present in tuple'):
s.to_python((1.0, 2.0, 3.0, 4.0))
assert s.to_python((1.0, 2.0), mode='json') == [1.0, 2.0]
assert s.to_python((1.0, 2.0, 3.0), mode='json') == [1.0, 2.0, 3.0]
with pytest.warns(UserWarning, match='Unexpected extra items present in tuple'):
s.to_python((1.0, 2.0, 3.0, 4.0), mode='json')
assert s.to_json((1.0, 2.0)) == b'[1.0,2.0]'
assert s.to_json((1.0, 2.0, 3.0)) == b'[1.0,2.0,3.0]'
with pytest.warns(UserWarning, match='Unexpected extra items present in tuple'):
s.to_json((1.0, 2.0, 3.0, 4.0))
|
RemovedContains
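ImplicitContains and ExplicitContains are defined just above this span; a hedged reconstruction consistent with the parametrized expectations (indices 2 and 5 select 'c' and 'f' from 'abcdefgh'):
class ImplicitContains:  # iterable only, so `in` works implicitly via __iter__
    def __iter__(self):
        return iter((2, 5))

class ExplicitContains(ImplicitContains):
    def __contains__(self, item):
        return item in (2, 5)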
|
python
|
jupyterlab__jupyterlab
|
jupyterlab/browser_check.py
|
{
"start": 6108,
"end": 7639
}
|
class ____(LabApp):
"""An app the launches JupyterLab and waits for it to start up, checking for
JS console errors, JS errors, and Python logged errors.
"""
name = __name__
open_browser = False
serverapp_config = {"base_url": "/foo/"}
default_url = Unicode("/lab?reset", config=True, help="The default URL to redirect to from `/`")
ip = "127.0.0.1"
flags = test_flags
aliases = test_aliases
test_browser = Bool(True)
def initialize_settings(self):
self.settings.setdefault("page_config_data", {})
self.settings["page_config_data"]["browserTest"] = True
self.settings["page_config_data"]["buildAvailable"] = False
self.settings["page_config_data"]["exposeAppInBrowser"] = True
super().initialize_settings()
def initialize_handlers(self):
def func(*args, **kwargs):
return 0
if self.test_browser:
func = run_browser_sync if os.name == "nt" else run_browser
run_test(self.serverapp, func)
super().initialize_handlers()
def _jupyter_server_extension_points():
return [{"module": __name__, "app": BrowserApp}]
def _jupyter_server_extension_paths():
return [{"module": "jupyterlab.browser_check"}]
if __name__ == "__main__":
skip_options = ["--no-browser-test", "--no-chrome-test"]
for option in skip_options:
if option in sys.argv:
BrowserApp.test_browser = False
sys.argv.remove(option)
BrowserApp.launch_instance()
|
BrowserApp
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_container_user.py
|
{
"start": 383,
"end": 3363
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'linux': 'V1LinuxContainerUser'
}
attribute_map = {
'linux': 'linux'
}
def __init__(self, linux=None, local_vars_configuration=None): # noqa: E501
"""V1ContainerUser - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._linux = None
self.discriminator = None
if linux is not None:
self.linux = linux
@property
def linux(self):
"""Gets the linux of this V1ContainerUser. # noqa: E501
:return: The linux of this V1ContainerUser. # noqa: E501
:rtype: V1LinuxContainerUser
"""
return self._linux
@linux.setter
def linux(self, linux):
"""Sets the linux of this V1ContainerUser.
:param linux: The linux of this V1ContainerUser. # noqa: E501
:type: V1LinuxContainerUser
"""
self._linux = linux
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ContainerUser):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ContainerUser):
return True
return self.to_dict() != other.to_dict()
|
V1ContainerUser
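A small round-trip check for the generated model above; equality and repr are both routed through to_dict():
user = V1ContainerUser()
assert user.to_dict() == {"linux": None}
assert user == V1ContainerUser(linux=None)  # __eq__ compares to_dict() output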
|
python
|
huggingface__transformers
|
tests/models/splinter/test_modeling_splinter.py
|
{
"start": 1193,
"end": 7537
}
|
class ____:
def __init__(
self,
parent,
batch_size=13,
num_questions=3,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
question_token_id=1,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.num_questions = num_questions
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.question_token_id = question_token_id
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_ids[:, 1] = self.question_token_id
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
start_positions = None
end_positions = None
question_positions = None
if self.use_labels:
start_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size)
end_positions = ids_tensor([self.batch_size, self.num_questions], self.type_sequence_label_size)
question_positions = ids_tensor([self.batch_size, self.num_questions], self.num_labels)
config = SplinterConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
question_token_id=self.question_token_id,
)
return (config, input_ids, token_type_ids, input_mask, start_positions, end_positions, question_positions)
def create_and_check_model(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_question_answering(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=start_positions[:, 0],
end_positions=end_positions[:, 0],
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_pretraining(
self,
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
):
model = SplinterForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=start_positions,
end_positions=end_positions,
question_positions=question_positions,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.num_questions, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.num_questions, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
start_positions,
end_positions,
question_positions,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
|
SplinterModelTester
|
python
|
getsentry__sentry
|
src/sentry/charts/chartcuterie.py
|
{
"start": 432,
"end": 3262
}
|
class ____(ChartRenderer):
"""
    The Chartcuterie service is responsible for rendering series data as a
    chart image.
    This uses the external Chartcuterie API to produce the charts.
"""
@property
def service_url(self) -> str | None:
return options.get("chart-rendering.chartcuterie", {}).get("url")
@property
def storage_options(self) -> dict[str, Any] | None:
backend = options.get("chart-rendering.storage.backend")
opts = options.get("chart-rendering.storage.options")
        # No custom storage driver configured; let get_storage fall back to the default
if not backend:
return None
return {"backend": backend, "options": opts}
def validate(self) -> None:
if not self.is_enabled():
return
if self.storage_options is not None and self.storage_options["options"] is None:
raise InvalidConfiguration(
"`chart-rendering.storage.options` must be configured if `chart-rendering.storage.backend` is configured"
)
if not self.service_url:
raise InvalidConfiguration("`chart-rendering.chartcuterie.url` is not configured")
def generate_chart(self, style: ChartType, data: Any, size: ChartSize | None = None) -> str:
request_id = uuid4().hex
payload = {
"requestId": request_id,
"style": style.value,
"data": data,
}
# Override the default size defined by the chart style
if size:
payload.update(size)
with sentry_sdk.start_span(
op="charts.chartcuterie.generate_chart",
name=type(self).__name__,
):
# Using sentry json formatter to handle datetime objects
assert self.service_url is not None
resp = requests.post(
url=urljoin(self.service_url, "render"),
data=orjson.dumps(payload, option=orjson.OPT_UTC_Z | orjson.OPT_NON_STR_KEYS),
headers={"Content-Type": "application/json"},
)
if resp.status_code == 503 and settings.DEBUG:
logger.info(
"You may need to build the chartcuterie config using `pnpm build-chartcuterie-config`"
)
if resp.status_code != 200:
raise RuntimeError(f"Chartcuterie responded with {resp.status_code}: {resp.text}")
file_name = f"{request_id}.png"
with sentry_sdk.start_span(
op="charts.chartcuterie.upload",
name=type(self).__name__,
):
storage = get_storage(self.storage_options)
storage.save(file_name, BytesIO(resp.content))
url = absolute_uri(storage.url(file_name))
return url
|
Chartcuterie
|
python
|
apache__airflow
|
airflow-core/src/airflow/models/dag.py
|
{
"start": 11867,
"end": 31347
}
|
class ____(Base):
"""Table containing DAG properties."""
__tablename__ = "dag"
"""
These items are stored in the database for state related information.
"""
dag_id: Mapped[str] = mapped_column(StringID(), primary_key=True)
# A DAG can be paused from the UI / DB
# Set this default value of is_paused based on a configuration value!
is_paused_at_creation = airflow_conf.getboolean("core", "dags_are_paused_at_creation")
is_paused: Mapped[bool] = mapped_column(Boolean, default=is_paused_at_creation)
# Whether that DAG was seen on the last DagBag load
is_stale: Mapped[bool] = mapped_column(Boolean, default=True)
# Last time the scheduler started
last_parsed_time: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
# How long it took to parse this file
last_parse_duration: Mapped[float | None] = mapped_column(Float, nullable=True)
# Time when the DAG last received a refresh signal
# (e.g. the DAG's "refresh" button was clicked in the web UI)
last_expired: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
# The location of the file containing the DAG object
# Note: Do not depend on fileloc pointing to a file; in the case of a
# packaged DAG, it will point to the subpath of the DAG within the
# associated zip.
fileloc: Mapped[str | None] = mapped_column(String(2000), nullable=True)
relative_fileloc: Mapped[str | None] = mapped_column(String(2000), nullable=True)
bundle_name: Mapped[str] = mapped_column(StringID(), ForeignKey("dag_bundle.name"), nullable=False)
# The version of the bundle the last time the DAG was processed
bundle_version: Mapped[str | None] = mapped_column(String(200), nullable=True)
# String representing the owners
owners: Mapped[str | None] = mapped_column(String(2000), nullable=True)
# Display name of the dag
_dag_display_property_value: Mapped[str | None] = mapped_column(
"dag_display_name", String(2000), nullable=True
)
# Description of the dag
description: Mapped[str | None] = mapped_column(Text, nullable=True)
# Timetable summary
timetable_summary: Mapped[str | None] = mapped_column(Text, nullable=True)
# Timetable description
timetable_description: Mapped[str | None] = mapped_column(String(1000), nullable=True)
# Asset expression based on asset triggers
asset_expression: Mapped[dict[str, Any] | None] = mapped_column(
sqlalchemy_jsonfield.JSONField(json=json), nullable=True
)
# DAG deadline information
_deadline: Mapped[dict[str, Any] | None] = mapped_column(
"deadline", sqlalchemy_jsonfield.JSONField(json=json), nullable=True
)
# Tags for view filter
tags = relationship("DagTag", cascade="all, delete, delete-orphan", backref=backref("dag"))
# Dag owner links for DAGs view
dag_owner_links = relationship(
"DagOwnerAttributes", cascade="all, delete, delete-orphan", backref=backref("dag")
)
max_active_tasks: Mapped[int] = mapped_column(Integer, nullable=False)
max_active_runs: Mapped[int | None] = mapped_column(
Integer, nullable=True
) # todo: should not be nullable if we have a default
max_consecutive_failed_dag_runs: Mapped[int] = mapped_column(Integer, nullable=False)
has_task_concurrency_limits: Mapped[bool] = mapped_column(Boolean, nullable=False)
has_import_errors: Mapped[bool] = mapped_column(Boolean(), default=False, server_default="0")
fail_fast: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False, server_default="0")
# The logical date of the next dag run.
next_dagrun: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
# Must be either both NULL or both datetime.
next_dagrun_data_interval_start: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
next_dagrun_data_interval_end: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
# Earliest time at which this ``next_dagrun`` can be created.
next_dagrun_create_after: Mapped[datetime | None] = mapped_column(UtcDateTime, nullable=True)
__table_args__ = (Index("idx_next_dagrun_create_after", next_dagrun_create_after, unique=False),)
schedule_asset_references = relationship(
"DagScheduleAssetReference",
back_populates="dag",
cascade="all, delete, delete-orphan",
)
schedule_asset_alias_references = relationship(
"DagScheduleAssetAliasReference",
back_populates="dag",
cascade="all, delete, delete-orphan",
)
schedule_asset_name_references = relationship(
"DagScheduleAssetNameReference",
back_populates="dag",
cascade="all, delete, delete-orphan",
)
schedule_asset_uri_references = relationship(
"DagScheduleAssetUriReference",
back_populates="dag",
cascade="all, delete, delete-orphan",
)
schedule_assets = association_proxy("schedule_asset_references", "asset")
task_inlet_asset_references = relationship(
"TaskInletAssetReference",
cascade="all, delete, delete-orphan",
)
task_outlet_asset_references = relationship(
"TaskOutletAssetReference",
cascade="all, delete, delete-orphan",
)
NUM_DAGS_PER_DAGRUN_QUERY = airflow_conf.getint(
"scheduler", "max_dagruns_to_create_per_loop", fallback=10
)
dag_versions = relationship(
"DagVersion", back_populates="dag_model", cascade="all, delete, delete-orphan"
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.max_active_tasks is None:
self.max_active_tasks = airflow_conf.getint("core", "max_active_tasks_per_dag")
if self.max_active_runs is None:
self.max_active_runs = airflow_conf.getint("core", "max_active_runs_per_dag")
if self.max_consecutive_failed_dag_runs is None:
self.max_consecutive_failed_dag_runs = airflow_conf.getint(
"core", "max_consecutive_failed_dag_runs_per_dag"
)
if self.has_task_concurrency_limits is None:
# Be safe -- this will be updated later once the DAG is parsed
self.has_task_concurrency_limits = True
def __repr__(self):
return f"<DAG: {self.dag_id}>"
@property
def next_dagrun_data_interval(self) -> DataInterval | None:
return _get_model_data_interval(
self,
"next_dagrun_data_interval_start",
"next_dagrun_data_interval_end",
)
@next_dagrun_data_interval.setter
def next_dagrun_data_interval(self, value: tuple[datetime, datetime] | None) -> None:
if value is None:
self.next_dagrun_data_interval_start = self.next_dagrun_data_interval_end = None
else:
self.next_dagrun_data_interval_start, self.next_dagrun_data_interval_end = value
@property
def deadline(self):
"""Get the deserialized deadline alert."""
if self._deadline is None:
return None
if isinstance(self._deadline, list):
return [DeadlineAlert.deserialize_deadline_alert(item) for item in self._deadline]
return DeadlineAlert.deserialize_deadline_alert(self._deadline)
@deadline.setter
def deadline(self, value):
"""Set and serialize the deadline alert."""
if value is None:
self._deadline = None
elif isinstance(value, list):
self._deadline = [
item if isinstance(item, dict) else item.serialize_deadline_alert() for item in value
]
elif isinstance(value, dict):
self._deadline = value
else:
self._deadline = value.serialize_deadline_alert()
@property
def timezone(self):
return settings.TIMEZONE
@staticmethod
@provide_session
def get_dagmodel(dag_id: str, session: Session = NEW_SESSION) -> DagModel | None:
return session.get(
DagModel,
dag_id,
)
@classmethod
@provide_session
def get_current(cls, dag_id: str, session: Session = NEW_SESSION) -> DagModel | None:
return session.scalar(select(cls).where(cls.dag_id == dag_id))
@provide_session
def get_last_dagrun(
self, session: Session = NEW_SESSION, include_manually_triggered: bool = False
) -> DagRun | None:
return get_last_dagrun(
self.dag_id, session=session, include_manually_triggered=include_manually_triggered
)
def get_is_active(self, *, session: Session | None = None) -> bool:
"""Provide interface compatibility to 'DAG'."""
return not self.is_stale
@staticmethod
@provide_session
def get_paused_dag_ids(dag_ids: list[str], session: Session = NEW_SESSION) -> set[str]:
"""
Given a list of dag_ids, get a set of Paused Dag Ids.
:param dag_ids: List of Dag ids
:param session: ORM Session
:return: Paused Dag_ids
"""
paused_dag_ids = session.execute(
select(DagModel.dag_id)
.where(DagModel.is_paused == expression.true())
.where(DagModel.dag_id.in_(dag_ids))
)
paused_dag_ids = {paused_dag_id for (paused_dag_id,) in paused_dag_ids}
return paused_dag_ids
@property
def safe_dag_id(self):
return self.dag_id.replace(".", "__dot__")
@hybrid_property
def dag_display_name(self) -> str:
return self._dag_display_property_value or self.dag_id
@dag_display_name.expression # type: ignore[no-redef]
def dag_display_name(self) -> str:
"""
        Expression part of the ``dag_display_name`` hybrid property.
:meta private:
"""
return case(
(self._dag_display_property_value.is_not(None), self._dag_display_property_value),
else_=self.dag_id,
)
@classmethod
@provide_session
def deactivate_deleted_dags(
cls,
bundle_name: str,
rel_filelocs: list[str],
session: Session = NEW_SESSION,
) -> bool:
"""
        Set ``is_stale=True`` on the DAGs for which the DAG files have been removed.
:param bundle_name: bundle for filelocs
:param rel_filelocs: relative filelocs for bundle
:param session: ORM Session
:return: True if any DAGs were marked as stale, False otherwise
"""
log.debug("Deactivating DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__)
dag_models = session.scalars(
select(cls)
.where(
cls.bundle_name == bundle_name,
)
.options(
load_only(
cls.relative_fileloc,
cls.is_stale,
),
)
)
any_deactivated = False
for dm in dag_models:
if dm.relative_fileloc not in rel_filelocs:
dm.is_stale = True
any_deactivated = True
return any_deactivated
@classmethod
def dags_needing_dagruns(cls, session: Session) -> tuple[Any, dict[str, datetime]]:
"""
Return (and lock) a list of Dag objects that are due to create a new DagRun.
This will return a resultset of rows that is row-level-locked with a "SELECT ... FOR UPDATE" query,
you should ensure that any scheduling decisions are made in a single transaction -- as soon as the
transaction is committed it will be unlocked.
:meta private:
"""
from airflow.models.serialized_dag import SerializedDagModel
evaluator = AssetEvaluator(session)
def dag_ready(dag_id: str, cond: BaseAsset, statuses: dict[AssetUniqueKey, bool]) -> bool:
try:
return evaluator.run(cond, statuses)
except AttributeError:
# if dag was serialized before 2.9 and we *just* upgraded,
# we may be dealing with old version. In that case,
# just wait for the dag to be reserialized.
log.warning("Dag '%s' has old serialization; skipping run creation.", dag_id)
return False
except Exception:
log.exception("Dag '%s' failed to be evaluated; assuming not ready", dag_id)
return False
# This loads all the ADRQ records; we may need to limit the number of DAGs.
adrq_by_dag: dict[str, list[AssetDagRunQueue]] = defaultdict(list)
for adrq in session.scalars(select(AssetDagRunQueue).options(joinedload(AssetDagRunQueue.dag_model))):
if adrq.dag_model.asset_expression is None:
# The dag referenced does not actually depend on an asset! This
# could happen if the dag DID depend on an asset at some point,
# but no longer does. Delete the stale adrq.
session.delete(adrq)
else:
adrq_by_dag[adrq.target_dag_id].append(adrq)
dag_statuses: dict[str, dict[AssetUniqueKey, bool]] = {
dag_id: {AssetUniqueKey.from_asset(adrq.asset): True for adrq in adrqs}
for dag_id, adrqs in adrq_by_dag.items()
}
ser_dags = SerializedDagModel.get_latest_serialized_dags(dag_ids=list(dag_statuses), session=session)
for ser_dag in ser_dags:
dag_id = ser_dag.dag_id
statuses = dag_statuses[dag_id]
if not dag_ready(dag_id, cond=ser_dag.dag.timetable.asset_condition, statuses=statuses):
del adrq_by_dag[dag_id]
del dag_statuses[dag_id]
del dag_statuses
# triggered dates for asset triggered dags
triggered_date_by_dag: dict[str, datetime] = {
dag_id: max(adrq.created_at for adrq in adrqs) for dag_id, adrqs in adrq_by_dag.items()
}
del adrq_by_dag
asset_triggered_dag_ids = set(triggered_date_by_dag.keys())
if asset_triggered_dag_ids:
# exclude as max active runs has been reached
exclusion_list = set(
session.scalars(
select(DagModel.dag_id)
.join(DagRun.dag_model)
.where(DagRun.state.in_((DagRunState.QUEUED, DagRunState.RUNNING)))
.where(DagModel.dag_id.in_(asset_triggered_dag_ids))
.group_by(DagModel.dag_id)
.having(func.count() >= func.max(DagModel.max_active_runs))
)
)
if exclusion_list:
asset_triggered_dag_ids -= exclusion_list
triggered_date_by_dag = {
k: v for k, v in triggered_date_by_dag.items() if k not in exclusion_list
}
# We limit so that _one_ scheduler doesn't try to do all the creation of dag runs
query = (
select(cls)
.where(
cls.is_paused == expression.false(),
cls.is_stale == expression.false(),
cls.has_import_errors == expression.false(),
or_(
cls.next_dagrun_create_after <= func.now(),
cls.dag_id.in_(asset_triggered_dag_ids),
),
)
.order_by(cls.next_dagrun_create_after)
.limit(cls.NUM_DAGS_PER_DAGRUN_QUERY)
)
return (
session.scalars(with_row_locks(query, of=cls, session=session, skip_locked=True)),
triggered_date_by_dag,
)
def calculate_dagrun_date_fields(
self,
dag: SerializedDAG,
last_automated_dag_run: None | DataInterval,
) -> None:
"""
Calculate ``next_dagrun`` and ``next_dagrun_create_after``.
:param dag: The DAG object
:param last_automated_dag_run: DataInterval of the most recent run of this dag, or None
if not yet scheduled. Passing a bare datetime is not supported and raises ValueError.
"""
last_automated_data_interval: DataInterval | None
if isinstance(last_automated_dag_run, datetime):
raise ValueError(
"Passing a datetime to `DagModel.calculate_dagrun_date_fields` is not supported. "
"Provide a data interval instead."
)
last_automated_data_interval = last_automated_dag_run
next_dagrun_info = dag.next_dagrun_info(last_automated_data_interval)
if next_dagrun_info is None:
self.next_dagrun_data_interval = self.next_dagrun = self.next_dagrun_create_after = None
else:
self.next_dagrun_data_interval = next_dagrun_info.data_interval
self.next_dagrun = next_dagrun_info.logical_date
self.next_dagrun_create_after = next_dagrun_info.run_after
log.info(
"Setting next_dagrun for %s to %s, run_after=%s",
dag.dag_id,
self.next_dagrun,
self.next_dagrun_create_after,
)
@provide_session
def get_asset_triggered_next_run_info(
self, *, session: Session = NEW_SESSION
) -> dict[str, int | str] | None:
if self.asset_expression is None:
return None
# When an asset alias does not resolve into assets, get_asset_triggered_next_run_info returns
# an empty dict as there's no asset info to get. This method should thus return None.
return get_asset_triggered_next_run_info([self.dag_id], session=session).get(self.dag_id, None)
@staticmethod
@provide_session
def get_team_name(dag_id: str, session: Session = NEW_SESSION) -> str | None:
"""Return the team name associated to a Dag or None if it is not owned by a specific team."""
stmt = (
select(Team.name)
.join(DagBundleModel.teams)
.join(DagModel, DagModel.bundle_name == DagBundleModel.name)
.where(DagModel.dag_id == dag_id)
)
return session.scalar(stmt)
@staticmethod
@provide_session
def get_dag_id_to_team_name_mapping(
dag_ids: list[str], session: Session = NEW_SESSION
) -> dict[str, str | None]:
stmt = (
select(DagModel.dag_id, Team.name)
.join(DagBundleModel.teams)
.join(DagModel, DagModel.bundle_name == DagBundleModel.name)
.where(DagModel.dag_id.in_(dag_ids))
)
return {dag_id: team_name for dag_id, team_name in session.execute(stmt)}
STATICA_HACK = True
globals()["kcah_acitats"[::-1].upper()] = False
if STATICA_HACK: # pragma: no cover
from airflow.models.serialized_dag import SerializedDagModel
DagModel.serialized_dag = relationship(SerializedDagModel)
""":sphinx-autoapi-skip:"""
def __getattr__(name: str):
# Add DAG and dag for compatibility. We can't do this in
# airflow/models/__init__.py since this module contains other things.
if name not in ("DAG", "dag"):
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
import warnings
from airflow.utils.deprecation_tools import DeprecatedImportWarning
warnings.warn(
f"Import {name!r} directly from the airflow module is deprecated and "
f"will be removed in the future. Please import it from 'airflow.sdk'.",
DeprecatedImportWarning,
stacklevel=2,
)
import airflow.sdk
return getattr(airflow.sdk, name)
|
DagModel
|
python
|
pytorch__pytorch
|
test/distributed/launcher/test_run.py
|
{
"start": 1693,
"end": 26848
}
|
class ____(TestCase):
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
for env in os.environ.keys(): # noqa: SIM118
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
self._test_launch_user_script_python()
def _test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
"--monitor-interval=1",
"--start-method=spawn",
"--master-addr=localhost",
f"--master-port={master_port}",
"--node-rank=0",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
"--no-python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no-python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
world_size = 1
args = [
f"--nnodes={nnodes}",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
"--no-python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no-python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "spawn"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no-python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_type}",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
"--no-python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
@patch("torch.cuda.is_available", return_value=False)
def test_nproc_launch_auto_configurations(self, _mock1):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
@patch("torch.accelerator.is_available", return_value=True)
@patch("torch.accelerator.device_count", return_value=3)
@patch("torch.accelerator.current_accelerator", return_value=MagicMock(type="gpu"))
def test_nproc_gpu_launch_configurations(
self, _mock1, _mock2, _mock3, _mock4, _mock5
):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
@patch("torch.xpu.is_available", return_value=True)
@patch("torch.xpu.device_count", return_value=3)
@patch("torch.accelerator.is_available", return_value=True)
@patch("torch.accelerator.device_count", return_value=3)
@patch("torch.accelerator.current_accelerator", return_value=MagicMock(type="xpu"))
def test_nproc_xpu_launch_configurations(
self, _mock1, _mock2, _mock3, _mock4, _mock5
):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("xpu", 3)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc-per-node={nproc_per_node}",
"--rdzv-backend=c10d",
f"--rdzv-endpoint=localhost:{get_free_port()}",
"--rdzv-conf='join_timeout=5,last_call_timeout=1,timeout=5'",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
Asserts that when the worker program fails, the launcher raises an exception
to indicate that the worker process failed
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc-per-node={nproc_per_node}",
"--rdzv-backend=c10d",
f"--rdzv-endpoint=localhost:{get_free_port()}",
"--rdzv-conf='join_timeout=5,last_call_timeout=1,timeout=5'",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--max-restarts=0",
"--start-method=spawn",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception,
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc-per-node={nproc_per_node}",
"--rdzv-backend=c10d",
f"--rdzv-endpoint=localhost:{get_free_port()}",
"--rdzv_conf=timeout=5",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--max-restarts=0",
"--start-method=spawn",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
"--standalone",
"--monitor-interval=1",
"--start-method=spawn",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
"--run-path",
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
"--monitor-interval=1",
"--start-method=spawn",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc-per-node={nproc_per_node}",
"--rdzv-backend=c10d",
f"--rdzv-endpoint=localhost:{get_free_port()}",
"--rdzv_conf=timeout=5",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
self.assertEqual(min_nodes, max_nodes)
self.assertEqual(1, min_nodes)
min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
self.assertEqual(2, min_nodes)
self.assertEqual(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
"--monitor-interval=1",
"--start-method=spawn",
path("bin/test_script.py"),
f"--touch-file-dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_is_torchelastic_launched(self):
# launch test script with torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns True
out_file = f"{os.path.join(self.test_dir, 'out')}"
launch.main(
[
"--run-path",
"--nnodes=1",
"--nproc-per-node=1",
"--monitor-interval=1",
path("bin/test_script_is_torchelastic_launched.py"),
f"--out-file={out_file}",
]
)
with open(out_file) as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
@patch("torch.distributed.run.metadata")
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_is_torchelastic_launched_with_logs_spec_defined(self, metadata_mock):
# mock the entrypoint API to avoid version issues.
entrypoints = MagicMock()
metadata_mock.entry_points.return_value = entrypoints
group = MagicMock()
entrypoints.select.return_value = group
ep = MagicMock()
ep.load.return_value = DefaultLogsSpecs
group.select.return_value = ep
group.__getitem__.return_value = ep
out_file = f"{os.path.join(self.test_dir, 'out')}"
if os.path.exists(out_file):
os.remove(out_file)
launch.main(
[
"--run-path",
"--nnodes=1",
"--nproc-per-node=1",
"--monitor-interval=1",
"--logs_specs=default",
path("bin/test_script_is_torchelastic_launched.py"),
f"--out-file={out_file}",
]
)
with open(out_file) as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("True", is_torchelastic_launched)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_logs_logs_spec_entrypoint_must_be_defined(self):
with self.assertRaises(ValueError):
launch.main(
[
"--run-path",
"--nnodes=1",
"--nproc-per-node=1",
"--monitor-interval=1",
"--logs_specs=DOESNOT_EXIST",
path("bin/test_script_is_torchelastic_launched.py"),
]
)
def test_is_not_torchelastic_launched(self):
# launch test script without torchelastic and validate that
# torch.distributed.is_torchelastic_launched() returns False
out_file = f"{os.path.join(self.test_dir, 'out')}"
# need to run the script with runpy in the same interpreter
# as the test because otherwise (depending on the environment)
# it will not find torch as a dependency
with patch.object(
sys,
"argv",
[
path("bin/test_script_is_torchelastic_launched.py"),
f"--out-file={out_file}",
],
):
runpy.run_path(sys.argv[0], run_name="__main__")
with open(out_file) as fp:
is_torchelastic_launched = fp.readline()
self.assertEqual("False", is_torchelastic_launched)
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_init_method_tcp_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run-path",
"--nnodes=1",
"--nproc-per-node=4",
"--master-addr=localhost",
f"--master-port={port}",
"--monitor-interval=1",
path("bin/test_script_init_method.py"),
f"--init-method=tcp://localhost:{port}",
]
)
# nothing to validate, just make sure it runs
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
def test_init_method_env_with_torchelastic(self):
port = get_free_port()
launch.main(
[
"--run-path",
"--nnodes=1",
"--nproc-per-node=4",
"--master-addr=localhost",
f"--master-port={port}",
"--monitor-interval=1",
path("bin/test_script_init_method.py"),
"--init-method=env://",
]
)
# nothing to validate, just make sure it runs
def test_capture_logs_using_default_logs_specs(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
f"--rdzv-id={run_id}",
"--redirect=3",
"--tee=3",
"--monitor-interval=1",
"--start-method=spawn",
"--no-python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
captured_out = io.StringIO()
captured_err = io.StringIO()
with redirect_stdout(captured_out), redirect_stderr(captured_err):
with patch.dict(
os.environ, {"TORCHELASTIC_LOG_LINE_PREFIX_TEMPLATE": "[rank${rank}]: "}
):
launch.main(args + script_args)
for i in range(nproc_per_node):
self.assertTrue(f"[rank{i}]: creating " in captured_out.getvalue())
@skip_but_pass_in_sandcastle_if(
TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan"
)
@skipIf(not TEST_CUDA, "requires CUDA")
def test_virtual_local_rank(self):
"""
Test that virtual-local-rank ensures consistent device IDs across ranks.
Without it, ranks may compile to different devices, leading to different code.
"""
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 2
# Helper function to run and capture output
def run_test(use_virtual_local_rank):
args = [
f"--nnodes={nnodes}",
f"--nproc-per-node={nproc_per_node}",
f"--rdzv-id={run_id}",
"--monitor-interval=1",
"--start-method=spawn",
"--redirect=3",
"--tee=3",
]
if use_virtual_local_rank:
args.append("--virtual-local-rank")
args.append(path("script_deviceid.py"))
captured_out = io.StringIO()
captured_err = io.StringIO()
with redirect_stdout(captured_out), redirect_stderr(captured_err):
launch.main(args)
return captured_out.getvalue()
def split_ranks(output):
default0 = []
default1 = []
for line in output.splitlines():
if "cuda:" not in line:
continue
if line.startswith("[default0]:"):
default0.append(line[11:])
elif line.startswith("[default1]:"):
default1.append(line[11:])
return default0, default1
# First, run WITHOUT virtual-local-rank - outputs should differ
output = run_test(use_virtual_local_rank=False)
rank0, rank1 = split_ranks(output)
# Verify we actually captured compiled code from both ranks
self.assertGreater(
len(rank0), 0, "Expected to capture compiled code from rank 0"
)
self.assertGreater(
len(rank1), 0, "Expected to capture compiled code from rank 1"
)
# Without virtual-local-rank, the ranks should have DIFFERENT compiled code
# because they see different device IDs (cuda:0 vs cuda:1)
self.assertNotEqual(
rank0,
rank1,
"Expected different compiled code without --virtual-local-rank",
)
# Now run WITH virtual-local-rank - outputs should be identical
output = run_test(use_virtual_local_rank=True)
rank0, rank1 = split_ranks(output)
# Verify we actually captured compiled code from both ranks
self.assertGreater(
len(rank0),
0,
"Expected to capture compiled code from rank 0 with --virtual-local-rank",
)
self.assertGreater(
len(rank1),
0,
"Expected to capture compiled code from rank 1 with --virtual-local-rank",
)
# With virtual-local-rank, both ranks should have IDENTICAL compiled code
# because they both see cuda:0 during compilation
self.assertEqual(
rank0, rank1, "Expected identical compiled code with --virtual-local-rank"
)
if __name__ == "__main__":
run_tests()
|
ElasticLaunchTest
|
python
|
tiangolo__fastapi
|
docs_src/dependencies/tutorial011.py
|
{
"start": 56,
"end": 504
}
|
class ____:
def __init__(self, fixed_content: str):
self.fixed_content = fixed_content
def __call__(self, q: str = ""):
if q:
return self.fixed_content in q
return False
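# Because instances are callable, FastAPI treats `checker` itself as the
# dependency: the signature of __call__ (here a single query parameter `q`)
# is what gets inspected and documented.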
checker = FixedContentQueryChecker("bar")
@app.get("/query-checker/")
async def read_query_check(fixed_content_included: bool = Depends(checker)):
return {"fixed_content_in_query": fixed_content_included}
|
FixedContentQueryChecker
|
python
|
huggingface__transformers
|
src/transformers/models/auto/configuration_auto.py
|
{
"start": 40390,
"end": 44579
}
|
class ____(OrderedDict[str, str]):
"""
A mapping that lazily loads all of its key/value pairs on first access (whether by indexing, requesting keys,
values, etc.).
Args:
mapping: The mapping to load.
"""
def __init__(self, mapping):
self._mapping = mapping
self._initialized = False
self._data = {}
def _initialize(self):
if self._initialized:
return
for model_type, map_name in self._mapping.items():
module_name = model_type_to_module_name(model_type)
module = importlib.import_module(f".{module_name}", "transformers.models")
mapping = getattr(module, map_name)
self._data.update(mapping)
self._initialized = True
def __getitem__(self, key):
self._initialize()
return self._data[key]
def keys(self) -> KeysView[str]:
self._initialize()
return self._data.keys()
def values(self) -> ValuesView[str]:
self._initialize()
return self._data.values()
def items(self) -> ItemsView[str, str]:
# Assumption: ItemsView is importable from typing alongside the KeysView and
# ValuesView annotations used above. Returning keys() here was a bug.
self._initialize()
return self._data.items()
def __iter__(self) -> Iterator[str]:
self._initialize()
return iter(self._data)
def __contains__(self, item: object) -> bool:
self._initialize()
return item in self._data
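# Usage sketch (hypothetical mapping name and value): nothing is imported until
# the first lookup, which triggers _initialize() and lazily pulls in every
# referenced transformers.models.<module>.
#   ARCHIVE_MAPS = _LazyLoadAllMappings({"bert": "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"})
#   "some-checkpoint" in ARCHIVE_MAPS  # first access loads all mappings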
def _get_class_name(model_class: Union[str, list[str]]):
if isinstance(model_class, (list, tuple)):
return " or ".join([f"[`{c}`]" for c in model_class if c is not None])
return f"[`{model_class}`]"
def _list_model_options(indent, config_to_class=None, use_model_types=True):
if config_to_class is None and not use_model_types:
raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.")
if use_model_types:
if config_to_class is None:
model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()}
else:
model_type_to_name = {
model_type: _get_class_name(model_class)
for model_type, model_class in config_to_class.items()
if model_type in MODEL_NAMES_MAPPING
}
lines = [
f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)"
for model_type in sorted(model_type_to_name.keys())
]
else:
config_to_name = {
CONFIG_MAPPING_NAMES[config]: _get_class_name(clas)
for config, clas in config_to_class.items()
if config in CONFIG_MAPPING_NAMES
}
config_to_model_name = {
config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items()
}
lines = [
f"{indent}- [`{config_name}`] configuration class:"
f" {config_to_name[config_name]} ({config_to_model_name[config_name]} model)"
for config_name in sorted(config_to_name.keys())
]
return "\n".join(lines)
def replace_list_option_in_docstrings(
config_to_class=None, use_model_types: bool = True
) -> Callable[[_CallableT], _CallableT]:
def docstring_decorator(fn):
docstrings = fn.__doc__
if docstrings is None:
# Example: -OO
return fn
lines = docstrings.split("\n")
i = 0
while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None:
i += 1
if i < len(lines):
indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0]
if use_model_types:
indent = f"{indent} "
lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types)
docstrings = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current"
f" docstring is:\n{docstrings}"
)
fn.__doc__ = docstrings
return fn
return docstring_decorator
|
_LazyLoadAllMappings
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-recurly/components.py
|
{
"start": 204,
"end": 503
}
|
class ____(RecordExtractor):
def extract_records(self, response: requests.Response) -> List[Mapping[str, Any]]:
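# Assumption: the Recurly export endpoint responds with {"dates": [...]}; a
# body that fails to decode as JSON is treated as "no export dates available"
# rather than as a hard error.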
try:
dates = response.json()["dates"]
except requests.exceptions.JSONDecodeError:
dates = []
return [{"dates": dates}]
|
ExportDatesExtractor
|
python
|
django__django
|
django/contrib/sessions/backends/base.py
|
{
"start": 676,
"end": 799
}
|
class ____(Exception):
"""
Occurs if Django tries to update a session that was deleted.
"""
pass
|
UpdateError
|
python
|
has2k1__plotnine
|
plotnine/themes/themeable.py
|
{
"start": 39877,
"end": 40044
}
|
class ____(panel_grid_minor_x, panel_grid_minor_y):
"""
Minor grid lines
Parameters
----------
theme_element : element_line
"""
|
panel_grid_minor
|