language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | facebookresearch__faiss | demos/offline_ivf/tests/test_offline_ivf.py | {
"start": 918,
"end": 11285
} | class ____(unittest.TestCase):
"""
Unit tests for OIVF. Some of these unit tests first copy the required test data objects and puts them in the tempdir created by the context manager.
"""
def assert_file_exists(self, filepath: str) -> None:
path = pl.Path(filepath)
self.assertEqual((str(path), path.is_file()), (str(path), True))
def test_consistency_check(self) -> None:
"""
Test the OIVF consistency check step, that it throws if no other steps have been ran.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
data_creator = TestDataCreator(
tempdir=tmpdirname,
dimension=8,
data_type=np.float16,
index_factory=["OPQ4,IVF256,PQ4"],
training_sample=9984,
num_files=3,
file_size=10000,
nprobe=2,
k=2,
metric="METRIC_L2",
)
data_creator.create_test_data()
test_args = data_creator.setup_cli("consistency_check")
self.assertRaises(
AssertionError, process_options_and_run_jobs, test_args
)
def test_train_index(self) -> None:
"""
Test the OIVF train index step, that it correctly produces the empty.faissindex template file.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
data_creator = TestDataCreator(
tempdir=tmpdirname,
dimension=8,
data_type=np.float16,
index_factory=["OPQ4,IVF256,PQ4"],
training_sample=9984,
num_files=3,
file_size=10000,
nprobe=2,
k=2,
metric="METRIC_L2",
)
data_creator.create_test_data()
test_args = data_creator.setup_cli("train_index")
cfg = load_config(test_args.config)
process_options_and_run_jobs(test_args)
empty_index = (
cfg["output"]
+ "/my_test_data/"
+ cfg["index"]["prod"][-1].replace(",", "_")
+ ".empty.faissindex"
)
self.assert_file_exists(empty_index)
def test_index_shard_equal_file_sizes(self) -> None:
"""
Test the case where the shard size is a divisor of the database size and it is equal to the first file size.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
index_shard_size = 10000
num_files = 3
file_size = 10000
xb_ds_size = num_files * file_size
data_creator = TestDataCreator(
tempdir=tmpdirname,
dimension=8,
data_type=np.float16,
index_factory=["IVF256,PQ4"],
training_sample=9984,
num_files=num_files,
file_size=file_size,
nprobe=2,
k=2,
metric="METRIC_L2",
index_shard_size=index_shard_size,
query_batch_size=1000,
evaluation_sample=100,
)
data_creator.create_test_data()
test_args = data_creator.setup_cli("train_index")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("index_shard")
cfg = load_config(test_args.config)
process_options_and_run_jobs(test_args)
num_shards = xb_ds_size // index_shard_size
if xb_ds_size % index_shard_size != 0:
num_shards += 1
print(f"number of shards:{num_shards}")
for i in range(num_shards):
index_shard_file = (
cfg["output"]
+ "/my_test_data/"
+ cfg["index"]["prod"][-1].replace(",", "_")
+ f".shard_{i}"
)
self.assert_file_exists(index_shard_file)
def test_index_shard_unequal_file_sizes(self) -> None:
"""
Test the case where the shard size is not a divisor of the database size and is greater than the first file size.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
file_sizes = [20000, 15001, 13990]
xb_ds_size = sum(file_sizes)
index_shard_size = 30000
data_creator = TestDataCreator(
tempdir=tmpdirname,
dimension=8,
data_type=np.float16,
index_factory=["IVF256,PQ4"],
training_sample=9984,
file_sizes=file_sizes,
nprobe=2,
k=2,
metric="METRIC_L2",
index_shard_size=index_shard_size,
evaluation_sample=100,
)
data_creator.create_test_data()
test_args = data_creator.setup_cli("train_index")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("index_shard")
cfg = load_config(test_args.config)
process_options_and_run_jobs(test_args)
num_shards = xb_ds_size // index_shard_size
if xb_ds_size % index_shard_size != 0:
num_shards += 1
print(f"number of shards:{num_shards}")
for i in range(num_shards):
index_shard_file = (
cfg["output"]
+ "/my_test_data/"
+ cfg["index"]["prod"][-1].replace(",", "_")
+ f".shard_{i}"
)
self.assert_file_exists(index_shard_file)
def test_search(self) -> None:
"""
Test search step using test data objects to bypass dependencies on previous steps.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
num_files = 3
file_size = 10000
query_batch_size = 10000
total_batches = num_files * file_size // query_batch_size
if num_files * file_size % query_batch_size != 0:
total_batches += 1
data_creator = TestDataCreator(
tempdir=tmpdirname,
dimension=8,
data_type=np.float32,
index_factory=["IVF256,PQ4"],
training_sample=9984,
num_files=3,
file_size=10000,
nprobe=2,
k=2,
metric="METRIC_L2",
index_shard_size=10000,
query_batch_size=query_batch_size,
evaluation_sample=100,
)
data_creator.create_test_data()
test_args = data_creator.setup_cli("train_index")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("index_shard")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("search")
cfg = load_config(test_args.config)
process_options_and_run_jobs(test_args)
# TODO: add check that there are number of batches total of files
knn_file = cfg["output"] + KNN_RESULTS_FILE
self.assert_file_exists(knn_file)
def test_evaluate_without_margin(self) -> None:
"""
Test evaluate step using test data objects, no margin evaluation, single index.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
data_creator = TestDataCreator(
tempdir=tmpdirname,
dimension=8,
data_type=np.float32,
index_factory=["IVF256,PQ4"],
training_sample=9984,
num_files=3,
file_size=10000,
nprobe=2,
k=2,
metric="METRIC_L2",
index_shard_size=10000,
query_batch_size=10000,
evaluation_sample=100,
with_queries_ds=True,
)
data_creator.create_test_data()
test_args = data_creator.setup_cli("train_index")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("index_shard")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("merge_index")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("evaluate")
process_options_and_run_jobs(test_args)
common_path = tmpdirname + "/my_queries_data_in_my_test_data/eval/"
for filename in A_INDEX_FILES:
file_to_check = common_path + "/" + filename
self.assert_file_exists(file_to_check)
def test_evaluate_without_margin_OPQ(self) -> None:
"""
Test evaluate step using test data objects, no margin evaluation, single index.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
data_creator = TestDataCreator(
tempdir=tmpdirname,
dimension=8,
data_type=np.float32,
index_factory=["OPQ4,IVF256,PQ4"],
training_sample=9984,
num_files=3,
file_size=10000,
nprobe=200,
k=2,
metric="METRIC_L2",
index_shard_size=10000,
query_batch_size=10000,
evaluation_sample=100,
with_queries_ds=True,
)
data_creator.create_test_data()
test_args = data_creator.setup_cli("train_index")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("index_shard")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("merge_index")
process_options_and_run_jobs(test_args)
test_args = data_creator.setup_cli("evaluate")
process_options_and_run_jobs(test_args)
common_path = tmpdirname + "/my_queries_data_in_my_test_data/eval/"
for filename in A_INDEX_OPQ_FILES:
file_to_check = common_path + filename
self.assert_file_exists(file_to_check)
| TestOIVF |
python | pyqtgraph__pyqtgraph | pyqtgraph/opengl/GLGraphicsItem.py | {
"start": 655,
"end": 10941
} | class ____(QtCore.QObject):
_nextId = 0
def __init__(self, parentItem: 'GLGraphicsItem' = None):
super().__init__()
self._id = GLGraphicsItem._nextId
GLGraphicsItem._nextId += 1
self.__parent: GLGraphicsItem | None = None
self.__view = None
self.__children: list[GLGraphicsItem] = list()
self.__transform = Transform3D()
self.__visible = True
self.__initialized = False
self.setParentItem(parentItem)
self.setDepthValue(0)
self.__glOpts = {}
def setParentItem(self, item):
"""Set this item's parent in the scenegraph hierarchy."""
if self.__parent is not None:
self.__parent.__children.remove(self)
if item is not None:
item.__children.append(self)
# if we had a __view, we were a top level object
if self.__view is not None:
self.__view.removeItem(self)
# we are now either a child or an orphan.
# either way, we don't have our own __view
self.__parent = item
self.__view = None
def setGLOptions(self, opts):
"""
Set the OpenGL state options to use immediately before drawing this item.
(Note that subclasses must call setupGLState before painting for this to work)
The simplest way to invoke this method is to pass in the name of
a predefined set of options (see the GLOptions variable):
============= ======================================================
opaque Enables depth testing and disables blending
translucent Enables depth testing and blending
Elements must be drawn sorted back-to-front for
translucency to work correctly.
additive Disables depth testing, enables blending.
Colors are added together, so sorting is not required.
============= ======================================================
It is also possible to specify any arbitrary settings as a dictionary.
This may consist of {'functionName': (args...)} pairs where functionName must
be a callable attribute of OpenGL.GL, or {GL_STATE_VAR: bool} pairs
which will be interpreted as calls to glEnable or glDisable(GL_STATE_VAR).
For example::
{
GL_ALPHA_TEST: True,
GL_CULL_FACE: False,
'glBlendFunc': (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA),
}
"""
if isinstance(opts, str):
opts = GLOptions[opts]
self.__glOpts = opts.copy()
self.update()
def updateGLOptions(self, opts):
"""
Modify the OpenGL state options to use immediately before drawing this item.
*opts* must be a dictionary as specified by setGLOptions.
Values may also be None, in which case the key will be ignored.
"""
self.__glOpts.update(opts)
def parentItem(self):
"""Return a this item's parent in the scenegraph hierarchy."""
return self.__parent
def childItems(self):
"""Return a list of this item's children in the scenegraph hierarchy."""
return list(self.__children)
def _setView(self, v):
self.__view = v
def view(self):
if self.__parent is None:
# top level object
return self.__view
else:
# recurse
return self.__parent.view()
def setDepthValue(self, value):
"""
Sets the depth value of this item. Default is 0.
This controls the order in which items are drawn--those with a greater depth value will be drawn later.
Items with negative depth values are drawn before their parent.
(This is analogous to QGraphicsItem.zValue)
The depthValue does NOT affect the position of the item or the values it imparts to the GL depth buffer.
"""
self.__depthValue = value
def depthValue(self):
"""Return the depth value of this item. See setDepthValue for more information."""
return self.__depthValue
def setTransform(self, tr):
"""Set the local transform for this object.
Parameters
----------
tr : pyqtgraph.Transform3D
Tranformation from the local coordinate system to the parent's.
"""
self.__transform = Transform3D(tr)
self.update()
def resetTransform(self):
"""Reset this item's transform to an identity transformation."""
self.__transform.setToIdentity()
self.update()
def applyTransform(self, tr, local):
"""
Multiply this object's transform by *tr*.
If local is True, then *tr* is multiplied on the right of the current transform::
newTransform = transform * tr
If local is False, then *tr* is instead multiplied on the left::
newTransform = tr * transform
"""
if local:
self.setTransform(self.transform() * tr)
else:
self.setTransform(tr * self.transform())
def transform(self):
"""Return this item's transform object."""
return self.__transform
def viewTransform(self):
"""Return the transform mapping this item's local coordinate system to the
view coordinate system."""
tr = self.__transform
p = self
while True:
p = p.parentItem()
if p is None:
break
tr = p.transform() * tr
return Transform3D(tr)
def translate(self, dx, dy, dz, local=False):
"""
Translate the object by (*dx*, *dy*, *dz*) in its parent's coordinate system.
If *local* is True, then translation takes place in local coordinates.
"""
tr = Transform3D()
tr.translate(dx, dy, dz)
self.applyTransform(tr, local=local)
def rotate(self, angle, x, y, z, local=False):
"""
Rotate the object around the axis specified by (x,y,z).
*angle* is in degrees.
"""
tr = Transform3D()
tr.rotate(angle, x, y, z)
self.applyTransform(tr, local=local)
def scale(self, x, y, z, local=True):
"""
Scale the object by (*dx*, *dy*, *dz*) in its local coordinate system.
If *local* is False, then scale takes place in the parent's coordinates.
"""
tr = Transform3D()
tr.scale(x, y, z)
self.applyTransform(tr, local=local)
def hide(self):
"""Hide this item.
This is equivalent to setVisible(False)."""
self.setVisible(False)
def show(self):
"""Make this item visible if it was previously hidden.
This is equivalent to setVisible(True)."""
self.setVisible(True)
def setVisible(self, vis):
"""Set the visibility of this item."""
self.__visible = vis
self.update()
def visible(self):
"""Return True if the item is currently set to be visible.
Note that this does not guarantee that the item actually appears in the
view, as it may be obscured or outside of the current view area."""
return self.__visible
def initialize(self):
self.initializeGL()
self.__initialized = True
def isInitialized(self):
return self.__initialized
def initializeGL(self):
"""
Called after an item is added to a GLViewWidget.
The widget's GL context is made current before this method is called.
(So this would be an appropriate time to generate lists, upload textures, etc.)
"""
pass
def setupGLState(self):
"""
This method is responsible for preparing the GL state options needed to render
this item (blending, depth testing, etc). The method is called immediately before painting the item.
"""
for k,v in self.__glOpts.items():
if v is None:
continue
if isinstance(k, str):
func = getattr(GL, k)
func(*v)
else:
if v is True:
GL.glEnable(k)
else:
GL.glDisable(k)
def paint(self):
"""
Called by the GLViewWidget to draw this item.
It is the responsibility of the item to set up its own modelview matrix,
but the caller will take care of pushing/popping.
"""
self.setupGLState()
def update(self):
"""
Indicates that this item needs to be redrawn, and schedules an update
with the view it is displayed in.
"""
v = self.view()
if v is None:
return
v.update()
def mapToParent(self, point):
tr = self.transform()
if tr is None:
return point
return tr.map(point)
def mapFromParent(self, point):
tr = self.transform()
if tr is None:
return point
return tr.inverted()[0].map(point)
def mapToView(self, point):
tr = self.viewTransform()
if tr is None:
return point
return tr.map(point)
def mapFromView(self, point):
tr = self.viewTransform()
if tr is None:
return point
return tr.inverted()[0].map(point)
def modelViewMatrix(self) -> QtGui.QMatrix4x4:
if (view := self.view()) is None:
return QtGui.QMatrix4x4()
return view.currentModelView()
def projectionMatrix(self) -> QtGui.QMatrix4x4:
if (view := self.view()) is None:
return QtGui.QMatrix4x4()
return view.currentProjection()
def mvpMatrix(self) -> QtGui.QMatrix4x4:
if (view := self.view()) is None:
return QtGui.QMatrix4x4()
return view.currentProjection() * view.currentModelView()
| GLGraphicsItem |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 80415,
"end": 84003
} | class ____(Request):
"""
Add or update task model
:param task: ID of the task
:type task: str
:param name: The task model name
:type name: str
:param model: The model ID
:type model: str
:param type: The task model type
:type type: ModelTypeEnum
:param iteration: Iteration (used to update task statistics)
:type iteration: int
"""
_service = "tasks"
_action = "add_or_update_model"
_version = "2.13"
_schema = {
"definitions": {"model_type_enum": {"enum": ["input", "output"], "type": "string"}},
"properties": {
"iteration": {
"description": "Iteration (used to update task statistics)",
"type": "integer",
},
"model": {"description": "The model ID", "type": "string"},
"name": {"description": "The task model name", "type": "string"},
"task": {"description": "ID of the task", "type": "string"},
"type": {
"$ref": "#/definitions/model_type_enum",
"description": "The task model type",
},
},
"required": ["task", "name", "model", "type"],
"type": "object",
}
def __init__(
self, task: str, name: str, model: str, type: Any, iteration: Optional[int] = None, **kwargs: Any
) -> None:
super(AddOrUpdateModelRequest, self).__init__(**kwargs)
self.task = task
self.name = name
self.model = model
self.type = type
self.iteration = iteration
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("type")
def type(self) -> Any:
return self._property_type
@type.setter
def type(self, value: Any) -> None:
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = ModelTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("iteration")
def iteration(self) -> Optional[int]:
return self._property_iteration
@iteration.setter
def iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iteration", six.integer_types)
self._property_iteration = value
| AddOrUpdateModelRequest |
python | networkx__networkx | networkx/algorithms/shortest_paths/tests/test_weighted.py | {
"start": 17496,
"end": 32885
} | class ____(WeightedTestBase):
def test_single_node_graph(self):
G = nx.DiGraph()
G.add_node(0)
assert nx.single_source_bellman_ford_path(G, 0) == {0: [0]}
assert nx.single_source_bellman_ford_path_length(G, 0) == {0: 0}
assert nx.single_source_bellman_ford(G, 0) == ({0: 0}, {0: [0]})
assert nx.bellman_ford_predecessor_and_distance(G, 0) == ({0: []}, {0: 0})
assert nx.goldberg_radzik(G, 0) == ({0: None}, {0: 0})
def test_absent_source_bellman_ford(self):
# the check is in _bellman_ford; this provides regression testing
# against later changes to "client" Bellman-Ford functions
G = nx.path_graph(2)
for fn in (
nx.bellman_ford_predecessor_and_distance,
nx.bellman_ford_path,
nx.bellman_ford_path_length,
nx.single_source_bellman_ford_path,
nx.single_source_bellman_ford_path_length,
nx.single_source_bellman_ford,
):
pytest.raises(nx.NodeNotFound, fn, G, 3, 0)
pytest.raises(nx.NodeNotFound, fn, G, 3, 3)
def test_absent_source_goldberg_radzik(self):
with pytest.raises(nx.NodeNotFound):
G = nx.path_graph(2)
nx.goldberg_radzik(G, 3, 0)
def test_negative_cycle_heuristic(self):
G = nx.DiGraph()
G.add_edge(0, 1, weight=-1)
G.add_edge(1, 2, weight=-1)
G.add_edge(2, 3, weight=-1)
G.add_edge(3, 0, weight=3)
assert not nx.negative_edge_cycle(G, heuristic=True)
G.add_edge(2, 0, weight=1.999)
assert nx.negative_edge_cycle(G, heuristic=True)
G.edges[2, 0]["weight"] = 2
assert not nx.negative_edge_cycle(G, heuristic=True)
def test_negative_cycle_consistency(self):
import random
unif = random.uniform
for random_seed in range(2): # range(20):
random.seed(random_seed)
for density in [0.1, 0.9]: # .3, .7, .9]:
for N in [1, 10, 20]: # range(1, 60 - int(30 * density)):
for max_cost in [1, 90]: # [1, 10, 40, 90]:
G = nx.binomial_graph(N, density, seed=4, directed=True)
edges = ((u, v, unif(-1, max_cost)) for u, v in G.edges)
G.add_weighted_edges_from(edges)
no_heuristic = nx.negative_edge_cycle(G, heuristic=False)
with_heuristic = nx.negative_edge_cycle(G, heuristic=True)
assert no_heuristic == with_heuristic
def test_negative_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-7)
for i in range(5):
pytest.raises(
nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i
)
pytest.raises(
nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i
)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
pytest.raises(
nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i
)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.cycle_graph(5) # undirected Graph
G.add_edge(1, 2, weight=-3)
for i in range(5):
pytest.raises(
nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, i
)
pytest.raises(
nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, i
)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, i)
pytest.raises(
nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, i
)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, i)
G = nx.DiGraph([(1, 1, {"weight": -1})])
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1)
pytest.raises(
nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1
)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1)
pytest.raises(
nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1
)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1)
G = nx.MultiDiGraph([(1, 1, {"weight": -1})])
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford_path, G, 1)
pytest.raises(
nx.NetworkXUnbounded, nx.single_source_bellman_ford_path_length, G, 1
)
pytest.raises(nx.NetworkXUnbounded, nx.single_source_bellman_ford, G, 1)
pytest.raises(
nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1
)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1)
def test_zero_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(2, 3, weight=-4)
# check that zero cycle doesn't raise
nx.goldberg_radzik(G, 1)
nx.bellman_ford_predecessor_and_distance(G, 1)
G.add_edge(2, 3, weight=-4.0001)
# check that negative cycle does raise
pytest.raises(
nx.NetworkXUnbounded, nx.bellman_ford_predecessor_and_distance, G, 1
)
pytest.raises(nx.NetworkXUnbounded, nx.goldberg_radzik, G, 1)
def test_find_negative_cycle_longer_cycle(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
nx.add_cycle(G, [3, 5, 6, 7, 8, 9])
G.add_edge(1, 2, weight=-30)
assert nx.find_negative_cycle(G, 1) == [0, 1, 2, 3, 4, 0]
assert nx.find_negative_cycle(G, 7) == [2, 3, 4, 0, 1, 2]
def test_find_negative_cycle_no_cycle(self):
G = nx.path_graph(5, create_using=nx.DiGraph())
pytest.raises(nx.NetworkXError, nx.find_negative_cycle, G, 3)
def test_find_negative_cycle_single_edge(self):
G = nx.Graph()
G.add_edge(0, 1, weight=-1)
assert nx.find_negative_cycle(G, 1) == [1, 0, 1]
def test_negative_weight(self):
G = nx.cycle_graph(5, create_using=nx.DiGraph())
G.add_edge(1, 2, weight=-3)
assert nx.single_source_bellman_ford_path(G, 0) == {
0: [0],
1: [0, 1],
2: [0, 1, 2],
3: [0, 1, 2, 3],
4: [0, 1, 2, 3, 4],
}
assert nx.single_source_bellman_ford_path_length(G, 0) == {
0: 0,
1: 1,
2: -2,
3: -1,
4: 0,
}
assert nx.single_source_bellman_ford(G, 0) == (
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0},
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3], 4: [0, 1, 2, 3, 4]},
)
assert nx.bellman_ford_predecessor_and_distance(G, 0) == (
{0: [], 1: [0], 2: [1], 3: [2], 4: [3]},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0},
)
assert nx.goldberg_radzik(G, 0) == (
{0: None, 1: 0, 2: 1, 3: 2, 4: 3},
{0: 0, 1: 1, 2: -2, 3: -1, 4: 0},
)
def test_not_connected(self):
G = nx.complete_graph(6)
G.add_edge(10, 11)
G.add_edge(10, 12)
assert nx.single_source_bellman_ford_path(G, 0) == {
0: [0],
1: [0, 1],
2: [0, 2],
3: [0, 3],
4: [0, 4],
5: [0, 5],
}
assert nx.single_source_bellman_ford_path_length(G, 0) == {
0: 0,
1: 1,
2: 1,
3: 1,
4: 1,
5: 1,
}
assert nx.single_source_bellman_ford(G, 0) == (
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]},
)
assert nx.bellman_ford_predecessor_and_distance(G, 0) == (
{0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
)
assert nx.goldberg_radzik(G, 0) == (
{0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
)
# not connected, with a component not containing the source that
# contains a negative cycle.
G = nx.complete_graph(6)
G.add_edges_from(
[
("A", "B", {"load": 3}),
("B", "C", {"load": -10}),
("C", "A", {"load": 2}),
]
)
assert nx.single_source_bellman_ford_path(G, 0, weight="load") == {
0: [0],
1: [0, 1],
2: [0, 2],
3: [0, 3],
4: [0, 4],
5: [0, 5],
}
assert nx.single_source_bellman_ford_path_length(G, 0, weight="load") == {
0: 0,
1: 1,
2: 1,
3: 1,
4: 1,
5: 1,
}
assert nx.single_source_bellman_ford(G, 0, weight="load") == (
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
{0: [0], 1: [0, 1], 2: [0, 2], 3: [0, 3], 4: [0, 4], 5: [0, 5]},
)
assert nx.bellman_ford_predecessor_and_distance(G, 0, weight="load") == (
{0: [], 1: [0], 2: [0], 3: [0], 4: [0], 5: [0]},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
)
assert nx.goldberg_radzik(G, 0, weight="load") == (
{0: None, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0},
{0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1},
)
def test_multigraph(self):
assert nx.bellman_ford_path(self.MXG, "s", "v") == ["s", "x", "u", "v"]
assert nx.bellman_ford_path_length(self.MXG, "s", "v") == 9
assert nx.single_source_bellman_ford_path(self.MXG, "s")["v"] == [
"s",
"x",
"u",
"v",
]
assert nx.single_source_bellman_ford_path_length(self.MXG, "s")["v"] == 9
D, P = nx.single_source_bellman_ford(self.MXG, "s", target="v")
assert D == 9
assert P == ["s", "x", "u", "v"]
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG, "s")
assert P["v"] == ["u"]
assert D["v"] == 9
P, D = nx.goldberg_radzik(self.MXG, "s")
assert P["v"] == "u"
assert D["v"] == 9
assert nx.bellman_ford_path(self.MXG4, 0, 2) == [0, 1, 2]
assert nx.bellman_ford_path_length(self.MXG4, 0, 2) == 4
assert nx.single_source_bellman_ford_path(self.MXG4, 0)[2] == [0, 1, 2]
assert nx.single_source_bellman_ford_path_length(self.MXG4, 0)[2] == 4
D, P = nx.single_source_bellman_ford(self.MXG4, 0, target=2)
assert D == 4
assert P == [0, 1, 2]
P, D = nx.bellman_ford_predecessor_and_distance(self.MXG4, 0)
assert P[2] == [1]
assert D[2] == 4
P, D = nx.goldberg_radzik(self.MXG4, 0)
assert P[2] == 1
assert D[2] == 4
def test_others(self):
assert nx.bellman_ford_path(self.XG, "s", "v") == ["s", "x", "u", "v"]
assert nx.bellman_ford_path_length(self.XG, "s", "v") == 9
assert nx.single_source_bellman_ford_path(self.XG, "s")["v"] == [
"s",
"x",
"u",
"v",
]
assert nx.single_source_bellman_ford_path_length(self.XG, "s")["v"] == 9
D, P = nx.single_source_bellman_ford(self.XG, "s", target="v")
assert D == 9
assert P == ["s", "x", "u", "v"]
(P, D) = nx.bellman_ford_predecessor_and_distance(self.XG, "s")
assert P["v"] == ["u"]
assert D["v"] == 9
(P, D) = nx.goldberg_radzik(self.XG, "s")
assert P["v"] == "u"
assert D["v"] == 9
def test_path_graph(self):
G = nx.path_graph(4)
assert nx.single_source_bellman_ford_path(G, 0) == {
0: [0],
1: [0, 1],
2: [0, 1, 2],
3: [0, 1, 2, 3],
}
assert nx.single_source_bellman_ford_path_length(G, 0) == {
0: 0,
1: 1,
2: 2,
3: 3,
}
assert nx.single_source_bellman_ford(G, 0) == (
{0: 0, 1: 1, 2: 2, 3: 3},
{0: [0], 1: [0, 1], 2: [0, 1, 2], 3: [0, 1, 2, 3]},
)
assert nx.bellman_ford_predecessor_and_distance(G, 0) == (
{0: [], 1: [0], 2: [1], 3: [2]},
{0: 0, 1: 1, 2: 2, 3: 3},
)
assert nx.goldberg_radzik(G, 0) == (
{0: None, 1: 0, 2: 1, 3: 2},
{0: 0, 1: 1, 2: 2, 3: 3},
)
assert nx.single_source_bellman_ford_path(G, 3) == {
0: [3, 2, 1, 0],
1: [3, 2, 1],
2: [3, 2],
3: [3],
}
assert nx.single_source_bellman_ford_path_length(G, 3) == {
0: 3,
1: 2,
2: 1,
3: 0,
}
assert nx.single_source_bellman_ford(G, 3) == (
{0: 3, 1: 2, 2: 1, 3: 0},
{0: [3, 2, 1, 0], 1: [3, 2, 1], 2: [3, 2], 3: [3]},
)
assert nx.bellman_ford_predecessor_and_distance(G, 3) == (
{0: [1], 1: [2], 2: [3], 3: []},
{0: 3, 1: 2, 2: 1, 3: 0},
)
assert nx.goldberg_radzik(G, 3) == (
{0: 1, 1: 2, 2: 3, 3: None},
{0: 3, 1: 2, 2: 1, 3: 0},
)
def test_4_cycle(self):
# 4-cycle
G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0)])
dist, path = nx.single_source_bellman_ford(G, 0)
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
assert path[0] == [0]
assert path[1] == [0, 1]
assert path[2] in [[0, 1, 2], [0, 3, 2]]
assert path[3] == [0, 3]
pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
assert pred[0] == []
assert pred[1] == [0]
assert pred[2] in [[1, 3], [3, 1]]
assert pred[3] == [0]
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
pred, dist = nx.goldberg_radzik(G, 0)
assert pred[0] is None
assert pred[1] == 0
assert pred[2] in [1, 3]
assert pred[3] == 0
assert dist == {0: 0, 1: 1, 2: 2, 3: 1}
def test_negative_weight_bf_path(self):
G = nx.DiGraph()
G.add_nodes_from("abcd")
G.add_edge("a", "d", weight=0)
G.add_edge("a", "b", weight=1)
G.add_edge("b", "c", weight=-3)
G.add_edge("c", "d", weight=1)
assert nx.bellman_ford_path(G, "a", "d") == ["a", "b", "c", "d"]
assert nx.bellman_ford_path_length(G, "a", "d") == -1
def test_zero_cycle_smoke(self):
D = nx.DiGraph()
D.add_weighted_edges_from([(0, 1, 1), (1, 2, 1), (2, 3, 1), (3, 1, -2)])
nx.bellman_ford_path(D, 1, 3)
nx.dijkstra_path(D, 1, 3)
nx.bidirectional_dijkstra(D, 1, 3)
# FIXME nx.goldberg_radzik(D, 1)
def test_skip_visited_unweighted(self):
"""Check that `goldberg_radzik` correctly skips visited nodes in `topo_sort`.
This doesn't reliably get tested by other tests because iterating over
the `relabeled` set is not deterministic.
"""
G = nx.Graph([(0, 4), (0, 5), (1, 3), (1, 4), (2, 3), (2, 5), (3, 5), (3, 6)])
_, dist = nx.goldberg_radzik(G, 4)
assert dist == {0: 1, 1: 1, 2: 3, 3: 2, 4: 0, 5: 2, 6: 3}
| TestBellmanFordAndGoldbergRadzik |
python | kamyu104__LeetCode-Solutions | Python/decode-ways-ii.py | {
"start": 29,
"end": 1259
} | class ____(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
M, W = 1000000007, 3
dp = [0] * W
dp[0] = 1
dp[1] = 9 if s[0] == '*' else dp[0] if s[0] != '0' else 0
for i in xrange(1, len(s)):
if s[i] == '*':
dp[(i + 1) % W] = 9 * dp[i % W]
if s[i - 1] == '1':
dp[(i + 1) % W] = (dp[(i + 1) % W] + 9 * dp[(i - 1) % W]) % M
elif s[i - 1] == '2':
dp[(i + 1) % W] = (dp[(i + 1) % W] + 6 * dp[(i - 1) % W]) % M
elif s[i - 1] == '*':
dp[(i + 1) % W] = (dp[(i + 1) % W] + 15 * dp[(i - 1) % W]) % M
else:
dp[(i + 1) % W] = dp[i % W] if s[i] != '0' else 0
if s[i - 1] == '1':
dp[(i + 1) % W] = (dp[(i + 1) % W] + dp[(i - 1) % W]) % M
elif s[i - 1] == '2' and s[i] <= '6':
dp[(i + 1) % W] = (dp[(i + 1) % W] + dp[(i - 1) % W]) % M
elif s[i - 1] == '*':
dp[(i + 1) % W] = (dp[(i + 1) % W] + (2 if s[i] <= '6' else 1) * dp[(i - 1) % W]) % M
return dp[len(s) % W]
| Solution |
python | pytorch__pytorch | test/dynamo/test_repros.py | {
"start": 9900,
"end": 11652
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.dropout = 0.5
self.layer_norm = torch.nn.LayerNorm(512, eps=1.0e-12)
self.layers = [torch.nn.Linear(256, 256)]
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=[None] * 6,
num_hashes=None,
use_cache=False,
orig_sequence_length=64,
output_hidden_states=False,
output_attentions=False,
):
# hidden_states and attention lists to be filled if wished
all_hidden_states = []
all_attentions = []
past_buckets_states = [((None), (None)) for i in range(len(self.layers))]
# concat same tensor for reversible ResNet
hidden_states = torch.cat([hidden_states, hidden_states], dim=-1)
hidden_states = _ReversibleFunction.apply(
hidden_states,
self.layers,
attention_mask,
head_mask,
num_hashes,
all_hidden_states,
all_attentions,
past_buckets_states,
use_cache,
orig_sequence_length,
output_hidden_states,
output_attentions,
)
# Apply layer norm to concatenated hidden states
hidden_states = self.layer_norm(hidden_states)
# Apply dropout
hidden_states = torch.nn.functional.dropout(
hidden_states, p=self.dropout, training=self.training
)
return ReformerEncoderOutput(
hidden_states=hidden_states,
all_hidden_states=all_hidden_states,
all_attentions=all_attentions,
past_buckets_states=past_buckets_states,
)
| ReformerEncoder |
python | Lightning-AI__lightning | src/lightning/fabric/utilities/init.py | {
"start": 1044,
"end": 4589
} | class ____(TorchFunctionMode):
"""Initialize `nn.Module` with empty tensors, i.e., uninitialized memory.
Example::
with _EmptyInit():
model = BigModel()
model.load_state_dict(torch.load("checkpoint.pt"))
"""
def __init__(self, enabled: bool = True) -> None:
super().__init__()
self.enabled = enabled
@override
def __torch_function__(
self,
func: Callable,
types: Sequence,
args: Sequence[Any] = (),
kwargs: Optional[dict] = None,
) -> Any:
kwargs = kwargs or {}
if not self.enabled:
return func(*args, **kwargs)
if getattr(func, "__module__", None) == "torch.nn.init":
if "tensor" in kwargs:
return kwargs["tensor"]
return args[0]
return func(*args, **kwargs)
def _materialize(module: Module, device: _DEVICE) -> None:
"""Materialize a module."""
module.to_empty(device=device, recurse=False)
if not hasattr(module, "reset_parameters"):
raise TypeError(
f"Materialization requires that the `{type(module).__name__}.reset_parameters` method is implemented."
" This method is used to initialize any children parameters or buffers in this module."
)
if callable(module.reset_parameters):
module.reset_parameters()
def _materialize_meta_tensors(module: Module, device: _DEVICE) -> None:
"""Materialize all tensors in a given module."""
for module in module.modules():
if _has_meta_device_parameters_or_buffers(module, recurse=False):
_materialize(module, device)
def _materialize_distributed_module(module: Module, device: torch.device) -> None:
# Reference: https://github.com/pytorch/torchtitan/blob/main/docs/fsdp.md#meta-device-initialization
# TODO: Introduce `Fabric.materialize(module)` to give user control when materialization should happen
# TODO: Make `torchmetrics.Metric` compatible with the `to_empty()` + `reset_parameters()` semantics
if not _has_meta_device_parameters_or_buffers(module):
return
module.to_empty(device=device) # has to be called on the root module
uninitialized_modules = set()
for submodule in module.modules():
if all(False for _ in itertools.chain(submodule.parameters(recurse=False), submodule.buffers(recurse=False))):
# module has no parameters or buffers
continue
if callable(reset_method := getattr(submodule, "reset_parameters", None)):
reset_method()
else:
uninitialized_modules.add(type(submodule).__name__)
if uninitialized_modules:
rank_zero_warn(
"Parameter initialization incomplete. The following modules have parameters or buffers with uninitialized"
" memory because they don't define a `reset_parameters()` method for re-initialization:"
f" {', '.join(uninitialized_modules)}"
)
def _has_meta_device_parameters_or_buffers(obj: Union[Module, Optimizer], recurse: bool = True) -> bool:
if isinstance(obj, Optimizer):
return any(
t.is_meta for param_group in obj.param_groups for t in param_group["params"] if isinstance(t, Parameter)
)
if isinstance(obj, Module):
return any(t.is_meta for t in itertools.chain(obj.parameters(recurse=recurse), obj.buffers(recurse=recurse)))
raise TypeError(f"Expected `torch.nn.Module` or `torch.optim.Optimizer`, got: {type(obj).__name__}")
| _EmptyInit |
python | dask__distributed | distributed/comm/ws.py | {
"start": 14634,
"end": 14737
} | class ____(BaseTCPBackend):
_connector_class = WSConnector
_listener_class = WSListener
| WSBackend |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 11363,
"end": 12901
} | class ____(DagsterError):
"""This is the base class for any exception that is meant to wrap an
:py:class:`~python:Exception` thrown by user code. It wraps that existing user code.
The ``original_exc_info`` argument to the constructor is meant to be a tuple of the type
returned by :py:func:`sys.exc_info <python:sys.exc_info>` at the call site of the constructor.
Users should not subclass this base class for their own exceptions and should instead throw
freely from user code. User exceptions will be automatically wrapped and rethrown.
"""
def __init__(self, *args, **kwargs):
# original_exc_info should be gotten from a sys.exc_info() call at the
# callsite inside of the exception handler. this will allow consuming
# code to *re-raise* the user error in it's original format
# for cleaner error reporting that does not have framework code in it
user_exception = check.inst_param(kwargs.pop("user_exception"), "user_exception", Exception)
original_exc_info = check.tuple_param(kwargs.pop("original_exc_info"), "original_exc_info")
check.invariant(original_exc_info[0] is not None)
super().__init__(args[0], *args[1:], **kwargs)
self.user_exception = check.opt_inst_param(user_exception, "user_exception", Exception)
self.original_exc_info = original_exc_info
@property
def is_user_code_error(self) -> bool: # pyright: ignore[reportIncompatibleMethodOverride]
return True
| DagsterUserCodeExecutionError |
python | nedbat__coveragepy | tests/test_html.py | {
"start": 1008,
"end": 5172
} | class ____(CoverageTest):
"""Methods that help with HTML tests."""
def create_initial_files(self) -> None:
"""Create the source files we need to run these tests."""
self.make_file(
"main_file.py",
"""\
import helper1, helper2
helper1.func1(12)
helper2.func2(12)
""",
)
self.make_file(
"helper1.py",
"""\
def func1(x):
if x % 2:
print("odd")
""",
)
self.make_file(
"helper2.py",
"""\
def func2(x):
print("x is %d" % x)
""",
)
def run_coverage(
self,
covargs: dict[str, Any] | None = None,
htmlargs: dict[str, Any] | None = None,
) -> float:
"""Run coverage.py on main_file.py, and create an HTML report."""
self.clean_local_file_imports()
cov = coverage.Coverage(**(covargs or {}))
self.start_import_stop(cov, "main_file")
ret = cov.html_report(**(htmlargs or {}))
self.assert_valid_hrefs()
return ret
def get_html_report_content(self, module: str) -> str:
"""Return the content of the HTML report for `module`."""
filename = flat_rootname(module) + ".html"
filename = os.path.join("htmlcov", filename)
with open(filename, encoding="utf-8") as f:
return f.read()
def get_html_index_content(self) -> str:
"""Return the content of index.html.
Time stamps are replaced with a placeholder so that clocks don't matter.
"""
with open("htmlcov/index.html", encoding="utf-8") as f:
index = f.read()
index = re.sub(
r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2} \+\d{4}",
r"created at YYYY-MM-DD HH:MM +ZZZZ",
index,
)
index = re.sub(
r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2}",
r"created at YYYY-MM-DD HH:MM",
index,
)
return index
def get_html_report_text_lines(self, module: str) -> list[str]:
"""Parse the HTML report, and return a list of strings, the text rendered."""
parser = HtmlReportParser()
parser.feed(self.get_html_report_content(module))
return parser.text()
def assert_correct_timestamp(self, html: str) -> None:
"""Extract the time stamp from `html`, and assert it is recent."""
timestamp_pat = r"created at (\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2})"
m = re.search(timestamp_pat, html)
assert m, "Didn't find a time stamp!"
timestamp = datetime.datetime(*[int(v) for v in m.groups()]) # type: ignore[arg-type]
# The time stamp only records the minute, so the delta could be from
# 12:00 to 12:01:59, or two minutes.
self.assert_recent_datetime(
timestamp,
seconds=120,
msg=f"Time stamp is wrong: {timestamp}",
)
def assert_valid_hrefs(self, directory: str = "htmlcov") -> None:
"""Assert that the hrefs in htmlcov/*.html are valid.
Doesn't check external links (those with a protocol).
"""
hrefs = collections.defaultdict(set)
for fname in glob.glob(f"{directory}/*.html"):
with open(fname, encoding="utf-8") as fhtml:
html = fhtml.read()
for href in re.findall(r""" href=['"]([^'"]*)['"]""", html):
if href.startswith("#"):
assert re.search(rf""" id=['"]{href[1:]}['"]""", html), (
f"Fragment {href!r} in {fname} has no anchor"
)
continue
if "://" in href:
continue
href = href.partition("#")[0] # ignore fragment in URLs.
hrefs[href].add(fname)
for href, sources in hrefs.items():
assert os.path.exists(f"{directory}/{href}"), (
f"These files link to {href!r}, which doesn't exist: {', '.join(sources)}"
)
| HtmlTestHelpers |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 52025,
"end": 57933
} | class ____(CBaseTypeNode):
# After parsing:
# positional_args [ExprNode] List of positional arguments
# keyword_args DictNode Keyword arguments
# base_type_node CBaseTypeNode
# After analysis:
# type PyrexTypes.BufferType or PyrexTypes.CppClassType ...containing the right options
child_attrs = ["base_type_node", "positional_args",
"keyword_args", "dtype_node"]
is_templated_type_node = True
dtype_node = None
name = None
def _analyse_template_types(self, env, base_type):
require_python_types = base_type.python_type_constructor_name == 'dataclasses.ClassVar'
in_c_type_context = env.in_c_type_context and not require_python_types
template_types = []
for template_node in self.positional_args:
if template_node.is_none:
continue
# CBaseTypeNode -> allow C type declarations in a 'cdef' context again
with env.new_c_type_context(in_c_type_context or isinstance(template_node, CBaseTypeNode)):
ttype = template_node.analyse_as_type(env)
if ttype is None:
if base_type.is_cpp_class:
error(template_node.pos, "unknown type in template argument")
ttype = error_type
# For Python generics we can be a bit more flexible and allow None.
template_types.append(ttype)
if base_type.python_type_constructor_name:
if base_type.python_type_constructor_name == 'typing.Union':
base_type.contains_none = any(x.is_none for x in self.positional_args)
require_optional_types = base_type.allows_none()
else:
require_optional_types = False
for i, ttype in enumerate(template_types):
if ttype is None:
continue
if require_python_types and not ttype.is_pyobject or require_optional_types and not ttype.can_be_optional():
if ttype.equivalent_type and not template_node.as_cython_attribute():
template_types[i] = ttype.equivalent_type
else:
error(template_node.pos, "%s[...] cannot be applied to type %s" % (
base_type.python_type_constructor_name,
ttype,
))
template_types[i] = error_type
return template_types
def analyse(self, env, could_be_name=False, base_type=None):
if base_type is None:
base_type = self.base_type_node.analyse(env)
if base_type.is_error: return base_type
if ((base_type.is_cpp_class and base_type.is_template_type()) or
base_type.python_type_constructor_name):
# Templated class, Python generics, etc.
if self.keyword_args and self.keyword_args.key_value_pairs:
tp = "c++ templates" if base_type.is_cpp_class else "indexed types"
error(self.pos, "%s cannot take keyword arguments" % tp)
self.type = PyrexTypes.error_type
return self.type
template_types = self._analyse_template_types(env, base_type)
self.type = base_type.specialize_here(self.pos, env, template_types)
elif base_type.is_pyobject:
# Buffer
from . import Buffer
options = Buffer.analyse_buffer_options(
self.pos,
env,
self.positional_args,
self.keyword_args,
base_type.buffer_defaults)
self.type = PyrexTypes.BufferType(base_type, **options)
if has_np_pythran(env) and is_pythran_buffer(self.type):
self.type = PyrexTypes.PythranExpr(pythran_type(self.type), self.type)
else:
# Array
empty_declarator = CNameDeclaratorNode(self.pos, name="", cname=None)
if len(self.positional_args) > 1 or self.keyword_args.key_value_pairs:
error(self.pos, "invalid array declaration")
self.type = PyrexTypes.error_type
else:
# It would be nice to merge this class with CArrayDeclaratorNode,
# but arrays are part of the declaration, not the type...
if not self.positional_args:
dimension = None
else:
dimension = self.positional_args[0]
self.array_declarator = CArrayDeclaratorNode(
self.pos,
base=empty_declarator,
dimension=dimension)
self.type = self.array_declarator.analyse(base_type, env)[1]
if self.type and self.type.is_fused and env.fused_to_specific:
try:
self.type = self.type.specialize(env.fused_to_specific)
except CannotSpecialize:
error(self.pos,
"'%s' cannot be specialized since its type is not a fused argument to this function" %
self.name)
return self.type
def analyse_pytyping_modifiers(self, env):
# Check for declaration modifiers, e.g. "typing.Optional[...]" or "dataclasses.InitVar[...]"
# TODO: somehow bring this together with IndexNode.analyse_pytyping_modifiers()
modifiers = []
modifier_node = self
while modifier_node.is_templated_type_node and modifier_node.base_type_node and len(modifier_node.positional_args) == 1:
modifier_type = self.base_type_node.analyse_as_type(env)
if modifier_type.python_type_constructor_name and modifier_type.modifier_name:
modifiers.append(modifier_type.modifier_name)
modifier_node = modifier_node.positional_args[0]
return modifiers
| TemplatedTypeNode |
python | doocs__leetcode | lcci/01.08.Zero Matrix/Solution.py | {
"start": 0,
"end": 447
} | class ____:
def setZeroes(self, matrix: List[List[int]]) -> None:
m, n = len(matrix), len(matrix[0])
rows = [0] * m
cols = [0] * n
for i, row in enumerate(matrix):
for j, v in enumerate(row):
if v == 0:
rows[i] = cols[j] = 1
for i in range(m):
for j in range(n):
if rows[i] or cols[j]:
matrix[i][j] = 0
| Solution |
python | spack__spack | lib/spack/spack/util/unparse/unparser.py | {
"start": 2634,
"end": 3625
} | class ____(IntEnum):
"""Precedence table that originated from python grammar."""
NAMED_EXPR = auto() # <target> := <expr1>
TUPLE = auto() # <expr1>, <expr2>
YIELD = auto() # 'yield', 'yield from'
TEST = auto() # 'if'-'else', 'lambda'
OR = auto() # 'or'
AND = auto() # 'and'
NOT = auto() # 'not'
CMP = auto() # '<', '>', '==', '>=', '<=', '!=',
# 'in', 'not in', 'is', 'is not'
EXPR = auto()
BOR = EXPR # '|'
BXOR = auto() # '^'
BAND = auto() # '&'
SHIFT = auto() # '<<', '>>'
ARITH = auto() # '+', '-'
TERM = auto() # '*', '@', '/', '%', '//'
FACTOR = auto() # unary '+', '-', '~'
POWER = auto() # '**'
AWAIT = auto() # 'await'
ATOM = auto()
def next(self):
try:
return self.__class__(self + 1)
except ValueError:
return self
_SINGLE_QUOTES = ("'", '"')
_MULTI_QUOTES = ('"""', "'''")
_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES)
| _Precedence |
python | ray-project__ray | python/ray/tests/unit/test_runtime_env_validation.py | {
"start": 14110,
"end": 16956
} | class ____:
def test_parse_and_validate_uv(self, test_directory):
# Valid case w/o duplication.
result = validation.parse_and_validate_uv({"packages": ["tensorflow"]})
assert result == {
"packages": ["tensorflow"],
"uv_check": False,
"uv_pip_install_options": ["--no-cache"],
}
# Valid case w/ duplication.
result = validation.parse_and_validate_uv(
{"packages": ["tensorflow", "tensorflow"]}
)
assert result == {
"packages": ["tensorflow"],
"uv_check": False,
"uv_pip_install_options": ["--no-cache"],
}
# Valid case, use `list` to represent necessary packages.
result = validation.parse_and_validate_uv(
["requests==1.0.0", "aiohttp", "ray[serve]"]
)
assert result == {
"packages": ["requests==1.0.0", "aiohttp", "ray[serve]"],
"uv_check": False,
}
# Invalid case, unsupport keys.
with pytest.raises(ValueError):
result = validation.parse_and_validate_uv({"random_key": "random_value"})
# Valid case w/ uv version.
result = validation.parse_and_validate_uv(
{"packages": ["tensorflow"], "uv_version": "==0.4.30"}
)
assert result == {
"packages": ["tensorflow"],
"uv_version": "==0.4.30",
"uv_check": False,
"uv_pip_install_options": ["--no-cache"],
}
# Valid requirement files.
_, requirements_file, _, _ = test_directory
requirements_file = requirements_file.resolve()
result = validation.parse_and_validate_uv(str(requirements_file))
assert result == {
"packages": ["requests==1.0.0", "pip-install-test"],
"uv_check": False,
}
# Invalid requiremnt files.
with pytest.raises(ValueError):
result = validation.parse_and_validate_uv("some random non-existent file")
# Invalid uv install options.
with pytest.raises(TypeError):
result = validation.parse_and_validate_uv(
{
"packages": ["tensorflow"],
"uv_version": "==0.4.30",
"uv_pip_install_options": [1],
}
)
# Valid uv install options.
result = validation.parse_and_validate_uv(
{
"packages": ["tensorflow"],
"uv_version": "==0.4.30",
"uv_pip_install_options": ["--no-cache"],
}
)
assert result == {
"packages": ["tensorflow"],
"uv_check": False,
"uv_pip_install_options": ["--no-cache"],
"uv_version": "==0.4.30",
}
| TestValidateUV |
python | huggingface__transformers | tests/models/bitnet/test_modeling_bitnet.py | {
"start": 1263,
"end": 4206
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
vocab_size=99,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=4,
num_key_value_heads=2,
intermediate_size=37,
hidden_act="gelu",
max_position_embeddings=512,
initializer_range=0.02,
pad_token_id=0,
bos_token_id=1,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return BitNetConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
pad_token_id=self.pad_token_id,
bos_token_id=self.bos_token_id,
)
def create_and_check_model(self, config, input_ids, input_mask):
model = BitNetModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| BitNetModelTester |
python | huggingface__transformers | src/transformers/models/apertus/modeling_apertus.py | {
"start": 9729,
"end": 13016
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: ApertusConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.rotary_fn = apply_rotary_pos_emb
self.q_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)
self.k_norm = ApertusRMSNorm(self.head_dim, config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| ApertusAttention |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 44206,
"end": 44657
} | class ____(sgqlc.types.Enum):
"""The billing plans available for organizations.
Enumeration Choices:
* `BUSINESS`: Team Plan
* `BUSINESS_PLUS`: Enterprise Cloud Plan
* `FREE`: Free Plan
* `TIERED_PER_SEAT`: Tiered Per Seat Plan
* `UNLIMITED`: Legacy Unlimited Plan
"""
__schema__ = github_schema
__choices__ = ("BUSINESS", "BUSINESS_PLUS", "FREE", "TIERED_PER_SEAT", "UNLIMITED")
| OrgCreateAuditEntryBillingPlan |
python | yaml__pyyaml | lib/yaml/nodes.py | {
"start": 1,
"end": 763
} | class ____(object):
def __init__(self, tag, value, start_mark, end_mark):
self.tag = tag
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
def __repr__(self):
value = self.value
#if isinstance(value, list):
# if len(value) == 0:
# value = '<empty>'
# elif len(value) == 1:
# value = '<1 item>'
# else:
# value = '<%d items>' % len(value)
#else:
# if len(value) > 75:
# value = repr(value[:70]+u' ... ')
# else:
# value = repr(value)
value = repr(value)
return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
| Node |
python | sqlalchemy__sqlalchemy | test/orm/test_sync.py | {
"start": 561,
"end": 968
} | class ____:
def _get_test_uow(self, session):
uow = unitofwork.UOWTransaction(session)
deleted = set(session._deleted)
new = set(session._new)
dirty = set(session._dirty_states).difference(deleted)
for s in new.union(dirty):
uow.register_object(s)
for d in deleted:
uow.register_object(d, isdelete=True)
return uow
| AssertsUOW |
python | great-expectations__great_expectations | great_expectations/expectations/metadata_types.py | {
"start": 97,
"end": 406
} | class ____(str, Enum):
"""Data quality issues addressed by Core Expectations."""
VOLUME = "Volume"
SCHEMA = "Schema"
COMPLETENESS = "Completeness"
UNIQUENESS = "Uniqueness"
NUMERIC = "Numeric"
VALIDITY = "Validity"
SQL = "SQL"
MULTI_SOURCE = "Multi-source"
| DataQualityIssues |
python | gevent__gevent | src/greentest/3.14/test_socket.py | {
"start": 97779,
"end": 100284
} | class ____(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
socket.SOL_RFCOMM
if sys.platform == "win32":
socket.SO_BTH_ENCRYPT
socket.SO_BTH_MTU
socket.SO_BTH_MTU_MAX
socket.SO_BTH_MTU_MIN
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
socket.SOL_L2CAP
socket.BTPROTO_SCO
socket.SOL_SCO
socket.HCI_DATA_DIR
if sys.platform == "linux":
socket.SOL_BLUETOOTH
socket.HCI_DEV_NONE
socket.HCI_CHANNEL_RAW
socket.HCI_CHANNEL_USER
socket.HCI_CHANNEL_MONITOR
socket.HCI_CHANNEL_CONTROL
socket.HCI_CHANNEL_LOGGING
socket.HCI_TIME_STAMP
socket.BT_SECURITY
socket.BT_SECURITY_SDP
socket.BT_FLUSHABLE
socket.BT_POWER
socket.BT_CHANNEL_POLICY
socket.BT_CHANNEL_POLICY_BREDR_ONLY
if hasattr(socket, 'BT_PHY'):
socket.BT_PHY_BR_1M_1SLOT
if hasattr(socket, 'BT_MODE'):
socket.BT_MODE_BASIC
if hasattr(socket, 'BT_VOICE'):
socket.BT_VOICE_TRANSPARENT
socket.BT_VOICE_CVSD_16BIT
socket.L2CAP_LM
socket.L2CAP_LM_MASTER
socket.L2CAP_LM_AUTH
if sys.platform in ("linux", "freebsd"):
socket.BDADDR_BREDR
socket.BDADDR_LE_PUBLIC
socket.BDADDR_LE_RANDOM
socket.HCI_FILTER
if sys.platform.startswith(("freebsd", "netbsd", "dragonfly")):
socket.SO_L2CAP_IMTU
socket.SO_L2CAP_FLUSH
socket.SO_RFCOMM_MTU
socket.SO_RFCOMM_FC_INFO
socket.SO_SCO_MTU
if sys.platform == "freebsd":
socket.SO_SCO_CONNINFO
if sys.platform.startswith(("netbsd", "dragonfly")):
socket.SO_HCI_EVT_FILTER
socket.SO_HCI_PKT_FILTER
socket.SO_L2CAP_IQOS
socket.SO_L2CAP_LM
socket.L2CAP_LM_AUTH
socket.SO_RFCOMM_LM
socket.RFCOMM_LM_AUTH
socket.SO_SCO_HANDLE
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH,
'Bluetooth sockets required for this test.')
| BasicBluetoothTest |
python | doocs__leetcode | solution/3100-3199/3193.Count the Number of Inversions/Solution.py | {
"start": 0,
"end": 658
} | class ____:
def numberOfPermutations(self, n: int, requirements: List[List[int]]) -> int:
req = [-1] * n
for end, cnt in requirements:
req[end] = cnt
if req[0] > 0:
return 0
req[0] = 0
mod = 10**9 + 7
m = max(req)
f = [[0] * (m + 1) for _ in range(n)]
f[0][0] = 1
for i in range(1, n):
l, r = 0, m
if req[i] >= 0:
l = r = req[i]
for j in range(l, r + 1):
for k in range(min(i, j) + 1):
f[i][j] = (f[i][j] + f[i - 1][j - k]) % mod
return f[n - 1][req[n - 1]]
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_glue_databrew.py | {
"start": 1064,
"end": 2518
} | class ____:
"""Test waiters from ``amazon/aws/waiters/glue.json``."""
JOB_NAME = "test_job"
RUN_ID = "123"
@pytest.fixture(autouse=True)
def _setup_test_cases(self, monkeypatch):
self.client = boto3.client("databrew", region_name="eu-west-3")
monkeypatch.setattr(GlueDataBrewHook, "conn", self.client)
def test_service_waiters(self):
hook_waiters = GlueDataBrewHook(aws_conn_id=None).list_waiters()
assert "job_complete" in hook_waiters
@pytest.fixture
def mock_describe_job_runs(self):
"""Mock ``GlueDataBrewHook.Client.describe_job_run`` method."""
with mock.patch.object(self.client, "describe_job_run") as m:
yield m
@staticmethod
def describe_jobs(status: str):
"""
Helper function for generate minimal DescribeJobRun response for a single job.
https://docs.aws.amazon.com/databrew/latest/dg/API_DescribeJobRun.html
"""
return {"State": status}
def test_job_succeeded(self, mock_describe_job_runs):
"""Test job succeeded"""
mock_describe_job_runs.side_effect = [
self.describe_jobs(RUNNING_STATES[1]),
self.describe_jobs(TERMINAL_STATES[1]),
]
waiter = GlueDataBrewHook(aws_conn_id=None).get_waiter("job_complete")
waiter.wait(name=self.JOB_NAME, runId=self.RUN_ID, WaiterConfig={"Delay": 0.2, "MaxAttempts": 2})
| TestCustomDataBrewWaiters |
python | numpy__numpy | numpy/_core/tests/test_umath_complex.py | {
"start": 12478,
"end": 14252
} | class ____:
def setup_method(self):
self.olderr = np.seterr(invalid='ignore')
def teardown_method(self):
np.seterr(**self.olderr)
def test_simple(self):
x = np.array([1 + 1j, 0 + 2j, 1 + 2j, np.inf, np.nan])
y_r = x ** 2
y = np.power(x, 2)
assert_almost_equal(y, y_r)
def test_scalar(self):
x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3])
lx = list(range(len(x)))
# Hardcode the expected `builtins.complex` values,
# as complex exponentiation is broken as of bpo-44698
p_r = [
1 + 0j,
0.20787957635076193 + 0j,
0.35812203996480685 + 0.6097119028618724j,
0.12659112128185032 + 0.48847676699581527j,
complex(np.inf, np.nan),
complex(np.nan, np.nan),
]
n_r = [x[i] ** y[i] for i in lx]
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
def test_array(self):
x = np.array([1, 1j, 2, 2.5 + .37j, np.inf, np.nan])
y = np.array([1, 1j, -0.5 + 1.5j, -0.5 + 1.5j, 2, 3])
lx = list(range(len(x)))
# Hardcode the expected `builtins.complex` values,
# as complex exponentiation is broken as of bpo-44698
p_r = [
1 + 0j,
0.20787957635076193 + 0j,
0.35812203996480685 + 0.6097119028618724j,
0.12659112128185032 + 0.48847676699581527j,
complex(np.inf, np.nan),
complex(np.nan, np.nan),
]
n_r = x ** y
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
| TestCpow |
python | wandb__wandb | wandb/sdk/artifacts/storage_handlers/local_file_handler.py | {
"start": 1023,
"end": 5537
} | class ____(StorageHandler):
"""Handles file:// references."""
_scheme: str
_cache: ArtifactFileCache
def __init__(self, scheme: str = "file") -> None:
"""Track files or directories on a local filesystem.
Expand directories to create an entry for each file contained.
"""
self._scheme = scheme
self._cache = get_artifact_file_cache()
def can_handle(self, parsed_url: ParseResult) -> bool:
return parsed_url.scheme == self._scheme
def load_path(
self,
manifest_entry: ArtifactManifestEntry,
local: bool = False,
) -> URIStr | FilePathStr:
if (ref_uri := manifest_entry.ref) is None:
raise ValueError(f"Cannot add path with no ref: {manifest_entry.path}")
if not os.path.exists(local_path := local_file_uri_to_path(ref_uri)):
raise ValueError(
f"Local file reference: Failed to find file at path {local_path!r}"
)
expected_digest = manifest_entry.digest
path, hit, cache_open = self._cache.check_md5_obj_path(
b64_md5=expected_digest, size=manifest_entry.size or 0
)
if hit:
return path
if (digest := md5_file_b64(local_path)) != expected_digest:
raise ValueError(
f"Local file reference: Digest mismatch for path {local_path!r}: expected {expected_digest!r} but found {digest!r}"
)
# Ensure the parent directory exists
Path(path).parent.mkdir(parents=True, exist_ok=True)
with cache_open() as f:
shutil.copy(local_path, f.name)
return path
def store_path(
self,
artifact: Artifact,
path: URIStr | FilePathStr,
name: StrPath | None = None,
checksum: bool = True,
max_objects: int | None = None,
) -> list[ArtifactManifestEntry]:
local_path = local_file_uri_to_path(path)
max_objects = max_objects or DEFAULT_MAX_OBJECTS
# If checksum=False, the file's hash should only
# depend on its absolute path/URI, not its contents
# Closure func for calculating the file hash from its path
check_md5: Callable[[str], B64MD5] = _md5_content if checksum else _md5_path
# We have a single file or directory
# Note, we follow symlinks for files contained within the directory
if os.path.isdir(local_path):
entries: deque[ArtifactManifestEntry] = deque()
with TimedIf(checksum):
if checksum:
termlog(
f"Generating checksum for up to {max_objects!r} files in {local_path!r}... ",
newline=False,
)
physical_paths = (
os.path.join(root, subpath)
for root, _, files in os.walk(local_path)
for subpath in files
)
for i, physical_path in enumerate(physical_paths):
if i >= max_objects:
raise ValueError(
f"Exceeded {max_objects!r} objects tracked, pass max_objects to add_reference"
)
# TODO(spencerpearson): this is not a "logical path" in the sense that
# `LogicalPath` returns a "logical path"; it's a relative path
# **on the local filesystem**.
file_path = os.path.relpath(physical_path, start=local_path)
artifact_path = os.path.join(name or "", file_path)
entry = ArtifactManifestEntry(
path=artifact_path,
ref=os.path.join(path, file_path),
size=os.path.getsize(physical_path),
digest=check_md5(physical_path),
)
entries.append(entry)
return list(entries)
if os.path.isfile(local_path):
return [
ArtifactManifestEntry(
path=name or os.path.basename(local_path),
ref=path,
size=os.path.getsize(local_path),
digest=check_md5(local_path),
)
]
else:
# TODO: update error message if we don't allow directories.
raise ValueError(f"Path {path!r} must be a valid file or directory path")
| LocalFileHandler |
python | astropy__astropy | astropy/utils/console.py | {
"start": 22540,
"end": 26612
} | class ____:
"""
A class to display a spinner in the terminal.
It is designed to be used with the ``with`` statement::
with Spinner("Reticulating splines", "green") as s:
for item in enumerate(items):
s.update()
"""
_default_unicode_chars = "◓◑◒◐"
_default_ascii_chars = "-/|\\"
def __init__(self, msg, color="default", file=None, step=1, chars=None):
"""
Parameters
----------
msg : str
The message to print
color : str, optional
An ANSI terminal color name. Must be one of: black, red,
green, brown, blue, magenta, cyan, lightgrey, default,
darkgrey, lightred, lightgreen, yellow, lightblue,
lightmagenta, lightcyan, white.
file : :term:`file-like (writeable)`, optional
The file to write the spinner to. Defaults to
`sys.stdout`. If ``file`` is not a tty (as determined by
calling its `isatty` member, if any, or special case hacks
to detect the IPython console), the spinner will be
completely silent.
step : int, optional
Only update the spinner every *step* steps
chars : str, optional
The character sequence to use for the spinner
"""
if file is None:
file = sys.stdout
self._msg = msg
self._color = color
self._file = file
self._step = step
if chars is None:
if conf.unicode_output:
chars = self._default_unicode_chars
else:
chars = self._default_ascii_chars
self._chars = chars
self._silent = not isatty(file)
if self._silent:
self._iter = self._silent_iterator()
else:
self._iter = self._iterator()
def _iterator(self):
chars = self._chars
index = 0
file = self._file
write = file.write
flush = file.flush
try_fallback = True
terminal_width = get_terminal_size().columns
if len(self._msg) > terminal_width:
message = self._msg[: terminal_width - 8] + " ..."
else:
message = self._msg
while True:
write("\r")
color_print(message, self._color, file=file, end="")
write(" ")
try:
if try_fallback:
write = _write_with_fallback(chars[index], write, file)
else:
write(chars[index])
except UnicodeError:
# If even _write_with_fallback failed for any reason just give
# up on trying to use the unicode characters
chars = self._default_ascii_chars
write(chars[index])
try_fallback = False # No good will come of using this again
flush()
yield
for _ in range(self._step):
yield
index = (index + 1) % len(chars)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
file = self._file
write = file.write
flush = file.flush
if not self._silent:
write("\r")
color_print(self._msg, self._color, file=file, end="")
if exc_type is None:
color_print(" [Done]", "green", file=file)
else:
color_print(" [Failed]", "red", file=file)
flush()
def __iter__(self):
return self
def __next__(self):
next(self._iter)
def update(self, value=None):
"""Update the spin wheel in the terminal.
Parameters
----------
value : int, optional
Ignored (present just for compatibility with `ProgressBar.update`).
"""
next(self)
def _silent_iterator(self):
color_print(self._msg, self._color, file=self._file, end="")
self._file.flush()
while True:
yield
| Spinner |
python | django__django | django/core/files/uploadedfile.py | {
"start": 1848,
"end": 2747
} | class ____(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
_, ext = os.path.splitext(name)
file = tempfile.NamedTemporaryFile(
suffix=".upload" + ext, dir=settings.FILE_UPLOAD_TEMP_DIR
)
super().__init__(file, name, content_type, size, charset, content_type_extra)
def temporary_file_path(self):
"""Return the full path of this file."""
return self.file.name
def close(self):
try:
return self.file.close()
except FileNotFoundError:
# The file was moved or deleted before the tempfile could unlink
# it. Still sets self.file.close_called and calls
# self.file.file.close() before the exception.
pass
| TemporaryUploadedFile |
python | numpy__numpy | numpy/_typing/_nbit_base.py | {
"start": 2962,
"end": 3058
} | class ____(_16Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues]
pass
| _8Bit |
python | huggingface__transformers | tests/models/x_clip/test_modeling_x_clip.py | {
"start": 1526,
"end": 4885
} | class ____:
def __init__(
self,
parent,
batch_size=8,
image_size=30,
patch_size=2,
num_channels=3,
num_frames=8, # important; the batch size * time must be divisible by the number of frames
is_training=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
mit_hidden_size=64,
dropout=0.1,
attention_dropout=0.1,
initializer_range=0.02,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_frames = num_frames
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.mit_hidden_size = mit_hidden_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[self.batch_size * self.num_frames, self.num_channels, self.image_size, self.image_size]
)
config = self.get_config()
return config, pixel_values
def get_config(self):
return XCLIPVisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
num_frames=self.num_frames,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
mit_hidden_size=self.mit_hidden_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def create_and_check_model(self, config, pixel_values):
model = XCLIPVisionModel(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(pixel_values)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
image_size = (self.image_size, self.image_size)
patch_size = (self.patch_size, self.patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size * self.num_frames, num_patches + 1, self.hidden_size)
)
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_frames, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| XCLIPVisionModelTester |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/__init__.py | {
"start": 12167,
"end": 17706
} | class ____(Generic[T_GeneratedContext]):
"""Utility class that wraps an event generator function, that also yields a single instance of
a typed object. All events yielded before the typed object are yielded through the method
`generate_setup_events` and all events yielded after the typed object are yielded through the
method `generate_teardown_events`.
This is used to help replace the context managers used in pipeline initialization with
generators so that we can begin emitting initialization events AND construct a pipeline context
object, while managing explicit setup/teardown.
This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
get the typed object.
"""
def __init__(
self,
generator: Iterator[Union["DagsterEvent", T_GeneratedContext]],
object_cls: type[T_GeneratedContext],
require_object: Optional[bool] = True,
):
self.generator = check.generator(generator)
self.object_cls: type[T_GeneratedContext] = check.class_param(object_cls, "object_cls")
self.require_object = check.bool_param(require_object, "require_object")
self.object: Optional[T_GeneratedContext] = None
self.did_setup = False
self.did_teardown = False
def generate_setup_events(self) -> Iterator["DagsterEvent"]:
self.did_setup = True
try:
while self.object is None:
obj = next(self.generator)
if isinstance(obj, self.object_cls):
self.object = obj
else:
yield obj
except StopIteration:
if self.require_object:
check.inst_param(
self.object,
"self.object",
self.object_cls,
f"generator never yielded object of type {self.object_cls.__name__}",
)
def get_object(self) -> T_GeneratedContext:
if not self.did_setup:
check.failed("Called `get_object` before `generate_setup_events`")
return cast("T_GeneratedContext", self.object)
def generate_teardown_events(self) -> Iterator["DagsterEvent"]:
self.did_teardown = True
if self.object:
yield from self.generator
def is_enum_value(value: object) -> bool:
return False if value is None else issubclass(value.__class__, Enum)
def git_repository_root() -> str:
return subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("utf-8").strip()
def segfault() -> None:
"""Reliable cross-Python version segfault.
https://bugs.python.org/issue1215#msg143236
"""
import ctypes
ctypes.string_at(0)
def is_port_in_use(host, port) -> bool:
# Similar to the socket options that uvicorn uses to bind ports:
# https://github.com/encode/uvicorn/blob/62f19c1c39929c84968712c371c9b7b96a041dec/uvicorn/config.py#L565-L566
sock = socket.socket(family=socket.AF_INET)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind((host, port))
return False
except OSError as e:
return e.errno == errno.EADDRINUSE
finally:
sock.close()
@contextlib.contextmanager
def alter_sys_path(to_add: Sequence[str], to_remove: Sequence[str]) -> Iterator[None]:
to_restore = [path for path in sys.path]
# remove paths
for path in to_remove:
if path in sys.path:
sys.path.remove(path)
# add paths
for path in to_add:
sys.path.insert(0, path)
try:
yield
finally:
sys.path = to_restore
@contextlib.contextmanager
def restore_sys_modules() -> Iterator[None]:
sys_modules = {k: v for k, v in sys.modules.items()}
try:
yield
finally:
to_delete = set(sys.modules) - set(sys_modules)
for key in to_delete:
del sys.modules[key]
def process_is_alive(pid: int) -> bool:
if seven.IS_WINDOWS:
import psutil
return psutil.pid_exists(pid=pid)
# https://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid-in-python
if pid < 0:
return False
if pid == 0:
# According to "man 2 kill" PID 0 refers to every process
# in the process group of the calling process.
# On certain systems 0 is a valid PID but we have no way
# to know that in a portable fashion.
raise ValueError("invalid PID 0")
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
# ESRCH == No such process
return False
elif err.errno == errno.EPERM:
# EPERM clearly means there's a process to deny access to
return True
else:
# According to "man 2 kill" possible error values are
# (EINVAL, EPERM, ESRCH)
raise
else:
return True
def compose(*args: Callable[[object], object]) -> Callable[[object], object]:
"""Compose python functions args such that compose(f, g)(x) is equivalent to f(g(x)).""" # noqa: D402
# reduce using functional composition over all the arguments, with the identity function as
# initializer
return functools.reduce(lambda f, g: lambda x: f(g(x)), args, lambda x: x)
def dict_without_keys(ddict: Mapping[K, V], *keys: K) -> dict[K, V]:
return {key: value for key, value in ddict.items() if key not in set(keys)}
| EventGenerationManager |
python | walkccc__LeetCode | solutions/115. Distinct Subsequences/115.py | {
"start": 0,
"end": 406
} | class ____:
def numDistinct(self, s: str, t: str) -> int:
m = len(s)
n = len(t)
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
dp[i][0] = 1
for i in range(1, m + 1):
for j in range(1, n + 1):
if s[i - 1] == t[j - 1]:
dp[i][j] = dp[i - 1][j - 1] + dp[i - 1][j]
else:
dp[i][j] = dp[i - 1][j]
return dp[m][n]
| Solution |
python | openai__openai-python | src/openai/types/chat/chat_completion_message_custom_tool_call.py | {
"start": 395,
"end": 643
} | class ____(BaseModel):
id: str
"""The ID of the tool call."""
custom: Custom
"""The custom tool that the model called."""
type: Literal["custom"]
"""The type of the tool. Always `custom`."""
| ChatCompletionMessageCustomToolCall |
python | kamyu104__LeetCode-Solutions | Python/guess-the-majority-in-a-hidden-array.py | {
"start": 271,
"end": 1204
} | class ____(object):
def guessMajority(self, reader):
"""
:type reader: ArrayReader
:rtype: integer
"""
count_a, count_b, idx_b = 1, 0, None
value_0_1_2_3 = reader.query(0, 1, 2, 3)
for i in reversed(xrange(4, reader.length())):
value_0_1_2_i = reader.query(0, 1, 2, i)
if value_0_1_2_i == value_0_1_2_3: # nums[i] == nums[3]
count_a = count_a+1
else:
count_b, idx_b = count_b+1, i
value_0_1_2_4 = value_0_1_2_i
for i in xrange(3):
value_a_b_3_4 = reader.query(*[v for v in [0, 1, 2, 3, 4] if v != i])
if value_a_b_3_4 == value_0_1_2_4: # nums[i] == nums[3]
count_a = count_a+1
else:
count_b, idx_b = count_b+1, i
if count_a == count_b:
return -1
return 3 if count_a > count_b else idx_b
| Solution |
python | lepture__authlib | tests/flask/test_oauth1/oauth1_server.py | {
"start": 1064,
"end": 1691
} | class ____(ClientMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
client_id = db.Column(db.String(48), index=True)
client_secret = db.Column(db.String(120), nullable=False)
default_redirect_uri = db.Column(db.Text, nullable=False, default="")
user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"))
user = db.relationship("User")
def get_default_redirect_uri(self):
return self.default_redirect_uri
def get_client_secret(self):
return self.client_secret
def get_rsa_public_key(self):
return read_file_path("rsa_public.pem")
| Client |
python | numpy__numpy | numpy/distutils/command/develop.py | {
"start": 246,
"end": 575
} | class ____(old_develop):
__doc__ = old_develop.__doc__
def install_for_development(self):
# Build sources in-place, too.
self.reinitialize_command('build_src', inplace=1)
# Make sure scripts are built.
self.run_command('build_scripts')
old_develop.install_for_development(self)
| develop |
python | huggingface__transformers | src/transformers/models/auto/configuration_auto.py | {
"start": 44579,
"end": 54692
} | class ____:
r"""
This is a generic configuration class that will be instantiated as one of the configuration classes of the library
when created with the [`~AutoConfig.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self) -> None:
raise OSError(
"AutoConfig is designed to be instantiated "
"using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
def for_model(cls, model_type: str, *args, **kwargs) -> PreTrainedConfig:
if model_type in CONFIG_MAPPING:
config_class = CONFIG_MAPPING[model_type]
return config_class(*args, **kwargs)
raise ValueError(
f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}"
)
@classmethod
@replace_list_option_in_docstrings()
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike[str]], **kwargs):
r"""
Instantiate one of the configuration classes of the library from a pretrained model configuration.
The configuration class to instantiate is selected based on the `model_type` property of the config object that
is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
Can be either:
- A string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- A path to a *directory* containing a configuration file saved using the
[`~PreTrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
e.g., `./my_model_directory/`.
- A path or url to a saved configuration JSON *file*, e.g.,
`./my_model_directory/configuration.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download the model weights and configuration files and override the
cached versions if they exist.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final configuration object.
If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
part of `kwargs` which has not been used to update `config` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs(additional keyword arguments, *optional*):
The values in kwargs of any keys which are configuration attributes will be used to override the loaded
values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples:
```python
>>> from transformers import AutoConfig
>>> # Download configuration from huggingface.co and cache.
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
>>> # Download configuration from huggingface.co (user-uploaded) and cache.
>>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased")
>>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*).
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/")
>>> # Load a specific configuration file.
>>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json")
>>> # Change some config attributes when loading a pretrained config.
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
>>> config.output_attentions
True
>>> config, unused_kwargs = AutoConfig.from_pretrained(
... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
... )
>>> config.output_attentions
True
>>> unused_kwargs
{'foo': False}
```
"""
kwargs["_from_auto"] = True
kwargs["name_or_path"] = pretrained_model_name_or_path
trust_remote_code = kwargs.pop("trust_remote_code", None)
code_revision = kwargs.pop("code_revision", None)
config_dict, unused_kwargs = PreTrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]
has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING
if has_remote_code:
class_ref = config_dict["auto_map"]["AutoConfig"]
if "--" in class_ref:
upstream_repo = class_ref.split("--")[0]
else:
upstream_repo = None
trust_remote_code = resolve_trust_remote_code(
trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code, upstream_repo
)
if has_remote_code and trust_remote_code:
config_class = get_class_from_dynamic_module(
class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs
)
config_class.register_for_auto_class()
return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
elif "model_type" in config_dict:
# Apply heuristic: if model_type is mistral but layer_types is present, treat as ministral
if config_dict["model_type"] == "mistral" and "layer_types" in config_dict:
logger.info(
"Detected mistral model with layer_types, treating as ministral for alternating attention compatibility. "
)
config_dict["model_type"] = "ministral"
try:
config_class = CONFIG_MAPPING[config_dict["model_type"]]
except KeyError:
raise ValueError(
f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` "
"but Transformers does not recognize this architecture. This could be because of an "
"issue with the checkpoint, or because your version of Transformers is out of date.\n\n"
"You can update Transformers with the command `pip install --upgrade transformers`. If this "
"does not work, and the checkpoint is very new, then there may not be a release version "
"that supports this model yet. In this case, you can get the most up-to-date code by installing "
"Transformers from source with the command "
"`pip install git+https://github.com/huggingface/transformers.git`"
)
return config_class.from_dict(config_dict, **unused_kwargs)
else:
# Fallback: use pattern matching on the string.
# We go from longer names to shorter names to catch roberta before bert (for instance)
for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True):
if pattern in str(pretrained_model_name_or_path):
return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs)
raise ValueError(
f"Unrecognized model in {pretrained_model_name_or_path}. "
f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings "
f"in its name: {', '.join(CONFIG_MAPPING.keys())}"
)
@staticmethod
def register(model_type, config, exist_ok=False) -> None:
"""
Register a new configuration for this class.
Args:
model_type (`str`): The model type like "bert" or "gpt".
config ([`PreTrainedConfig`]): The config to register.
"""
if issubclass(config, PreTrainedConfig) and config.model_type != model_type:
raise ValueError(
"The config you are passing has a `model_type` attribute that is not consistent with the model type "
f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they "
"match!"
)
CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)
__all__ = ["CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"]
| AutoConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/override1.py | {
"start": 245,
"end": 305
} | class ____:
def method1(self) -> None:
pass
| ClassA |
python | celery__celery | t/integration/test_tasks.py | {
"start": 24615,
"end": 26212
} | class ____:
@pytest.fixture()
def manager(self, manager):
if not manager.app.conf.result_backend.startswith('redis'):
raise pytest.skip('Requires redis result backend.')
return manager
def test_ignoring_result_no_subscriptions(self, manager):
channels_before_test = get_active_redis_channels()
result = add_ignore_result.delay(1, 2)
assert result.ignored is True
new_channels = [channel for channel in get_active_redis_channels() if channel not in channels_before_test]
assert new_channels == []
@flaky
def test_asyncresult_forget_cancels_subscription(self, manager):
channels_before_test = get_active_redis_channels()
result = add.delay(1, 2)
assert set(get_active_redis_channels()) == {
f"celery-task-meta-{result.id}".encode(), *channels_before_test
}
result.forget()
new_channels = [channel for channel in get_active_redis_channels() if channel not in channels_before_test]
assert new_channels == []
@flaky
def test_asyncresult_get_cancels_subscription(self, manager):
channels_before_test = get_active_redis_channels()
result = add.delay(1, 2)
assert set(get_active_redis_channels()) == {
f"celery-task-meta-{result.id}".encode(), *channels_before_test
}
assert result.get(timeout=3) == 3
new_channels = [channel for channel in get_active_redis_channels() if channel not in channels_before_test]
assert new_channels == []
| test_task_redis_result_backend |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/lambda4.py | {
"start": 750,
"end": 824
} | class ____(Protocol):
def __call__(self, p0: str) -> bool: ...
| Callable2 |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/assets.py | {
"start": 3056,
"end": 3369
} | class ____(StrictBaseModel):
"""DAGRun serializer for asset responses."""
run_id: str
dag_id: str
logical_date: datetime | None
start_date: datetime
end_date: datetime | None
state: str
data_interval_start: datetime | None
data_interval_end: datetime | None
| DagRunAssetReference |
python | tornadoweb__tornado | tornado/auth.py | {
"start": 33477,
"end": 39209
} | class ____(OAuth2Mixin):
"""Google authentication using OAuth2.
In order to use, register your application with Google and copy the
relevant parameters to your application settings.
* Go to the Google Dev Console at http://console.developers.google.com
* Select a project, or create a new one.
* Depending on permissions required, you may need to set your app to
"testing" mode and add your account as a test user, or go through
a verfication process. You may also need to use the "Enable
APIs and Services" command to enable specific services.
* In the sidebar on the left, select Credentials.
* Click CREATE CREDENTIALS and click OAuth client ID.
* Under Application type, select Web application.
* Name OAuth 2.0 client and click Create.
* Copy the "Client secret" and "Client ID" to the application settings as
``{"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}``
* You must register the ``redirect_uri`` you plan to use with this class
on the Credentials page.
.. versionadded:: 3.2
"""
_OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/v2/auth"
_OAUTH_ACCESS_TOKEN_URL = "https://www.googleapis.com/oauth2/v4/token"
_OAUTH_USERINFO_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
_OAUTH_NO_CALLBACKS = False
_OAUTH_SETTINGS_KEY = "google_oauth"
def get_google_oauth_settings(self) -> Dict[str, str]:
"""Return the Google OAuth 2.0 credentials that you created with
[Google Cloud
Platform](https://console.cloud.google.com/apis/credentials). The dict
format is::
{
"key": "your_client_id", "secret": "your_client_secret"
}
If your credentials are stored differently (e.g. in a db) you can
override this method for custom provision.
"""
handler = cast(RequestHandler, self)
return handler.settings[self._OAUTH_SETTINGS_KEY]
async def get_authenticated_user(
self,
redirect_uri: str,
code: str,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
) -> Dict[str, Any]:
"""Handles the login for the Google user, returning an access token.
The result is a dictionary containing an ``access_token`` field
([among others](https://developers.google.com/identity/protocols/OAuth2WebServer#handlingtheresponse)).
Unlike other ``get_authenticated_user`` methods in this package,
this method does not return any additional information about the user.
The returned access token can be used with `OAuth2Mixin.oauth2_request`
to request additional information (perhaps from
``https://www.googleapis.com/oauth2/v2/userinfo``)
Example usage:
.. testsetup::
import urllib
.. testcode::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
async def get(self):
# Google requires an exact match for redirect_uri, so it's
# best to get it from your app configuration instead of from
# self.request.full_uri().
redirect_uri = urllib.parse.urljoin(self.application.settings['redirect_base_uri'],
self.reverse_url('google_oauth'))
async def get(self):
if self.get_argument('code', False):
access = await self.get_authenticated_user(
redirect_uri=redirect_uri,
code=self.get_argument('code'))
user = await self.oauth2_request(
"https://www.googleapis.com/oauth2/v1/userinfo",
access_token=access["access_token"])
# Save the user and access token. For example:
user_cookie = dict(id=user["id"], access_token=access["access_token"])
self.set_signed_cookie("user", json.dumps(user_cookie))
self.redirect("/")
else:
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.get_google_oauth_settings()['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. versionchanged:: 6.0
The ``callback`` argument was removed. Use the returned awaitable object instead.
""" # noqa: E501
if client_id is None or client_secret is None:
settings = self.get_google_oauth_settings()
if client_id is None:
client_id = settings["key"]
if client_secret is None:
client_secret = settings["secret"]
http = self.get_auth_http_client()
body = urllib.parse.urlencode(
{
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
"grant_type": "authorization_code",
}
)
response = await http.fetch(
self._OAUTH_ACCESS_TOKEN_URL,
method="POST",
headers={"Content-Type": "application/x-www-form-urlencoded"},
body=body,
)
return escape.json_decode(response.body)
| GoogleOAuth2Mixin |
python | joke2k__faker | faker/providers/python/en_US/__init__.py | {
"start": 64,
"end": 125
} | class ____(PythonProvider): # pragma: no cover
pass
| Provider |
python | python-excel__xlrd | xlrd/sheet.py | {
"start": 95597,
"end": 95638
} | class ____(BaseObject):
pass
| MSODrawing |
python | walkccc__LeetCode | solutions/2075. Decode the Slanted Ciphertext/2075.py | {
"start": 0,
"end": 482
} | class ____:
def decodeCiphertext(self, encodedText: str, rows: int) -> str:
n = len(encodedText)
cols = n // rows
ans = []
matrix = [[' '] * cols for _ in range(rows)]
for i in range(rows):
for j in range(cols):
matrix[i][j] = encodedText[i * cols + j]
for col in range(cols):
i = 0
j = col
while i < rows and j < cols:
ans.append(matrix[i][j])
i += 1
j += 1
return ''.join(ans).rstrip()
| Solution |
python | astropy__astropy | astropy/extern/configobj/configobj.py | {
"start": 6459,
"end": 6716
} | class ____(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
| MissingInterpolationOption |
python | ray-project__ray | release/ray_release/tests/test_cluster_manager.py | {
"start": 26571,
"end": 30477
} | class ____(MinimalSessionManagerTest):
cls = FullClusterManager
def testSessionStartCreationError(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = _fail
with self.assertRaises(ClusterCreationError):
self.cluster_manager.start_cluster()
def testSessionStartStartupError(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = _fail
with self.assertRaises(ClusterStartupError):
self.cluster_manager.start_cluster()
@patch("time.sleep", lambda *a, **kw: None)
def testSessionStartStartupTimeout(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = APIDict(
result=APIDict(id="cop_id", completed=False)
)
with freeze_time() as frozen_time, self.assertRaises(ClusterStartupTimeout):
self.sdk.returns["get_cluster_operation"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(completed=False)),
after=APIDict(result=APIDict(completed=True)),
)
# Timeout before startup finishes
self.cluster_manager.start_cluster(timeout=200)
@patch("time.sleep", lambda *a, **kw: None)
def testSessionStartStartupFailed(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = APIDict(
result=APIDict(id="cop_id", completed=False)
)
with freeze_time() as frozen_time, self.assertRaises(ClusterStartupFailed):
frozen_time.tick(delta=0.1)
self.sdk.returns["get_cluster_operation"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(completed=False)),
after=APIDict(result=APIDict(completed=True)),
)
self.sdk.returns["get_cluster"] = APIDict(
result=APIDict(state="Terminated")
)
# Timeout is long enough
self.cluster_manager.start_cluster(timeout=400)
@patch("time.sleep", lambda *a, **kw: None)
def testSessionStartStartupSuccess(self):
self.cluster_manager.cluster_env_id = "correct"
self.cluster_manager.cluster_compute_id = "correct"
self.sdk.returns["create_cluster"] = APIDict(result=APIDict(id="success"))
self.sdk.returns["start_cluster"] = APIDict(
result=APIDict(id="cop_id", completed=False)
)
with freeze_time() as frozen_time:
frozen_time.tick(delta=0.1)
self.sdk.returns["get_cluster_operation"] = _DelayedResponse(
lambda: frozen_time.tick(delta=10),
finish_after=300,
before=APIDict(result=APIDict(completed=False)),
after=APIDict(result=APIDict(completed=True)),
)
self.sdk.returns["get_cluster"] = APIDict(result=APIDict(state="Running"))
# Timeout is long enough
self.cluster_manager.start_cluster(timeout=400)
@unittest.skipUnless(
os.environ.get("RELEASE_UNIT_TEST_NO_ANYSCALE", "0") == "1",
reason="RELEASE_UNIT_TEST_NO_ANYSCALE is set to 1",
)
| FullSessionManagerTest |
python | allegroai__clearml | clearml/backend_interface/setupuploadmixin.py | {
"start": 236,
"end": 6733
} | class ____(object):
log = abstractproperty()
storage_uri = abstractproperty()
def setup_upload(
self,
bucket_name: str,
host: Optional[str] = None,
access_key: Optional[str] = None,
secret_key: Optional[str] = None,
multipart: bool = True,
https: bool = True,
region: Optional[str] = None,
verify: bool = True,
) -> None:
"""
(Deprecated) Setup upload options. Only S3 is supported.
Please note that this function is deprecated. Use `setup_aws_upload`, `setup_gcp_upload` or
`setup_azure_upload` to setup the upload options for the corresponding cloud.
:param bucket_name: AWS bucket name
:param host: Hostname. Only required in case a Non-AWS S3 solution such as a local Minio server is used)
:param access_key: AWS access key. If not provided, we'll attempt to obtain the key from the
configuration file (bucket-specific, than global)
:param secret_key: AWS secret key. If not provided, we'll attempt to obtain the secret from the
configuration file (bucket-specific, than global)
:param multipart: Server supports multipart. Only required when using a Non-AWS S3 solution that doesn't support
multipart.
:param https: Server supports HTTPS. Only required when using a Non-AWS S3 solution that only supports HTTPS.
:param region: Bucket region. Required if the bucket doesn't reside in the default region (us-east-1)
:param verify: Whether or not to verify SSL certificates.
Only required when using a Non-AWS S3 solution that only supports HTTPS with self-signed certificate.
"""
warnings.warn(
"Warning: 'Task.setup_upload' is deprecated. "
"Use 'setup_aws_upload', 'setup_gcp_upload' or 'setup_azure_upload' instead",
DeprecationWarning,
)
self.setup_aws_upload(
bucket_name,
host=host,
key=access_key,
secret=secret_key,
region=region,
multipart=multipart,
secure=https,
verify=verify,
)
def setup_aws_upload(
self,
bucket: str, # str
subdir: Optional[str] = None, # Optional[str]
host: Optional[str] = None, # Optional[str]
key: Optional[str] = None, # Optional[str]
secret: Optional[str] = None, # Optional[str]
token: Optional[str] = None, # Optional[str]
region: Optional[str] = None, # Optional[str]
multipart: bool = True, # bool
secure: bool = True, # bool
verify: bool = True, # bool
profile: Optional[str] = None, # Optional[str]
) -> None:
"""
Setup S3 upload options.
:param bucket: AWS bucket name
:param subdir: Subdirectory in the AWS bucket
:param host: Hostname. Only required in case a Non-AWS S3 solution such as a local Minio server is used)
:param key: AWS access key. If not provided, we'll attempt to obtain the key from the
configuration file (bucket-specific, than global)
:param secret: AWS secret key. If not provided, we'll attempt to obtain the secret from the
configuration file (bucket-specific, than global)
:param token: AWS 2FA token
:param region: Bucket region. Required if the bucket doesn't reside in the default region (us-east-1)
:param multipart: Server supports multipart. Only required when using a Non-AWS S3 solution that doesn't support
multipart.
:param secure: Server supports HTTPS. Only required when using a Non-AWS S3 solution that only supports HTTPS.
:param verify: Whether or not to verify SSL certificates.
:param profile: The AWS profile
Only required when using a Non-AWS S3 solution that only supports HTTPS with self-signed certificate.
"""
self._bucket_config = S3BucketConfig( # noqa
bucket=bucket,
subdir=subdir,
host=host,
key=key,
secret=secret,
token=token,
region=region,
multipart=multipart,
secure=secure,
verify=verify,
profile=profile,
)
StorageHelper.add_aws_configuration(self._bucket_config, log=self.log)
self.storage_uri = StorageHelper.get_aws_storage_uri_from_config(self._bucket_config)
def setup_gcp_upload(
self,
bucket: str,
subdir: str = "",
project: Optional[str] = None,
credentials_json: Optional[str] = None,
pool_connections: Optional[int] = None,
pool_maxsize: Optional[int] = None,
) -> None:
"""
Setup GCP upload options.
:param bucket: Bucket to upload to
:param subdir: Subdir in bucket to upload to
:param project: Project the bucket belongs to
:param credentials_json: Path to the JSON file that contains the credentials
:param pool_connections: The number of urllib3 connection pools to cache
:param pool_maxsize: The maximum number of connections to save in the pool
"""
self._bucket_config = GSBucketConfig( # noqa
bucket,
subdir=subdir,
project=project,
credentials_json=credentials_json,
pool_connections=pool_connections,
pool_maxsize=pool_maxsize,
)
StorageHelper.add_gcp_configuration(self._bucket_config, log=self.log)
self.storage_uri = StorageHelper.get_gcp_storage_uri_from_config(self._bucket_config)
def setup_azure_upload(
self,
account_name: str,
account_key: str,
container_name: Optional[str] = None,
) -> None:
"""
Setup Azure upload options.
:param account_name: Name of the account
:param account_key: Secret key used to authenticate the account
:param container_name: The name of the blob container to upload to
"""
self._bucket_config = AzureContainerConfig( # noqa
account_name=account_name,
account_key=account_key,
container_name=container_name,
)
StorageHelper.add_azure_configuration(self._bucket_config, log=self.log)
self.storage_uri = StorageHelper.get_azure_storage_uri_from_config(self._bucket_config)
| SetupUploadMixin |
python | kubernetes-client__python | kubernetes/client/models/v1_cluster_role_list.py | {
"start": 383,
"end": 6920
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1ClusterRole]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1ClusterRoleList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1ClusterRoleList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ClusterRoleList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ClusterRoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ClusterRoleList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1ClusterRoleList. # noqa: E501
Items is a list of ClusterRoles # noqa: E501
:return: The items of this V1ClusterRoleList. # noqa: E501
:rtype: list[V1ClusterRole]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1ClusterRoleList.
Items is a list of ClusterRoles # noqa: E501
:param items: The items of this V1ClusterRoleList. # noqa: E501
:type: list[V1ClusterRole]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1ClusterRoleList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ClusterRoleList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ClusterRoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ClusterRoleList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ClusterRoleList. # noqa: E501
:return: The metadata of this V1ClusterRoleList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ClusterRoleList.
:param metadata: The metadata of this V1ClusterRoleList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ClusterRoleList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ClusterRoleList):
return True
return self.to_dict() != other.to_dict()
| V1ClusterRoleList |
python | numba__numba | numba/cuda/tests/cudapy/test_atomics.py | {
"start": 14529,
"end": 58449
} | class ____(CUDATestCase):
def setUp(self):
super().setUp()
np.random.seed(0)
def test_atomic_add(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32)
ary_wrap = ary.copy()
orig = ary.copy()
cuda_atomic_add = cuda.jit('void(uint32[:])')(atomic_add)
cuda_atomic_add[1, 32](ary)
cuda_atomic_add_wrap = cuda.jit('void(uint32[:])')(atomic_add_wrap)
cuda_atomic_add_wrap[1, 32](ary_wrap)
gold = np.zeros(32, dtype=np.uint32)
for i in range(orig.size):
gold[orig[i]] += 1
self.assertTrue(np.all(ary == gold))
self.assertTrue(np.all(ary_wrap == gold))
def test_atomic_add2(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
ary_wrap = ary.copy()
orig = ary.copy()
cuda_atomic_add2 = cuda.jit('void(uint32[:,:])')(atomic_add2)
cuda_atomic_add2[1, (4, 8)](ary)
cuda_atomic_add2_wrap = cuda.jit('void(uint32[:,:])')(atomic_add2_wrap)
cuda_atomic_add2_wrap[1, (4, 8)](ary_wrap)
self.assertTrue(np.all(ary == orig + 1))
self.assertTrue(np.all(ary_wrap == orig + 1))
def test_atomic_add3(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_add3 = cuda.jit('void(uint32[:,:])')(atomic_add3)
cuda_atomic_add3[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig + 1))
def test_atomic_add_float(self):
ary = np.random.randint(0, 32, size=32).astype(np.float32)
ary_wrap = ary.copy()
orig = ary.copy().astype(np.intp)
cuda_atomic_add_float = cuda.jit('void(float32[:])')(atomic_add_float)
cuda_atomic_add_float[1, 32](ary)
add_float_wrap = cuda.jit('void(float32[:])')(atomic_add_float_wrap)
add_float_wrap[1, 32](ary_wrap)
gold = np.zeros(32, dtype=np.uint32)
for i in range(orig.size):
gold[orig[i]] += 1.0
self.assertTrue(np.all(ary == gold))
self.assertTrue(np.all(ary_wrap == gold))
def test_atomic_add_float_2(self):
ary = np.random.randint(0, 32, size=32).astype(np.float32).reshape(4, 8)
ary_wrap = ary.copy()
orig = ary.copy()
cuda_atomic_add2 = cuda.jit('void(float32[:,:])')(atomic_add_float_2)
cuda_atomic_add2[1, (4, 8)](ary)
cuda_func_wrap = cuda.jit('void(float32[:,:])')(atomic_add_float_2_wrap)
cuda_func_wrap[1, (4, 8)](ary_wrap)
self.assertTrue(np.all(ary == orig + 1))
self.assertTrue(np.all(ary_wrap == orig + 1))
def test_atomic_add_float_3(self):
ary = np.random.randint(0, 32, size=32).astype(np.float32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_add3 = cuda.jit('void(float32[:,:])')(atomic_add_float_3)
cuda_atomic_add3[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig + 1))
def assertCorrectFloat64Atomics(self, kernel, shared=True):
if config.ENABLE_CUDASIM:
return
# Use the first (and only) definition
asm = next(iter(kernel.inspect_asm().values()))
if cc_X_or_above(6, 0):
if cuda.runtime.get_version() > (12, 1):
# CUDA 12.2 and above generate a more optimized reduction
# instruction, because the result does not need to be
# placed in a register.
inst = 'red'
else:
inst = 'atom'
if shared:
inst = f'{inst}.shared'
self.assertIn(f'{inst}.add.f64', asm)
else:
if shared:
self.assertIn('atom.shared.cas.b64', asm)
else:
self.assertIn('atom.cas.b64', asm)
def test_atomic_add_double(self):
idx = np.random.randint(0, 32, size=32, dtype=np.int64)
ary = np.zeros(32, np.float64)
ary_wrap = ary.copy()
cuda_fn = cuda.jit('void(int64[:], float64[:])')(atomic_add_double)
cuda_fn[1, 32](idx, ary)
wrap_fn = cuda.jit('void(int64[:], float64[:])')(atomic_add_double_wrap)
wrap_fn[1, 32](idx, ary_wrap)
gold = np.zeros(32, dtype=np.uint32)
for i in range(idx.size):
gold[idx[i]] += 1.0
np.testing.assert_equal(ary, gold)
np.testing.assert_equal(ary_wrap, gold)
self.assertCorrectFloat64Atomics(cuda_fn)
self.assertCorrectFloat64Atomics(wrap_fn)
def test_atomic_add_double_2(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
ary_wrap = ary.copy()
orig = ary.copy()
cuda_fn = cuda.jit('void(float64[:,:])')(atomic_add_double_2)
cuda_fn[1, (4, 8)](ary)
cuda_fn_wrap = cuda.jit('void(float64[:,:])')(atomic_add_double_2_wrap)
cuda_fn_wrap[1, (4, 8)](ary_wrap)
np.testing.assert_equal(ary, orig + 1)
np.testing.assert_equal(ary_wrap, orig + 1)
self.assertCorrectFloat64Atomics(cuda_fn)
self.assertCorrectFloat64Atomics(cuda_fn_wrap)
def test_atomic_add_double_3(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(float64[:,:])')(atomic_add_double_3)
cuda_func[1, (4, 8)](ary)
np.testing.assert_equal(ary, orig + 1)
self.assertCorrectFloat64Atomics(cuda_func)
def test_atomic_add_double_global(self):
idx = np.random.randint(0, 32, size=32, dtype=np.int64)
ary = np.zeros(32, np.float64)
ary_wrap = ary.copy()
sig = 'void(int64[:], float64[:])'
cuda_func = cuda.jit(sig)(atomic_add_double_global)
wrap_cuda_func = cuda.jit(sig)(atomic_add_double_global_wrap)
cuda_func[1, 32](idx, ary)
wrap_cuda_func[1, 32](idx, ary_wrap)
gold = np.zeros(32, dtype=np.uint32)
for i in range(idx.size):
gold[idx[i]] += 1.0
np.testing.assert_equal(ary, gold)
np.testing.assert_equal(ary_wrap, gold)
self.assertCorrectFloat64Atomics(cuda_func, shared=False)
self.assertCorrectFloat64Atomics(wrap_cuda_func, shared=False)
def test_atomic_add_double_global_2(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
ary_wrap = ary.copy()
orig = ary.copy()
sig = 'void(float64[:,:])'
cuda_func = cuda.jit(sig)(atomic_add_double_global_2)
wrap_cuda_func = cuda.jit(sig)(atomic_add_double_global_2_wrap)
cuda_func[1, (4, 8)](ary)
wrap_cuda_func[1, (4, 8)](ary_wrap)
np.testing.assert_equal(ary, orig + 1)
np.testing.assert_equal(ary_wrap, orig + 1)
self.assertCorrectFloat64Atomics(cuda_func, shared=False)
self.assertCorrectFloat64Atomics(wrap_cuda_func, shared=False)
def test_atomic_add_double_global_3(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(float64[:,:])')(atomic_add_double_global_3)
cuda_func[1, (4, 8)](ary)
np.testing.assert_equal(ary, orig + 1)
self.assertCorrectFloat64Atomics(cuda_func, shared=False)
def test_atomic_sub(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32)
orig = ary.copy()
cuda_atomic_sub = cuda.jit('void(uint32[:])')(atomic_sub)
cuda_atomic_sub[1, 32](ary)
gold = np.zeros(32, dtype=np.uint32)
for i in range(orig.size):
gold[orig[i]] -= 1
self.assertTrue(np.all(ary == gold))
def test_atomic_sub2(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_sub2 = cuda.jit('void(uint32[:,:])')(atomic_sub2)
cuda_atomic_sub2[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig - 1))
def test_atomic_sub3(self):
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_sub3 = cuda.jit('void(uint32[:,:])')(atomic_sub3)
cuda_atomic_sub3[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig - 1))
def test_atomic_sub_float(self):
ary = np.random.randint(0, 32, size=32).astype(np.float32)
orig = ary.copy().astype(np.intp)
cuda_atomic_sub_float = cuda.jit('void(float32[:])')(atomic_sub_float)
cuda_atomic_sub_float[1, 32](ary)
gold = np.zeros(32, dtype=np.float32)
for i in range(orig.size):
gold[orig[i]] -= 1.0
self.assertTrue(np.all(ary == gold))
def test_atomic_sub_float_2(self):
ary = np.random.randint(0, 32, size=32).astype(np.float32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_sub2 = cuda.jit('void(float32[:,:])')(atomic_sub_float_2)
cuda_atomic_sub2[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig - 1))
def test_atomic_sub_float_3(self):
ary = np.random.randint(0, 32, size=32).astype(np.float32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_sub3 = cuda.jit('void(float32[:,:])')(atomic_sub_float_3)
cuda_atomic_sub3[1, (4, 8)](ary)
self.assertTrue(np.all(ary == orig - 1))
def test_atomic_sub_double(self):
idx = np.random.randint(0, 32, size=32, dtype=np.int64)
ary = np.zeros(32, np.float64)
cuda_func = cuda.jit('void(int64[:], float64[:])')(atomic_sub_double)
cuda_func[1, 32](idx, ary)
gold = np.zeros(32, dtype=np.float64)
for i in range(idx.size):
gold[idx[i]] -= 1.0
np.testing.assert_equal(ary, gold)
def test_atomic_sub_double_2(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(float64[:,:])')(atomic_sub_double_2)
cuda_func[1, (4, 8)](ary)
np.testing.assert_equal(ary, orig - 1)
def test_atomic_sub_double_3(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(float64[:,:])')(atomic_sub_double_3)
cuda_func[1, (4, 8)](ary)
np.testing.assert_equal(ary, orig - 1)
def test_atomic_sub_double_global(self):
idx = np.random.randint(0, 32, size=32, dtype=np.int64)
ary = np.zeros(32, np.float64)
sig = 'void(int64[:], float64[:])'
cuda_func = cuda.jit(sig)(atomic_sub_double_global)
cuda_func[1, 32](idx, ary)
gold = np.zeros(32, dtype=np.float64)
for i in range(idx.size):
gold[idx[i]] -= 1.0
np.testing.assert_equal(ary, gold)
def test_atomic_sub_double_global_2(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(float64[:,:])')(atomic_sub_double_global_2)
cuda_func[1, (4, 8)](ary)
np.testing.assert_equal(ary, orig - 1)
def test_atomic_sub_double_global_3(self):
ary = np.random.randint(0, 32, size=32).astype(np.float64).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(float64[:,:])')(atomic_sub_double_global_3)
cuda_func[1, (4, 8)](ary)
np.testing.assert_equal(ary, orig - 1)
def test_atomic_and(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32)
orig = ary.copy()
cuda_func = cuda.jit('void(uint32[:], uint32)')(atomic_and)
cuda_func[1, 32](ary, rand_const)
gold = ary.copy()
for i in range(orig.size):
gold[orig[i]] &= rand_const
self.assertTrue(np.all(ary == gold))
def test_atomic_and2(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_and2 = cuda.jit('void(uint32[:,:], uint32)')(atomic_and2)
cuda_atomic_and2[1, (4, 8)](ary, rand_const)
self.assertTrue(np.all(ary == orig & rand_const))
def test_atomic_and3(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_and3 = cuda.jit('void(uint32[:,:], uint32)')(atomic_and3)
cuda_atomic_and3[1, (4, 8)](ary, rand_const)
self.assertTrue(np.all(ary == orig & rand_const))
def test_atomic_and_global(self):
rand_const = np.random.randint(500)
idx = np.random.randint(0, 32, size=32, dtype=np.int32)
ary = np.random.randint(0, 32, size=32, dtype=np.int32)
sig = 'void(int32[:], int32[:], int32)'
cuda_func = cuda.jit(sig)(atomic_and_global)
cuda_func[1, 32](idx, ary, rand_const)
gold = ary.copy()
for i in range(idx.size):
gold[idx[i]] &= rand_const
np.testing.assert_equal(ary, gold)
def test_atomic_and_global_2(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(uint32[:,:], uint32)')(atomic_and_global_2)
cuda_func[1, (4, 8)](ary, rand_const)
np.testing.assert_equal(ary, orig & rand_const)
def test_atomic_or(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32)
orig = ary.copy()
cuda_func = cuda.jit('void(uint32[:], uint32)')(atomic_or)
cuda_func[1, 32](ary, rand_const)
gold = np.zeros(32, dtype=np.uint32)
for i in range(orig.size):
gold[orig[i]] |= rand_const
self.assertTrue(np.all(ary == gold))
def test_atomic_or2(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_and2 = cuda.jit('void(uint32[:,:], uint32)')(atomic_or2)
cuda_atomic_and2[1, (4, 8)](ary, rand_const)
self.assertTrue(np.all(ary == orig | rand_const))
def test_atomic_or3(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_and3 = cuda.jit('void(uint32[:,:], uint32)')(atomic_or3)
cuda_atomic_and3[1, (4, 8)](ary, rand_const)
self.assertTrue(np.all(ary == orig | rand_const))
def test_atomic_or_global(self):
rand_const = np.random.randint(500)
idx = np.random.randint(0, 32, size=32, dtype=np.int32)
ary = np.random.randint(0, 32, size=32, dtype=np.int32)
sig = 'void(int32[:], int32[:], int32)'
cuda_func = cuda.jit(sig)(atomic_or_global)
cuda_func[1, 32](idx, ary, rand_const)
gold = ary.copy()
for i in range(idx.size):
gold[idx[i]] |= rand_const
np.testing.assert_equal(ary, gold)
def test_atomic_or_global_2(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(uint32[:,:], uint32)')(atomic_or_global_2)
cuda_func[1, (4, 8)](ary, rand_const)
np.testing.assert_equal(ary, orig | rand_const)
def test_atomic_xor(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32)
orig = ary.copy()
cuda_func = cuda.jit('void(uint32[:], uint32)')(atomic_xor)
cuda_func[1, 32](ary, rand_const)
gold = np.zeros(32, dtype=np.uint32)
for i in range(orig.size):
gold[orig[i]] ^= rand_const
self.assertTrue(np.all(ary == gold))
def test_atomic_xor2(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_xor2 = cuda.jit('void(uint32[:,:], uint32)')(atomic_xor2)
cuda_atomic_xor2[1, (4, 8)](ary, rand_const)
self.assertTrue(np.all(ary == orig ^ rand_const))
def test_atomic_xor3(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_atomic_xor3 = cuda.jit('void(uint32[:,:], uint32)')(atomic_xor3)
cuda_atomic_xor3[1, (4, 8)](ary, rand_const)
self.assertTrue(np.all(ary == orig ^ rand_const))
def test_atomic_xor_global(self):
rand_const = np.random.randint(500)
idx = np.random.randint(0, 32, size=32, dtype=np.int32)
ary = np.random.randint(0, 32, size=32, dtype=np.int32)
gold = ary.copy()
sig = 'void(int32[:], int32[:], int32)'
cuda_func = cuda.jit(sig)(atomic_xor_global)
cuda_func[1, 32](idx, ary, rand_const)
for i in range(idx.size):
gold[idx[i]] ^= rand_const
np.testing.assert_equal(ary, gold)
def test_atomic_xor_global_2(self):
rand_const = np.random.randint(500)
ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
orig = ary.copy()
cuda_func = cuda.jit('void(uint32[:,:], uint32)')(atomic_xor_global_2)
cuda_func[1, (4, 8)](ary, rand_const)
np.testing.assert_equal(ary, orig ^ rand_const)
def inc_dec_1dim_setup(self, dtype):
rconst = np.random.randint(32, dtype=dtype)
rary = np.random.randint(0, 32, size=32).astype(dtype)
ary_idx = np.arange(32, dtype=dtype)
return rconst, rary, ary_idx
def inc_dec_2dim_setup(self, dtype):
rconst = np.random.randint(32, dtype=dtype)
rary = np.random.randint(0, 32, size=32).astype(dtype).reshape(4, 8)
return rconst, rary
def check_inc_index(self, ary, idx, rconst, sig, nblocks, blksize, func):
orig = ary.copy()
cuda_func = cuda.jit(sig)(func)
cuda_func[nblocks, blksize](ary, idx, rconst)
np.testing.assert_equal(ary, np.where(orig >= rconst, 0, orig + 1))
def check_inc_index2(self, ary, idx, rconst, sig, nblocks, blksize, func):
orig = ary.copy()
cuda_func = cuda.jit(sig)(func)
cuda_func[nblocks, blksize](idx, ary, rconst)
np.testing.assert_equal(ary, np.where(orig >= rconst, 0, orig + 1))
def check_inc(self, ary, rconst, sig, nblocks, blksize, func):
orig = ary.copy()
cuda_func = cuda.jit(sig)(func)
cuda_func[nblocks, blksize](ary, rconst)
np.testing.assert_equal(ary, np.where(orig >= rconst, 0, orig + 1))
def test_atomic_inc_32(self):
rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint32)
sig = 'void(uint32[:], uint32[:], uint32)'
self.check_inc_index(ary, idx, rand_const, sig, 1, 32, atomic_inc32)
def test_atomic_inc_64(self):
rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint64)
sig = 'void(uint64[:], uint64[:], uint64)'
self.check_inc_index(ary, idx, rand_const, sig, 1, 32, atomic_inc64)
def test_atomic_inc2_32(self):
rand_const, ary = self.inc_dec_2dim_setup(np.uint32)
sig = 'void(uint32[:,:], uint32)'
self.check_inc(ary, rand_const, sig, 1, (4,8), atomic_inc2_32)
def test_atomic_inc2_64(self):
rand_const, ary = self.inc_dec_2dim_setup(np.uint64)
sig = 'void(uint64[:,:], uint64)'
self.check_inc(ary, rand_const, sig, 1, (4,8), atomic_inc2_64)
def test_atomic_inc3(self):
rand_const, ary = self.inc_dec_2dim_setup(np.uint32)
sig = 'void(uint32[:,:], uint32)'
self.check_inc(ary, rand_const, sig, 1, (4,8), atomic_inc3)
def test_atomic_inc_global_32(self):
rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint32)
sig = 'void(uint32[:], uint32[:], uint32)'
self.check_inc_index2(ary, idx, rand_const, sig, 1, 32,
atomic_inc_global)
def test_atomic_inc_global_64(self):
rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint64)
sig = 'void(uint64[:], uint64[:], uint64)'
self.check_inc_index2(ary, idx, rand_const, sig, 1, 32,
atomic_inc_global)
def test_atomic_inc_global_2_32(self):
rand_const, ary = self.inc_dec_2dim_setup(np.uint32)
sig = 'void(uint32[:,:], uint32)'
self.check_inc(ary, rand_const, sig, 1, (4,8), atomic_inc_global_2)
def test_atomic_inc_global_2_64(self):
rand_const, ary = self.inc_dec_2dim_setup(np.uint64)
sig = 'void(uint64[:,:], uint64)'
self.check_inc(ary, rand_const, sig, 1, (4,8), atomic_inc_global_2)
def check_dec_index(self, ary, idx, rconst, sig, nblocks, blksize, func):
orig = ary.copy()
cuda_func = cuda.jit(sig)(func)
cuda_func[nblocks, blksize](ary, idx, rconst)
np.testing.assert_equal(ary, np.where(orig == 0, rconst,
np.where(orig > rconst,
rconst,
orig - 1)))
def check_dec_index2(self, ary, idx, rconst, sig, nblocks, blksize, func):
orig = ary.copy()
cuda_func = cuda.jit(sig)(func)
cuda_func[nblocks, blksize](idx, ary, rconst)
np.testing.assert_equal(ary, np.where(orig == 0, rconst,
np.where(orig > rconst,
rconst,
orig - 1)))
def check_dec(self, ary, rconst, sig, nblocks, blksize, func):
orig = ary.copy()
cuda_func = cuda.jit(sig)(func)
cuda_func[nblocks, blksize](ary, rconst)
np.testing.assert_equal(ary, np.where(orig == 0, rconst,
np.where(orig > rconst,
rconst,
orig - 1)))
    # cuda.atomic.dec tests, mirroring the inc tests above: 1D indexed,
    # 2D, and *_global kernel variants, for uint32 and uint64.
    def test_atomic_dec_32(self):
        """atomic.dec on a 1D uint32 array, addressed via an index array."""
        rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint32)
        sig = 'void(uint32[:], uint32[:], uint32)'
        self.check_dec_index(ary, idx, rand_const, sig, 1, 32, atomic_dec32)

    def test_atomic_dec_64(self):
        """atomic.dec on a 1D uint64 array, addressed via an index array."""
        rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint64)
        sig = 'void(uint64[:], uint64[:], uint64)'
        self.check_dec_index(ary, idx, rand_const, sig, 1, 32, atomic_dec64)

    def test_atomic_dec2_32(self):
        """atomic.dec on a 2D uint32 array."""
        rand_const, ary = self.inc_dec_2dim_setup(np.uint32)
        sig = 'void(uint32[:,:], uint32)'
        self.check_dec(ary, rand_const, sig, 1, (4,8), atomic_dec2_32)

    def test_atomic_dec2_64(self):
        """atomic.dec on a 2D uint64 array."""
        rand_const, ary = self.inc_dec_2dim_setup(np.uint64)
        sig = 'void(uint64[:,:], uint64)'
        self.check_dec(ary, rand_const, sig, 1, (4,8), atomic_dec2_64)

    def test_atomic_dec3_new(self):
        """Same setup as test_atomic_dec2_32, via the atomic_dec3 kernel."""
        rand_const, ary = self.inc_dec_2dim_setup(np.uint32)
        sig = 'void(uint32[:,:], uint32)'
        self.check_dec(ary, rand_const, sig, 1, (4,8), atomic_dec3)

    def test_atomic_dec_global_32(self):
        """atomic.dec via the atomic_dec_global kernel variant, uint32."""
        rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint32)
        sig = 'void(uint32[:], uint32[:], uint32)'
        self.check_dec_index2(ary, idx, rand_const, sig, 1, 32,
                              atomic_dec_global)

    def test_atomic_dec_global_64(self):
        """atomic.dec via the atomic_dec_global kernel variant, uint64."""
        rand_const, ary, idx = self.inc_dec_1dim_setup(dtype=np.uint64)
        sig = 'void(uint64[:], uint64[:], uint64)'
        self.check_dec_index2(ary, idx, rand_const, sig, 1, 32,
                              atomic_dec_global)

    def test_atomic_dec_global2_32(self):
        """2D atomic.dec via the atomic_dec_global_2 kernel variant, uint32."""
        rand_const, ary = self.inc_dec_2dim_setup(np.uint32)
        sig = 'void(uint32[:,:], uint32)'
        self.check_dec(ary, rand_const, sig, 1, (4,8), atomic_dec_global_2)

    def test_atomic_dec_global2_64(self):
        """2D atomic.dec via the atomic_dec_global_2 kernel variant, uint64."""
        rand_const, ary = self.inc_dec_2dim_setup(np.uint64)
        sig = 'void(uint64[:,:], uint64)'
        self.check_dec(ary, rand_const, sig, 1, (4,8), atomic_dec_global_2)
def test_atomic_exch(self):
rand_const = np.random.randint(50, 100, dtype=np.uint32)
ary = np.random.randint(0, 32, size=32).astype(np.uint32)
idx = np.arange(32, dtype=np.uint32)
cuda_func = cuda.jit('void(uint32[:], uint32[:], uint32)')(atomic_exch)
cuda_func[1, 32](ary, idx, rand_const)
np.testing.assert_equal(ary, rand_const)
    def test_atomic_exch2(self):
        """atomic.exch on a 2D uint32 array: every slot ends up rand_const."""
        rand_const = np.random.randint(50, 100, dtype=np.uint32)
        ary = np.random.randint(0, 32, size=32).astype(np.uint32).reshape(4, 8)
        cuda_func = cuda.jit('void(uint32[:,:], uint32)')(atomic_exch2)
        cuda_func[1, (4, 8)](ary, rand_const)
        np.testing.assert_equal(ary, rand_const)

    def test_atomic_exch3(self):
        """atomic.exch on a 2D uint64 array."""
        rand_const = np.random.randint(50, 100, dtype=np.uint64)
        ary = np.random.randint(0, 32, size=32).astype(np.uint64).reshape(4, 8)
        cuda_func = cuda.jit('void(uint64[:,:], uint64)')(atomic_exch3)
        cuda_func[1, (4, 8)](ary, rand_const)
        np.testing.assert_equal(ary, rand_const)

    def test_atomic_exch_global(self):
        """atomic.exch via the atomic_exch_global kernel variant.

        Note the argument order here is (idx, ary, ...), unlike
        test_atomic_exch above."""
        rand_const = np.random.randint(50, 100, dtype=np.uint32)
        idx = np.arange(32, dtype=np.uint32)
        ary = np.random.randint(0, 32, size=32, dtype=np.uint32)
        sig = 'void(uint32[:], uint32[:], uint32)'
        cuda_func = cuda.jit(sig)(atomic_exch_global)
        cuda_func[1, 32](idx, ary, rand_const)
        np.testing.assert_equal(ary, rand_const)
    def check_atomic_max(self, dtype, lo, hi):
        """Reduce a (32, 32) random array with the atomic_max kernel and
        compare against np.max.

        NOTE(review): ``res`` starts at 0, so this implicitly assumes the
        true maximum of the data is >= 0. With 1024 draws from [lo, hi)
        and hi = 65535 in all current callers this is statistically
        certain, but not strictly guaranteed for signed dtypes.
        """
        vals = np.random.randint(lo, hi, size=(32, 32)).astype(dtype)
        res = np.zeros(1, dtype=vals.dtype)
        # No explicit signature: let dispatch infer it from the arguments.
        cuda_func = cuda.jit(atomic_max)
        cuda_func[32, 32](res, vals)
        gold = np.max(vals)
        np.testing.assert_equal(res, gold)
    # atomic.max over each supported dtype; all delegate to check_atomic_max.
    def test_atomic_max_int32(self):
        self.check_atomic_max(dtype=np.int32, lo=-65535, hi=65535)

    def test_atomic_max_uint32(self):
        self.check_atomic_max(dtype=np.uint32, lo=0, hi=65535)

    def test_atomic_max_int64(self):
        self.check_atomic_max(dtype=np.int64, lo=-65535, hi=65535)

    def test_atomic_max_uint64(self):
        self.check_atomic_max(dtype=np.uint64, lo=0, hi=65535)

    def test_atomic_max_float32(self):
        self.check_atomic_max(dtype=np.float32, lo=-65535, hi=65535)

    def test_atomic_max_double(self):
        self.check_atomic_max(dtype=np.float64, lo=-65535, hi=65535)
    def test_atomic_max_double_normalizedindex(self):
        """atomic.max through the *_normalizedindex kernel variant."""
        vals = np.random.randint(0, 65535, size=(32, 32)).astype(np.float64)
        res = np.zeros(1, np.float64)
        cuda_func = cuda.jit('void(float64[:], float64[:,:])')(
            atomic_max_double_normalizedindex)
        cuda_func[32, 32](res, vals)
        gold = np.max(vals)
        np.testing.assert_equal(res, gold)

    def test_atomic_max_double_oneindex(self):
        """atomic.max through the *_oneindex kernel variant (single block
        of 32 threads over a 1D input)."""
        vals = np.random.randint(0, 128, size=32).astype(np.float64)
        res = np.zeros(1, np.float64)
        cuda_func = cuda.jit('void(float64[:], float64[:])')(
            atomic_max_double_oneindex)
        cuda_func[1, 32](res, vals)
        gold = np.max(vals)
        np.testing.assert_equal(res, gold)
    def check_atomic_min(self, dtype, lo, hi):
        """Reduce a (32, 32) random array with the atomic_min kernel and
        compare against np.min.

        ``res`` starts at 65535; all current callers use hi <= 65535, so
        the data's true minimum is below the initial value and wins.
        """
        vals = np.random.randint(lo, hi, size=(32, 32)).astype(dtype)
        res = np.array([65535], dtype=vals.dtype)
        cuda_func = cuda.jit(atomic_min)
        cuda_func[32, 32](res, vals)
        gold = np.min(vals)
        np.testing.assert_equal(res, gold)
    # atomic.min over each supported dtype; all delegate to check_atomic_min.
    def test_atomic_min_int32(self):
        self.check_atomic_min(dtype=np.int32, lo=-65535, hi=65535)

    def test_atomic_min_uint32(self):
        self.check_atomic_min(dtype=np.uint32, lo=0, hi=65535)

    def test_atomic_min_int64(self):
        self.check_atomic_min(dtype=np.int64, lo=-65535, hi=65535)

    def test_atomic_min_uint64(self):
        self.check_atomic_min(dtype=np.uint64, lo=0, hi=65535)

    def test_atomic_min_float(self):
        self.check_atomic_min(dtype=np.float32, lo=-65535, hi=65535)

    def test_atomic_min_double(self):
        self.check_atomic_min(dtype=np.float64, lo=-65535, hi=65535)

    def test_atomic_min_double_normalizedindex(self):
        """atomic.min through the *_normalizedindex kernel variant."""
        vals = np.random.randint(0, 65535, size=(32, 32)).astype(np.float64)
        # Start above the data range so the true minimum always wins.
        res = np.ones(1, np.float64) * 65535
        cuda_func = cuda.jit('void(float64[:], float64[:,:])')(
            atomic_min_double_normalizedindex)
        cuda_func[32, 32](res, vals)
        gold = np.min(vals)
        np.testing.assert_equal(res, gold)

    def test_atomic_min_double_oneindex(self):
        """atomic.min through the *_oneindex kernel variant."""
        vals = np.random.randint(0, 128, size=32).astype(np.float64)
        res = np.ones(1, np.float64) * 128
        cuda_func = cuda.jit('void(float64[:], float64[:])')(
            atomic_min_double_oneindex)
        cuda_func[1, 32](res, vals)
        gold = np.min(vals)
        np.testing.assert_equal(res, gold)
    # Taken together, _test_atomic_minmax_nan_location and
    # _test_atomic_minmax_nan_val check that NaNs are treated similarly to the
    # way they are in Python / NumPy - that is, {min,max}(a, b) == a if either
    # a or b is a NaN. For the atomics, this means that the max is taken as the
    # value stored in the memory location rather than the value supplied - i.e.
    # for:
    #
    # cuda.atomic.{min,max}(ary, idx, val)
    #
    # the result will be ary[idx] for either of ary[idx] or val being NaN.

    def _test_atomic_minmax_nan_location(self, func):
        """A NaN already stored at the target location must be preserved."""
        cuda_func = cuda.jit('void(float64[:], float64[:,:])')(func)
        vals = np.random.randint(0, 128, size=(1,1)).astype(np.float64)
        res = np.zeros(1, np.float64) + np.nan
        cuda_func[1, 1](res, vals)
        # assert_equal treats two NaNs as equal, so this checks res is NaN.
        np.testing.assert_equal(res, [np.nan])

    def _test_atomic_minmax_nan_val(self, func):
        """A NaN operand must leave the stored (non-NaN) value untouched."""
        cuda_func = cuda.jit('void(float64[:], float64[:,:])')(func)
        res = np.random.randint(0, 128, size=1).astype(np.float64)
        gold = res.copy()
        vals = np.zeros((1, 1), np.float64) + np.nan
        cuda_func[1, 1](res, vals)
        np.testing.assert_equal(res, gold)
    # NaN-handling checks for min and max (see comment above the helpers).
    def test_atomic_min_nan_location(self):
        self._test_atomic_minmax_nan_location(atomic_min)

    def test_atomic_max_nan_location(self):
        self._test_atomic_minmax_nan_location(atomic_max)

    def test_atomic_min_nan_val(self):
        self._test_atomic_minmax_nan_val(atomic_min)

    def test_atomic_max_nan_val(self):
        self._test_atomic_minmax_nan_val(atomic_max)

    def test_atomic_max_double_shared(self):
        """atomic.max through the *_shared kernel variant."""
        vals = np.random.randint(0, 32, size=32).astype(np.float64)
        res = np.zeros(1, np.float64)
        sig = 'void(float64[:], float64[:])'
        cuda_func = cuda.jit(sig)(atomic_max_double_shared)
        cuda_func[1, 32](res, vals)
        gold = np.max(vals)
        np.testing.assert_equal(res, gold)

    def test_atomic_min_double_shared(self):
        """atomic.min through the *_shared kernel variant; res starts at
        32, above the data range [0, 32)."""
        vals = np.random.randint(0, 32, size=32).astype(np.float64)
        res = np.ones(1, np.float64) * 32
        sig = 'void(float64[:], float64[:])'
        cuda_func = cuda.jit(sig)(atomic_min_double_shared)
        cuda_func[1, 32](res, vals)
        gold = np.min(vals)
        np.testing.assert_equal(res, gold)
    def check_cas(self, n, fill, unfill, dtype, cas_func, ndim=1):
        """Exercise a compare-and-swap kernel.

        ``res`` is seeded with n/2 ``fill`` and n/2 ``unfill`` entries in
        random order. The kernel is expected to CAS each slot against
        ``fill``: slots holding ``fill`` take the matching value from
        ``ary``, slots holding ``unfill`` stay unchanged, and ``out``
        receives the value observed before the swap (i.e. the original
        contents of ``res``).
        """
        res = [fill] * (n // 2) + [unfill] * (n // 2)
        np.random.shuffle(res)
        res = np.asarray(res, dtype=dtype)
        if ndim == 2:
            res.shape = (10, -1)
        out = np.zeros_like(res)
        ary = np.random.randint(1, 10, size=res.shape).astype(res.dtype)
        fill_mask = res == fill
        unfill_mask = res == unfill
        expect_res = np.zeros_like(res)
        expect_res[fill_mask] = ary[fill_mask]
        expect_res[unfill_mask] = unfill
        # out should hold the pre-swap contents of res.
        expect_out = res.copy()
        cuda_func = cuda.jit(cas_func)
        if ndim == 1:
            cuda_func[10, 10](res, out, ary, fill)
        else:
            cuda_func[(10, 10), (10, 10)](res, out, ary, fill)
        np.testing.assert_array_equal(expect_res, res)
        np.testing.assert_array_equal(expect_out, out)
    # Compare-and-swap tests: the atomic_compare_and_swap kernel (1D only
    # here) and the atomic_cas_{1,2}dim kernels, with fixed signed fills
    # (int32/int64) and randomly drawn unsigned fills (uint32/uint64).
    def test_atomic_compare_and_swap(self):
        self.check_cas(n=100, fill=-99, unfill=-1, dtype=np.int32,
                       cas_func=atomic_compare_and_swap)

    def test_atomic_compare_and_swap2(self):
        self.check_cas(n=100, fill=-45, unfill=-1, dtype=np.int64,
                       cas_func=atomic_compare_and_swap)

    def test_atomic_compare_and_swap3(self):
        # Random fill/unfill, chosen from disjoint ranges so they differ.
        rfill = np.random.randint(50, 500, dtype=np.uint32)
        runfill = np.random.randint(1, 25, dtype=np.uint32)
        self.check_cas(n=100, fill=rfill, unfill=runfill, dtype=np.uint32,
                       cas_func=atomic_compare_and_swap)

    def test_atomic_compare_and_swap4(self):
        rfill = np.random.randint(50, 500, dtype=np.uint64)
        runfill = np.random.randint(1, 25, dtype=np.uint64)
        self.check_cas(n=100, fill=rfill, unfill=runfill, dtype=np.uint64,
                       cas_func=atomic_compare_and_swap)

    def test_atomic_cas_1dim(self):
        self.check_cas(n=100, fill=-99, unfill=-1, dtype=np.int32,
                       cas_func=atomic_cas_1dim)

    def test_atomic_cas_2dim(self):
        self.check_cas(n=100, fill=-99, unfill=-1, dtype=np.int32,
                       cas_func=atomic_cas_2dim, ndim=2)

    def test_atomic_cas2_1dim(self):
        self.check_cas(n=100, fill=-45, unfill=-1, dtype=np.int64,
                       cas_func=atomic_cas_1dim)

    def test_atomic_cas2_2dim(self):
        self.check_cas(n=100, fill=-45, unfill=-1, dtype=np.int64,
                       cas_func=atomic_cas_2dim, ndim=2)

    def test_atomic_cas3_1dim(self):
        rfill = np.random.randint(50, 500, dtype=np.uint32)
        runfill = np.random.randint(1, 25, dtype=np.uint32)
        self.check_cas(n=100, fill=rfill, unfill=runfill, dtype=np.uint32,
                       cas_func=atomic_cas_1dim)

    def test_atomic_cas3_2dim(self):
        rfill = np.random.randint(50, 500, dtype=np.uint32)
        runfill = np.random.randint(1, 25, dtype=np.uint32)
        self.check_cas(n=100, fill=rfill, unfill=runfill, dtype=np.uint32,
                       cas_func=atomic_cas_2dim, ndim=2)

    def test_atomic_cas4_1dim(self):
        rfill = np.random.randint(50, 500, dtype=np.uint64)
        runfill = np.random.randint(1, 25, dtype=np.uint64)
        self.check_cas(n=100, fill=rfill, unfill=runfill, dtype=np.uint64,
                       cas_func=atomic_cas_1dim)

    def test_atomic_cas4_2dim(self):
        rfill = np.random.randint(50, 500, dtype=np.uint64)
        runfill = np.random.randint(1, 25, dtype=np.uint64)
        self.check_cas(n=100, fill=rfill, unfill=runfill, dtype=np.uint64,
                       cas_func=atomic_cas_2dim, ndim=2)
    # Tests that the atomic add, min, and max operations return the old value -
    # in the simulator, they did not (see Issue #5458). The max and min have
    # special handling for NaN values, so we explicitly test with a NaN in the
    # array being modified and the value provided.

    def _test_atomic_returns_old(self, kernel, initial):
        """Launch ``kernel`` on [initial, 0] and check the value the atomic
        op returned (which the kernel writes to x[1]) equals the pre-op
        x[0]."""
        x = np.zeros(2, dtype=np.float32)
        x[0] = initial
        kernel[1, 1](x)
        if np.isnan(initial):
            # NaN never compares equal to itself, so use isnan instead.
            self.assertTrue(np.isnan(x[1]))
        else:
            self.assertEqual(x[1], initial)

    def test_atomic_add_returns_old(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.add(x, 0, 1)
        self._test_atomic_returns_old(kernel, 10)
def test_atomic_max_returns_no_replace(self):
@cuda.jit
def kernel(x):
x[1] = cuda.atomic.max(x, 0, 1)
self._test_atomic_returns_old(kernel, 10)
    # In each case below the kernel stores the atomic op's return value in
    # x[1]; _test_atomic_returns_old asserts it equals the original x[0].
    def test_atomic_max_returns_old_replace(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.max(x, 0, 10)
        self._test_atomic_returns_old(kernel, 1)

    def test_atomic_max_returns_old_nan_in_array(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.max(x, 0, 1)
        self._test_atomic_returns_old(kernel, np.nan)

    def test_atomic_max_returns_old_nan_val(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.max(x, 0, np.nan)
        self._test_atomic_returns_old(kernel, 10)

    def test_atomic_min_returns_old_no_replace(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.min(x, 0, 11)
        self._test_atomic_returns_old(kernel, 10)

    def test_atomic_min_returns_old_replace(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.min(x, 0, 10)
        self._test_atomic_returns_old(kernel, 11)

    def test_atomic_min_returns_old_nan_in_array(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.min(x, 0, 11)
        self._test_atomic_returns_old(kernel, np.nan)

    def test_atomic_min_returns_old_nan_val(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.min(x, 0, np.nan)
        self._test_atomic_returns_old(kernel, 11)
    # Tests for atomic nanmin/nanmax

    # nanmax tests
    def check_atomic_nanmax(self, dtype, lo, hi, init_val):
        """Reduce random data with the atomic_nanmax kernel, with every
        other element overwritten by ``init_val`` (NaN for float dtypes,
        0 for integer dtypes), and compare against np.nanmax."""
        vals = np.random.randint(lo, hi, size=(32, 32)).astype(dtype)
        vals[1::2] = init_val
        res = np.zeros(1, dtype=vals.dtype)
        cuda_func = cuda.jit(atomic_nanmax)
        cuda_func[32, 32](res, vals)
        gold = np.nanmax(vals)
        np.testing.assert_equal(res, gold)
    def test_atomic_nanmax_int32(self):
        # Integer dtypes cannot hold NaN, so init_val is 0 here.
        self.check_atomic_nanmax(dtype=np.int32, lo=-65535, hi=65535,
                                 init_val=0)

    def test_atomic_nanmax_uint32(self):
        self.check_atomic_nanmax(dtype=np.uint32, lo=0, hi=65535,
                                 init_val=0)

    def test_atomic_nanmax_int64(self):
        self.check_atomic_nanmax(dtype=np.int64, lo=-65535, hi=65535,
                                 init_val=0)

    def test_atomic_nanmax_uint64(self):
        self.check_atomic_nanmax(dtype=np.uint64, lo=0, hi=65535,
                                 init_val=0)

    def test_atomic_nanmax_float32(self):
        self.check_atomic_nanmax(dtype=np.float32, lo=-65535, hi=65535,
                                 init_val=np.nan)

    def test_atomic_nanmax_double(self):
        self.check_atomic_nanmax(dtype=np.float64, lo=-65535, hi=65535,
                                 init_val=np.nan)

    def test_atomic_nanmax_double_shared(self):
        """nanmax through the *_shared kernel variant with NaNs in the input."""
        vals = np.random.randint(0, 32, size=32).astype(np.float64)
        vals[1::2] = np.nan
        res = np.array([0], dtype=vals.dtype)
        sig = 'void(float64[:], float64[:])'
        cuda_func = cuda.jit(sig)(atomic_nanmax_double_shared)
        cuda_func[1, 32](res, vals)
        gold = np.nanmax(vals)
        np.testing.assert_equal(res, gold)

    def test_atomic_nanmax_double_oneindex(self):
        # NOTE(review): this reuses the plain atomic_max_double_oneindex
        # kernel rather than a nanmax one. Since res starts non-NaN and
        # atomic.max never adopts a NaN operand (see the NaN comment
        # earlier in this class), its result still matches np.nanmax.
        vals = np.random.randint(0, 128, size=32).astype(np.float64)
        vals[1::2] = np.nan
        res = np.zeros(1, np.float64)
        cuda_func = cuda.jit('void(float64[:], float64[:])')(
            atomic_max_double_oneindex)
        cuda_func[1, 32](res, vals)
        gold = np.nanmax(vals)
        np.testing.assert_equal(res, gold)
    # nanmin tests
    def check_atomic_nanmin(self, dtype, lo, hi, init_val):
        """Reduce random data with the atomic_nanmin kernel, with every
        other element overwritten by ``init_val`` (NaN for float dtypes,
        0 for integer dtypes), and compare against np.nanmin. ``res``
        starts at 65535, above the generated data."""
        vals = np.random.randint(lo, hi, size=(32, 32)).astype(dtype)
        vals[1::2] = init_val
        res = np.array([65535], dtype=vals.dtype)
        cuda_func = cuda.jit(atomic_nanmin)
        cuda_func[32, 32](res, vals)
        gold = np.nanmin(vals)
        np.testing.assert_equal(res, gold)
    def test_atomic_nanmin_int32(self):
        # Integer dtypes cannot hold NaN, so init_val is 0 here.
        self.check_atomic_nanmin(dtype=np.int32, lo=-65535, hi=65535,
                                 init_val=0)

    def test_atomic_nanmin_uint32(self):
        self.check_atomic_nanmin(dtype=np.uint32, lo=0, hi=65535,
                                 init_val=0)

    def test_atomic_nanmin_int64(self):
        self.check_atomic_nanmin(dtype=np.int64, lo=-65535, hi=65535,
                                 init_val=0)

    def test_atomic_nanmin_uint64(self):
        self.check_atomic_nanmin(dtype=np.uint64, lo=0, hi=65535,
                                 init_val=0)

    def test_atomic_nanmin_float(self):
        self.check_atomic_nanmin(dtype=np.float32, lo=-65535, hi=65535,
                                 init_val=np.nan)

    def test_atomic_nanmin_double(self):
        self.check_atomic_nanmin(dtype=np.float64, lo=-65535, hi=65535,
                                 init_val=np.nan)

    def test_atomic_nanmin_double_shared(self):
        """nanmin through the *_shared kernel variant with NaNs in the input."""
        vals = np.random.randint(0, 32, size=32).astype(np.float64)
        vals[1::2] = np.nan
        res = np.array([32], dtype=vals.dtype)
        sig = 'void(float64[:], float64[:])'
        cuda_func = cuda.jit(sig)(atomic_nanmin_double_shared)
        cuda_func[1, 32](res, vals)
        gold = np.nanmin(vals)
        np.testing.assert_equal(res, gold)

    def test_atomic_nanmin_double_oneindex(self):
        # NOTE(review): this reuses the plain atomic_min_double_oneindex
        # kernel rather than a nanmin one. Since res starts non-NaN and
        # atomic.min never adopts a NaN operand (see the NaN comment
        # earlier in this class), its result still matches np.nanmin.
        vals = np.random.randint(0, 128, size=32).astype(np.float64)
        vals[1::2] = np.nan
        res = np.array([128], np.float64)
        cuda_func = cuda.jit('void(float64[:], float64[:])')(
            atomic_min_double_oneindex)
        cuda_func[1, 32](res, vals)
        gold = np.nanmin(vals)
        np.testing.assert_equal(res, gold)
    # Returning old value tests
    def _test_atomic_nan_returns_old(self, kernel, initial):
        """Like _test_atomic_returns_old, but x[1] is pre-set to NaN so a
        NaN initial value can be distinguished from the kernel simply not
        writing x[1]."""
        x = np.zeros(2, dtype=np.float32)
        x[0] = initial
        x[1] = np.nan
        kernel[1, 1](x)
        if np.isnan(initial):
            # nanmin/nanmax replace a stored NaN with the operand...
            self.assertFalse(np.isnan(x[0]))
            # ...and return the old (NaN) value.
            self.assertTrue(np.isnan(x[1]))
        else:
            self.assertEqual(x[1], initial)
    # As above, each kernel writes the nan-atomic's return value to x[1];
    # _test_atomic_nan_returns_old asserts it equals the original x[0].
    def test_atomic_nanmax_returns_old_no_replace(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmax(x, 0, 1)
        self._test_atomic_nan_returns_old(kernel, 10)

    def test_atomic_nanmax_returns_old_replace(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmax(x, 0, 10)
        self._test_atomic_nan_returns_old(kernel, 1)

    def test_atomic_nanmax_returns_old_nan_in_array(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmax(x, 0, 1)
        self._test_atomic_nan_returns_old(kernel, np.nan)

    def test_atomic_nanmax_returns_old_nan_val(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmax(x, 0, np.nan)
        self._test_atomic_nan_returns_old(kernel, 10)

    def test_atomic_nanmin_returns_old_no_replace(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmin(x, 0, 11)
        self._test_atomic_nan_returns_old(kernel, 10)

    def test_atomic_nanmin_returns_old_replace(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmin(x, 0, 10)
        self._test_atomic_nan_returns_old(kernel, 11)

    def test_atomic_nanmin_returns_old_nan_in_array(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmin(x, 0, 11)
        self._test_atomic_nan_returns_old(kernel, np.nan)

    def test_atomic_nanmin_returns_old_nan_val(self):
        @cuda.jit
        def kernel(x):
            x[1] = cuda.atomic.nanmin(x, 0, np.nan)
        self._test_atomic_nan_returns_old(kernel, 11)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| TestCudaAtomics |
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/exceptions.py | {
"start": 1146,
"end": 1261
} | class ____(DatabricksSqlExecutionError):
"""Raised when a sql execution times out."""
| DatabricksSqlExecutionTimeout |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 28129,
"end": 30284
} | class ____:
param_names = ["shape"]
params = [get_benchmark_shapes("TimeReindex")]
def setup(self, shape):
rows, cols = shape
rng = IMPL.date_range(start="1/1/1970", periods=rows, freq="1min")
self.df = IMPL.DataFrame(
np.random.rand(rows, cols), index=rng, columns=range(cols)
)
self.df["foo"] = "bar"
self.rng_subset = IMPL.Index(rng[::2])
self.df2 = IMPL.DataFrame(
index=range(rows), data=np.random.rand(rows, cols), columns=range(cols)
)
level1 = IMPL.Index(
[f"i-{i}" for i in range(rows // 10)], dtype=object
).values.repeat(10)
level2 = np.tile(
IMPL.Index([f"i-{i}" for i in range(10)], dtype=object).values, rows // 10
)
index = IMPL.MultiIndex.from_arrays([level1, level2])
self.s = IMPL.Series(np.random.randn(rows), index=index)
self.s_subset = self.s[::2]
self.s_subset_no_cache = self.s[::2].copy()
mi = IMPL.MultiIndex.from_product([rng[: len(rng) // 10], range(10)])
self.s2 = IMPL.Series(np.random.randn(len(mi)), index=mi)
self.s2_subset = self.s2[::2].copy()
execute(self.df), execute(self.df2)
execute(self.s), execute(self.s_subset)
execute(self.s2), execute(self.s2_subset)
execute(self.s_subset_no_cache)
def time_reindex_dates(self, shape):
execute(self.df.reindex(self.rng_subset))
def time_reindex_columns(self, shape):
execute(self.df2.reindex(columns=self.df.columns[1:5]))
def time_reindex_multiindex_with_cache(self, shape):
# MultiIndex._values gets cached (pandas specific)
execute(self.s.reindex(self.s_subset.index))
def time_reindex_multiindex_no_cache(self, shape):
# Copy to avoid MultiIndex._values getting cached (pandas specific)
execute(self.s.reindex(self.s_subset_no_cache.index.copy()))
def time_reindex_multiindex_no_cache_dates(self, shape):
# Copy to avoid MultiIndex._values getting cached (pandas specific)
execute(self.s2_subset.reindex(self.s2.index.copy()))
| TimeReindex |
python | google__jax | tests/pallas/tpu_pallas_interpret_test.py | {
"start": 1257,
"end": 1926
} | class ____:
"""Wraps the I/O callback `store` into a callback that counts the number of calls to `store`."""
def __init__(self):
self._num_stores = 0
self._saved = mosaic_interpret.store
def __enter__(self):
def _store_callback(self, *args, **kwargs):
self._num_stores += 1
return self._saved(*args, **kwargs)
mosaic_interpret.store = functools.partial(_store_callback, self)
return self
def __exit__(self, ty, value, traceback):
del ty, value, traceback
mosaic_interpret.store = self._saved
@property
def num_stores(self):
return self._num_stores
@dataclasses.dataclass(frozen=True)
| CountStoreCallbacksContext |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 29706,
"end": 29959
} | class ____(Constant):
__slots__ = ()
def to_dict(self):
ast_dict = super().to_dict()
# python ast ellipsis() is not json serializable; use a string
ast_dict["value"] = self.node_source_code
return ast_dict
| Ellipsis |
python | getsentry__sentry | src/sentry/grouping/fingerprinting/utils.py | {
"start": 551,
"end": 637
} | class ____(TypedDict):
logger: NotRequired[str]
level: NotRequired[str]
| _LogInfo |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_operator.py | {
"start": 29448,
"end": 29623
} | class ____(OperatorPickleTestCase, __TestCase):
module = py_operator
module2 = c_operator
@unittest.skipUnless(c_operator, 'requires _operator')
| PyCOperatorPickleTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver27.py | {
"start": 791,
"end": 1030
} | class ____:
def get_features(self) -> list[str]: ...
def func1(specs: Iterable[str] | ClassB) -> None:
if isinstance(specs, ClassB):
features = specs.get_features()
else:
features = specs
set(features)
| ClassB |
python | python-excel__xlrd | xlrd/formatting.py | {
"start": 5492,
"end": 5837
} | class ____(object):
"""
This mixin class exists solely so that :class:`Format`, :class:`Font`, and
:class:`XF` objects can be compared by value of their attributes.
"""
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return self.__dict__ != other.__dict__
| EqNeAttrs |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 599431,
"end": 599868
} | class ____(LegendBinding):
"""
LegendStreamBinding schema wrapper.
Parameters
----------
legend : str, dict, :class:`Stream`, :class:`EventStream`, :class:`MergedStream`, :class:`DerivedStream`
"""
_schema = {"$ref": "#/definitions/LegendStreamBinding"}
def __init__(self, legend: Optional[str | SchemaBase | Map] = Undefined, **kwds):
super().__init__(legend=legend, **kwds)
| LegendStreamBinding |
python | numba__numba | numba/experimental/jitclass/base.py | {
"start": 2554,
"end": 8048
} | class ____(type):
"""
The type of any jitclass.
"""
def __new__(cls, name, bases, dct):
if len(bases) != 1:
raise TypeError("must have exactly one base class")
[base] = bases
if isinstance(base, JitClassType):
raise TypeError("cannot subclass from a jitclass")
assert 'class_type' in dct, 'missing "class_type" attr'
outcls = type.__new__(cls, name, bases, dct)
outcls._set_init()
return outcls
def _set_init(cls):
"""
Generate a wrapper for calling the constructor from pure Python.
Note the wrapper will only accept positional arguments.
"""
init = cls.class_type.instance_type.methods['__init__']
init_sig = utils.pysignature(init)
# get postitional and keyword arguments
# offset by one to exclude the `self` arg
args = _getargs(init_sig)[1:]
cls._ctor_sig = init_sig
ctor_source = _ctor_template.format(args=', '.join(args))
glbls = {"__numba_cls_": cls}
exec(ctor_source, glbls)
ctor = glbls['ctor']
cls._ctor = njit(ctor)
def __instancecheck__(cls, instance):
if isinstance(instance, _box.Box):
return instance._numba_type_.class_type is cls.class_type
return False
def __call__(cls, *args, **kwargs):
# The first argument of _ctor_sig is `cls`, which here
# is bound to None and then skipped when invoking the constructor.
bind = cls._ctor_sig.bind(None, *args, **kwargs)
bind.apply_defaults()
return cls._ctor(*bind.args[1:], **bind.kwargs)
##############################################################################
# Registration utils
def _validate_spec(spec):
for k, v in spec.items():
if not isinstance(k, str):
raise TypeError("spec keys should be strings, got %r" % (k,))
if not isinstance(v, types.Type):
raise TypeError("spec values should be Numba type instances, got %r"
% (v,))
def _fix_up_private_attr(clsname, spec):
"""
Apply the same changes to dunder names as CPython would.
"""
out = OrderedDict()
for k, v in spec.items():
if k.startswith('__') and not k.endswith('__'):
k = '_' + clsname + k
out[k] = v
return out
def _add_linking_libs(context, call):
"""
Add the required libs for the callable to allow inlining.
"""
libs = getattr(call, "libs", ())
if libs:
context.add_linking_libs(libs)
def register_class_type(cls, spec, class_ctor, builder):
"""
Internal function to create a jitclass.
Args
----
cls: the original class object (used as the prototype)
spec: the structural specification contains the field types.
class_ctor: the numba type to represent the jitclass
builder: the internal jitclass builder
"""
# Normalize spec
if spec is None:
spec = OrderedDict()
elif isinstance(spec, Sequence):
spec = OrderedDict(spec)
# Extend spec with class annotations.
for attr, py_type in pt.get_type_hints(cls).items():
if attr not in spec:
spec[attr] = as_numba_type(py_type)
_validate_spec(spec)
# Fix up private attribute names
spec = _fix_up_private_attr(cls.__name__, spec)
# Copy methods from base classes
clsdct = {}
for basecls in reversed(inspect.getmro(cls)):
clsdct.update(basecls.__dict__)
methods, props, static_methods, others = {}, {}, {}, {}
for k, v in clsdct.items():
if isinstance(v, pytypes.FunctionType):
methods[k] = v
elif isinstance(v, property):
props[k] = v
elif isinstance(v, staticmethod):
static_methods[k] = v
else:
others[k] = v
# Check for name shadowing
shadowed = (set(methods) | set(props) | set(static_methods)) & set(spec)
if shadowed:
raise NameError("name shadowing: {0}".format(', '.join(shadowed)))
docstring = others.pop('__doc__', "")
_drop_ignored_attrs(others)
if others:
msg = "class members are not yet supported: {0}"
members = ', '.join(others.keys())
raise TypeError(msg.format(members))
for k, v in props.items():
if v.fdel is not None:
raise TypeError("deleter is not supported: {0}".format(k))
jit_methods = {k: njit(v) for k, v in methods.items()}
jit_props = {}
for k, v in props.items():
dct = {}
if v.fget:
dct['get'] = njit(v.fget)
if v.fset:
dct['set'] = njit(v.fset)
jit_props[k] = dct
jit_static_methods = {
k: njit(v.__func__) for k, v in static_methods.items()}
# Instantiate class type
class_type = class_ctor(
cls,
ConstructorTemplate,
spec,
jit_methods,
jit_props,
jit_static_methods)
jit_class_dct = dict(class_type=class_type, __doc__=docstring)
jit_class_dct.update(jit_static_methods)
cls = JitClassType(cls.__name__, (cls,), jit_class_dct)
# Register resolution of the class object
typingctx = cpu_target.typing_context
typingctx.insert_global(cls, class_type)
# Register class
targetctx = cpu_target.target_context
builder(class_type, typingctx, targetctx).register()
as_numba_type.register(cls, class_type.instance_type)
return cls
| JitClassType |
python | openai__gym | gym/wrappers/compatibility.py | {
"start": 1273,
"end": 4288
} | class ____(gym.Env):
r"""A wrapper which can transform an environment from the old API to the new API.
Old step API refers to step() method returning (observation, reward, done, info), and reset() only retuning the observation.
New step API refers to step() method returning (observation, reward, terminated, truncated, info) and reset() returning (observation, info).
(Refer to docs for details on the API change)
Known limitations:
- Environments that use `self.np_random` might not work as expected.
"""
def __init__(self, old_env: LegacyEnv, render_mode: Optional[str] = None):
"""A wrapper which converts old-style envs to valid modern envs.
Some information may be lost in the conversion, so we recommend updating your environment.
Args:
old_env (LegacyEnv): the env to wrap, implemented with the old API
render_mode (str): the render mode to use when rendering the environment, passed automatically to env.render
"""
self.metadata = getattr(old_env, "metadata", {"render_modes": []})
self.render_mode = render_mode
self.reward_range = getattr(old_env, "reward_range", None)
self.spec = getattr(old_env, "spec", None)
self.env = old_env
self.observation_space = old_env.observation_space
self.action_space = old_env.action_space
def reset(
self, seed: Optional[int] = None, options: Optional[dict] = None
) -> Tuple[ObsType, dict]:
"""Resets the environment.
Args:
seed: the seed to reset the environment with
options: the options to reset the environment with
Returns:
(observation, info)
"""
if seed is not None:
self.env.seed(seed)
# Options are ignored
if self.render_mode == "human":
self.render()
return self.env.reset(), {}
def step(self, action: Any) -> Tuple[Any, float, bool, bool, Dict]:
"""Steps through the environment.
Args:
action: action to step through the environment with
Returns:
(observation, reward, terminated, truncated, info)
"""
obs, reward, done, info = self.env.step(action)
if self.render_mode == "human":
self.render()
return convert_to_terminated_truncated_step_api((obs, reward, done, info))
def render(self) -> Any:
"""Renders the environment.
Returns:
The rendering of the environment, depending on the render mode
"""
return self.env.render(mode=self.render_mode)
def close(self):
"""Closes the environment."""
self.env.close()
def __str__(self):
"""Returns the wrapper name and the unwrapped environment string."""
return f"<{type(self).__name__}{self.env}>"
def __repr__(self):
"""Returns the string representation of the wrapper."""
return str(self)
| EnvCompatibility |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass3.py | {
"start": 303,
"end": 341
} | class ____(Base1, Base2):
pass
| Base3 |
python | wandb__wandb | wandb/automations/_generated/fragments.py | {
"start": 1155,
"end": 1535
} | class ____(GQLResult):
typename__: Typename[Literal["GenericWebhookTriggeredAction"]] = (
"GenericWebhookTriggeredAction"
)
integration: Union[
GenericWebhookActionFieldsIntegrationIntegration, WebhookIntegrationFields
] = Field(discriminator="typename__")
request_payload: Optional[str] = Field(alias="requestPayload")
| GenericWebhookActionFields |
python | fastapi__sqlmodel | sqlmodel/default.py | {
"start": 34,
"end": 850
} | class ____:
"""
You shouldn't use this class directly.
It's used internally to recognize when a default value has been overwritten, even
if the overridden default value was truthy.
"""
def __init__(self, value: Any):
self.value = value
def __bool__(self) -> bool:
return bool(self.value)
def __eq__(self, o: object) -> bool:
return isinstance(o, _DefaultPlaceholder) and o.value == self.value
_TDefaultType = TypeVar("_TDefaultType")
def Default(value: _TDefaultType) -> _TDefaultType:
"""
You shouldn't use this function directly.
It's used internally to recognize when a default value has been overwritten, even
if the overridden default value was truthy.
"""
return _DefaultPlaceholder(value) # type: ignore
| _DefaultPlaceholder |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 40130,
"end": 42128
} | class ____(AssetSelection):
"""Used to represent a UI asset selection by project. This should not be resolved against
an in-process asset graph.
"""
selected_code_location: Optional[str]
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
from dagster._core.definitions.assets.graph.remote_asset_graph import RemoteAssetGraph
check.invariant(
isinstance(asset_graph, RemoteAssetGraph),
"code_location: cannot be used to select assets in user code.",
)
asset_graph = cast("RemoteAssetGraph", asset_graph)
location_name = self.selected_code_location
# If the code location is in the form of "repo_name@location_name", we need to
# split the string and filter the asset keys based on the repository and location name.
if location_name and "@" in location_name:
asset_keys = set()
location = location_name.split("@")[1]
name = location_name.split("@")[0]
for asset_key, node in asset_graph.remote_asset_nodes_by_key.items():
repo_handle = node.resolve_to_singular_repo_scoped_node().repository_handle
if repo_handle.location_name == location and repo_handle.repository_name == name:
asset_keys.add(asset_key)
return asset_keys
# Otherwise, filter only by location name
return {
key
for key, node in asset_graph.remote_asset_nodes_by_key.items()
if (
node.resolve_to_singular_repo_scoped_node().repository_handle.location_name
== self.selected_code_location
)
}
def to_selection_str(self) -> str:
if self.selected_code_location is None:
return "code_location:<null>"
return f'code_location:"{self.selected_code_location}"'
@whitelist_for_serdes
@record
| CodeLocationAssetSelection |
python | dask__dask | dask/array/_array_expr/_slicing.py | {
"start": 11715,
"end": 12055
} | class ____(ArrayExpr):
@functools.cached_property
def _name(self):
return f"getitem-{self.deterministic_token}"
@functools.cached_property
def _meta(self):
meta = meta_from_array(self.array._meta, ndim=len(self.chunks))
if np.isscalar(meta):
meta = np.array(meta)
return meta
| Slice |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 99747,
"end": 102509
} | class ____(BaseModel, extra="forbid"):
"""
Recommendation request. Provides positive and negative examples of the vectors, which can be ids of points that are already stored in the collection, raw vectors, or even ids and vectors combined. Service should look for the points which are closer to positive examples and at the same time further to negative examples. The concrete way of how to compare negative and positive distances is up to the `strategy` chosen.
"""
shard_key: Optional["ShardKeySelector"] = Field(
default=None,
description="Specify in which shards to look for the points, if not specified - look in all shards",
)
positive: Optional[List["RecommendExample"]] = Field(default=[], description="Look for vectors closest to those")
negative: Optional[List["RecommendExample"]] = Field(default=[], description="Try to avoid vectors like this")
strategy: Optional["RecommendStrategy"] = Field(
default=None, description="How to use positive and negative examples to find the results"
)
filter: Optional["Filter"] = Field(default=None, description="Look only for points which satisfies this conditions")
params: Optional["SearchParams"] = Field(default=None, description="Additional search params")
limit: int = Field(..., description="Max number of result to return")
offset: Optional[int] = Field(
default=None,
description="Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues.",
)
with_payload: Optional["WithPayloadInterface"] = Field(
default=None, description="Select which payload to return with the response. Default is false."
)
with_vector: Optional["WithVector"] = Field(
default=None, description="Options for specifying which vectors to include into response. Default is false."
)
score_threshold: Optional[float] = Field(
default=None,
description="Define a minimal score threshold for the result. If defined, less similar results will not be returned. Score of the returned result might be higher or smaller than the threshold depending on the Distance function used. E.g. for cosine similarity only higher scores will be returned.",
)
using: Optional["UsingVector"] = Field(
default=None,
description="Define which vector to use for recommendation, if not specified - try to use default vector",
)
lookup_from: Optional["LookupLocation"] = Field(
default=None,
description="The location used to lookup vectors. If not specified - use current collection. Note: the other collection should have the same vector size as the current collection",
)
| RecommendRequest |
python | PrefectHQ__prefect | src/integrations/prefect-dbt/prefect_dbt/cli/configs/snowflake.py | {
"start": 467,
"end": 4547
} | class ____(BaseTargetConfigs):
"""
Target configs contain credentials and
settings, specific to Snowflake.
To find valid keys, head to the [Snowflake Profile](
https://docs.getdbt.com/reference/warehouse-profiles/snowflake-profile)
page.
Attributes:
connector: The connector to use.
Examples:
Load stored SnowflakeTargetConfigs:
```python
from prefect_dbt.cli.configs import SnowflakeTargetConfigs
snowflake_target_configs = SnowflakeTargetConfigs.load("BLOCK_NAME")
```
Instantiate SnowflakeTargetConfigs.
```python
from prefect_dbt.cli.configs import SnowflakeTargetConfigs
from prefect_snowflake.credentials import SnowflakeCredentials
from prefect_snowflake.database import SnowflakeConnector
credentials = SnowflakeCredentials(
user="user",
password="password",
account="account.region.aws",
role="role",
)
connector = SnowflakeConnector(
schema="public",
database="database",
warehouse="warehouse",
credentials=credentials,
)
target_configs = SnowflakeTargetConfigs(
connector=connector,
extras={"retry_on_database_errors": True},
)
```
"""
_block_type_name = "dbt CLI Snowflake Target Configs"
_logo_url = "https://images.ctfassets.net/gm98wzqotmnx/5zE9lxfzBHjw3tnEup4wWL/9a001902ed43a84c6c96d23b24622e19/dbt-bit_tm.png?h=250" # noqa
_documentation_url = "https://docs.prefect.io/integrations/prefect-dbt" # noqa
type: Literal["snowflake"] = Field(
default="snowflake", description="The type of the target configs."
)
schema_: Optional[str] = Field(
default=None,
alias="schema",
description="The schema to use for the target configs.",
)
connector: SnowflakeConnector = Field(
default=..., description="The connector to use."
)
def get_configs(self) -> Dict[str, Any]:
"""
Returns the dbt configs specific to Snowflake profile.
Returns:
A configs JSON.
"""
all_configs_json = super().get_configs()
# decouple prefect-snowflake from prefect-dbt
# by mapping all the keys dbt snowflake accepts
# https://docs.getdbt.com/reference/warehouse-setups/snowflake-setup
rename_keys = {
# dbt
"type": "type",
"schema": "schema",
"threads": "threads",
# general
"account": "account",
"user": "user",
"role": "role",
"database": "database",
"warehouse": "warehouse",
# user and password
"password": "password",
# duo mfa / sso
"authenticator": "authenticator",
# key pair
"private_key": "private_key",
"private_key_path": "private_key_path",
"private_key_passphrase": "private_key_passphrase",
# optional
"client_session_keep_alive": "client_session_keep_alive",
"query_tag": "query_tag",
"connect_retries": "connect_retries",
"connect_timeout": "connect_timeout",
"retry_on_database_errors": "retry_on_database_errors",
"retry_all": "retry_all",
}
configs_json = {}
extras = self.extras or {}
for key in all_configs_json.keys():
if key not in rename_keys and key not in extras:
# skip invalid keys, like fetch_size + poll_frequency_s
continue
# rename key to something dbt profile expects
dbt_key = rename_keys.get(key) or key
value = all_configs_json[key]
if key == "private_key":
# SnowflakeCredentials stores private_key as SecretBytes
value = value.decode()
configs_json[dbt_key] = value
return configs_json
| SnowflakeTargetConfigs |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly1.py | {
"start": 974,
"end": 1021
} | class ____(F1):
b: ReadOnly[Required[int]]
| F5 |
python | mlflow__mlflow | mlflow/pyfunc/loaders/responses_agent.py | {
"start": 781,
"end": 4134
} | class ____:
"""
Wrapper class that converts dict inputs to pydantic objects accepted by
:class:`~ResponsesAgent`.
"""
def __init__(self, responses_agent, context):
self.responses_agent = responses_agent
self.context = context
def get_raw_model(self):
"""
Returns the underlying model.
"""
return self.responses_agent
def _convert_input(self, model_input) -> ResponsesAgentRequest:
import pandas
if isinstance(model_input, pandas.DataFrame):
model_input = {
k: _convert_llm_ndarray_to_list(v[0])
for k, v in model_input.to_dict(orient="list").items()
}
elif not isinstance(model_input, dict):
raise MlflowException(
"Unsupported model input type. Expected a dict or pandas.DataFrame, but got "
f"{type(model_input)} instead.",
error_code=INTERNAL_ERROR,
)
return ResponsesAgentRequest(**model_input)
def _response_to_dict(self, response, pydantic_class) -> dict[str, Any]:
if isinstance(response, pydantic_class):
return response.model_dump(exclude_none=True)
try:
model_validate(pydantic_class, response)
except pydantic.ValidationError as e:
raise MlflowException(
message=(
f"Model returned an invalid response. Expected a {pydantic_class.__name__} "
f"object or dictionary with the same schema. Pydantic validation error: {e}"
),
error_code=INTERNAL_ERROR,
) from e
return response
def predict(self, model_input: dict[str, Any], params=None) -> dict[str, Any]:
"""
Args:
model_input: A dict with the
:py:class:`ResponsesRequest <mlflow.types.responses.ResponsesRequest>` schema.
params: Unused in this function, but required in the signature because
`load_model_and_predict` in `utils/_capture_modules.py` expects a params field
Returns:
A dict with the
(:py:class:`ResponsesResponse <mlflow.types.responses.ResponsesResponse>`)
schema.
"""
request = self._convert_input(model_input)
response = self.responses_agent.predict(request)
return self._response_to_dict(response, ResponsesAgentResponse)
def predict_stream(
self, model_input: dict[str, Any], params=None
) -> Generator[dict[str, Any], None, None]:
"""
Args:
model_input: A dict with the
:py:class:`ResponsesRequest <mlflow.types.responses.ResponsesRequest>` schema.
params: Unused in this function, but required in the signature because
`load_model_and_predict` in `utils/_capture_modules.py` expects a params field
Returns:
A generator over dicts with the
(:py:class:`ResponsesStreamEvent <mlflow.types.responses.ResponsesStreamEvent>`)
schema.
"""
request = self._convert_input(model_input)
for response in self.responses_agent.predict_stream(request):
yield self._response_to_dict(response, ResponsesAgentStreamEvent)
| _ResponsesAgentPyfuncWrapper |
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/list.py | {
"start": 4058,
"end": 5514
} | class ____(ListOutputParser):
"""Parse the output of a model to a comma-separated list."""
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "output_parsers", "list"]`
"""
return ["langchain", "output_parsers", "list"]
@override
def get_format_instructions(self) -> str:
"""Return the format instructions for the comma-separated list output."""
return (
"Your response should be a list of comma separated values, "
"eg: `foo, bar, baz` or `foo,bar,baz`"
)
@override
def parse(self, text: str) -> list[str]:
"""Parse the output of an LLM call.
Args:
text: The output of an LLM call.
Returns:
A list of strings.
"""
try:
reader = csv.reader(
StringIO(text), quotechar='"', delimiter=",", skipinitialspace=True
)
return [item for sublist in reader for item in sublist]
except csv.Error:
# Keep old logic for backup
return [part.strip() for part in text.split(",")]
@property
def _type(self) -> str:
return "comma-separated-list"
| CommaSeparatedListOutputParser |
python | ray-project__ray | python/ray/data/_internal/logical/interfaces/logical_operator.py | {
"start": 308,
"end": 3298
} | class ____(Operator):
"""Abstract class for logical operators.
A logical operator describes transformation, and later is converted into
physical operator.
"""
def __init__(
self,
name: str,
input_dependencies: List["LogicalOperator"],
num_outputs: Optional[int] = None,
):
super().__init__(
name,
input_dependencies,
)
for x in input_dependencies:
assert isinstance(x, LogicalOperator), x
self._num_outputs: Optional[int] = num_outputs
def estimated_num_outputs(self) -> Optional[int]:
"""Returns the estimated number of blocks that
would be outputted by this logical operator.
This method does not execute the plan, so it does not take into consideration
block splitting. This method only considers high-level block constraints like
`Dataset.repartition(num_blocks=X)`. A more accurate estimation can be given by
`PhysicalOperator.num_outputs_total()` during execution.
"""
if self._num_outputs is not None:
return self._num_outputs
elif len(self._input_dependencies) == 1:
return self._input_dependencies[0].estimated_num_outputs()
return None
# Override the following 3 methods to correct type hints.
@property
def input_dependencies(self) -> List["LogicalOperator"]:
return super().input_dependencies # type: ignore
@property
def output_dependencies(self) -> List["LogicalOperator"]:
return super().output_dependencies # type: ignore
def post_order_iter(self) -> Iterator["LogicalOperator"]:
return super().post_order_iter() # type: ignore
def _apply_transform(
self, transform: Callable[["LogicalOperator"], "LogicalOperator"]
) -> "LogicalOperator":
return super()._apply_transform(transform) # type: ignore
def _get_args(self) -> Dict[str, Any]:
"""This Dict must be serializable"""
return vars(self)
def infer_schema(self) -> Optional["Schema"]:
"""Returns the inferred schema of the output blocks."""
return None
def infer_metadata(self) -> "BlockMetadata":
"""A ``BlockMetadata`` that represents the aggregate metadata of the outputs.
This method is used by methods like :meth:`~ray.data.Dataset.schema` to
efficiently return metadata.
"""
return BlockMetadata(None, None, None, None)
def is_lineage_serializable(self) -> bool:
"""Returns whether the lineage of this operator can be serialized.
An operator is lineage serializable if you can serialize it on one machine and
deserialize it on another without losing information. Operators that store
object references (e.g., ``InputData``) aren't lineage serializable because the
objects aren't available on the deserialized machine.
"""
return True
| LogicalOperator |
python | gevent__gevent | src/greentest/3.12/test_ssl.py | {
"start": 72754,
"end": 89527
} | class ____(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.enterContext(server)
self.server_addr = (HOST, server.port)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
# Allow for flexible libssl error messages.
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
self.assertRaisesRegex(ssl.SSLError, regex,
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
# Allow for flexible libssl error messages.
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
self.assertRaisesRegex(ssl.SSLError, regex,
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args) depending on the error we get
# (WANT_READ or WANT_WRITE) move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
count = 0
for _ in support.busy_retry(timeout):
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
# tls-unique is not defined for TLSv1.3
# https://datatracker.ietf.org/doc/html/rfc8446#appendix-C.5
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES and sslobj.version() != "TLSv1.3":
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES and sslobj.version() != "TLSv1.3":
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
def test_transport_eof(self):
client_context, server_context, hostname = testing_context()
with socket.socket(socket.AF_INET) as sock:
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
sslobj = client_context.wrap_bio(incoming, outgoing,
server_hostname=hostname)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
# Simulate EOF from the transport.
incoming.write_eof()
self.assertRaises(ssl.SSLEOFError, sslobj.read)
@support.requires_resource('network')
| SimpleBackgroundTests |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 35326,
"end": 39831
} | class ____(nn.Module):
def __init__(self, config, grid_size, pretrained_window_sizes=(0, 0, 0, 0)):
super().__init__()
self.num_layers = len(config.depths)
self.config = config
if self.config.pretrained_window_sizes is not None:
pretrained_window_sizes = config.pretrained_window_sizes
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")]
layers = []
for i_layer in range(self.num_layers):
stage = Swinv2Stage(
config=config,
dim=int(config.embed_dim * 2**i_layer),
input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
depth=config.depths[i_layer],
num_heads=config.num_heads[i_layer],
drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
downsample=Swinv2PatchMerging if (i_layer < self.num_layers - 1) else None,
pretrained_window_size=pretrained_window_sizes[i_layer],
)
layers.append(stage)
self.layers = nn.ModuleList(layers)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
output_hidden_states_before_downsampling: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, Swinv2EncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, layer_module in enumerate(self.layers):
layer_outputs = layer_module(
hidden_states,
input_dimensions,
output_attentions,
)
hidden_states = layer_outputs[0]
hidden_states_before_downsampling = layer_outputs[1]
output_dimensions = layer_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
# rearrange b (h w) c -> b c h w
# here we use the original (not downsampled) height and width
reshaped_hidden_state = hidden_states_before_downsampling.view(
batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and not output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if output_attentions:
all_self_attentions += layer_outputs[3:]
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attentions, all_reshaped_hidden_states]
if v is not None
)
return Swinv2EncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
reshaped_hidden_states=all_reshaped_hidden_states,
)
@auto_docstring
| Swinv2Encoder |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 15788,
"end": 18083
} | class ____:
"""
CompletionCriteriaSettings contains the information needed to figure out if the next
lesson must start.
"""
class MeasureType(Enum):
PROGRESS: str = "progress"
REWARD: str = "reward"
behavior: str
measure: MeasureType = attr.ib(default=MeasureType.REWARD)
min_lesson_length: int = 0
signal_smoothing: bool = True
threshold: float = attr.ib(default=0.0)
require_reset: bool = False
@threshold.validator
def _check_threshold_value(self, attribute, value):
"""
Verify that the threshold has a value between 0 and 1 when the measure is
PROGRESS
"""
if self.measure == self.MeasureType.PROGRESS:
if self.threshold > 1.0:
raise TrainerConfigError(
"Threshold for next lesson cannot be greater than 1 when the measure is progress."
)
if self.threshold < 0.0:
raise TrainerConfigError(
"Threshold for next lesson cannot be negative when the measure is progress."
)
def need_increment(
self, progress: float, reward_buffer: List[float], smoothing: float
) -> Tuple[bool, float]:
"""
Given measures, this method returns a boolean indicating if the lesson
needs to change now, and a float corresponding to the new smoothed value.
"""
# Is the min number of episodes reached
if len(reward_buffer) < self.min_lesson_length:
return False, smoothing
if self.measure == CompletionCriteriaSettings.MeasureType.PROGRESS:
if progress > self.threshold:
return True, smoothing
if self.measure == CompletionCriteriaSettings.MeasureType.REWARD:
if len(reward_buffer) < 1:
return False, smoothing
measure = np.mean(reward_buffer)
if math.isnan(measure):
return False, smoothing
if self.signal_smoothing:
measure = 0.25 * smoothing + 0.75 * measure
smoothing = measure
if measure > self.threshold:
return True, smoothing
return False, smoothing
@attr.s(auto_attribs=True)
| CompletionCriteriaSettings |
python | keras-team__keras | keras/src/trainers/data_adapters/array_slicing.py | {
"start": 4077,
"end": 4324
} | class ____(TensorflowSliceable):
@classmethod
def convert_to_jax_compatible(cls, x):
return cls.convert_to_numpy(x)
@classmethod
def convert_to_torch_compatible(cls, x):
return x.to_tensor()
| TensorflowRaggedSliceable |
python | numba__numba | numba/tests/test_array_exprs.py | {
"start": 20934,
"end": 24229
} | class ____(MemoryLeakMixin, unittest.TestCase):
""" Tests RewriteArrayExprs with external (user defined) types,
see #5157"""
source_lines = textwrap.dedent("""
from numba.core import types
class FooType(types.Type):
def __init__(self):
super(FooType, self).__init__(name='Foo')
""")
def make_foo_type(self, FooType):
class Foo(object):
def __init__(self, value):
self.value = value
@register_model(FooType)
class FooModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [("value", types.intp)]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(FooType, "value", "value")
@type_callable(Foo)
def type_foo(context):
def typer(value):
return FooType()
return typer
@lower_builtin(Foo, types.intp)
def impl_foo(context, builder, sig, args):
typ = sig.return_type
[value] = args
foo = cgutils.create_struct_proxy(typ)(context, builder)
foo.value = value
return foo._getvalue()
@typeof_impl.register(Foo)
def typeof_foo(val, c):
return FooType()
return Foo, FooType
def test_external_type(self):
with create_temp_module(self.source_lines) as test_module:
Foo, FooType = self.make_foo_type(test_module.FooType)
# sum of foo class instance and array return an array
# binary operation with foo class instance as one of args
@overload(operator.add)
def overload_foo_add(lhs, rhs):
if isinstance(lhs, FooType) and isinstance(rhs, types.Array):
def imp(lhs, rhs):
return np.array([lhs.value, rhs[0]])
return imp
# sum of 2 foo class instances return an array
# binary operation with 2 foo class instances as args
@overload(operator.add)
def overload_foo_add(lhs, rhs):
if isinstance(lhs, FooType) and isinstance(rhs, FooType):
def imp(lhs, rhs):
return np.array([lhs.value, rhs.value])
return imp
# neg of foo class instance return an array
# unary operation with foo class instance arg
@overload(operator.neg)
def overload_foo_neg(x):
if isinstance(x, FooType):
def imp(x):
return np.array([-x.value])
return imp
@njit
def arr_expr_sum1(x, y):
return Foo(x) + np.array([y])
@njit
def arr_expr_sum2(x, y):
return Foo(x) + Foo(y)
@njit
def arr_expr_neg(x):
return -Foo(x)
np.testing.assert_array_equal(arr_expr_sum1(0, 1), np.array([0, 1]))
np.testing.assert_array_equal(arr_expr_sum2(2, 3), np.array([2, 3]))
np.testing.assert_array_equal(arr_expr_neg(4), np.array([-4]))
if __name__ == "__main__":
unittest.main()
| TestExternalTypes |
python | walkccc__LeetCode | solutions/591. Tag Validator/591.py | {
"start": 0,
"end": 1491
} | class ____:
def isValid(self, code: str) -> bool:
if code[0] != '<' or code[-1] != '>':
return False
containsTag = False
stack = []
def isValidCdata(s: str) -> bool:
return s.find('[CDATA[') == 0
def isValidTagName(tagName: str, isEndTag: bool) -> bool:
nonlocal containsTag
if not tagName or len(tagName) > 9:
return False
if any(not c.isupper() for c in tagName):
return False
if isEndTag:
return stack and stack.pop() == tagName
containsTag = True
stack.append(tagName)
return True
i = 0
while i < len(code):
if not stack and containsTag:
return False
if code[i] == '<':
# It's inside a tag, so check if it's a cdata.
if stack and code[i + 1] == '!':
closeIndex = code.find(']]>', i + 2)
if closeIndex == -1 or not isValidCdata(code[i + 2:closeIndex]):
return False
elif code[i + 1] == '/': # the end tag
closeIndex = code.find('>', i + 2)
if closeIndex == -1 or not isValidTagName(
code[i + 2: closeIndex],
True):
return False
else: # the start tag
closeIndex = code.find('>', i + 1)
if closeIndex == -1 or not isValidTagName(
code[i + 1: closeIndex],
False):
return False
i = closeIndex
i += 1
return not stack and containsTag
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_pie05.py | {
"start": 315,
"end": 1226
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_pie05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "pie"})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$3",
"values": "=Sheet1!$B$1:$B$3",
}
)
chart.set_rotation(45)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 39671,
"end": 40231
} | class ____:
"""Test he_IL address provider methods"""
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in HeIlAddressProvider.city_names
def test_street_title(self, faker, num_samples):
for _ in range(num_samples):
street_title = faker.street_title()
assert isinstance(street_title, str)
assert street_title in HeIlAddressProvider.street_titles
| TestHeIl |
python | google__jax | jax/_src/pallas/fuser/custom_fusion_lib.py | {
"start": 1660,
"end": 1776
} | class ____:
out_block_specs: tuple[pallas_core.BlockSpec, ...]
out_block_indices: tuple[Any, ...]
| CustomEvalContext |
python | pypa__warehouse | warehouse/oidc/forms/activestate.py | {
"start": 701,
"end": 751
} | class ____(TypedDict):
user_id: str
| UserResponse |
python | openai__openai-python | src/openai/resources/vector_stores/file_batches.py | {
"start": 32480,
"end": 33092
} | class ____:
def __init__(self, file_batches: AsyncFileBatches) -> None:
self._file_batches = file_batches
self.create = _legacy_response.async_to_raw_response_wrapper(
file_batches.create,
)
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
file_batches.retrieve,
)
self.cancel = _legacy_response.async_to_raw_response_wrapper(
file_batches.cancel,
)
self.list_files = _legacy_response.async_to_raw_response_wrapper(
file_batches.list_files,
)
| AsyncFileBatchesWithRawResponse |
python | kamyu104__LeetCode-Solutions | Python/resulting-string-after-adjacent-removals.py | {
"start": 49,
"end": 444
} | class ____(object):
def resultingString(self, s):
"""
:type s: str
:rtype: str
"""
result = []
for x in s:
if result:
diff = abs(ord(x)-ord(result[-1]))
if diff in (1, 25):
result.pop()
continue
result.append(x)
return "".join(result)
| Solution |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 95693,
"end": 97264
} | class ____(BiffRecord):
"""
Record EXTERNNAME for external names and Analysis add-in functions, BIFF5-BIFF8:
Offset Size Contents
0 2 Option flags (see below)
2 2 0 for global names, or:
BIFF5: One-based index to EXTERNSHEET record containing the sheet name,
BIFF8: One-based index to sheet list in preceding EXTERNALBOOK record.
4 2 Not used
6 var. BIFF5: Name (byte string, 8-bit string length, ?2.5.2).
BIFF8: Name (Unicode string, 8-bit string length, ?2.5.3).
See DEFINEDNAME record (?5.33) for a list of built-in names, if the built-in flag is set
in the option flags above.
var. var. Formula data (RPN token array, ?3)
Option flags for external names (BIFF5-BIFF8)
Bit Mask Contents
0 0001H 0 = Standard name; 1 = Built-in name
1 0002H 0 = Manual link; 1 = Automatic link (DDE links and OLE links only)
2 0004H 1 = Picture link (DDE links and OLE links only)
3 0008H 1 = This is the “StdDocumentName” identifier (DDE links only)
4 0010H 1 = OLE link
14-5 7FE0H Clipboard format of last successful update (DDE links and OLE links only)
15 8000H 1 = Iconified picture link (BIFF8 OLE links only)
"""
_REC_ID = 0x0023
def __init__(self, options=0, index=0, name=None, fmla=None):
self._rec_data = pack('<HHH', options, index, 0) + upack1(name) + fmla
| ExternnameRecord |
python | TheAlgorithms__Python | data_structures/linked_list/is_palindrome.py | {
"start": 83,
"end": 4633
} | class ____:
val: int = 0
next_node: ListNode | None = None
def is_palindrome(head: ListNode | None) -> bool:
"""
Check if a linked list is a palindrome.
Args:
head: The head of the linked list.
Returns:
bool: True if the linked list is a palindrome, False otherwise.
Examples:
>>> is_palindrome(None)
True
>>> is_palindrome(ListNode(1))
True
>>> is_palindrome(ListNode(1, ListNode(2)))
False
>>> is_palindrome(ListNode(1, ListNode(2, ListNode(1))))
True
>>> is_palindrome(ListNode(1, ListNode(2, ListNode(2, ListNode(1)))))
True
"""
if not head:
return True
# split the list to two parts
fast: ListNode | None = head.next_node
slow: ListNode | None = head
while fast and fast.next_node:
fast = fast.next_node.next_node
slow = slow.next_node if slow else None
if slow:
# slow will always be defined,
# adding this check to resolve mypy static check
second = slow.next_node
slow.next_node = None # Don't forget here! But forget still works!
# reverse the second part
node: ListNode | None = None
while second:
nxt = second.next_node
second.next_node = node
node = second
second = nxt
# compare two parts
# second part has the same or one less node
while node and head:
if node.val != head.val:
return False
node = node.next_node
head = head.next_node
return True
def is_palindrome_stack(head: ListNode | None) -> bool:
"""
Check if a linked list is a palindrome using a stack.
Args:
head (ListNode): The head of the linked list.
Returns:
bool: True if the linked list is a palindrome, False otherwise.
Examples:
>>> is_palindrome_stack(None)
True
>>> is_palindrome_stack(ListNode(1))
True
>>> is_palindrome_stack(ListNode(1, ListNode(2)))
False
>>> is_palindrome_stack(ListNode(1, ListNode(2, ListNode(1))))
True
>>> is_palindrome_stack(ListNode(1, ListNode(2, ListNode(2, ListNode(1)))))
True
"""
if not head or not head.next_node:
return True
# 1. Get the midpoint (slow)
slow: ListNode | None = head
fast: ListNode | None = head
while fast and fast.next_node:
fast = fast.next_node.next_node
slow = slow.next_node if slow else None
# slow will always be defined,
# adding this check to resolve mypy static check
if slow:
stack = [slow.val]
# 2. Push the second half into the stack
while slow.next_node:
slow = slow.next_node
stack.append(slow.val)
# 3. Comparison
cur: ListNode | None = head
while stack and cur:
if stack.pop() != cur.val:
return False
cur = cur.next_node
return True
def is_palindrome_dict(head: ListNode | None) -> bool:
"""
Check if a linked list is a palindrome using a dictionary.
Args:
head (ListNode): The head of the linked list.
Returns:
bool: True if the linked list is a palindrome, False otherwise.
Examples:
>>> is_palindrome_dict(None)
True
>>> is_palindrome_dict(ListNode(1))
True
>>> is_palindrome_dict(ListNode(1, ListNode(2)))
False
>>> is_palindrome_dict(ListNode(1, ListNode(2, ListNode(1))))
True
>>> is_palindrome_dict(ListNode(1, ListNode(2, ListNode(2, ListNode(1)))))
True
>>> is_palindrome_dict(
... ListNode(
... 1, ListNode(2, ListNode(1, ListNode(3, ListNode(2, ListNode(1)))))
... )
... )
False
"""
if not head or not head.next_node:
return True
d: dict[int, list[int]] = {}
pos = 0
while head:
if head.val in d:
d[head.val].append(pos)
else:
d[head.val] = [pos]
head = head.next_node
pos += 1
checksum = pos - 1
middle = 0
for v in d.values():
if len(v) % 2 != 0:
middle += 1
else:
for step, i in enumerate(range(len(v))):
if v[i] + v[len(v) - 1 - step] != checksum:
return False
if middle > 1:
return False
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| ListNode |
python | jina-ai__jina | tests/integration/hot_reload/exec3/my_executor3.py | {
"start": 170,
"end": 315
} | class ____(A):
@requests
def y(self, docs, **kwargs):
for doc in docs:
doc.text = 'EnhancedBeforeReload'
| EnhancedExecutor |
python | pytorch__pytorch | torch/cuda/_memory_viz.py | {
"start": 5923,
"end": 28174
} | class ____:
def __init__(self, value):
self.value = value
def __add__(self, rhs):
return Bytes(self.value + rhs)
def __repr__(self):
return _format_size(self.value)
def calc_active(seg):
return sum(b["size"] for b in seg["blocks"] if b["state"] == "active_allocated")
def _report_free(free_external, free_internal):
total = free_external + free_internal
suffix = ""
if total != 0:
pct = (free_internal / total) * 100
suffix = f" ({pct:.1f}% internal)"
return f"{Bytes(total)}{suffix}"
PAGE_SIZE = 1024 * 1024 * 20
legend = f"""\
Legend:
[a ] - a segment in the allocator
^-- a page {Bytes(PAGE_SIZE)} of memory in the segment
a-z: pages filled with a single block's content
' ': page is completely free
*: page if completely full with multiple blocks
0-9: page is partially full with tensors of multiple blocks (9 == 90% full)
(X% internal) - of the free memory, X% is free because we rounded the size of the allocation.
"""
def segsum(data):
r"""Visually reports how the allocator has filled its segments.
This printout can help debug fragmentation issues since free fragments
will appear as gaps in this printout. The amount of free space is reported
for each segment.
We distinguish between internal free memory which occurs because the
allocator rounds the allocation size, and external free memory, which are
the gaps between allocations in a segment.
Args:
data: snapshot dictionary created from _snapshot()
"""
out = io.StringIO()
out.write(f"Summary of segments >= {Bytes(PAGE_SIZE)} in size\n")
total_reserved = 0
total_allocated = 0
free_external = 0
free_internal = 0
for seg in sorted(
data["segments"], key=lambda x: (x["total_size"], calc_active(x))
):
total_reserved += seg["total_size"]
seg_free_external = 0
seg_free_internal = 0
seg_allocated = 0
all_ranges = []
boffset = 0
for b in seg["blocks"]:
active = b["state"] == "active_allocated"
if active:
_, allocated_size = _block_extra(b)
all_ranges.append((boffset, allocated_size, True))
seg_allocated += allocated_size
seg_free_internal += b["size"] - allocated_size
else:
seg_free_external += b["size"]
boffset += b["size"]
total_allocated += seg_allocated
free_external += seg_free_external
free_internal += seg_free_internal
nseg = (seg["total_size"] - 1) // PAGE_SIZE + 1
occupied = [" " for _ in range(nseg)]
frac = [0.0 for _ in range(nseg)]
active_size = 0
for i, (start_, size, active) in enumerate(all_ranges):
active_size += size
finish_ = start_ + size
start = start_ // PAGE_SIZE
finish = (finish_ - 1) // PAGE_SIZE + 1
m = chr(ord("a" if active else "A") + (i % 26))
for j in range(start, finish):
s = max(start_, j * PAGE_SIZE)
e = min(finish_, (j + 1) * PAGE_SIZE)
frac[j] += (e - s) / PAGE_SIZE
if occupied[j] != " ":
occupied[j] = "0123456789*"[int(frac[j] * 10)]
else:
occupied[j] = m
stream = "" if seg["stream"] == 0 else f", stream_{seg['stream']}"
body = "".join(occupied)
assert (
seg_free_external + seg_free_internal + seg_allocated == seg["total_size"]
)
stream = f" stream_{seg['stream']}" if seg["stream"] != 0 else ""
if seg["total_size"] >= PAGE_SIZE:
out.write(
f"[{body}] {Bytes(seg['total_size'])} allocated, "
f"{_report_free(seg_free_external, seg_free_internal)} free{stream}\n"
)
out.write(f"segments: {len(data['segments'])}\n")
out.write(f"total_reserved: {Bytes(total_reserved)}\n")
out.write(f"total_allocated: {Bytes(total_allocated)}\n")
out.write(f"total_free: {_report_free(free_external, free_internal)}\n")
out.write(legend)
assert free_internal + free_external + total_allocated == total_reserved
return out.getvalue()
def trace(data):
out = io.StringIO()
def format(entries):
segment_intervals: list = []
segment_addr_to_name = {}
allocation_addr_to_name = {}
free_names: list = []
next_name = 0
def _name():
nonlocal next_name
if free_names:
return free_names.pop()
r, m = next_name // 26, next_name % 26
next_name += 1
return f"{chr(ord('a') + m)}{'' if r == 0 else r}"
def find_segment(addr):
for name, saddr, size in segment_intervals:
if addr >= saddr and addr < saddr + size:
return name, saddr
for i, seg in enumerate(data["segments"]):
saddr = seg["address"]
size = seg["allocated_size"]
if addr >= saddr and addr < saddr + size:
return f"seg_{i}", saddr
return None, None
count = 0
out.write(f"{len(entries)} entries\n")
total_reserved = 0
for seg in data["segments"]:
total_reserved += seg["total_size"]
for count, e in enumerate(entries):
if e["action"] == "alloc":
addr, size = e["addr"], e["size"]
n = _name()
seg_name, seg_addr = find_segment(addr)
if seg_name is None:
seg_name = "MEM"
offset = addr
else:
offset = addr - seg_addr
out.write(f"{n} = {seg_name}[{offset}:{Bytes(size)}]\n")
allocation_addr_to_name[addr] = (n, size, count)
count += size
elif e["action"] == "free_requested":
addr, size = e["addr"], e["size"]
name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None))
out.write(f"del {name} # {Bytes(size)}\n")
elif e["action"] == "free_completed":
addr, size = e["addr"], e["size"]
count -= size
name, _, _ = allocation_addr_to_name.get(addr, (addr, None, None))
out.write(f"# free completed for {name} {Bytes(size)}\n")
if name in allocation_addr_to_name:
free_names.append(name)
del allocation_addr_to_name[name]
elif e["action"] == "segment_alloc":
addr, size = e["addr"], e["size"]
name = _name()
out.write(f"{name} = cudaMalloc({addr}, {Bytes(size)})\n")
segment_intervals.append((name, addr, size))
segment_addr_to_name[addr] = name
elif e["action"] == "segment_free":
addr, size = e["addr"], e["size"]
name = segment_addr_to_name.get(addr, addr)
out.write(f"cudaFree({name}) # {Bytes(size)}\n")
if name in segment_addr_to_name:
free_names.append(name)
del segment_addr_to_name[name]
elif e["action"] == "oom":
size = e["size"]
free = e["device_free"]
out.write(
f"raise OutOfMemoryError # {Bytes(size)} requested, {Bytes(free)} free in CUDA\n"
)
else:
out.write(f"{e}\n")
out.write(f"TOTAL MEM: {Bytes(count)}")
for i, d in enumerate(data["device_traces"]):
if d:
out.write(f"Device {i} ----------------\n")
format(d)
return out.getvalue()
_memory_viz_template = r"""
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<script type="module">
import {add_local_files} from "https://cdn.jsdelivr.net/gh/pytorch/pytorch@main/torch/utils/viz/MemoryViz.js"
const local_files = $SNAPSHOT
add_local_files(local_files, $VIZ_KIND)
</script>
</body>
"""
def _format_viz(data, viz_kind, device):
if device is not None:
warnings.warn(
"device argument is deprecated, plots now contain all device",
FutureWarning,
stacklevel=3,
)
buffer = pickle.dumps(data)
buffer += b"\x00" * (3 - len(buffer) % 3)
# Encode the buffer with base64
encoded_buffer = base64.b64encode(buffer).decode("utf-8")
json_format = json.dumps([{"name": "snapshot.pickle", "base64": encoded_buffer}])
return _memory_viz_template.replace("$VIZ_KIND", repr(viz_kind)).replace(
"$SNAPSHOT", json_format
)
def filter_alloc_free_pairs(data):
for dev_id in range(len(data["device_traces"])):
# set of indexes of trace events for alloc-free pairs
filterSet = set()
# map from addr to index of alloc event
allocMap = {}
# set of addrs from free_requested events
freeRequested = set()
for idx, event in enumerate(data["device_traces"][dev_id]):
if event["action"] == "alloc":
allocMap[event["addr"]] = idx
elif event["action"] == "free_requested":
freeRequested.add(event["addr"])
if allocMap.get(event["addr"]) is not None:
filterSet.add(idx)
filterSet.add(allocMap[event["addr"]])
allocMap.pop(event["addr"])
elif event["action"] == "free_completed":
if event["addr"] in freeRequested:
freeRequested.remove(event["addr"])
filterSet.add(idx)
else:
print(f"free_completed without free_requested: {event}")
# Remove events whose index is in filterSet
if filterSet:
# Create a new list excluding events with indices in filterSet
data["device_traces"][dev_id] = [
event
for idx, event in enumerate(data["device_traces"][dev_id])
if idx not in filterSet
]
return data
def trace_plot(data, device=None, plot_segments=False, filter_freed=False):
"""Generate a visualization over time of the memory usage recorded by the trace as an html file.
Args:
data: Memory snapshot as generated from torch.cuda.memory._snapshot()
device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations.
plot_segments (bool, optional): Plots memory returned from cudaMalloc, rather than individual allocations.
Defaults to False.
filter_freed (bool, optional): Filter out alloc-free paired events to only plot allocations that are not freed yet.
Defaults to False to plot all trace events.
Returns:
str: HTML of visualization
"""
if filter_freed:
data = filter_alloc_free_pairs(data)
return _format_viz(
data,
"Active Memory Timeline"
if not plot_segments
else "Active Cached Memory Timeline",
device,
)
def _profile_to_snapshot(profile):
import torch
from torch._C._profiler import _EventType
from torch.profiler._memory_profiler import Action, TensorKey
memory_profile = profile._memory_profile()
allocation_stacks = {}
for event in memory_profile._op_tree.sorted_nodes:
if event.tag == _EventType.Allocation:
parent = event.parent
python_parents = []
while parent:
if parent.tag in (_EventType.PyCall, _EventType.PyCCall):
python_parents.append(parent)
parent = parent.parent
key = TensorKey.from_allocation(event.extra_fields)
# Corner case: If allocation doesn't have an ID (can't prove it was used as a Tensor)
# key will be None. I should add some way to identify these, I just haven't yet.
if key and event.extra_fields.alloc_size > 0:
allocation_stacks[key] = python_parents
device_count = torch.cuda.device_count()
snapshot: dict[str, list[Any]] = {
"device_traces": [[] for _ in range(device_count + 1)],
"segments": [
{
"device": device,
"address": None,
"total_size": 0,
"stream": 0,
"blocks": [],
}
for device in range(device_count + 1)
],
}
def to_device(device):
if device.type == "cuda":
return device.index
else:
return device_count
def allocate(size, tensor_key, version, during_trace=True):
device = to_device(tensor_key.device)
addr = tensor_key.storage.ptr
seg = snapshot["segments"][device] # type: ignore[index]
if seg["address"] is None or seg["address"] > addr:
seg["address"] = addr
seg["total_size"] = max(
seg["total_size"], addr + size
) # record max addr for now, we will make it the size later
category = memory_profile._categories.get(tensor_key, version)
category = category.name.lower() if category is not None else "unknown"
stack = allocation_stacks.get(tensor_key, ())
stack = [{"filename": "none", "line": 0, "name": p.name} for p in stack]
r = {
"action": "alloc",
"addr": addr,
"size": size,
"stream": 0,
"frames": stack,
"category": category,
}
if during_trace:
snapshot["device_traces"][device].append(r)
return r
def free(alloc, device):
for e in ("free_requested", "free_completed"):
snapshot["device_traces"][device].append(
{
"action": e,
"addr": alloc["addr"],
"size": alloc["size"],
"stream": 0,
"frames": alloc["frames"],
}
)
kv_to_elem = {}
# create the device trace
for _time, action, (tensor_key, version), size in memory_profile.timeline:
if not isinstance(tensor_key, TensorKey):
continue
if action == Action.CREATE:
kv_to_elem[(tensor_key, version)] = allocate(size, tensor_key, version)
elif action == Action.DESTROY:
free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device))
elif action == Action.INCREMENT_VERSION:
free(kv_to_elem.pop((tensor_key, version)), to_device(tensor_key.device))
kv_to_elem[(tensor_key, version + 1)] = allocate(
size, tensor_key, version + 1
)
elif action == Action.PREEXISTING:
kv_to_elem[(tensor_key, version)] = allocate(
size, tensor_key, version, during_trace=False
)
# create the final snapshot state
blocks_at_end = [
(to_device(tensor_key.device), event["addr"], event["size"], event["frames"])
for (tensor_key, version), event in kv_to_elem.items()
]
for device, blocks in groupby(sorted(blocks_at_end), key=operator.itemgetter(0)):
seg = snapshot["segments"][device] # type: ignore[index]
last_addr = seg["address"]
for _, addr, size, frames in blocks:
if last_addr < addr:
seg["blocks"].append({"size": addr - last_addr, "state": "inactive"})
seg["blocks"].append(
{
"size": size,
"state": "active_allocated",
"requested_size": size,
"frames": frames,
}
)
last_addr = addr + size
if last_addr < seg["total_size"]:
seg["blocks"].append(
{"size": seg["total_size"] - last_addr, "state": "inactive"}
)
snapshot["segments"] = [seg for seg in snapshot["segments"] if seg["blocks"]] # type: ignore[attr-defined]
for seg in snapshot["segments"]: # type: ignore[attr-defined, name-defined, no-redef]
seg["total_size"] -= seg["address"]
if not seg["blocks"]:
seg["blocks"].append({"size": seg["total_size"], "state": "inactive"})
return snapshot
def profile_plot(profile, device=None):
"""Generate a visualization over time of the memory usage recorded by kineto memory profiling as an html file.
Args:
profile: profile as generated by `torch.profiler.profile(profile_memory=True)`
device (torch.device, optional): Generate the trace for this device, needed if multiple devices have allocations.
Returns:
str: HTML of visualization
"""
snapshot = _profile_to_snapshot(profile)
return _format_viz(snapshot, "Active Memory Timeline", device)
def segment_plot(data: Any, device=None):
return _format_viz(data, "Allocator State History", device)
if __name__ == "__main__":
import os.path
thedir = os.path.realpath(os.path.dirname(__file__))
if thedir in sys.path:
# otherwise we find cuda/random.py as random...
sys.path.remove(thedir)
import argparse
fn_name = "torch.cuda.memory._snapshot()"
pickled = f"pickled memory statistics from {fn_name}"
parser = argparse.ArgumentParser(
description=f"Visualize memory dumps produced by {fn_name}"
)
subparsers = parser.add_subparsers(dest="action")
def _output(p):
p.add_argument(
"-o",
"--output",
default="output.svg",
help="flamegraph svg (default: output.svg)",
)
description = "Prints overall allocation statistics and a visualization of how the allocators segments are currently filled."
stats_a = subparsers.add_parser("stats", description=description)
stats_a.add_argument("input", help=pickled)
description = "Prints buffer of the most recent allocation events embedded in the snapshot in a Pythonic style."
trace_a = subparsers.add_parser("trace", description=description)
trace_a.add_argument("input", help=pickled)
description = "Generate a flamegraph that visualizes what memory is stored in each allocator segment (aka block)"
segments_a = subparsers.add_parser("segments", description=description)
segments_a.add_argument("input", help=pickled)
_output(segments_a)
description = (
"Generate a flamegraph the program locations contributing to CUDA memory usage."
)
memory_a = subparsers.add_parser("memory", description=description)
memory_a.add_argument("input", help=pickled)
_output(memory_a)
description = (
"Generate a flamegraph that shows segments (aka blocks) that have been added "
"or removed between two different memorys snapshots."
)
compare_a = subparsers.add_parser("compare", description=description)
compare_a.add_argument("before", help=pickled)
compare_a.add_argument("after", help=pickled)
_output(compare_a)
plots = (
(
"trace_plot",
"Generate a visualization over time of the memory usage recorded by the trace as an html file.",
),
(
"segment_plot",
"Visualize how allocations are packed into allocator segments at each point in a trace as an html file.",
),
)
for cmd, description in plots:
trace_plot_a = subparsers.add_parser(cmd, description=description)
trace_plot_a.add_argument("input", help=pickled)
help = "visualize trace from this device (default: chooses the only device with trace info or errors)"
trace_plot_a.add_argument("-d", "--device", type=int, default=None, help=help)
help = "path to save the visualization(default: output.html)"
trace_plot_a.add_argument("-o", "--output", default="output.html", help=help)
if cmd == "trace_plot":
help = "visualize change to segments rather than individual allocations"
trace_plot_a.add_argument(
"-s", "--segments", action="store_true", help=help
)
help = (
"filter out allocation-free pairs to only visualize the allocations that are not freed yet;"
"useful to reduce the number of events for large traces for debugging OOM"
)
trace_plot_a.add_argument(
"-f", "--filter_freed", action="store_true", help=help
)
args = parser.parse_args()
def _read(name):
if name == "-":
data = pickle.load(sys.stdin.buffer)
else:
with open(name, "rb") as f:
data = pickle.load(f)
if isinstance(data, list): # segments only...
data = {"segments": data, "traces": []}
return data
def _write(name, data):
with open(name, "w") as f:
f.write(data)
if args.action == "segments":
data = _read(args.input)
_write(args.output, segments(data))
elif args.action == "memory":
data = _read(args.input)
_write(args.output, memory(data))
elif args.action == "stats":
data = _read(args.input)
print(segsum(data))
elif args.action == "trace":
data = _read(args.input)
print(trace(data))
elif args.action == "compare":
before = _read(args.before)
after = _read(args.after)
_write(args.output, compare(before, after))
elif args.action == "trace_plot":
data = _read(args.input)
_write(
args.output,
trace_plot(
data,
device=args.device,
plot_segments=args.segments,
filter_freed=args.filter_freed,
),
)
elif args.action == "segment_plot":
data = _read(args.input)
_write(args.output, segment_plot(data, device=args.device))
| Bytes |
python | encode__django-rest-framework | tests/test_renderers.py | {
"start": 17624,
"end": 20627
} | class ____(TestCase):
"""
Default USE_TZ is True.
Default TIME_ZONE is 'America/Chicago'.
"""
def _assert_datetime_rendering(self, appointment, expected, datetimefield_kwargs=None):
datetimefield_kwargs = datetimefield_kwargs or {}
class TestSerializer(serializers.Serializer):
appointment = serializers.DateTimeField(**datetimefield_kwargs)
serializer = TestSerializer(data={"appointment": appointment})
serializer.is_valid()
renderer = HTMLFormRenderer()
field = serializer['appointment']
rendered = renderer.render_field(field, {})
expected_html = (
'<input name="appointment" class="form-control" '
f'type="datetime-local" value="{expected}">'
)
self.assertInHTML(expected_html, rendered)
def test_datetime_field_rendering_milliseconds(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 30, 345678), "2024-12-24T00:55:30.345"
)
def test_datetime_field_rendering_no_seconds_and_no_milliseconds(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 0, 0), "2024-12-24T00:55:00.000"
)
def test_datetime_field_rendering_with_format_as_none(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 30, 345678),
"2024-12-24T00:55:30.345",
{"format": None}
)
def test_datetime_field_rendering_with_format(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 30, 345678),
"2024-12-24T00:55:00.000",
{"format": "%a %d %b %Y, %I:%M%p"}
)
# New project templates default to 'UTC'.
@override_settings(TIME_ZONE='UTC')
def test_datetime_field_rendering_utc(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 30, 345678),
"2024-12-24T00:55:30.345"
)
@override_settings(REST_FRAMEWORK={'DATETIME_FORMAT': '%a %d %b %Y, %I:%M%p'})
def test_datetime_field_rendering_with_custom_datetime_format(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 30, 345678),
"2024-12-24T00:55:00.000"
)
@override_settings(REST_FRAMEWORK={'DATETIME_FORMAT': None})
def test_datetime_field_rendering_datetime_format_is_none(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 30, 345678),
"2024-12-24T00:55:30.345"
)
# Enforce it in True because in Django versions under 4.2 was False by default.
@override_settings(USE_TZ=True)
def test_datetime_field_rendering_timezone_aware_datetime(self):
self._assert_datetime_rendering(
datetime(2024, 12, 24, 0, 55, 30, 345678, tzinfo=ZoneInfo('Asia/Tokyo')), # +09:00
"2024-12-23T09:55:30.345" # Rendered in -06:00
)
| TestDateTimeFieldHTMLFormRender |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/check/builder.py | {
"start": 4698,
"end": 17311
} | class ____(Generic[T]): ...
# use a sample to avoid direct private imports (_GenericAlias)
_SampleGeneric = _GenClass[str]
def _coerce_type(
ttype: Optional[TypeOrTupleOfTypes],
eval_ctx: EvalContext,
) -> Optional[TypeOrTupleOfTypes]:
# coerce input type in to the type we want to pass to the check call
# Any type translates to passing None for the of_type argument
if ttype is Any or ttype is None:
return None
# assume naked strings should be ForwardRefs
if isinstance(ttype, str):
return eval_ctx.eval_forward_ref(ForwardRef(ttype))
if isinstance(ttype, ForwardRef):
return eval_ctx.eval_forward_ref(ttype)
if isinstance(ttype, TypeVar):
return _coerce_type(ttype.__bound__, eval_ctx) if ttype.__bound__ else None
origin = get_origin(ttype)
args = get_args(ttype)
if _is_annotated(origin, args):
_process_annotated(ttype, args, eval_ctx)
return _coerce_type(args[0], eval_ctx)
# cant do isinstance against TypeDict (and we cant subclass check for it)
# so just coerce any dict subclasses in to dict
if isinstance(ttype, type) and issubclass(ttype, dict):
return dict
# Unions should become a tuple of types to pass to the of_type argument
# ultimately used as second arg in isinstance(target, tuple_of_types)
if origin in (UnionType, Union):
union_types = get_args(ttype)
coerced_types = []
for t in union_types:
# coerce all the inner types
coerced = _coerce_type(t, eval_ctx)
if coerced is None or isinstance(coerced, tuple):
failed(f"Unable to coerce Union member {t} in {ttype}")
coerced_types.append(coerced)
return tuple(t for t in coerced_types)
return ttype
def _container_pair_args(
args: tuple[type, ...], eval_ctx
) -> tuple[Optional[TypeOrTupleOfTypes], Optional[TypeOrTupleOfTypes]]:
# process tuple of types as if its two arguments to a container type
if len(args) == 2:
return _coerce_type(args[0], eval_ctx), _coerce_type(args[1], eval_ctx)
return None, None
def _container_single_arg(
args: tuple[type, ...], eval_ctx: EvalContext
) -> Optional[TypeOrTupleOfTypes]:
# process tuple of types as if its the single argument to a container type
if len(args) == 1:
return _coerce_type(args[0], eval_ctx)
return None
def _name(target: Optional[TypeOrTupleOfTypes]) -> str:
# turn a type or tuple of types in to its string representation for printing
if target is None:
return "None"
if target is NoneType:
return f"{INJECTED_CHECK_VAR}.NoneType"
if isinstance(target, tuple):
return f"({', '.join(_name(tup_type) for tup_type in target)})"
if hasattr(target, "__name__"):
return target.__name__
if hasattr(target, "_name"):
n = getattr(target, "_name")
if n is not None:
return n
# If a generic falls through to here, just target the base class
# and ignore the type hint (for now).
# Use a sample generic to avoid custom py version handling
if target.__class__ is _SampleGeneric.__class__:
return _name(get_origin(target))
failed(f"Could not calculate string name for {target}")
def _is_annotated(origin, args):
# 3.9+: origin is Annotated, 3.8: origin == args[0]
return (origin is Annotated and args) or (len(args) == 1 and args[0] == origin)
def _process_annotated(ttype, args, eval_ctx: EvalContext):
target_type = args[0]
# 3.9+: args[1:] has Annotated args, 3.8: its in __metadata__
annotated_args = getattr(ttype, "__metadata__", args[1:])
for arg in annotated_args:
if isinstance(arg, ImportFrom):
if isinstance(target_type, ForwardRef):
eval_ctx.register_lazy_import(args[0].__forward_arg__, arg.module)
elif isinstance(target_type, str):
eval_ctx.register_lazy_import(args[0], arg.module)
else:
failed(
f"ImportFrom in Annotated expected to be used with string or ForwardRef only, got {args[0]}",
)
def build_check_call_str(
ttype: type,
name: str,
eval_ctx: EvalContext,
) -> str:
from dagster_shared.record import is_record
# assumes this module is in global/local scope as check
origin = get_origin(ttype)
args = get_args(ttype)
# scalars
if origin is None:
if ttype is str:
return f'{name} if isinstance({name}, str) else {INJECTED_CHECK_VAR}.str_param({name}, "{name}")'
elif ttype is float:
return f'{name} if isinstance({name}, float) else {INJECTED_CHECK_VAR}.float_param({name}, "{name}")'
elif ttype is int:
return f'{name} if isinstance({name}, int) else {INJECTED_CHECK_VAR}.int_param({name}, "{name}")'
elif ttype is bool:
return f'{name} if isinstance({name}, bool) else {INJECTED_CHECK_VAR}.bool_param({name}, "{name}")'
elif ttype is Any:
return name # no-op
# fallback to inst
inst_type = _coerce_type(ttype, eval_ctx)
if inst_type:
it = _name(inst_type)
return f'{name} if isinstance({name}, {it}) else {INJECTED_CHECK_VAR}.inst_param({name}, "{name}", {it})'
else:
return name # no-op
elif origin is Literal:
return f'{INJECTED_CHECK_VAR}.literal_param({name}, "{name}", {args})'
elif origin is Callable or origin is collections.abc.Callable:
return f'{INJECTED_CHECK_VAR}.callable_param({name}, "{name}")'
else:
if _is_annotated(origin, args):
_process_annotated(ttype, args, eval_ctx)
return build_check_call_str(args[0], f"{name}", eval_ctx)
pair_left, pair_right = _container_pair_args(args, eval_ctx)
single = _container_single_arg(args, eval_ctx)
# containers
if origin is list:
return f'{INJECTED_CHECK_VAR}.list_param({name}, "{name}", {_name(single)})'
elif origin is dict:
return f'{INJECTED_CHECK_VAR}.dict_param({name}, "{name}", {_name(pair_left)}, {_name(pair_right)})'
elif origin is set:
return f'{INJECTED_CHECK_VAR}.set_param({name}, "{name}", {_name(single)})'
elif origin is collections.abc.Sequence:
return f'{INJECTED_CHECK_VAR}.sequence_param({name}, "{name}", {_name(single)})'
elif origin is collections.abc.Iterable:
return f'{INJECTED_CHECK_VAR}.iterable_param({name}, "{name}", {_name(single)})'
elif origin is collections.abc.Mapping:
return f'{INJECTED_CHECK_VAR}.mapping_param({name}, "{name}", {_name(pair_left)}, {_name(pair_right)})'
elif origin is collections.abc.Set:
return f'{INJECTED_CHECK_VAR}.set_param({name}, "{name}", {_name(single)})'
elif origin in (UnionType, Union):
# optional
if pair_right is type(None):
inner_origin = get_origin(pair_left)
# optional scalar
if inner_origin is None:
if pair_left is str:
return f'{name} if {name} is None or isinstance({name}, str) else {INJECTED_CHECK_VAR}.opt_str_param({name}, "{name}")'
elif pair_left is float:
return f'{name} if {name} is None or isinstance({name}, float) else {INJECTED_CHECK_VAR}.opt_float_param({name}, "{name}")'
elif pair_left is int:
return f'{name} if {name} is None or isinstance({name}, int) else {INJECTED_CHECK_VAR}.opt_int_param({name}, "{name}")'
elif pair_left is bool:
return f'{name} if {name} is None or isinstance({name}, bool) else {INJECTED_CHECK_VAR}.opt_bool_param({name}, "{name}")'
# fallback to opt_inst
inst_type = _coerce_type(pair_left, eval_ctx)
it = _name(inst_type)
if inst_type:
return f'{name} if {name} is None or isinstance({name}, {it}) else {INJECTED_CHECK_VAR}.opt_inst_param({name}, "{name}", {it})'
else:
return name # no-op
# optional container
else:
inner_args = get_args(pair_left)
inner_pair_left, inner_pair_right = _container_pair_args(inner_args, eval_ctx)
inner_single = _container_single_arg(inner_args, eval_ctx)
if inner_origin is list:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_nullable_list_param({name}, "{name}", {_name(inner_single)})'
elif inner_origin is dict:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_nullable_dict_param({name}, "{name}", {_name(inner_pair_left)}, {_name(inner_pair_right)})'
elif inner_origin is set:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_nullable_set_param({name}, "{name}", {_name(inner_single)})'
elif inner_origin is collections.abc.Sequence:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_nullable_sequence_param({name}, "{name}", {_name(inner_single)})'
elif inner_origin is collections.abc.Iterable:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_nullable_iterable_param({name}, "{name}", {_name(inner_single)})'
elif inner_origin is collections.abc.Mapping:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_nullable_mapping_param({name}, "{name}", {_name(inner_pair_left)}, {_name(inner_pair_right)})'
elif inner_origin is collections.abc.Set:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_nullable_set_param({name}, "{name}", {_name(inner_single)})'
elif is_record(inner_origin):
it = _name(inner_origin)
return f'{name} if {name} is None or isinstance({name}, {it}) else {INJECTED_CHECK_VAR}.opt_inst_param({name}, "{name}", {it})'
elif inner_origin is Callable or inner_origin is collections.abc.Callable:
return f'{name} if {name} is None else {INJECTED_CHECK_VAR}.opt_callable_param({name}, "{name}")'
# union
else:
tuple_types = _coerce_type(ttype, eval_ctx)
if tuple_types is not None:
tt_name = _name(tuple_types)
return f'{name} if isinstance({name}, {tt_name}) else {INJECTED_CHECK_VAR}.inst_param({name}, "{name}", {tt_name})'
# origin is some other type, assume ttype is Generic representation
else:
inst_type = _coerce_type(ttype, eval_ctx)
if inst_type:
it = _name(inst_type)
return f'{name} if isinstance({name}, {it}) else {INJECTED_CHECK_VAR}.inst_param({name}, "{name}", {it})'
failed(f"Unhandled {ttype}")
def build_args_and_assignment_strs(
fn_args: Iterable[str],
defaults: Mapping[str, Any],
kw_only: bool,
) -> tuple[str, str]:
"""Utility function to create the arguments to the function as well as any
assignment calls that need to happen for default values.
"""
args = []
set_calls = []
for arg in fn_args:
if arg in defaults:
default = defaults[arg]
if default is None:
args.append(f"{arg} = None")
# dont share class instance of default empty containers
elif default == []:
args.append(f"{arg} = None")
set_calls.append(f"{arg} = {arg} if {arg} is not None else []")
elif default == {}:
args.append(f"{arg} = None")
set_calls.append(f"{arg} = {arg} if {arg} is not None else {'{}'}")
# fallback to direct reference if unknown
else:
args.append(f"{arg} = {INJECTED_DEFAULT_VALS_LOCAL_VAR}['{arg}']")
else:
args.append(arg)
args_str = ""
if args:
args_str = f", {'*,' if kw_only else ''} {', '.join(args)}"
set_calls_str = ""
if set_calls:
set_calls_str = "\n ".join(set_calls)
return args_str, set_calls_str
| _GenClass |
python | Pylons__pyramid | tests/test_paster.py | {
"start": 6131,
"end": 6313
} | class ____:
application_url = 'http://example.com:5432'
script_name = ''
def __init__(self, environ):
self.environ = environ
self.matchdict = {}
| DummyRequest |
python | huggingface__transformers | tests/models/bit/test_modeling_bit.py | {
"start": 10331,
"end": 10593
} | class ____(BackboneTesterMixin, unittest.TestCase):
all_model_classes = (BitBackbone,) if is_torch_available() else ()
config_class = BitConfig
has_attentions = False
def setUp(self):
self.model_tester = BitModelTester(self)
| BitBackboneTest |
python | wandb__wandb | wandb/vendor/pygments/lexers/css.py | {
"start": 29269,
"end": 31076
} | class ____(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'(@media)(\s+)', bygroups(Keyword, Text), 'value'),
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
# TODO: broken, and prone to infinite loops.
# (r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
# (r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
default('selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
default('#pop'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
| ScssLexer |
python | pytest-dev__pytest | testing/test_capture.py | {
"start": 28251,
"end": 28913
} | class ____:
def test_text(self) -> None:
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self) -> None:
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self) -> None:
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
| TestCaptureIO |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 5189,
"end": 5411
} | class ____(StrEnum):
EVENTS = "Events"
USERS_AFFECTED = "Users Affected"
STATE = "State"
FIRST_SEEN = "First Seen"
APPROX_START_TIME = "Approx. Start Time"
@dataclass(frozen=True)
| NotificationContextField |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 104988,
"end": 105055
} | class ____(RRTests):
options = {"rr_method": "pivot"}
| TestRRPivot |
python | run-llama__llama_index | llama-index-integrations/program/llama-index-program-lmformatenforcer/llama_index/program/lmformatenforcer/base.py | {
"start": 499,
"end": 3738
} | class ____(BaseLLMFunctionProgram):
"""
A lm-format-enforcer-based function that returns a pydantic model.
In LMFormatEnforcerPydanticProgram, prompt_template_str can also have a {json_schema} parameter
that will be automatically filled by the json_schema of output_cls.
Note: this interface is not yet stable.
"""
def __init__(
self,
output_cls: Type[BaseModel],
prompt_template_str: str,
llm: Optional[Union[LlamaCPP, HuggingFaceLLM]] = None,
verbose: bool = False,
):
try:
import lmformatenforcer
except ImportError as e:
raise ImportError(
"lm-format-enforcer package not found."
"please run `pip install lm-format-enforcer`"
) from e
if llm is None:
try:
from llama_index.core.llms import LlamaCPP
llm = LlamaCPP()
except ImportError as e:
raise ImportError(
"llama.cpp package not found."
"please run `pip install llama-cpp-python`"
) from e
self.llm = llm
self._prompt_template_str = prompt_template_str
self._output_cls = output_cls
self._verbose = verbose
json_schema_parser = lmformatenforcer.JsonSchemaParser(self.output_cls.schema())
self._token_enforcer_fn = build_lm_format_enforcer_function(
self.llm, json_schema_parser
)
@classmethod
def from_defaults(
cls,
output_cls: Type[BaseModel],
prompt_template_str: Optional[str] = None,
prompt: Optional[PromptTemplate] = None,
llm: Optional[Union["LlamaCPP", "HuggingFaceLLM"]] = None,
**kwargs: Any,
) -> "BaseLLMFunctionProgram":
"""From defaults."""
if prompt is None and prompt_template_str is None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None and prompt_template_str is not None:
raise ValueError("Must provide either prompt or prompt_template_str.")
if prompt is not None:
prompt_template_str = prompt.template
prompt_template_str = cast(str, prompt_template_str)
return cls(
output_cls,
prompt_template_str,
llm=llm,
**kwargs,
)
@property
def output_cls(self) -> Type[BaseModel]:
return self._output_cls
def __call__(
self,
llm_kwargs: Optional[Dict[str, Any]] = None,
*args: Any,
**kwargs: Any,
) -> BaseModel:
llm_kwargs = llm_kwargs or {}
# While the format enforcer is active, any calls to the llm will have the format enforced.
with activate_lm_format_enforcer(self.llm, self._token_enforcer_fn):
json_schema_str = json.dumps(self.output_cls.schema())
full_str = self._prompt_template_str.format(
*args, **kwargs, json_schema=json_schema_str
)
output = self.llm.complete(full_str, **llm_kwargs)
text = output.text
return self.output_cls.parse_raw(text)
| LMFormatEnforcerPydanticProgram |
python | jina-ai__jina | jina/serve/runtimes/head/request_handling.py | {
"start": 1374,
"end": 23196
} | class ____(MonitoringRequestMixin):
"""
Class that handles the requests arriving to the head and the results extracted from the requests future.
:param metrics_registry: optional metrics registry for prometheus. Used if we need to expose metrics from the executor or from the data request handler
:param runtime_name: optional runtime_name that will be registered during monitoring
"""
DEFAULT_POLLING = PollingType.ANY
def __init__(
self,
args: 'argparse.Namespace',
logger: 'JinaLogger',
metrics_registry: Optional['CollectorRegistry'] = None,
meter=None,
runtime_name: Optional[str] = None,
aio_tracing_client_interceptors=None,
tracing_client_interceptor=None,
**kwargs,
):
if args.name is None:
args.name = ''
self.logger = logger
self.args = args
self.meter = meter
self.metrics_registry = metrics_registry
self.name = args.name
self._deployment_name = os.getenv('JINA_DEPLOYMENT_NAME', 'worker')
self.aio_tracing_client_interceptors = aio_tracing_client_interceptors
self.tracing_client_interceptor = tracing_client_interceptor
self.connection_pool = GrpcConnectionPool(
runtime_name=self.name,
logger=self.logger,
compression=args.compression,
metrics_registry=self.metrics_registry,
meter=self.meter,
aio_tracing_client_interceptors=self.aio_tracing_client_interceptors,
tracing_client_interceptor=self.tracing_client_interceptor,
channel_options=self.args.grpc_channel_options,
)
self._retries = self.args.retries
polling = getattr(args, 'polling', self.DEFAULT_POLLING.name)
try:
# try loading the polling args as json
endpoint_polling = json.loads(polling)
# '*' is used a wildcard and will match all endpoints, except /index, /search and explicitly defined endpoins
default_polling = (
PollingType.from_string(endpoint_polling['*'])
if '*' in endpoint_polling
else self.DEFAULT_POLLING
)
self._polling = self._default_polling_dict(default_polling)
for endpoint in endpoint_polling:
self._polling[endpoint] = PollingType(
endpoint_polling[endpoint]
if type(endpoint_polling[endpoint]) == int
else PollingType.from_string(endpoint_polling[endpoint])
)
except (ValueError, TypeError):
# polling args is not a valid json, try interpreting as a polling enum type
default_polling = (
polling
if type(polling) == PollingType
else PollingType.from_string(polling)
)
self._polling = self._default_polling_dict(default_polling)
if hasattr(args, 'connection_list') and args.connection_list:
connection_list = json.loads(args.connection_list)
for shard_id in connection_list:
shard_connections = connection_list[shard_id]
if isinstance(shard_connections, str):
self.connection_pool.add_connection(
deployment=self._deployment_name,
address=shard_connections,
shard_id=int(shard_id),
)
else:
for connection in shard_connections:
self.connection_pool.add_connection(
deployment=self._deployment_name,
address=connection,
shard_id=int(shard_id),
)
self.uses_before_address = args.uses_before_address
self.timeout_send = args.timeout_send
if self.timeout_send:
self.timeout_send /= 1e3 # convert ms to seconds
if self.uses_before_address:
self.connection_pool.add_connection(
deployment='uses_before', address=self.uses_before_address
)
self.uses_after_address = args.uses_after_address
if self.uses_after_address:
self.connection_pool.add_connection(
deployment='uses_after', address=self.uses_after_address
)
self._reduce = not args.no_reduce
super().__init__(metrics_registry, meter, runtime_name)
self.logger = logger
self._executor_endpoint_mapping = None
self._gathering_endpoints = False
self.runtime_name = runtime_name
self._pydantic_models_by_endpoint = None
self.endpoints_discovery_stop_event = asyncio.Event()
self.endpoints_discovery_task = None
if docarray_v2:
self.endpoints_discovery_task = asyncio.create_task(
self._get_endpoints_from_workers(
connection_pool=self.connection_pool,
name=self._deployment_name,
retries=self._retries,
stop_event=self.endpoints_discovery_stop_event,
)
)
def _default_polling_dict(self, default_polling):
return defaultdict(
lambda: default_polling,
{'/search': PollingType.ALL, '/index': PollingType.ANY},
)
async def _gather_worker_tasks(
self,
requests,
connection_pool,
deployment_name,
polling_type,
timeout_send,
retries,
):
worker_send_tasks = connection_pool.send_requests(
requests=requests,
deployment=deployment_name,
polling_type=polling_type,
timeout=timeout_send,
retries=retries,
)
all_worker_results = await asyncio.gather(*worker_send_tasks)
worker_results = list(
filter(lambda x: isinstance(x, Tuple), all_worker_results)
)
exceptions = list(
filter(
lambda x: issubclass(type(x), BaseException),
all_worker_results,
)
)
total_shards = len(worker_send_tasks)
failed_shards = len(exceptions)
if failed_shards:
self.logger.warning(f'{failed_shards} shards out of {total_shards} failed.')
return worker_results, exceptions, total_shards, failed_shards
@staticmethod
def _merge_metadata(
metadata,
uses_after_metadata,
uses_before_metadata,
total_shards,
failed_shards,
):
merged_metadata = {}
if uses_before_metadata:
for key, value in uses_before_metadata:
merged_metadata[key] = value
for meta in metadata:
for key, value in meta:
merged_metadata[key] = value
if uses_after_metadata:
for key, value in uses_after_metadata:
merged_metadata[key] = value
merged_metadata['total_shards'] = str(total_shards)
merged_metadata['failed_shards'] = str(failed_shards)
return merged_metadata
async def _handle_data_request(
self,
requests,
connection_pool,
uses_before_address,
uses_after_address,
timeout_send,
retries,
reduce,
polling_type,
deployment_name,
endpoint,
) -> Tuple['DataRequest', Dict]:
for req in requests:
if docarray_v2:
req.document_array_cls = DocList[AnyDoc]
self._update_start_request_metrics(req)
if self._pydantic_models_by_endpoint is None and docarray_v2:
try:
await self.endpoints_discovery_task
except:
raise
WorkerRequestHandler.merge_routes(requests)
uses_before_metadata = None
if uses_before_address:
result = await connection_pool.send_requests_once(
requests,
deployment='uses_before',
timeout=timeout_send,
retries=retries,
)
if issubclass(type(result), BaseException):
raise result
else:
response, uses_before_metadata = result
requests = [response]
(
worker_results,
exceptions,
total_shards,
failed_shards,
) = await self._gather_worker_tasks(
requests=requests,
deployment_name=deployment_name,
timeout_send=timeout_send,
connection_pool=connection_pool,
polling_type=polling_type,
retries=retries,
)
if len(worker_results) == 0:
if exceptions:
# raise the underlying error first
self._update_end_failed_requests_metrics()
raise exceptions[0]
raise RuntimeError(
f'Head {self.runtime_name} did not receive a response when sending message to worker pods'
)
worker_results, metadata = zip(*worker_results)
response_request = worker_results[0]
found = False
if docarray_v2:
check_endpoint = endpoint
if endpoint not in self._pydantic_models_by_endpoint:
check_endpoint = __default_endpoint__
model = self._pydantic_models_by_endpoint[check_endpoint]['output']
for i, worker_result in enumerate(worker_results):
if docarray_v2:
worker_result.document_array_cls = DocList[model]
if not found and worker_result.status.code == jina_pb2.StatusProto.SUCCESS:
response_request = worker_result
found = True
uses_after_metadata = None
if uses_after_address:
result = await connection_pool.send_requests_once(
worker_results,
deployment='uses_after',
timeout=timeout_send,
retries=retries,
)
if issubclass(type(result), BaseException):
self._update_end_failed_requests_metrics()
raise result
else:
response_request, uses_after_metadata = result
elif len(worker_results) > 1 and reduce:
response_request = WorkerRequestHandler.reduce_requests(worker_results)
elif len(worker_results) > 1 and not reduce:
# worker returned multiple responses, but the head is configured to skip reduction
# just concatenate the docs in this case
response_request.data.docs = WorkerRequestHandler.get_docs_from_request(
requests
)
merged_metadata = self._merge_metadata(
metadata,
uses_after_metadata,
uses_before_metadata,
total_shards,
failed_shards,
)
self._update_end_request_metrics(response_request)
return response_request, merged_metadata
def _get_endpoints_from_workers(
self, connection_pool: GrpcConnectionPool, name: str, retries: int, stop_event
):
from google.protobuf import json_format
async def task():
self.logger.debug(
f'starting get endpoints from workers task for deployment {name}'
)
while not stop_event.is_set():
try:
endpoints = await connection_pool.send_discover_endpoint(
name, retries=retries, shard_id=0, head=False
)
if endpoints is not None:
endp, _ = endpoints
schemas = json_format.MessageToDict(endp.schemas)
self._pydantic_models_by_endpoint = {}
models_created_by_name = {}
for endpoint, inner_dict in schemas.items():
input_model_name = inner_dict['input']['name']
input_model_schema = inner_dict['input']['model']
output_model_name = inner_dict['output']['name']
output_model_schema = inner_dict['output']['model']
if input_model_schema == legacy_doc_schema:
models_created_by_name[input_model_name] = (
LegacyDocumentJina
)
elif input_model_name not in models_created_by_name:
input_model = create_base_doc_from_schema(
input_model_schema, input_model_name, {}
)
models_created_by_name[input_model_name] = input_model
if output_model_name == legacy_doc_schema:
models_created_by_name[output_model_name] = (
LegacyDocumentJina
)
elif output_model_name not in models_created_by_name:
output_model = create_base_doc_from_schema(
output_model_schema, output_model_name, {}
)
models_created_by_name[output_model_name] = output_model
self._pydantic_models_by_endpoint[endpoint] = {
'input': models_created_by_name[input_model_name],
'output': models_created_by_name[output_model_name],
}
stop_event.set()
return
else:
await asyncio.sleep(0.1)
except Exception as exc:
self.logger.debug(
f'Exception raised from sending discover endpoint {exc}'
)
await asyncio.sleep(0.1)
return task()
def cancel_endpoint_discovery_from_workers_task(self):
"""Cancel endpoint_discovery_from_worker task if exists and is not completed. Cancellation is required if the Flow is being terminated before the
task is successful or hasn't reached the max timeout.
"""
if self.endpoints_discovery_task:
try:
if not self.endpoints_discovery_task.done():
self.logger.debug(f'Cancelling endpoint discovery task.')
self.endpoints_discovery_stop_event.set() # this event is useless if simply cancel
self.endpoints_discovery_task.cancel()
except Exception as ex:
self.logger.debug(
f'exception during endpoint discovery task cancellation: {ex}'
)
pass
async def close(self):
"""Close the data request handler, by closing the executor and the batch queues."""
self.logger.debug(f'Closing Request Handler')
self.cancel_endpoint_discovery_from_workers_task()
await self.connection_pool.close()
self.logger.debug(f'Request Handler closed')
async def process_single_data(self, request: DataRequest, context) -> DataRequest:
"""
Process the received requests and return the result as a new request
:param request: the data request to process
:param context: grpc context
:returns: the response request
"""
return await self.process_data([request], context)
def _handle_internalnetworkerror(self, err, context, response):
err_code = err.code()
if err_code == grpc.StatusCode.UNAVAILABLE:
context.set_details(
f'|Head: Failed to connect to worker (Executor) pod at address {err.dest_addr}. It may be down.'
)
elif err_code == grpc.StatusCode.DEADLINE_EXCEEDED:
context.set_details(
f'|Head: Connection to worker (Executor) pod at address {err.dest_addr} could be established, but timed out.'
)
elif err_code == grpc.StatusCode.NOT_FOUND:
context.set_details(
f'|Head: Connection to worker (Executor) pod at address {err.dest_addr} could be established, but resource was not found.'
)
context.set_code(err.code())
self.logger.error(f'Error while getting responses from Pods: {err.details()}')
if err.request_id:
response.header.request_id = err.request_id
return response
async def process_data(self, requests: List[DataRequest], context) -> DataRequest:
"""
Process the received data request and return the result as a new request
:param requests: the data requests to process
:param context: grpc context
:returns: the response request
"""
try:
endpoint = dict(context.invocation_metadata()).get('endpoint')
self.logger.debug(f'recv {len(requests)} DataRequest(s)')
response, metadata = await self._handle_data_request(
requests=requests,
connection_pool=self.connection_pool,
uses_before_address=self.uses_before_address,
uses_after_address=self.uses_after_address,
retries=self._retries,
reduce=self._reduce,
timeout_send=self.timeout_send,
polling_type=self._polling[endpoint],
deployment_name=self._deployment_name,
endpoint=endpoint,
)
context.set_trailing_metadata(metadata.items())
return response
except (
InternalNetworkError
) as err: # can't connect, Flow broken, interrupt the streaming through gRPC error mechanism
return self._handle_internalnetworkerror(
err=err, context=context, response=Response()
)
except (
RuntimeError,
Exception,
) as ex: # some other error, keep streaming going just add error info
self.logger.error(
(
f'{ex!r}'
+ f'\n add "--quiet-error" to suppress the exception details'
if not self.args.quiet_error
else ''
),
exc_info=not self.args.quiet_error,
)
requests[0].add_exception(ex, executor=None)
context.set_trailing_metadata((('is-error', 'true'),))
return requests[0]
async def endpoint_discovery(self, empty, context) -> jina_pb2.EndpointsProto:
"""
Uses the connection pool to send a discover endpoint call to the workers
:param empty: The service expects an empty protobuf message
:param context: grpc context
:returns: the response request
"""
self.logger.debug('got an endpoint discovery request')
response = jina_pb2.EndpointsProto()
try:
if self.uses_before_address:
(
uses_before_response,
_,
) = await self.connection_pool.send_discover_endpoint(
deployment='uses_before', head=False
)
response.endpoints.extend(uses_before_response.endpoints)
if self.uses_after_address:
(
uses_after_response,
_,
) = await self.connection_pool.send_discover_endpoint(
deployment='uses_after', head=False
)
response.endpoints.extend(uses_after_response.endpoints)
worker_response, _ = await self.connection_pool.send_discover_endpoint(
deployment=self._deployment_name, head=False
)
response.endpoints.extend(worker_response.endpoints)
response.schemas.update(worker_response.schemas)
except (
InternalNetworkError
) as err: # can't connect, Flow broken, interrupt the streaming through gRPC error mechanism
return self._handle_internalnetworkerror(
err=err, context=context, response=response
)
return response
async def _status(self, empty, context) -> jina_pb2.JinaInfoProto:
"""
Process the the call requested and return the JinaInfo of the Runtime
:param empty: The service expects an empty protobuf message
:param context: grpc context
:returns: the response request
"""
self.logger.debug('recv _status request')
infoProto = jina_pb2.JinaInfoProto()
version, env_info = get_full_version()
for k, v in version.items():
infoProto.jina[k] = str(v)
for k, v in env_info.items():
infoProto.envs[k] = str(v)
return infoProto
async def stream(
self, request_iterator, context=None, *args, **kwargs
) -> AsyncIterator['Request']:
"""
stream requests from client iterator and stream responses back.
:param request_iterator: iterator of requests
:param context: context of the grpc call
:param args: positional arguments
:param kwargs: keyword arguments
:yield: responses to the request
"""
async for request in request_iterator:
yield await self.process_data([request], context)
Call = stream
| HeaderRequestHandler |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_engine.py | {
"start": 100,
"end": 1089
} | class ____(TestCase):
def test_save_jpeg_rgba(self):
source = Image.new('RGBA', (100, 100), (255, 255, 255, 0))
data = engine.save_pil_image(source, filename='test.jpg')
with Image.open(data) as img:
self.assertEqual(img.mode, 'RGB')
def test_save_jpeg_la(self):
source = Image.new('LA', (100, 100), (255, 0))
data = engine.save_pil_image(source, filename='test.jpg')
with Image.open(data) as img:
self.assertEqual(img.mode, 'L')
def test_save_with_icc_profile(self):
source = Image.new('RGB', (100, 100), (255, 255, 255))
profile = ImageCms.createProfile('sRGB')
source.save('source.jpg', icc_profile=ImageCms.ImageCmsProfile(profile).tobytes())
source = Image.open('source.jpg')
data = engine.save_pil_image(source, filename='test.jpg', keep_icc_profile=True)
img = Image.open(data)
self.assertNotEqual(img.info.get('icc_profile'), None)
| SaveTest |
python | ray-project__ray | python/ray/experimental/channel/common.py | {
"start": 8587,
"end": 11659
} | class ____:
def __init__(
self,
input_channels: List[ChannelInterface],
):
assert isinstance(input_channels, list)
for chan in input_channels:
assert isinstance(chan, ChannelInterface)
self._input_channels = input_channels
self._closed = False
self._num_reads = 0
# A list of channels that were not read in the last `read` call
# because the reader returned immediately when a RayTaskError was found.
# These channels must be consumed before the next read to avoid reading
# stale data remaining from the last read.
self._leftover_channels: List[ChannelInterface] = []
def get_num_reads(self) -> int:
return self._num_reads
def start(self):
raise NotImplementedError
def _read_list(self, timeout: Optional[float] = None) -> List[Any]:
"""
Read a list of values from this reader.
Args:
timeout: The maximum time in seconds to wait for reading.
None means using default timeout which is infinite, 0 means immediate
timeout (immediate success or timeout without blocking), -1 means
infinite timeout (block indefinitely).
"""
raise NotImplementedError
def read(self, timeout: Optional[float] = None) -> List[Any]:
"""
Read from this reader.
Args:
timeout: The maximum time in seconds to wait for reading.
None means using default timeout, 0 means immediate timeout
(immediate success or timeout without blocking), -1 means
infinite timeout (block indefinitely).
"""
assert (
timeout is None or timeout >= 0 or timeout == -1
), "Timeout must be non-negative or -1."
outputs = self._read_list(timeout)
self._num_reads += 1
return outputs
def close(self) -> None:
self._closed = True
for channel in self._input_channels:
channel.close()
def _consume_leftover_channels_if_needed(
self, timeout: Optional[float] = None
) -> None:
# Consume the channels that were not read in the last `read` call because a
# RayTaskError was returned from another channel. If we don't do this, the
# read operation will read stale versions of the object refs.
#
# If a RayTaskError is returned from a leftover channel, it will be ignored.
# If a read operation times out, a RayChannelTimeoutError exception will be
# raised.
#
# TODO(kevin85421): Currently, a DAG with NCCL channels and fast fail enabled
# may not be reusable. Revisit this in the future.
for c in self._leftover_channels:
start_time = time.monotonic()
c.read(timeout)
if timeout is not None:
timeout -= time.monotonic() - start_time
timeout = max(timeout, 0)
self._leftover_channels = []
@DeveloperAPI
| ReaderInterface |
python | huggingface__transformers | src/transformers/models/mobilebert/modeling_mobilebert.py | {
"start": 34617,
"end": 38079
} | class ____(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config)
self.cls = MobileBertOnlyNSPHead(config)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, NextSentencePredictorOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring) Indices should be in `[0, 1]`.
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Examples:
```python
>>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
>>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
" `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
"""
)
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
| MobileBertForNextSentencePrediction |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.