language (string, 1 class) | repo (string, 346 classes) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars)
|---|---|---|---|---|---|
python | django__django | tests/template_tests/test_response.py | {"start": 8789, "end": 13242} |
class ____(SimpleTestCase):
factory = RequestFactory()
def _response(self, template="foo", *args, **kwargs):
self._request = self.factory.get("/")
template = engines["django"].from_string(template)
return TemplateResponse(self._request, template, *args, **kwargs)
def test_render(self):
response = self._response("{{ foo }}{{ processors }}").render()
self.assertEqual(response.content, b"yes")
def test_render_with_requestcontext(self):
response = self._response("{{ foo }}{{ processors }}", {"foo": "bar"}).render()
self.assertEqual(response.content, b"baryes")
def test_context_processor_priority(self):
# context processors should be overridden by passed-in context
response = self._response(
"{{ foo }}{{ processors }}", {"processors": "no"}
).render()
self.assertEqual(response.content, b"no")
def test_kwargs(self):
response = self._response(content_type="application/json", status=504)
self.assertEqual(response.headers["content-type"], "application/json")
self.assertEqual(response.status_code, 504)
def test_args(self):
response = TemplateResponse(
self.factory.get("/"), "", {}, "application/json", 504
)
self.assertEqual(response.headers["content-type"], "application/json")
self.assertEqual(response.status_code, 504)
@require_jinja2
def test_using(self):
request = self.factory.get("/")
response = TemplateResponse(request, "template_tests/using.html").render()
self.assertEqual(response.content, b"DTL\n")
response = TemplateResponse(
request, "template_tests/using.html", using="django"
).render()
self.assertEqual(response.content, b"DTL\n")
response = TemplateResponse(
request, "template_tests/using.html", using="jinja2"
).render()
self.assertEqual(response.content, b"Jinja2\n")
def test_pickling(self):
# Create a template response. The context is
# known to be unpicklable (e.g., a function).
response = TemplateResponse(
self.factory.get("/"),
"first/test.html",
{
"value": 123,
"fn": datetime.now,
},
)
with self.assertRaises(ContentNotRenderedError):
pickle.dumps(response)
# But if we render the response, we can pickle it.
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
self.assertEqual(unpickled_response.content, response.content)
self.assertEqual(
unpickled_response.headers["content-type"], response.headers["content-type"]
)
self.assertEqual(unpickled_response.status_code, response.status_code)
# ...and the unpickled response doesn't have the
# template-related attributes, so it can't be re-rendered
template_attrs = (
"template_name",
"context_data",
"_post_render_callbacks",
"_request",
)
for attr in template_attrs:
self.assertFalse(hasattr(unpickled_response, attr))
# ...and requesting any of those attributes raises an exception
for attr in template_attrs:
with self.assertRaises(AttributeError):
getattr(unpickled_response, attr)
def test_repickling(self):
response = SimpleTemplateResponse(
"first/test.html",
{
"value": 123,
"fn": datetime.now,
},
)
with self.assertRaises(ContentNotRenderedError):
pickle.dumps(response)
response.render()
pickled_response = pickle.dumps(response)
unpickled_response = pickle.loads(pickled_response)
pickle.dumps(unpickled_response)
def test_headers(self):
response = TemplateResponse(
self.factory.get("/"),
"first/test.html",
{"value": 123, "fn": datetime.now},
headers={"X-Foo": "foo"},
)
self.assertEqual(response.headers["X-Foo"], "foo")
@modify_settings(
MIDDLEWARE={"append": ["template_tests.test_response.custom_urlconf_middleware"]}
)
@override_settings(ROOT_URLCONF="template_tests.urls")
| TemplateResponseTest |
python | django__django | tests/admin_views/tests.py | {"start": 3807, "end": 4889} |
class ____:
"""
Helper methods for extracting data from AdminForm.
"""
def get_admin_form_fields(self, response):
"""
Return a list of AdminFields for the AdminForm in the response.
"""
fields = []
for fieldset in response.context["adminform"]:
for field_line in fieldset:
fields.extend(field_line)
return fields
def get_admin_readonly_fields(self, response):
"""
Return the readonly fields for the response's AdminForm.
"""
return [f for f in self.get_admin_form_fields(response) if f.is_readonly]
def get_admin_readonly_field(self, response, field_name):
"""
Return the readonly field for the given field_name.
"""
admin_readonly_fields = self.get_admin_readonly_fields(response)
for field in admin_readonly_fields:
if field.field["name"] == field_name:
return field
@override_settings(ROOT_URLCONF="admin_views.urls", USE_I18N=True, LANGUAGE_CODE="en")
| AdminFieldExtractionMixin |
python | streamlit__streamlit | lib/tests/streamlit/components_test.py | {"start": 10306, "end": 13465} |
class ____(unittest.TestCase):
"""Test component registration."""
def setUp(self) -> None:
config = RuntimeConfig(
script_path="mock/script/path.py",
command_line=None,
component_registry=LocalComponentRegistry(),
media_file_storage=MemoryMediaFileStorage("/mock/media"),
uploaded_file_manager=MemoryUploadedFileManager("/mock/upload"),
)
self.runtime = Runtime(config)
def tearDown(self) -> None:
Runtime._instance = None
def test_register_component_with_path(self):
"""Registering a component should associate it with its path."""
test_path = "/a/test/component/directory"
def isdir(path):
return path == test_path
registry = ComponentRegistry.instance()
with mock.patch(
"streamlit.components.types.base_custom_component.os.path.isdir",
side_effect=isdir,
):
registry.register_component(
CustomComponent("test_component", path=test_path)
)
assert test_path == registry.get_component_path("test_component")
def test_register_component_no_path(self):
"""It's not an error to register a component without a path."""
registry = ComponentRegistry.instance()
# Return None when the component hasn't been registered
assert registry.get_component_path("test_component") is None
# And also return None when the component doesn't have a path
registry.register_component(
CustomComponent("test_component", url="http://not.a.url")
)
assert registry.get_component_path("test_component") is None
def test_register_invalid_path(self):
"""We raise an exception if a component is registered with a
non-existent path.
"""
test_path = "/a/test/component/directory"
registry = ComponentRegistry.instance()
with pytest.raises(StreamlitAPIException) as ctx:
registry.register_component(CustomComponent("test_component", test_path))
assert "No such component directory" in str(ctx.value)
def test_register_duplicate_path(self):
"""It's not an error to re-register a component.
(This can happen during development).
"""
test_path_1 = "/a/test/component/directory"
test_path_2 = "/another/test/component/directory"
def isdir(path):
return path in (test_path_1, test_path_2)
registry = ComponentRegistry.instance()
with mock.patch(
"streamlit.components.types.base_custom_component.os.path.isdir",
side_effect=isdir,
):
registry.register_component(CustomComponent("test_component", test_path_1))
registry.register_component(CustomComponent("test_component", test_path_1))
assert test_path_1 == registry.get_component_path("test_component")
registry.register_component(CustomComponent("test_component", test_path_2))
assert test_path_2 == registry.get_component_path("test_component")
| ComponentRegistryTest |
python | pytest-dev__pytest-xdist | testing/test_newhooks.py | {"start": 16, "end": 2553} |
class ____:
@pytest.fixture(autouse=True)
def create_test_file(self, pytester: pytest.Pytester) -> None:
pytester.makepyfile(
"""
import os
def test_a(): pass
def test_b(): pass
def test_c(): pass
"""
)
def test_runtest_logreport(self, pytester: pytest.Pytester) -> None:
"""Test that log reports from pytest_runtest_logreport when running with
xdist contain "node", "nodeid", "worker_id", and "testrun_uid"
attributes (#8)."""
pytester.makeconftest(
"""
def pytest_runtest_logreport(report):
if hasattr(report, 'node'):
if report.when == "call":
workerid = report.node.workerinput['workerid']
testrunuid = report.node.workerinput['testrunuid']
if workerid != report.worker_id:
print("HOOK: Worker id mismatch: %s %s"
% (workerid, report.worker_id))
elif testrunuid != report.testrun_uid:
print("HOOK: Testrun uid mismatch: %s %s"
% (testrunuid, report.testrun_uid))
else:
print("HOOK: %s %s %s"
% (report.nodeid, report.worker_id, report.testrun_uid))
"""
)
res = pytester.runpytest("-n1", "-s")
res.stdout.fnmatch_lines(
[
"*HOOK: test_runtest_logreport.py::test_a gw0 *",
"*HOOK: test_runtest_logreport.py::test_b gw0 *",
"*HOOK: test_runtest_logreport.py::test_c gw0 *",
"*3 passed*",
]
)
def test_node_collection_finished(self, pytester: pytest.Pytester) -> None:
"""Test pytest_xdist_node_collection_finished hook (#8)."""
pytester.makeconftest(
"""
def pytest_xdist_node_collection_finished(node, ids):
workerid = node.workerinput['workerid']
stripped_ids = [x.split('::')[1] for x in ids]
print("HOOK: %s %s" % (workerid, ', '.join(stripped_ids)))
"""
)
res = pytester.runpytest("-n2", "-s")
res.stdout.fnmatch_lines_random(
["*HOOK: gw0 test_a, test_b, test_c", "*HOOK: gw1 test_a, test_b, test_c"]
)
res.stdout.fnmatch_lines(["*3 passed*"])
| TestHooks |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-columns-strictly-increasing.py | {"start": 42, "end": 483} |
class ____(object):
def minimumOperations(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
result = 0
for i in xrange(len(grid)-1):
for j in xrange(len(grid[0])):
if grid[i][j]+1 <= grid[i+1][j]:
continue
result += (grid[i][j]+1)-grid[i+1][j]
grid[i+1][j] = grid[i][j]+1
return result
| Solution |
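The greedy pass above walks each column top-down and raises any cell that breaks the strictly-increasing constraint by exactly its deficit. A minimal Python 3 sketch with an assumed sample grid (the original file targets Python 2, hence `xrange`):

```python
# Python 3 rendering of the same greedy pass; the grid below is an assumed input.
def minimum_operations(grid):
    result = 0
    for i in range(len(grid) - 1):
        for j in range(len(grid[0])):
            # each cell must exceed the one above it; raise it just enough
            if grid[i][j] + 1 > grid[i + 1][j]:
                result += (grid[i][j] + 1) - grid[i + 1][j]
                grid[i + 1][j] = grid[i][j] + 1
    return result

# column 0 costs 3 + 2 + 6 increments, column 1 costs 4, so 15 in total
print(minimum_operations([[3, 2], [1, 3], [3, 4], [0, 1]]))  # 15
```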
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-weather/llama_index/readers/weather/base.py | {"start": 195, "end": 2910} |
class ____(BaseReader):
"""
Weather Reader.
Reads the forecast & current weather of any location using OpenWeatherMap's free API.
Check 'https://openweathermap.org/appid' for how to generate a free OpenWeatherMap API key.
Args:
token (str): bearer_token that you get from OWM API.
"""
def __init__(
self,
token: str,
) -> None:
"""Initialize with parameters."""
super().__init__()
self.token = token
def load_data(
self,
places: List[str],
) -> List[Document]:
"""
Load weather data for the given locations.
OWM's One Call API provides the following weather data for any geographical coordinate:
- Current weather
- Hourly forecast for 48 hours
- Daily forecast for 7 days.
Args:
places (List[str]) - places you want the weather data for.
"""
try:
import pyowm
except ImportError:
raise ImportError("install pyowm using `pip install pyowm`")
owm = pyowm.OWM(api_key=self.token)
mgr = owm.weather_manager()
reg = owm.city_id_registry()
results = []
for place in places:
info_dict = {}
extra_info = {}
list_of_locations = reg.locations_for(city_name=place)
try:
city = list_of_locations[0]
except IndexError:  # indexing an empty result list raises IndexError, not ValueError
raise ValueError(
f"Unable to find {place}, try checking the spelling and try again"
)
lat = city.lat
lon = city.lon
res = mgr.one_call(lat=lat, lon=lon)
extra_info["latitude"] = lat
extra_info["longitude"] = lon
extra_info["timezone"] = res.timezone
info_dict["location"] = place
info_dict["current weather"] = res.current.to_dict()
if res.forecast_daily:
info_dict["daily forecast"] = [i.to_dict() for i in res.forecast_daily]
if res.forecast_hourly:
info_dict["hourly forecast"] = [
i.to_dict() for i in res.forecast_hourly
]
if res.forecast_minutely:
info_dict["minutely forecast"] = [
i.to_dict() for i in res.forecast_minutely
]
if res.national_weather_alerts:
info_dict["national weather alerts"] = [
i.to_dict() for i in res.national_weather_alerts
]
results.append(Document(text=str(info_dict), extra_info=extra_info))
return results
| WeatherReader |
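A hedged usage sketch for the reader above, assuming the package re-exports `WeatherReader` from base.py and that `pyowm` is installed; the token and place names are placeholders:

```python
from llama_index.readers.weather import WeatherReader  # assumed re-export location

reader = WeatherReader(token="OWM_API_KEY")  # placeholder OpenWeatherMap key
documents = reader.load_data(places=["Berlin", "Tokyo"])
for doc in documents:
    # per the code above, extra_info carries latitude, longitude, and timezone
    print(doc.extra_info["timezone"], doc.text[:80])
```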
python | viewflow__viewflow | tests/test_templates.py | {"start": 1694, "end": 2053} |
class ____(IndexViewMixin, AppMenuMixin, Viewset):
title = 'Test Viewset'
page_path = path('test/', TemplateView.as_view(template_name='viewflow/base_page.html'), name="page")
urlpatterns = [
path('', Site(viewsets=[
Application(
title='Test Application',
viewsets=[TestViewset()]
)
]).urls)
]
| TestViewset |
python | openai__openai-python | src/openai/types/model.py | {"start": 181, "end": 532} |
class ____(BaseModel):
id: str
"""The model identifier, which can be referenced in the API endpoints."""
created: int
"""The Unix timestamp (in seconds) when the model was created."""
object: Literal["model"]
"""The object type, which is always "model"."""
owned_by: str
"""The organization that owns the model."""
| Model |
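Since this is a plain response model, constructing one directly shows the shape of the data; the field values below are made up:

```python
from openai.types.model import Model

m = Model(id="gpt-4o", created=1715367049, object="model", owned_by="openai")
print(m.id, m.owned_by)  # gpt-4o openai
```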
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/side_channel/stats_side_channel.py | {"start": 736, "end": 1876} |
class ____(SideChannel):
"""
Side channel that receives (string, float) pairs from the environment, so that they can eventually
be passed to a StatsReporter.
"""
def __init__(self) -> None:
# >>> uuid.uuid5(uuid.NAMESPACE_URL, "com.unity.ml-agents/StatsSideChannel")
# UUID('a1d8f7b7-cec8-50f9-b78b-d3e165a78520')
super().__init__(uuid.UUID("a1d8f7b7-cec8-50f9-b78b-d3e165a78520"))
self.stats: EnvironmentStats = defaultdict(list)
def on_message_received(self, msg: IncomingMessage) -> None:
"""
Receive the message from the environment, and save it for later retrieval.
:param msg:
:return:
"""
key = msg.read_string()
val = msg.read_float32()
agg_type = StatsAggregationMethod(msg.read_int32())
self.stats[key].append((val, agg_type))
def get_and_reset_stats(self) -> EnvironmentStats:
"""
Returns the current stats, and resets the internal storage of the stats.
:return:
"""
s = self.stats
self.stats = defaultdict(list)
return s
| StatsSideChannel |
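A sketch of the wire round trip, assuming mlagents_envs' `OutgoingMessage`/`IncomingMessage` helpers whose `write_*` calls mirror the `read_*` calls above:

```python
from mlagents_envs.side_channel import IncomingMessage, OutgoingMessage
from mlagents_envs.side_channel.stats_side_channel import (
    StatsAggregationMethod,
    StatsSideChannel,
)

channel = StatsSideChannel()

# simulate the environment side: key, float value, aggregation method
msg = OutgoingMessage()
msg.write_string("Environment/Reward")
msg.write_float32(1.5)
msg.write_int32(StatsAggregationMethod.AVERAGE.value)

channel.on_message_received(IncomingMessage(msg.buffer))
print(channel.get_and_reset_stats())
# {'Environment/Reward': [(1.5, <StatsAggregationMethod.AVERAGE: ...>)]}
```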
python | django__django | tests/middleware_exceptions/middleware.py | {"start": 2952, "end": 3109} |
class ____(BaseMiddleware):
def process_template_response(self, request, response):
return None
@async_only_middleware
| NoTemplateResponseMiddleware |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {"start": 124320, "end": 131700} |
class ____(test.TestCase):
def setUp(self):
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
@test_util.run_deprecated_v1
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
@test_util.run_deprecated_v1
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
@test_util.run_deprecated_v1
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
self.evaluate(update_op)
# Then verify idempotency.
initial_error = self.evaluate(error)
for _ in range(10):
self.assertEqual(initial_error, self.evaluate(error))
@test_util.run_deprecated_v1
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertEqual(0, self.evaluate(update_op))
self.assertEqual(0, self.evaluate(error))
@test_util.run_deprecated_v1
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.mean_squared_error(labels, predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertEqual(6, self.evaluate(update_op))
self.assertEqual(6, self.evaluate(error))
@test_util.run_deprecated_v1
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.mean_squared_error(labels, predictions, weights)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertEqual(13, self.evaluate(update_op))
self.assertEqual(13, self.evaluate(error))
@test_util.run_deprecated_v1
def testMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
error, update_op = metrics.mean_squared_error(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.evaluate(update_op)
self.assertAlmostEqual(208.0 / 6, self.evaluate(update_op), 5)
self.assertAlmostEqual(208.0 / 6, self.evaluate(error), 5)
@test_util.run_deprecated_v1
def testMetricsComputedConcurrently(self):
with self.cached_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.mean_squared_error(
labels0, predictions0, name='msd0')
mse1, update_op1 = metrics.mean_squared_error(
labels1, predictions1, name='msd1')
self.evaluate(variables.local_variables_initializer())
self.evaluate([update_op0, update_op1])
self.evaluate([update_op0, update_op1])
mse0, mse1 = self.evaluate([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
@test_util.run_deprecated_v1
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.cached_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.mean_absolute_error(labels, predictions)
mse, ms_update_op = metrics.mean_squared_error(labels, predictions)
self.evaluate(variables.local_variables_initializer())
self.evaluate([ma_update_op, ms_update_op])
self.evaluate([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, self.evaluate(mae), 5)
self.assertAlmostEqual(208.0 / 6, self.evaluate(mse), 5)
| MeanSquaredErrorTest |
python | pandas-dev__pandas | asv_bench/benchmarks/frame_methods.py | {"start": 14417, "end": 15193} |
class ____:
def setup(self):
self.df = DataFrame(np.random.randn(1000, 100))
self.s = Series(np.arange(1028.0))
self.df2 = DataFrame(dict.fromkeys(range(1028), self.s))
self.df3 = DataFrame(np.random.randn(1000, 3), columns=list("ABC"))
def time_apply_user_func(self):
self.df2.apply(lambda x: np.corrcoef(x, self.s)[(0, 1)])
def time_apply_axis_1(self):
self.df.apply(lambda x: x + 1, axis=1)
def time_apply_lambda_mean(self):
self.df.apply(lambda x: x.mean())
def time_apply_str_mean(self):
self.df.apply("mean")
def time_apply_pass_thru(self):
self.df.apply(lambda x: x)
def time_apply_ref_by_name(self):
self.df3.apply(lambda x: x["A"] + x["B"], axis=1)
| Apply |
python | Pylons__pyramid | tests/test_security.py | {"start": 18097, "end": 19474} |
class ____:
def __init__(self, result):
self.result = result
def permits(self, context, principals, permission):
return self.result
def principals_allowed_by_permission(self, context, permission):
return self.result
def _registerSecurityPolicy(reg, result):
from pyramid.interfaces import ISecurityPolicy
policy = DummySecurityPolicy(result)
reg.registerUtility(policy, ISecurityPolicy)
return policy
def _registerLegacySecurityPolicy(reg):
from pyramid.interfaces import ISecurityPolicy
from pyramid.security import LegacySecurityPolicy
policy = LegacySecurityPolicy()
reg.registerUtility(policy, ISecurityPolicy)
return policy
def _registerAuthenticationPolicy(reg, result):
from pyramid.interfaces import IAuthenticationPolicy
policy = DummyAuthenticationPolicy(result)
reg.registerUtility(policy, IAuthenticationPolicy)
return policy
def _registerAuthorizationPolicy(reg, result):
from pyramid.interfaces import IAuthorizationPolicy
policy = DummyAuthorizationPolicy(result)
reg.registerUtility(policy, IAuthorizationPolicy)
return policy
def _makeRequest():
from pyramid.registry import Registry
request = testing.DummyRequest(environ={})
request.registry = Registry()
request.context = object()
return request
| DummyAuthorizationPolicy |
python | scikit-learn__scikit-learn | sklearn/linear_model/_coordinate_descent.py | {"start": 69338, "end": 79029} |
class ____(RegressorMixin, LinearModelCV):
"""Lasso linear model with iterative fitting along a regularization path.
See glossary entry for :term:`cross-validation estimator`.
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, default=1e-3
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, default=100
Number of alphas along the regularization path.
.. deprecated:: 1.7
`n_alphas` was deprecated in 1.7 and will be removed in 1.9. Use `alphas`
instead.
alphas : array-like or int, default=None
Values of alphas to test along the regularization path.
If int, `alphas` values are generated automatically.
If array-like, list of alpha values to use.
.. versionchanged:: 1.7
`alphas` accepts an integer value which removes the need to pass
`n_alphas`.
.. deprecated:: 1.7
`alphas=None` was deprecated in 1.7 and will be removed in 1.9, at which
point the default value will be set to 100.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
precompute : 'auto', bool or array-like of shape \
(n_features, n_features), default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=1000
The maximum number of iterations.
tol : float, default=1e-4
The tolerance for the optimization: if the updates are smaller or equal to
``tol``, the optimization code checks the dual gap for optimality and continues
until it is smaller or equal to ``tol``.
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
cv : int, cross-validation generator or iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross-validation,
- int, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For int/None inputs, :class:`~sklearn.model_selection.KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
verbose : bool or int, default=False
Amount of verbosity.
n_jobs : int, default=None
Number of CPUs to use during the cross validation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
positive : bool, default=False
If positive, restrict regression coefficients to be positive.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator that selects a random
feature to update. Used when ``selection`` == 'random'.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
selection : {'cyclic', 'random'}, default='cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation.
coef_ : ndarray of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the cost function formula).
intercept_ : float or ndarray of shape (n_targets,)
Independent term in decision function.
mse_path_ : ndarray of shape (n_alphas, n_folds)
Mean square error for the test set on each fold, varying alpha.
alphas_ : ndarray of shape (n_alphas,)
The grid of alphas used for fitting.
dual_gap_ : float or ndarray of shape (n_targets,)
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
Number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path : Compute Least Angle Regression or Lasso path using LARS
algorithm.
lasso_path : Compute Lasso path with coordinate descent.
Lasso : The Lasso is a linear model that estimates sparse coefficients.
LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars.
LassoCV : Lasso linear model with iterative fitting along a regularization
path.
LassoLarsCV : Cross-validated Lasso using the LARS algorithm.
Notes
-----
In `fit`, once the best parameter `alpha` is found through
cross-validation, the model is fit again using the entire training set.
To avoid unnecessary memory duplication the `X` argument of the `fit`
method should be directly passed as a Fortran-contiguous numpy array.
For an example, see :ref:`examples/linear_model/plot_lasso_model_selection.py
<sphx_glr_auto_examples_linear_model_plot_lasso_model_selection.py>`.
:class:`LassoCV` leads to different results than a hyperparameter
search using :class:`~sklearn.model_selection.GridSearchCV` with a
:class:`Lasso` model. In :class:`LassoCV`, a model for a given
penalty `alpha` is warm started using the coefficients of the
closest model (trained at the previous iteration) on the
regularization path. It tends to speed up the hyperparameter
search.
The underlying coordinate descent solver uses gap safe screening rules to speedup
fitting time, see :ref:`User Guide on coordinate descent <coordinate_descent>`.
Examples
--------
>>> from sklearn.linear_model import LassoCV
>>> from sklearn.datasets import make_regression
>>> X, y = make_regression(noise=4, random_state=0)
>>> reg = LassoCV(cv=5, random_state=0).fit(X, y)
>>> reg.score(X, y)
0.9993
>>> reg.predict(X[:1,])
array([-79.4755331])
"""
path = staticmethod(lasso_path)
def __init__(
self,
*,
eps=1e-3,
n_alphas="deprecated",
alphas="warn",
fit_intercept=True,
precompute="auto",
max_iter=1000,
tol=1e-4,
copy_X=True,
cv=None,
verbose=False,
n_jobs=None,
positive=False,
random_state=None,
selection="cyclic",
):
super().__init__(
eps=eps,
n_alphas=n_alphas,
alphas=alphas,
fit_intercept=fit_intercept,
precompute=precompute,
max_iter=max_iter,
tol=tol,
copy_X=copy_X,
cv=cv,
verbose=verbose,
n_jobs=n_jobs,
positive=positive,
random_state=random_state,
selection=selection,
)
def _get_estimator(self):
return Lasso()
def _is_multitask(self):
return False
def fit(self, X, y, sample_weight=None, **params):
"""Fit Lasso model with coordinate descent.
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse. Note that large sparse matrices and arrays
requiring `int64` indices are not accepted.
y : array-like of shape (n_samples,)
Target values.
sample_weight : float or array-like of shape (n_samples,), \
default=None
Sample weights used for fitting and evaluation of the weighted
mean squared error of each cv-fold. Note that the cross validated
MSE that is finally used to find the best model is the unweighted
mean over the (weighted) MSEs of each test fold.
**params : dict, default=None
Parameters to be passed to the CV splitter.
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`,
which can be set by using
``sklearn.set_config(enable_metadata_routing=True)``.
See :ref:`Metadata Routing User Guide <metadata_routing>` for
more details.
Returns
-------
self : object
Returns an instance of fitted model.
"""
return super().fit(X, y, sample_weight=sample_weight, **params)
| LassoCV |
python | cython__cython | Cython/Plex/Actions.py | {"start": 2209, "end": 2524} |
class ____(Action):
"""
IGNORE is a Plex action which causes its associated token
to be ignored. See the docstring of Plex.Lexicon for more
information.
"""
def perform(self, token_stream, text):
return None
def __repr__(self):
return "IGNORE"
IGNORE = Ignore()
| Ignore |
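IGNORE only makes sense inside a lexicon; a hedged sketch using Cython's bundled Plex (the `Cython.Plex` import surface and the `Scanner.read()` tuple protocol are assumptions here):

```python
from io import StringIO

from Cython.Plex import Any, Lexicon, Range, Rep1, Scanner, TEXT, IGNORE

lexicon = Lexicon([
    (Rep1(Range("09")), TEXT),     # digit runs become tokens carrying their text
    (Rep1(Any(" \t\n")), IGNORE),  # whitespace matches but yields no token
])

scanner = Scanner(lexicon, StringIO("1 22  333"), "demo")
token = scanner.read()
while token[0] is not None:
    print(token)  # ('1', '1'), ('22', '22'), ('333', '333'); whitespace skipped
    token = scanner.read()
```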
python | getsentry__sentry | src/sentry/models/distribution.py | {"start": 268, "end": 743} |
class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(db_index=True)
release = FlexibleForeignKey("sentry.Release")
name = models.CharField(max_length=64)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_distribution"
unique_together = (("release", "name"),)
__repr__ = sane_repr("release", "name")
| Distribution |
python | plotly__plotly.py | plotly/graph_objs/scattermapbox/_cluster.py | {"start": 233, "end": 9697} |
class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermapbox"
_path_str = "scattermapbox.cluster"
_valid_props = {
"color",
"colorsrc",
"enabled",
"maxzoom",
"opacity",
"opacitysrc",
"size",
"sizesrc",
"step",
"stepsrc",
}
@property
def color(self):
"""
Sets the color for each cluster step.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def enabled(self):
"""
Determines whether clustering is enabled or disabled.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def maxzoom(self):
"""
Sets the maximum zoom level. At zoom levels equal to or greater
than this, points will never be clustered.
The 'maxzoom' property is a number and may be specified as:
- An int or float in the interval [0, 24]
Returns
-------
int|float
"""
return self["maxzoom"]
@maxzoom.setter
def maxzoom(self, val):
self["maxzoom"] = val
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
@property
def size(self):
"""
Sets the size for each cluster step.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def step(self):
"""
Sets how many points it takes to create a cluster or advance to
the next cluster step. Use this in conjunction with arrays for
`size` and / or `color`. If an integer, steps start at
multiples of this number. If an array, each step extends from
the given value until one less than the next value.
The 'step' property is a number and may be specified as:
- An int or float in the interval [-1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["step"]
@step.setter
def step(self, val):
self["step"] = val
@property
def stepsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `step`.
The 'stepsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stepsrc"]
@stepsrc.setter
def stepsrc(self, val):
self["stepsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the color for each cluster step.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
enabled
Determines whether clustering is enabled or disabled.
maxzoom
Sets the maximum zoom level. At zoom levels equal to or
greater than this, points will never be clustered.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
size
Sets the size for each cluster step.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
step
Sets how many points it takes to create a cluster or
advance to the next cluster step. Use this in
conjunction with arrays for `size` and / or `color`. If
an integer, steps start at multiples of this number. If
an array, each step extends from the given value until
one less than the next value.
stepsrc
Sets the source reference on Chart Studio Cloud for
`step`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
enabled=None,
maxzoom=None,
opacity=None,
opacitysrc=None,
size=None,
sizesrc=None,
step=None,
stepsrc=None,
**kwargs,
):
"""
Construct a new Cluster object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattermapbox.Cluster`
color
Sets the color for each cluster step.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
enabled
Determines whether clustering is enabled or disabled.
maxzoom
Sets the maximum zoom level. At zoom levels equal to or
greater than this, points will never be clustered.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
size
Sets the size for each cluster step.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
step
Sets how many points it takes to create a cluster or
advance to the next cluster step. Use this in
conjunction with arrays for `size` and / or `color`. If
an integer, steps start at multiples of this number. If
an array, each step extends from the given value until
one less than the next value.
stepsrc
Sets the source reference on Chart Studio Cloud for
`step`.
Returns
-------
Cluster
"""
super().__init__("cluster")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermapbox.Cluster
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.Cluster`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("enabled", arg, enabled)
self._set_property("maxzoom", arg, maxzoom)
self._set_property("opacity", arg, opacity)
self._set_property("opacitysrc", arg, opacitysrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("step", arg, step)
self._set_property("stepsrc", arg, stepsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Cluster |
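A usage sketch for these cluster settings on a `Scattermapbox` trace; the coordinates, zoom values, and the token-free "open-street-map" style are illustrative choices:

```python
import plotly.graph_objects as go

fig = go.Figure(
    go.Scattermapbox(
        lat=[52.52, 52.53, 52.51, 48.85],
        lon=[13.40, 13.41, 13.39, 2.35],
        mode="markers",
        cluster=go.scattermapbox.Cluster(
            enabled=True,   # turn clustering on
            maxzoom=10,     # never cluster at zoom levels >= 10
            step=2,         # advance a cluster step every 2 points
            size=[15, 25],  # one marker size per cluster step
        ),
    )
)
fig.update_layout(mapbox_style="open-street-map", mapbox_zoom=3)
fig.show()
```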
python | pytorch__pytorch | torch/distributed/_local_tensor/__init__.py | {"start": 15021, "end": 19835} |
class ____:
"""
Like a LocalTensor, but for an int. We can't use a 0D tensor to represent this
because often only a SymInt is accepted where we wish to use this.
"""
def __new__(cls, local_ints: dict[int, int]) -> "ConstantIntNode | LocalIntNode": # type: ignore[misc]
if len(set(local_ints.values())) == 1:
return ConstantIntNode(next(iter(local_ints.values())))
return super().__new__(cls)
def __init__(self, local_ints: dict[int, int]):
self._local_ints = local_ints
def maybe_as_int(self) -> Optional[int]:
return None
def is_int(self) -> bool:
return True
def is_float(self) -> bool:
return False
def is_bool(self) -> bool:
return False
def is_nested_int(self) -> bool:
return False
def clone(self) -> "LocalIntNode":
return self
def _str(self) -> str:
return f"LocalIntNode({self._local_ints})"
def __str__(self) -> str:
return self._str()
def __repr__(self) -> str:
return self._str()
def _graph_repr(self) -> str:
return self._str()
def is_symbolic(self) -> bool:
return False
def is_constant(self) -> bool:
return False
def sym_max(
self, other: "int | LocalIntNode | ConstantIntNode"
) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode(
{
r: max(self._local_ints[r], _int_on_rank(other, r))
for r in self._local_ints
}
)
def sym_sum(self, other: Any) -> "LocalIntNode | ConstantIntNode":
t = LocalIntNode(dict.fromkeys(self._local_ints, 0))
for o in other:
t = t.add(o)
return t
def neg(self) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode({r: -self._local_ints[r] for r in self._local_ints})
def add(
self, other: "int | LocalIntNode | ConstantIntNode"
) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode(
{r: self._local_ints[r] + _int_on_rank(other, r) for r in self._local_ints}
)
def sub(
self, other: "int | LocalIntNode | ConstantIntNode"
) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode(
{r: self._local_ints[r] - _int_on_rank(other, r) for r in self._local_ints}
)
def mul(
self, other: "int | LocalIntNode | ConstantIntNode"
) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode(
{r: self._local_ints[r] * _int_on_rank(other, r) for r in self._local_ints}
)
def floordiv(
self, other: "int | LocalIntNode | ConstantIntNode"
) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode(
{r: self._local_ints[r] // _int_on_rank(other, r) for r in self._local_ints}
)
def mod(
self, other: "int | LocalIntNode | ConstantIntNode"
) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode(
{r: self._local_ints[r] % _int_on_rank(other, r) for r in self._local_ints}
)
def int_floordiv(
self, other: "int | LocalIntNode | ConstantIntNode"
) -> "LocalIntNode | ConstantIntNode":
return LocalIntNode(
{r: self._local_ints[r] // _int_on_rank(other, r) for r in self._local_ints}
)
def eq(self, other: "int | LocalIntNode | ConstantIntNode") -> bool | SymBool:
r = {self._local_ints[r] == _int_on_rank(other, r) for r in self._local_ints}
return torch._C._get_constant_bool_symnode(len(r) == 1 and next(iter(r)))
def ne(self, other: "int | LocalIntNode | ConstantIntNode") -> bool | SymBool:
r = {self._local_ints[r] != _int_on_rank(other, r) for r in self._local_ints}
return torch._C._get_constant_bool_symnode(len(r) > 1 or next(iter(r)))
def ge(self, other: "int | LocalIntNode | ConstantIntNode") -> bool | SymBool:
r = {self._local_ints[r] >= _int_on_rank(other, r) for r in self._local_ints}
assert len(r) == 1, (self, other)
return torch._C._get_constant_bool_symnode(next(iter(r)))
def gt(self, other: "int | LocalIntNode | ConstantIntNode") -> bool | SymBool:
r = {self._local_ints[r] > _int_on_rank(other, r) for r in self._local_ints}
assert len(r) == 1, (self, other)
return torch._C._get_constant_bool_symnode(next(iter(r)))
def lt(self, other: "int | LocalIntNode | ConstantIntNode") -> bool | SymBool:
r = {self._local_ints[r] < _int_on_rank(other, r) for r in self._local_ints}
assert len(r) == 1, (self, other)
return torch._C._get_constant_bool_symnode(next(iter(r)))
def wrap_int(self, num: int) -> "LocalIntNode | ConstantIntNode":
return ConstantIntNode(num)
| LocalIntNode |
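Because the class lives in a private torch module, a simplified stand-in illustrates the per-rank semantics without importing it; note that `__new__` above collapses to a `ConstantIntNode` whenever every rank holds the same value:

```python
# A toy mock of the per-rank arithmetic; not the real torch class.
class MiniLocalInt:
    def __init__(self, local_ints: dict[int, int]) -> None:
        self.local_ints = local_ints

    def add(self, other: int) -> "MiniLocalInt":
        # plain ints broadcast to every rank, as _int_on_rank does above
        return MiniLocalInt({r: v + other for r, v in self.local_ints.items()})

n = MiniLocalInt({0: 2, 1: 5})  # rank 0 sees 2, rank 1 sees 5
print(n.add(3).local_ints)      # {0: 5, 1: 8}
```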
python | openai__openai-python | src/openai/resources/audio/translations.py | {"start": 13974, "end": 14237} |
class ____:
def __init__(self, translations: AsyncTranslations) -> None:
self._translations = translations
self.create = _legacy_response.async_to_raw_response_wrapper(
translations.create,
)
| AsyncTranslationsWithRawResponse |
python | great-expectations__great_expectations | tests/execution_engine/test_sparkdf_execution_engine.py | {"start": 42037, "end": 52661} |
class ____:
@pytest.mark.parametrize(
"condition,expected_output",
[
pytest.param(
ComparisonCondition(column=Column("age"), operator=Operator.EQUAL, parameter=5),
"age == 5",
id="equal",
),
pytest.param(
ComparisonCondition(
column=Column("age"), operator=Operator.NOT_EQUAL, parameter=10
),
"age != 10",
id="not_equal",
),
pytest.param(
ComparisonCondition(
column=Column("age"), operator=Operator.LESS_THAN, parameter=18
),
"age < 18",
id="less_than",
),
pytest.param(
ComparisonCondition(
column=Column("age"), operator=Operator.GREATER_THAN, parameter=65
),
"age > 65",
id="greater_than",
),
pytest.param(
ComparisonCondition(
column=Column("age"), operator=Operator.LESS_THAN_OR_EQUAL, parameter=100
),
"age <= 100",
id="less_or_equal",
),
pytest.param(
ComparisonCondition(
column=Column("age"), operator=Operator.GREATER_THAN_OR_EQUAL, parameter=0
),
"age >= 0",
id="greater_or_equal",
),
pytest.param(
ComparisonCondition(
column=Column("name"), operator=Operator.EQUAL, parameter="John"
),
"name == 'John'",
id="equal_string",
),
pytest.param(
ComparisonCondition(
column=Column("name"), operator=Operator.NOT_EQUAL, parameter="Jane"
),
"name != 'Jane'",
id="not_equal_string",
),
],
)
def test_comparison_condition_to_filter_clause_basic_operators(
self, condition: ComparisonCondition, expected_output: str
) -> None:
engine = SparkDFExecutionEngine()
result = engine.condition_to_filter_clause(condition)
assert result == expected_output
@pytest.mark.parametrize(
"condition,expected_output",
[
pytest.param(
ComparisonCondition(
column=Column("status"), operator=Operator.IN, parameter=[1, 2, 3]
),
"status IN (1, 2, 3)",
id="integers",
),
pytest.param(
ComparisonCondition(
column=Column("status"),
operator=Operator.IN,
parameter=["active", "pending"],
),
"status IN ('active', 'pending')",
id="strings",
),
pytest.param(
ComparisonCondition(
column=Column("status"), operator=Operator.NOT_IN, parameter=[1, 2, 3]
),
"status NOT IN (1, 2, 3)",
id="not_in",
),
],
)
def test_comparison_condition_to_filter_clause_in_not_in_operators(
self, condition: ComparisonCondition, expected_output: str
) -> None:
engine = SparkDFExecutionEngine()
result = engine.condition_to_filter_clause(condition)
assert result == expected_output
@pytest.mark.parametrize(
"condition,expected_output",
[
pytest.param(
NullityCondition(column=Column("email"), is_null=True),
"email IS NULL",
id="is_null",
),
pytest.param(
NullityCondition(column=Column("email"), is_null=False),
"email IS NOT NULL",
id="is_not_null",
),
],
)
def test_nullity_condition_to_filter_clause(
self, condition: NullityCondition, expected_output: str
) -> None:
engine = SparkDFExecutionEngine()
result = engine.condition_to_filter_clause(condition)
assert result == expected_output
def test_and_condition_to_filter_clause_simple(self) -> None:
engine = SparkDFExecutionEngine()
and_condition = AndCondition(
conditions=[
ComparisonCondition(
column=Column("age"), operator=Operator.GREATER_THAN, parameter=18
),
ComparisonCondition(
column=Column("age"), operator=Operator.LESS_THAN, parameter=65
),
]
)
result = engine.condition_to_filter_clause(and_condition)
assert result == "(age > 18 AND age < 65)"
def test_or_condition_to_filter_clause_simple(self) -> None:
"""Test that OR conditions generate correct Spark SQL query strings."""
engine = SparkDFExecutionEngine()
or_condition = OrCondition(
conditions=[
ComparisonCondition(
column=Column("status"), operator=Operator.EQUAL, parameter="active"
),
ComparisonCondition(
column=Column("status"), operator=Operator.EQUAL, parameter="pending"
),
]
)
result = engine.condition_to_filter_clause(or_condition)
assert result == "(status == 'active' OR status == 'pending')"
def test_nested_conditions(self) -> None:
engine = SparkDFExecutionEngine()
or_condition = OrCondition(
conditions=[
AndCondition(
conditions=[
ComparisonCondition(
column=Column("age"),
operator=Operator.GREATER_THAN_OR_EQUAL,
parameter=18,
),
ComparisonCondition(
column=Column("age"),
operator=Operator.LESS_THAN_OR_EQUAL,
parameter=65,
),
]
),
ComparisonCondition(
column=Column("status"), operator=Operator.EQUAL, parameter="exempt"
),
]
)
result = engine.condition_to_filter_clause(or_condition)
assert result == "((age >= 18 AND age <= 65) OR status == 'exempt')"
def test_comparison_filter_clause_filters_dataframe(
self, spark_session, spark_df_from_pandas_df
) -> None:
engine = SparkDFExecutionEngine()
pd_df = pd.DataFrame({"age": [15, 25, 35, 45, 55], "name": ["A", "B", "C", "D", "E"]})
df = spark_df_from_pandas_df(spark_session, pd_df)
condition = ComparisonCondition(
column=Column("age"),
operator=Operator.GREATER_THAN,
parameter=30,
)
filter_clause = engine.condition_to_filter_clause(condition)
result_df = df.filter(filter_clause)
assert result_df.count() == 3
result_rows = result_df.select("age", "name").collect()
assert [row.age for row in result_rows] == [35, 45, 55]
assert [row.name for row in result_rows] == ["C", "D", "E"]
def test_in_filter_clause_filters_dataframe(
self, spark_session, spark_df_from_pandas_df
) -> None:
engine = SparkDFExecutionEngine()
pd_df = pd.DataFrame(
{
"status": ["active", "pending", "inactive", "active", "deleted"],
"id": [1, 2, 3, 4, 5],
}
)
df = spark_df_from_pandas_df(spark_session, pd_df)
condition = ComparisonCondition(
column=Column("status"),
operator=Operator.IN,
parameter=["active", "pending"],
)
filter_clause = engine.condition_to_filter_clause(condition)
result_df = df.filter(filter_clause)
assert result_df.count() == 3
result_rows = result_df.select("id").collect()
assert [row.id for row in result_rows] == [1, 2, 4]
def test_nullity_filter_clause_filters_dataframe(
self, spark_session, spark_df_from_pandas_df
) -> None:
engine = SparkDFExecutionEngine()
pd_df = pd.DataFrame(
{
"email": ["a@example.com", None, "c@example.com", None, "e@example.com"],
"id": [1, 2, 3, 4, 5],
}
)
df = spark_df_from_pandas_df(spark_session, pd_df)
condition = NullityCondition(column=Column("email"), is_null=False)
filter_clause = engine.condition_to_filter_clause(condition)
result_df = df.filter(filter_clause)
assert result_df.count() == 3
result_rows = result_df.select("id").collect()
assert [row.id for row in result_rows] == [1, 3, 5]
def test_nested_condition_filters_dataframe(
self, spark_session, spark_df_from_pandas_df
) -> None:
engine = SparkDFExecutionEngine()
pd_df = pd.DataFrame(
{
"age": [15, 25, 35, 45, 75],
"status": ["active", "active", "active", "active", "exempt"],
"id": [1, 2, 3, 4, 5],
}
)
df = spark_df_from_pandas_df(spark_session, pd_df)
or_condition = OrCondition(
conditions=[
AndCondition(
conditions=[
ComparisonCondition(
column=Column("age"),
operator=Operator.GREATER_THAN_OR_EQUAL,
parameter=18,
),
ComparisonCondition(
column=Column("age"),
operator=Operator.LESS_THAN_OR_EQUAL,
parameter=65,
),
]
),
ComparisonCondition(
column=Column("status"), operator=Operator.EQUAL, parameter="exempt"
),
]
)
filter_clause = engine.condition_to_filter_clause(or_condition)
result_df = df.filter(filter_clause)
assert result_df.count() == 4
result_rows = result_df.select("id").collect()
assert [row.id for row in result_rows] == [2, 3, 4, 5]
| TestConditionToFilterClause |
python | huggingface__transformers | tests/sagemaker/test_multi_node_data_parallel.py | {"start": 1120, "end": 3850} |
class ____(unittest.TestCase):
def setUp(self):
subprocess.run(
f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
encoding="utf-8",
check=True,
)
assert hasattr(self, "env")
def create_estimator(self, instance_count):
job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"
# distributed data settings
distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script,
source_dir=self.env.test_path,
role=self.env.role,
image_uri=self.env.image_uri,
base_job_name=job_name,
instance_count=instance_count,
instance_type=self.instance_type,
debugger_hook_config=False,
hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
metric_definitions=self.env.metric_definitions,
distribution=distribution,
py_version="py36",
)
def save_results_as_csv(self, job_name):
TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
# @parameterized.expand([(2,), (4,),])
@parameterized.expand([(2,)])
def test_script(self, instance_count):
# create estimator
estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
# result dataframe
result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
# extract kpis
eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
# get train time from SageMaker job, this includes starting, preprocessing, stopping
train_runtime = (
Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| MultiNodeTest |
python | pytest-dev__pytest | testing/example_scripts/fixtures/custom_item/conftest.py | {"start": 148, "end": 375} |
class ____(pytest.File):
def collect(self):
yield CustomItem.from_parent(name="foo", parent=self)
def pytest_collect_file(file_path, parent):
return CustomFile.from_parent(path=file_path, parent=parent)
| CustomFile |
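The snippet references `CustomItem`, which falls outside the extracted span; a minimal sketch of what such an item could look like (not the repo's actual definition):

```python
import pytest

class CustomItem(pytest.Item):
    def runtest(self) -> None:
        pass  # a trivial always-passing custom "test"

    def reportinfo(self):
        # (path, line, description) shown in failure headers and verbose output
        return self.path, 0, f"custom item: {self.name}"
```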
python | huggingface__transformers | src/transformers/models/clipseg/modeling_clipseg.py | {"start": 30974, "end": 33178} |
class ____(nn.Module):
# Copied from transformers.models.altclip.modeling_altclip.AltCLIPVisionTransformer.__init__ with AltCLIP->CLIPSeg
def __init__(self, config: CLIPSegVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = CLIPSegVisionEmbeddings(config)
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = CLIPSegEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor],
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
interpolate_pos_encoding: Optional[bool] = True,
) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| CLIPSegVisionTransformer |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {"start": 30615, "end": 31188} |
class ____(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~spack.vendor.jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
template and is not treated specially by the compiler.
"""
fields = ("name",)
name: str
def __init__(self) -> None:
raise TypeError(
"Can't create internal names. Use the "
"`free_identifier` method on a parser."
)
| InternalName |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/observability.py | {"start": 7898, "end": 8045} |
class ____(BaseObservation):
type: InfoObservationType
title: str
content: str | dict
@dataclass(slots=True, frozen=True)
| InfoObservation |
python | streamlit__streamlit | lib/streamlit/elements/widgets/button_group.py | {"start": 3891, "end": 5240} |
class ____(Generic[T]):
"""Only meant to be used internally for the button_group element.
Uses the ButtonGroup's _MultiSelectSerde under-the-hood, but accepts a single
index value and deserializes to a single index value.
This is because button_group can be single and multi select, but we use the same
proto for both and, thus, map single values to a list of values and a receiving
value wrapped in a list to a single value.
When a default_value is provided, the option corresponding to the
index is serialized/deserialized.
"""
def __init__(
self,
option_indices: Sequence[T],
default_value: list[int] | None = None,
) -> None:
# see docstring about why we use MultiSelectSerde here
self.multiselect_serde: _MultiSelectSerde[T] = _MultiSelectSerde(
option_indices, default_value if default_value is not None else []
)
def serialize(self, value: T | None) -> list[int]:
_value = [value] if value is not None else []
return self.multiselect_serde.serialize(_value)
def deserialize(self, ui_value: list[int] | None) -> T | None:
deserialized = self.multiselect_serde.deserialize(ui_value)
if len(deserialized) == 0:
return None
return deserialized[0]
| _SingleSelectSerde |
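A self-contained sketch of that wrapping idea (ListSerde and SingleSerde are hypothetical stand-ins, not Streamlit's internal classes): the single-select serde maps one value into a one-element list on the way out and unwraps on the way back.

from typing import Generic, List, Optional, Sequence, TypeVar

T = TypeVar("T")

class ListSerde(Generic[T]):
    """Maps a list of option values to their indices and back."""

    def __init__(self, options: Sequence[T]) -> None:
        self.options = list(options)

    def serialize(self, values: List[T]) -> List[int]:
        return [self.options.index(v) for v in values]

    def deserialize(self, indices: Optional[List[int]]) -> List[T]:
        return [self.options[i] for i in (indices or [])]

class SingleSerde(Generic[T]):
    """Single-select facade over the list-based serde, as described above."""

    def __init__(self, options: Sequence[T]) -> None:
        self.inner = ListSerde(options)

    def serialize(self, value: Optional[T]) -> List[int]:
        # wrap the single value in a (possibly empty) list
        return self.inner.serialize([value] if value is not None else [])

    def deserialize(self, indices: Optional[List[int]]) -> Optional[T]:
        values = self.inner.deserialize(indices)
        return values[0] if values else None

serde = SingleSerde(["a", "b", "c"])
assert serde.serialize("b") == [1]
assert serde.deserialize([2]) == "c"
assert serde.deserialize(None) is None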
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_checks.py | {
"start": 3722,
"end": 4990
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.String)
runId = graphene.NonNull(graphene.String)
status = graphene.NonNull(GrapheneAssetCheckExecutionResolvedStatus)
evaluation = graphene.Field(GrapheneAssetCheckEvaluation)
timestamp = graphene.Field(
graphene.NonNull(graphene.Float), description="When the check run started"
)
stepKey = graphene.Field(graphene.String)
class Meta:
name = "AssetCheckExecution"
def __init__(self, execution: AssetCheckExecutionRecord):
super().__init__()
self._execution = execution
self.id = str(execution.id)
self.runId = execution.run_id
self.evaluation = (
GrapheneAssetCheckEvaluation(execution.event)
if execution.event
and execution.event.dagster_event_type == DagsterEventType.ASSET_CHECK_EVALUATION
else None
)
self.timestamp = execution.create_timestamp
self.stepKey = execution.event.step_key if execution.event else None
async def resolve_status(
self, graphene_info: "ResolveInfo"
) -> AssetCheckExecutionResolvedStatus:
return await self._execution.resolve_status(graphene_info.context)
| GrapheneAssetCheckExecution |
python | pandas-dev__pandas | pandas/core/interchange/dataframe_protocol.py | {
"start": 4678,
"end": 12805
} | class ____(ABC):
"""
A column object, with only the methods and properties required by the
interchange protocol defined.
A column can contain one or more chunks. Each chunk can contain up to three
buffers - a data buffer, a mask buffer (depending on null representation),
and an offsets buffer (if variable-size binary; e.g., variable-length
strings).
TBD: Arrow has a separate "null" dtype, and has no separate mask concept.
Instead, it seems to use "children" for both columns with a bit mask,
and for nested dtypes. Unclear whether this is elegant or confusing.
This design requires checking the null representation explicitly.
The Arrow design requires checking:
1. the ARROW_FLAG_NULLABLE (for sentinel values)
2. if a column has two children, combined with one of those children
having a null dtype.
Making the mask concept explicit seems useful. One null dtype would
not be enough to cover both bit and byte masks, so that would mean
even more checking if we did it the Arrow way.
TBD: there's also the "chunk" concept here, which is implicit in Arrow as
multiple buffers per array (= column here). Semantically it may make
sense to have both: chunks were meant for example for lazy evaluation
of data which doesn't fit in memory, while multiple buffers per column
could also come from doing a selection operation on a single
contiguous buffer.
Given these concepts, one would expect chunks to be all of the same
size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows),
while multiple buffers could have data-dependent lengths. Not an issue
in pandas if one column is backed by a single NumPy array, but in
Arrow it seems possible.
Are multiple chunks *and* multiple buffers per column necessary for
the purposes of this interchange protocol, or must producers either
reuse the chunk concept for this or copy the data?
Note: this Column object can only be produced by ``__dataframe__``, so
doesn't need its own version or ``__column__`` protocol.
"""
@abstractmethod
def size(self) -> int:
"""
Size of the column, in elements.
Corresponds to DataFrame.num_rows() if column is a single chunk;
equal to size of this current chunk otherwise.
"""
@property
@abstractmethod
def offset(self) -> int:
"""
Offset of first element.
May be > 0 if using chunks; for example for a column with N chunks of
equal size M (only the last chunk may be shorter),
``offset = n * M``, ``n = 0 .. N-1``.
"""
@property
@abstractmethod
def dtype(self) -> tuple[DtypeKind, int, str, str]:
"""
Dtype description as a tuple ``(kind, bit-width, format string, endianness)``.
Bit-width : the number of bits as an integer
Format string : data type description format string in Apache Arrow C
Data Interface format.
Endianness : currently only native endianness (``=``) is supported
Notes:
- Kind specifiers are aligned with DLPack where possible (hence the
jump to 20, leave enough room for future extension)
- Masks must be specified as boolean with either bit width 1 (for bit
masks) or 8 (for byte masks).
- Dtype width in bits was preferred over bytes
- Endianness isn't too useful, but included now in case in the future
we need to support non-native endianness
- Went with Apache Arrow format strings over NumPy format strings
because they're more complete from a dataframe perspective
- Format strings are mostly useful for datetime specification, and
for categoricals.
- For categoricals, the format string describes the type of the
categorical in the data buffer. In case of a separate encoding of
the categorical (e.g. an integer to string mapping), this can
be derived from ``self.describe_categorical``.
- Data types not included: complex, Arrow-style null, binary, decimal,
and nested (list, struct, map, union) dtypes.
"""
@property
@abstractmethod
def describe_categorical(self) -> CategoricalDescription:
"""
If the dtype is categorical, there are two options:
- There are only values in the data buffer.
- There is a separate non-categorical Column encoding for categorical values.
Raises TypeError if the dtype is not categorical
Returns the dictionary with description on how to interpret the data buffer:
- "is_ordered" : bool, whether the ordering of dictionary indices is
semantically meaningful.
- "is_dictionary" : bool, whether a mapping of
categorical values to other objects exists
- "categories" : Column representing the (implicit) mapping of indices to
category values (e.g. an array of cat1, cat2, ...).
None if not a dictionary-style categorical.
TBD: are there any other in-memory representations that are needed?
"""
@property
@abstractmethod
def describe_null(self) -> tuple[ColumnNullType, Any]:
"""
Return the missing value (or "null") representation the column dtype
uses, as a tuple ``(kind, value)``.
Value : if kind is "sentinel value", the actual value. If kind is a bit
mask or a byte mask, the value (0 or 1) indicating a missing value. None
otherwise.
"""
@property
@abstractmethod
def null_count(self) -> int | None:
"""
Number of null elements, if known.
Note: Arrow uses -1 to indicate "unknown", but None seems cleaner.
"""
@property
@abstractmethod
def metadata(self) -> dict[str, Any]:
"""
The metadata for the column. See `DataFrame.metadata` for more details.
"""
@abstractmethod
def num_chunks(self) -> int:
"""
Return the number of chunks the column consists of.
"""
@abstractmethod
def get_chunks(self, n_chunks: int | None = None) -> Iterable[Column]:
"""
Return an iterator yielding the chunks.
See `DataFrame.get_chunks` for details on ``n_chunks``.
"""
@abstractmethod
def get_buffers(self) -> ColumnBuffers:
"""
Return a dictionary containing the underlying buffers.
The returned dictionary has the following contents:
- "data": a two-element tuple whose first element is a buffer
containing the data and whose second element is the data
buffer's associated dtype.
- "validity": a two-element tuple whose first element is a buffer
containing mask values indicating missing data and
whose second element is the mask value buffer's
associated dtype. None if the null representation is
not a bit or byte mask.
- "offsets": a two-element tuple whose first element is a buffer
containing the offset values for variable-size binary
data (e.g., variable-length strings) and whose second
element is the offsets buffer's associated dtype. None
if the data buffer does not have an associated offsets
buffer.
"""
# def get_children(self) -> Iterable[Column]:
# """
# Children columns underneath the column, each object in this iterator
# must adhere to the column specification.
# """
# pass
@set_module("pandas.api.interchange")
| Column |
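A brief usage sketch of this protocol through pandas' own implementation (pandas 2.x shapes assumed; printed values are illustrative):

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, None], "b": ["x", "y", "z"]})
xdf = df.__dataframe__()            # interchange-protocol DataFrame object

col = xdf.get_column_by_name("a")
print(col.size())                   # 3; equals num_rows() for a single chunk
print(col.dtype)                    # (kind, bit-width, format string, endianness)
print(col.describe_null)            # how this dtype encodes missing values
buffers = col.get_buffers()
print(sorted(buffers))              # ['data', 'offsets', 'validity']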
python | huggingface__transformers | tests/models/xlm_roberta/test_tokenization_xlm_roberta.py | {
"start": 882,
"end": 2626
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "FacebookAI/xlm-roberta-base"
tokenizer_class = XLMRobertaTokenizer
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '生活的', '真', '谛', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [3293, 83, 10, 3034, 6, 82803, 87, 509, 103122, 23, 483, 13821, 4, 136, 903, 83, 84047, 446, 5, 6, 62668, 5364, 245875, 354, 2673, 35378, 2673, 35378, 35378, 0, 1274, 0, 2685, 581, 25632, 79315, 5608, 186, 155965, 22, 40899, 71, 12, 35378, 5, 4966, 193, 71, 136, 10249, 193, 71, 48229, 28240, 3642, 621, 398, 20594] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fals', 'é', '.', '▁', '生活的', '真', '谛', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁en', 'code', 'd', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁ปี', '▁ir', 'd', '▁ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊 I was born in 92000, and this is falsé. 生活的真谛是 Hi Hello Hi Hello Hello<s> hi<s> there The following string should be properly encoded: Hello. But ird and ปี ird ด Hey how are you doing"
| XLMRobertaTokenizationTest |
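A hedged usage sketch matching the fixtures above (requires transformers with sentencepiece installed and a cached or downloadable checkpoint):

from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("FacebookAI/xlm-roberta-base")
tokens = tok.tokenize("This is a test")
print(tokens)                          # ['▁This', '▁is', '▁a', '▁test']
ids = tok.convert_tokens_to_ids(tokens)
print(ids)                             # [3293, 83, 10, 3034]
print(tok.decode(ids))                 # 'This is a test'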
python | sympy__sympy | sympy/integrals/risch.py | {
"start": 32428,
"end": 60298
} | class ____(Exception):
"""
Exception used by subroutines within the Risch algorithm to indicate to one
another that the function being integrated does not have an elementary
integral in the given differential field.
"""
# TODO: Rewrite algorithms below to use this (?)
# TODO: Pass through information about why the integral was nonelementary,
# and store that in the resulting NonElementaryIntegral somehow.
pass
def gcdex_diophantine(a, b, c):
"""
Extended Euclidean Algorithm, Diophantine version.
Explanation
===========
Given ``a``, ``b`` in K[x] and ``c`` in (a, b), the ideal generated by ``a`` and
``b``, return (s, t) such that s*a + t*b == c and either s == 0 or s.degree()
< b.degree().
"""
# Extended Euclidean Algorithm (Diophantine Version) pg. 13
# TODO: This should go in densetools.py.
# XXX: Better name?
s, g = a.half_gcdex(b)
s *= c.exquo(g) # Inexact division means c is not in (a, b)
if s and s.degree() >= b.degree():
_, s = s.div(b)
t = (c - s*a).exquo(b)
return (s, t)
def frac_in(f, t, *, cancel=False, **kwargs):
"""
Returns the tuple (fa, fd), where fa and fd are Polys in t.
Explanation
===========
This is a common idiom in the Risch Algorithm functions, so we abstract
it out here. ``f`` should be a basic expression, a Poly, or a tuple (fa, fd),
where fa and fd are either basic expressions or Polys, and f == fa/fd.
**kwargs are applied to Poly.
"""
if isinstance(f, tuple):
fa, fd = f
f = fa.as_expr()/fd.as_expr()
fa, fd = f.as_expr().as_numer_denom()
fa, fd = fa.as_poly(t, **kwargs), fd.as_poly(t, **kwargs)
if cancel:
fa, fd = fa.cancel(fd, include=True)
if fa is None or fd is None:
raise ValueError("Could not turn %s into a fraction in %s." % (f, t))
return (fa, fd)
def as_poly_1t(p, t, z):
"""
(Hackish) way to convert an element ``p`` of K[t, 1/t] to K[t, z].
In other words, ``z == 1/t`` will be a dummy variable that Poly can handle
better.
See issue 5131.
Examples
========
>>> from sympy import random_poly
>>> from sympy.integrals.risch import as_poly_1t
>>> from sympy.abc import x, z
>>> p1 = random_poly(x, 10, -10, 10)
>>> p2 = random_poly(x, 10, -10, 10)
>>> p = p1 + p2.subs(x, 1/x)
>>> as_poly_1t(p, x, z).as_expr().subs(z, 1/x) == p
True
"""
# TODO: Use this on the final result. That way, we can avoid answers like
# (...)*exp(-x).
pa, pd = frac_in(p, t, cancel=True)
if not pd.is_monomial:
# XXX: Is there a better Poly exception that we could raise here?
# Either way, if you see this (from the Risch Algorithm) it indicates
# a bug.
raise PolynomialError("%s is not an element of K[%s, 1/%s]." % (p, t, t))
t_part, remainder = pa.div(pd)
ans = t_part.as_poly(t, z, expand=False)
if remainder:
one = remainder.one
tp = t*one
r = pd.degree() - remainder.degree()
z_part = remainder.transform(one, tp) * tp**r
z_part = z_part.replace(t, z).to_field().quo_ground(pd.LC())
ans += z_part.as_poly(t, z, expand=False)
return ans
def derivation(p, DE, coefficientD=False, basic=False):
"""
Computes Dp.
Explanation
===========
Given the derivation D with D = d/dx and p is a polynomial in t over
K(x), return Dp.
If coefficientD is True, it computes the derivation kD
(kappaD), which is defined as kD(sum(ai*Xi**i, (i, 0, n))) ==
sum(Dai*Xi**i, (i, 1, n)) (Definition 3.2.2, page 80). X in this case is
T[-1], so coefficientD computes the derivative just with respect to T[:-1],
with T[-1] treated as a constant.
If ``basic=True``, this returns a Basic expression. Elements of D can still be
instances of Poly.
"""
if basic:
r = 0
else:
r = Poly(0, DE.t)
t = DE.t
if coefficientD:
if DE.level <= -len(DE.T):
# 'base' case, the answer is 0.
return r
DE.decrement_level()
D = DE.D[:len(DE.D) + DE.level + 1]
T = DE.T[:len(DE.T) + DE.level + 1]
for d, v in zip(D, T):
pv = p.as_poly(v)
if pv is None or basic:
pv = p.as_expr()
if basic:
r += d.as_expr()*pv.diff(v)
else:
r += (d.as_expr()*pv.diff(v).as_expr()).as_poly(t)
if basic:
r = cancel(r)
if coefficientD:
DE.increment_level()
return r
def get_case(d, t):
"""
Returns the type of the derivation d.
Returns one of {'exp', 'tan', 'base', 'primitive', 'other_linear',
'other_nonlinear'}.
"""
if not d.expr.has(t):
if d.is_one:
return 'base'
return 'primitive'
if d.rem(Poly(t, t)).is_zero:
return 'exp'
if d.rem(Poly(1 + t**2, t)).is_zero:
return 'tan'
if d.degree(t) > 1:
return 'other_nonlinear'
return 'other_linear'
def splitfactor(p, DE, coefficientD=False, z=None):
"""
Splitting factorization.
Explanation
===========
Given a derivation D on k[t] and ``p`` in k[t], return (p_n, p_s) in
k[t] x k[t] such that p = p_n*p_s, p_s is special, and each square
factor of p_n is normal.
Page. 100
"""
kinv = [1/x for x in DE.T[:DE.level]]
if z:
kinv.append(z)
One = Poly(1, DE.t, domain=p.get_domain())
Dp = derivation(p, DE, coefficientD=coefficientD)
# XXX: Is this right?
if p.is_zero:
return (p, One)
if not p.expr.has(DE.t):
s = p.as_poly(*kinv).gcd(Dp.as_poly(*kinv)).as_poly(DE.t)
n = p.exquo(s)
return (n, s)
if not Dp.is_zero:
h = p.gcd(Dp).to_field()
g = p.gcd(p.diff(DE.t)).to_field()
s = h.exquo(g)
if s.degree(DE.t) == 0:
return (p, One)
q_split = splitfactor(p.exquo(s), DE, coefficientD=coefficientD)
return (q_split[0], q_split[1]*s)
else:
return (p, One)
def splitfactor_sqf(p, DE, coefficientD=False, z=None, basic=False):
"""
Splitting Square-free Factorization.
Explanation
===========
Given a derivation D on k[t] and ``p`` in k[t], returns (N1, ..., Nm)
and (S1, ..., Sm) in k[t]^m such that p =
(N1*N2**2*...*Nm**m)*(S1*S2**2*...*Sm**m) is a splitting
factorization of ``p`` and the Ni and Si are square-free and coprime.
"""
# TODO: This algorithm appears to be faster in every case
# TODO: Verify this and splitfactor() for multiple extensions
kkinv = [1/x for x in DE.T[:DE.level]] + DE.T[:DE.level]
if z:
kkinv = [z]
S = []
N = []
p_sqf = p.sqf_list_include()
if p.is_zero:
return (((p, 1),), ())
for pi, i in p_sqf:
Si = pi.as_poly(*kkinv).gcd(derivation(pi, DE,
coefficientD=coefficientD,basic=basic).as_poly(*kkinv)).as_poly(DE.t)
pi = Poly(pi, DE.t)
Si = Poly(Si, DE.t)
Ni = pi.exquo(Si)
if not Si.is_one:
S.append((Si, i))
if not Ni.is_one:
N.append((Ni, i))
return (tuple(N), tuple(S))
def canonical_representation(a, d, DE):
"""
Canonical Representation.
Explanation
===========
Given a derivation D on k[t] and f = a/d in k(t), return (f_p, f_s,
f_n) in k[t] x k(t) x k(t) such that f = f_p + f_s + f_n is the
canonical representation of f (f_p is a polynomial, f_s is reduced
(has a special denominator), and f_n is simple (has a normal
denominator)).
"""
# Make d monic
l = Poly(1/d.LC(), DE.t)
a, d = a.mul(l), d.mul(l)
q, r = a.div(d)
dn, ds = splitfactor(d, DE)
b, c = gcdex_diophantine(dn.as_poly(DE.t), ds.as_poly(DE.t), r.as_poly(DE.t))
b, c = b.as_poly(DE.t), c.as_poly(DE.t)
return (q, (b, ds), (c, dn))
def hermite_reduce(a, d, DE):
"""
Hermite Reduction - Mack's Linear Version.
Given a derivation D on k(t) and f = a/d in k(t), returns g, h, r in
k(t) such that f = Dg + h + r, h is simple, and r is reduced.
"""
# Make d monic
l = Poly(1/d.LC(), DE.t)
a, d = a.mul(l), d.mul(l)
fp, fs, fn = canonical_representation(a, d, DE)
a, d = fn
l = Poly(1/d.LC(), DE.t)
a, d = a.mul(l), d.mul(l)
ga = Poly(0, DE.t)
gd = Poly(1, DE.t)
dd = derivation(d, DE)
dm = gcd(d.to_field(), dd.to_field()).as_poly(DE.t)
ds, _ = d.div(dm)
while dm.degree(DE.t) > 0:
ddm = derivation(dm, DE)
dm2 = gcd(dm.to_field(), ddm.to_field())
dms, _ = dm.div(dm2)
ds_ddm = ds.mul(ddm)
ds_ddm_dm, _ = ds_ddm.div(dm)
b, c = gcdex_diophantine(-ds_ddm_dm.as_poly(DE.t),
dms.as_poly(DE.t), a.as_poly(DE.t))
b, c = b.as_poly(DE.t), c.as_poly(DE.t)
db = derivation(b, DE).as_poly(DE.t)
ds_dms, _ = ds.div(dms)
a = c.as_poly(DE.t) - db.mul(ds_dms).as_poly(DE.t)
ga = ga*dm + b*gd
gd = gd*dm
ga, gd = ga.cancel(gd, include=True)
dm = dm2
q, r = a.div(ds)
ga, gd = ga.cancel(gd, include=True)
r, d = r.cancel(ds, include=True)
rra = q*fs[1] + fp*fs[1] + fs[0]
rrd = fs[1]
rra, rrd = rra.cancel(rrd, include=True)
return ((ga, gd), (r, d), (rra, rrd))
def polynomial_reduce(p, DE):
"""
Polynomial Reduction.
Explanation
===========
Given a derivation D on k(t) and p in k[t] where t is a nonlinear
monomial over k, return q, r in k[t] such that p = Dq + r, and
deg(r) < deg_t(Dt).
"""
q = Poly(0, DE.t)
while p.degree(DE.t) >= DE.d.degree(DE.t):
m = p.degree(DE.t) - DE.d.degree(DE.t) + 1
q0 = Poly(DE.t**m, DE.t).mul(Poly(p.as_poly(DE.t).LC()/
(m*DE.d.LC()), DE.t))
q += q0
p = p - derivation(q0, DE)
return (q, p)
def laurent_series(a, d, F, n, DE):
"""
Contribution of ``F`` to the full partial fraction decomposition of A/D.
Explanation
===========
Given a field K of characteristic 0 and ``A``,``D``,``F`` in K[x] with D monic,
nonzero, coprime with A, and ``F`` the factor of multiplicity n in the square-
free factorization of D, return the principal parts of the Laurent series of
A/D at all the zeros of ``F``.
"""
if F.degree()==0:
return 0
Z = _symbols('z', n)
z = Symbol('z')
Z.insert(0, z)
delta_a = Poly(0, DE.t)
delta_d = Poly(1, DE.t)
E = d.quo(F**n)
ha, hd = (a, E*Poly(z**n, DE.t))
dF = derivation(F,DE)
B, _ = gcdex_diophantine(E, F, Poly(1,DE.t))
C, _ = gcdex_diophantine(dF, F, Poly(1,DE.t))
# initialization
F_store = F
V, DE_D_list, H_list= [], [], []
for j in range(0, n):
# the jth derivative of z is substituted with dfnth/(j + 1), where dfnth = (d^n f)/(dx^n)
F_store = derivation(F_store, DE)
v = (F_store.as_expr())/(j + 1)
V.append(v)
DE_D_list.append(Poly(Z[j + 1],Z[j]))
DE_new = DifferentialExtension(extension = {'D': DE_D_list}) #a differential indeterminate
for j in range(0, n):
zEha = Poly(z**(n + j), DE.t)*E**(j + 1)*ha
zEhd = hd
Pa, Pd = cancel((zEha, zEhd))[1], cancel((zEha, zEhd))[2]
Q = Pa.quo(Pd)
for i in range(0, j + 1):
Q = Q.subs(Z[i], V[i])
Dha = (hd*derivation(ha, DE, basic=True).as_poly(DE.t)
+ ha*derivation(hd, DE, basic=True).as_poly(DE.t)
+ hd*derivation(ha, DE_new, basic=True).as_poly(DE.t)
+ ha*derivation(hd, DE_new, basic=True).as_poly(DE.t))
Dhd = Poly(j + 1, DE.t)*hd**2
ha, hd = Dha, Dhd
Ff, _ = F.div(gcd(F, Q))
F_stara, F_stard = frac_in(Ff, DE.t)
if F_stara.degree(DE.t) - F_stard.degree(DE.t) > 0:
QBC = Poly(Q, DE.t)*B**(1 + j)*C**(n + j)
H = QBC
H_list.append(H)
H = (QBC*F_stard).rem(F_stara)
alphas = real_roots(F_stara)
for alpha in list(alphas):
delta_a = delta_a*Poly((DE.t - alpha)**(n - j), DE.t) + Poly(H.eval(alpha), DE.t)
delta_d = delta_d*Poly((DE.t - alpha)**(n - j), DE.t)
return (delta_a, delta_d, H_list)
def recognize_derivative(a, d, DE, z=None):
"""
Compute the squarefree factorization of the denominator of f
and for each Di the polynomial H in K[x] (see Theorem 2.7.1), using the
LaurentSeries algorithm. Write Di = Gi*Ei where Gi = gcd(Hn, Di) and
gcd(Ei, Hn) = 1. Since the residues of f at the roots of Gi are all 0, and
the residue of f at a root alpha of Ei is Hn(alpha) != 0, f is the derivative of a
rational function if and only if Ei = 1 for each i, which is equivalent to
Di | H[-1] for each i.
"""
flag =True
a, d = a.cancel(d, include=True)
_, r = a.div(d)
Np, Sp = splitfactor_sqf(d, DE, coefficientD=True, z=z)
j = 1
for s, _ in Sp:
delta_a, delta_d, H = laurent_series(r, d, s, j, DE)
g = gcd(d, H[-1]).as_poly()
if g is not d:
flag = False
break
j = j + 1
return flag
def recognize_log_derivative(a, d, DE, z=None):
"""
There exists a v in K(x)* such that f = dv/v
where f a rational function if and only if f can be written as f = A/D
where D is squarefree,deg(A) < deg(D), gcd(A, D) = 1,
and all the roots of the Rothstein-Trager resultant are integers. In that case,
any of the Rothstein-Trager, Lazard-Rioboo-Trager or Czichowski algorithm
produces u in K(x) such that du/dx = uf.
"""
z = z or Dummy('z')
a, d = a.cancel(d, include=True)
_, a = a.div(d)
pz = Poly(z, DE.t)
Dd = derivation(d, DE)
q = a - pz*Dd
r, _ = d.resultant(q, includePRS=True)
r = Poly(r, z)
Np, Sp = splitfactor_sqf(r, DE, coefficientD=True, z=z)
for s, _ in Sp:
# TODO also consider the complex roots which should
# turn the flag false
a = real_roots(s.as_poly(z))
if not all(j.is_Integer for j in a):
return False
return True
def residue_reduce(a, d, DE, z=None, invert=True):
"""
Lazard-Rioboo-Rothstein-Trager resultant reduction.
Explanation
===========
Given a derivation ``D`` on k(t) and f in k(t) simple, return g
elementary over k(t) and a Boolean b in {True, False} such that f -
Dg in k[t] if b == True or f + h and f + h - Dg do not have an
elementary integral over k(t) for any h in k<t> (reduced) if b ==
False.
Returns (G, b), where G is a tuple of tuples of the form (s_i, S_i),
such that g = Add(*[RootSum(s_i, lambda z: z*log(S_i(z, t))) for
S_i, s_i in G]). f - Dg is the remaining integral, which is elementary
only if b == True, and hence the integral of f is elementary only if
b == True.
f - Dg is not calculated in this function because that would require
explicitly calculating the RootSum. Use residue_reduce_derivation().
"""
# TODO: Use log_to_atan() from rationaltools.py
# If r = residue_reduce(...), then the logarithmic part is given by:
# sum([RootSum(a[0].as_poly(z), lambda i: i*log(a[1].as_expr()).subs(z,
# i)).subs(t, log(x)) for a in r[0]])
z = z or Dummy('z')
a, d = a.cancel(d, include=True)
a, d = a.to_field().mul_ground(1/d.LC()), d.to_field().mul_ground(1/d.LC())
kkinv = [1/x for x in DE.T[:DE.level]] + DE.T[:DE.level]
if a.is_zero:
return ([], True)
_, a = a.div(d)
pz = Poly(z, DE.t)
Dd = derivation(d, DE)
q = a - pz*Dd
if Dd.degree(DE.t) <= d.degree(DE.t):
r, R = d.resultant(q, includePRS=True)
else:
r, R = q.resultant(d, includePRS=True)
R_map, H = {}, []
for i in R:
R_map[i.degree()] = i
r = Poly(r, z)
Np, Sp = splitfactor_sqf(r, DE, coefficientD=True, z=z)
for s, i in Sp:
if i == d.degree(DE.t):
s = Poly(s, z).monic()
H.append((s, d))
else:
h = R_map.get(i)
if h is None:
continue
h_lc = Poly(h.as_poly(DE.t).LC(), DE.t, field=True)
h_lc_sqf = h_lc.sqf_list_include(all=True)
for a, j in h_lc_sqf:
h = Poly(h, DE.t, field=True).exquo(Poly(gcd(a, s**j, *kkinv),
DE.t))
s = Poly(s, z).monic()
if invert:
h_lc = Poly(h.as_poly(DE.t).LC(), DE.t, field=True, expand=False)
inv, coeffs = h_lc.as_poly(z, field=True).invert(s), [S.One]
for coeff in h.coeffs()[1:]:
L = reduced(inv*coeff.as_poly(inv.gens), [s])[1]
coeffs.append(L.as_expr())
h = Poly(dict(list(zip(h.monoms(), coeffs))), DE.t)
H.append((s, h))
b = not any(cancel(i.as_expr()).has(DE.t, z) for i, _ in Np)
return (H, b)
def residue_reduce_to_basic(H, DE, z):
"""
Converts the tuple returned by residue_reduce() into a Basic expression.
"""
# TODO: check what Lambda does with RootOf
i = Dummy('i')
s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs])))
return sum(RootSum(a[0].as_poly(z), Lambda(i, i*log(a[1].as_expr()).subs(
{z: i}).subs(s))) for a in H)
def residue_reduce_derivation(H, DE, z):
"""
Computes the derivation of an expression returned by residue_reduce().
In general, this is a rational function in t, so this returns an
as_expr() result.
"""
# TODO: verify that this is correct for multiple extensions
i = Dummy('i')
return S(sum(RootSum(a[0].as_poly(z), Lambda(i, i*derivation(a[1],
DE).as_expr().subs(z, i)/a[1].as_expr().subs(z, i))) for a in H))
def integrate_primitive_polynomial(p, DE):
"""
Integration of primitive polynomials.
Explanation
===========
Given a primitive monomial t over k, and ``p`` in k[t], return q in k[t],
r in k, and a bool b in {True, False} such that r = p - Dq is in k if b is
True, or r = p - Dq does not have an elementary integral over k(t) if b is
False.
"""
Zero = Poly(0, DE.t)
q = Poly(0, DE.t)
if not p.expr.has(DE.t):
return (Zero, p, True)
from .prde import limited_integrate
while True:
if not p.expr.has(DE.t):
return (q, p, True)
Dta, Dtb = frac_in(DE.d, DE.T[DE.level - 1])
with DecrementLevel(DE): # We had better be integrating the lowest extension (x)
# with ratint().
a = p.LC()
aa, ad = frac_in(a, DE.t)
try:
rv = limited_integrate(aa, ad, [(Dta, Dtb)], DE)
if rv is None:
raise NonElementaryIntegralException
(ba, bd), c = rv
except NonElementaryIntegralException:
return (q, p, False)
m = p.degree(DE.t)
q0 = c[0].as_poly(DE.t)*Poly(DE.t**(m + 1)/(m + 1), DE.t) + \
(ba.as_expr()/bd.as_expr()).as_poly(DE.t)*Poly(DE.t**m, DE.t)
p = p - derivation(q0, DE)
q = q + q0
def integrate_primitive(a, d, DE, z=None):
"""
Integration of primitive functions.
Explanation
===========
Given a primitive monomial t over k and f in k(t), return g elementary over
k(t), i in k(t), and b in {True, False} such that i = f - Dg is in k if b
is True or i = f - Dg does not have an elementary integral over k(t) if b
is False.
This function returns a Basic expression for the first argument. If b is
True, the second argument is Basic expression in k to recursively integrate.
If b is False, the second argument is an unevaluated Integral, which has
been proven to be nonelementary.
"""
# XXX: a and d must be canceled, or this might return incorrect results
z = z or Dummy("z")
s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs])))
g1, h, r = hermite_reduce(a, d, DE)
g2, b = residue_reduce(h[0], h[1], DE, z=z)
if not b:
i = cancel(a.as_expr()/d.as_expr() - (g1[1]*derivation(g1[0], DE) -
g1[0]*derivation(g1[1], DE)).as_expr()/(g1[1]**2).as_expr() -
residue_reduce_derivation(g2, DE, z))
i = NonElementaryIntegral(cancel(i).subs(s), DE.x)
return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) +
residue_reduce_to_basic(g2, DE, z), i, b)
# h - Dg2 + r
p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2,
DE, z) + r[0].as_expr()/r[1].as_expr())
p = p.as_poly(DE.t)
q, i, b = integrate_primitive_polynomial(p, DE)
ret = ((g1[0].as_expr()/g1[1].as_expr() + q.as_expr()).subs(s) +
residue_reduce_to_basic(g2, DE, z))
if not b:
# TODO: This does not do the right thing when b is False
i = NonElementaryIntegral(cancel(i.as_expr()).subs(s), DE.x)
else:
i = cancel(i.as_expr())
return (ret, i, b)
def integrate_hyperexponential_polynomial(p, DE, z):
"""
Integration of hyperexponential polynomials.
Explanation
===========
Given a hyperexponential monomial t over k and ``p`` in k[t, 1/t], return q in
k[t, 1/t] and a bool b in {True, False} such that p - Dq in k if b is True,
or p - Dq does not have an elementary integral over k(t) if b is False.
"""
t1 = DE.t
dtt = DE.d.exquo(Poly(DE.t, DE.t))
qa = Poly(0, DE.t)
qd = Poly(1, DE.t)
b = True
if p.is_zero:
return (qa, qd, b)
from sympy.integrals.rde import rischDE
with DecrementLevel(DE):
for i in range(-p.degree(z), p.degree(t1) + 1):
if not i:
continue
elif i < 0:
# If you get AttributeError: 'NoneType' object has no attribute 'nth'
# then this should really not have expand=False
# But it shouldn't happen because p is already a Poly in t and z
a = p.as_poly(z, expand=False).nth(-i)
else:
# If you get AttributeError: 'NoneType' object has no attribute 'nth'
# then this should really not have expand=False
a = p.as_poly(t1, expand=False).nth(i)
aa, ad = frac_in(a, DE.t, field=True)
aa, ad = aa.cancel(ad, include=True)
iDt = Poly(i, t1)*dtt
iDta, iDtd = frac_in(iDt, DE.t, field=True)
try:
va, vd = rischDE(iDta, iDtd, Poly(aa, DE.t), Poly(ad, DE.t), DE)
va, vd = frac_in((va, vd), t1, cancel=True)
except NonElementaryIntegralException:
b = False
else:
# q += v*t**i
if i > 0:
ti = Poly(t1**i, t1)
else:
ti = Poly(z**-i, z)
qa = qa*vd + va*ti*qd
qd *= vd
return (qa, qd, b)
def integrate_hyperexponential(a, d, DE, z=None, conds='piecewise'):
"""
Integration of hyperexponential functions.
Explanation
===========
Given a hyperexponential monomial t over k and f in k(t), return g
elementary over k(t), i in k(t), and a bool b in {True, False} such that
i = f - Dg is in k if b is True or i = f - Dg does not have an elementary
integral over k(t) if b is False.
This function returns a Basic expression for the first argument. If b is
True, the second argument is Basic expression in k to recursively integrate.
If b is False, the second argument is an unevaluated Integral, which has
been proven to be nonelementary.
"""
# XXX: a and d must be canceled, or this might return incorrect results
z = z or Dummy("z")
s = [(z, DE.t**-1)] + list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs])))
g1, h, r = hermite_reduce(a, d, DE)
g2, b = residue_reduce(h[0], h[1], DE, z=z)
if not b:
i = cancel(a.as_expr()/d.as_expr() - (g1[1]*derivation(g1[0], DE) -
g1[0]*derivation(g1[1], DE)).as_expr()/(g1[1]**2).as_expr() -
residue_reduce_derivation(g2, DE, z))
i = NonElementaryIntegral(cancel(i.subs(s)), DE.x)
return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) +
residue_reduce_to_basic(g2, DE, z), i, b)
# p should be a polynomial in t and 1/t, because Sirr == k[t, 1/t]
# h - Dg2 + r
p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2,
DE, z) + r[0].as_expr()/r[1].as_expr())
pp = as_poly_1t(p, DE.t, z)
qa, qd, b = integrate_hyperexponential_polynomial(pp, DE, z)
i = pp.nth(0, 0)
ret = ((g1[0].as_expr()/g1[1].as_expr()).subs(s) \
+ residue_reduce_to_basic(g2, DE, z))
qas = qa.as_expr().subs(s)
qds = qd.as_expr().subs(s)
if conds == 'piecewise' and DE.x not in qds.free_symbols:
# We have to be careful if the exponent is S.Zero!
# XXX: Does qd = 0 always necessarily correspond to the exponential
# equaling 1?
ret += Piecewise(
(qas/qds, Ne(qds, 0)),
(integrate((p - i).subs(DE.t, 1).subs(s), DE.x), True)
)
else:
ret += qas/qds
if not b:
i = p - (qd*derivation(qa, DE) - qa*derivation(qd, DE)).as_expr()/\
(qd**2).as_expr()
i = NonElementaryIntegral(cancel(i).subs(s), DE.x)
return (ret, i, b)
def integrate_hypertangent_polynomial(p, DE):
"""
Integration of hypertangent polynomials.
Explanation
===========
Given a differential field k such that sqrt(-1) is not in k, a
hypertangent monomial t over k, and p in k[t], return q in k[t] and
c in k such that p - Dq - c*D(t**2 + 1)/(t**2 + 1) is in k and p -
Dq does not have an elementary integral over k(t) if Dc != 0.
"""
# XXX: Make sure that sqrt(-1) is not in k.
q, r = polynomial_reduce(p, DE)
a = DE.d.exquo(Poly(DE.t**2 + 1, DE.t))
c = Poly(r.nth(1)/(2*a.as_expr()), DE.t)
return (q, c)
def integrate_nonlinear_no_specials(a, d, DE, z=None):
"""
Integration of nonlinear monomials with no specials.
Explanation
===========
Given a nonlinear monomial t over k such that Sirr ({p in k[t] | p is
special, monic, and irreducible}) is empty, and f in k(t), returns g
elementary over k(t) and a Boolean b in {True, False} such that f - Dg is
in k if b == True, or f - Dg does not have an elementary integral over k(t)
if b == False.
This function is applicable to all nonlinear extensions, but in the case
where it returns b == False, it will only have proven that the integral of
f - Dg is nonelementary if Sirr is empty.
This function returns a Basic expression.
"""
# TODO: Integral from k?
# TODO: split out nonelementary integral
# XXX: a and d must be canceled, or this might not return correct results
z = z or Dummy("z")
s = list(zip(reversed(DE.T), reversed([f(DE.x) for f in DE.Tfuncs])))
g1, h, r = hermite_reduce(a, d, DE)
g2, b = residue_reduce(h[0], h[1], DE, z=z)
if not b:
return ((g1[0].as_expr()/g1[1].as_expr()).subs(s) +
residue_reduce_to_basic(g2, DE, z), b)
# Because f has no specials, this should be a polynomial in t, or else
# there is a bug.
p = cancel(h[0].as_expr()/h[1].as_expr() - residue_reduce_derivation(g2,
DE, z).as_expr() + r[0].as_expr()/r[1].as_expr()).as_poly(DE.t)
q1, q2 = polynomial_reduce(p, DE)
if q2.expr.has(DE.t):
b = False
else:
b = True
ret = (cancel(g1[0].as_expr()/g1[1].as_expr() + q1.as_expr()).subs(s) +
residue_reduce_to_basic(g2, DE, z))
return (ret, b)
| NonElementaryIntegralException |
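A short sketch of how this exception surfaces to users (recent SymPy assumed): risch_integrate() catches it internally and returns a NonElementaryIntegral instead of raising.

from sympy import exp, symbols
from sympy.integrals.risch import NonElementaryIntegral, risch_integrate

x = symbols("x")
res = risch_integrate(exp(x**2), x)
print(isinstance(res, NonElementaryIntegral))  # True: proven nonelementary
print(risch_integrate(2*x*exp(x**2), x))       # exp(x**2)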
python | numba__numba | numba/core/typing/enumdecl.py | {
"start": 356,
"end": 498
} | class ____(AttributeTemplate):
key = types.EnumMember
def resolve_value(self, ty):
return ty.dtype
@infer_getattr
| EnumAttribute |
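A hedged sketch of what this attribute template enables (recent Numba assumed): inside nopython code, reading .value on an enum member resolves to the member type's dtype.

from enum import Enum

from numba import njit

class Color(Enum):
    RED = 1
    GREEN = 2

@njit
def unwrap(c):
    return c.value   # typed via resolve_value() -> ty.dtype

print(unwrap(Color.GREEN))  # 2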
python | psf__black | scripts/release.py | {
"start": 2205,
"end": 7567
} | class ____:
def __init__(self, black_repo_dir: Path):
# File path fun all pathlib to be platform agnostic
self.black_repo_path = black_repo_dir
self.changes_path = self.black_repo_path / "CHANGES.md"
self.docs_path = self.black_repo_path / "docs"
self.version_doc_paths = (
self.docs_path / "integrations" / "source_version_control.md",
self.docs_path / "usage_and_configuration" / "the_basics.md",
)
self.current_version = self.get_current_version()
self.next_version = self.get_next_version()
def __str__(self) -> str:
return f"""\
> SourceFiles ENV:
Repo path: {self.black_repo_path}
CHANGES.md path: {self.changes_path}
docs path: {self.docs_path}
Current version: {self.current_version}
Next version: {self.next_version}
"""
def add_template_to_changes(self) -> int:
"""Add the template to CHANGES.md if it does not exist"""
LOG.info(f"Adding template to {self.changes_path}")
with self.changes_path.open("r", encoding="utf-8") as cfp:
changes_string = cfp.read()
if "## Unreleased" in changes_string:
LOG.error(f"{self.changes_path} already has unreleased template")
return 1
templated_changes_string = changes_string.replace(
"# Change Log\n",
f"# Change Log\n\n{NEW_VERSION_CHANGELOG_TEMPLATE}",
)
with self.changes_path.open("w", encoding="utf-8") as cfp:
cfp.write(templated_changes_string)
LOG.info(f"Added template to {self.changes_path}")
return 0
def cleanup_changes_template_for_release(self) -> None:
LOG.info(f"Cleaning up {self.changes_path}")
with self.changes_path.open("r", encoding="utf-8") as cfp:
changes_string = cfp.read()
# Change Unreleased to next version
changes_string = changes_string.replace(
"## Unreleased", f"## {self.next_version}"
)
# Remove all comments
changes_string = re.sub(r"(?m)^<!--(?>(?:.|\n)*?-->)\n\n", "", changes_string)
# Remove empty subheadings
changes_string = re.sub(r"(?m)^###.+\n\n(?=#)", "", changes_string)
with self.changes_path.open("w", encoding="utf-8") as cfp:
cfp.write(changes_string)
LOG.debug(f"Finished Cleaning up {self.changes_path}")
def get_current_version(self) -> str:
"""Get the latest git (version) tag as latest version"""
return sorted(get_git_tags(), key=lambda k: tuple_calver(k))[-1]
def get_next_version(self) -> str:
"""Workout the year and month + version number we need to move to"""
base_calver = datetime.today().strftime("%y.%m")
calver_parts = base_calver.split(".")
base_calver = f"{calver_parts[0]}.{int(calver_parts[1])}" # Remove leading 0
git_tags = get_git_tags()
same_month_releases = [
t for t in git_tags if t.startswith(base_calver) and "a" not in t
]
if len(same_month_releases) < 1:
return f"{base_calver}.0"
same_month_version = same_month_releases[-1].split(".", 2)[-1]
return f"{base_calver}.{int(same_month_version) + 1}"
def update_repo_for_release(self) -> int:
"""Update CHANGES.md + doc files ready for release"""
self.cleanup_changes_template_for_release()
self.update_version_in_docs()
return 0 # return 0 if no exceptions hit
def update_version_in_docs(self) -> None:
for doc_path in self.version_doc_paths:
LOG.info(f"Updating black version to {self.next_version} in {doc_path}")
with doc_path.open("r", encoding="utf-8") as dfp:
doc_string = dfp.read()
next_version_doc = doc_string.replace(
self.current_version, self.next_version
)
with doc_path.open("w", encoding="utf-8") as dfp:
dfp.write(next_version_doc)
LOG.debug(
f"Finished updating black version to {self.next_version} in {doc_path}"
)
def _handle_debug(debug: bool) -> None:
"""Turn on debugging if asked otherwise INFO default"""
log_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)",
level=log_level,
)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--add-changes-template",
action="store_true",
help="Add the Unreleased template to CHANGES.md",
)
parser.add_argument(
"-d", "--debug", action="store_true", help="Verbose debug output"
)
args = parser.parse_args()
_handle_debug(args.debug)
return args
def main() -> int:
args = parse_args()
# Need parent.parent cause script is in scripts/ directory
sf = SourceFiles(Path(__file__).parent.parent)
if args.add_changes_template:
return sf.add_template_to_changes()
LOG.info(f"Current version detected to be {sf.current_version}")
LOG.info(f"Next version will be {sf.next_version}")
return sf.update_repo_for_release()
if __name__ == "__main__": # pragma: no cover
sys.exit(main())
| SourceFiles |
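A self-contained sketch of the next-version rule get_next_version() implements (next_calver is a hypothetical helper; tags are assumed sorted ascending, alpha releases excluded):

from datetime import datetime
from typing import List

def next_calver(tags: List[str], today: datetime) -> str:
    base = f"{today.strftime('%y')}.{today.month}"  # YY.M, no leading zero
    same_month = [t for t in tags if t.startswith(base) and "a" not in t]
    if not same_month:
        return f"{base}.0"                          # first release this month
    last_patch = int(same_month[-1].split(".", 2)[-1])
    return f"{base}.{last_patch + 1}"

print(next_calver(["24.1.0", "24.2.0", "24.2.1"], datetime(2024, 2, 15)))  # 24.2.2
print(next_calver(["24.1.0"], datetime(2024, 2, 15)))                      # 24.2.0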
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/tests/test_code_hierarchy_with_skeleton.py | {
"start": 15976,
"end": 18426
} | class ____ {{
{double_forward_slash} {CodeHierarchyNodeParser._get_comment_text(chunks[1])}
}}
"""
)
def test_skeletonize_with_repeated_function() -> None:
"""Test case for code splitting using python."""
if "CI" in os.environ:
return
code_splitter = CodeHierarchyNodeParser(
language="python", skeleton=True, chunk_min_characters=0
)
text = """\
def _handle_extra_radiation_types(datetime_or_doy, epoch_year):
if np.isscalar(datetime_or_doy):
def to_doy(x): return x # noqa: E306
to_datetimeindex = partial(tools._doy_to_datetimeindex,
epoch_year=epoch_year)
to_output = tools._scalar_out
else:
def to_doy(x): return x # noqa: E306
to_datetimeindex = partial(tools._doy_to_datetimeindex,
epoch_year=epoch_year)
to_output = tools._array_out
return to_doy, to_datetimeindex, to_output"""
text_node = TextNode(
text=text,
metadata={
"module": "example.foo",
},
)
chunks: List[TextNode] = code_splitter.get_nodes_from_documents([text_node])
assert len(chunks) == 4
assert (
chunks[0].text
== f"""def _handle_extra_radiation_types(datetime_or_doy, epoch_year):
# {CodeHierarchyNodeParser._get_comment_text(chunks[1])}"""
)
assert (
chunks[1].text
== f"""def _handle_extra_radiation_types(datetime_or_doy, epoch_year):
if np.isscalar(datetime_or_doy):
def to_doy(x):
# {CodeHierarchyNodeParser._get_comment_text(chunks[2])}
to_datetimeindex = partial(tools._doy_to_datetimeindex,
epoch_year=epoch_year)
to_output = tools._scalar_out
else:
def to_doy(x):
# {CodeHierarchyNodeParser._get_comment_text(chunks[3])}
to_datetimeindex = partial(tools._doy_to_datetimeindex,
epoch_year=epoch_year)
to_output = tools._array_out
return to_doy, to_datetimeindex, to_output"""
)
assert (
chunks[2].text
== """ def to_doy(x): return x # noqa: E306"""
)
assert (
chunks[3].text
== """ def to_doy(x): return x # noqa: E306"""
)
| Example |
python | getsentry__sentry | src/sentry/monitors/processing_errors/errors.py | {
"start": 2541,
"end": 2708
} | class ____(TypedDict):
"""
Monitor was disabled for a non-billing related reason
"""
type: Literal[ProcessingErrorType.MONITOR_DISABLED]
| MonitorDisabled |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 5100,
"end": 5509
} | class ____(
typing.Iterator[int]
): # Y022 Use "collections.abc.Iterator[T]" instead of "typing.Iterator[T]" (PEP 585 syntax)
def __iter__(self) -> collections.abc.Iterator[int]:
... # Y034 "__iter__" methods in classes like "BadIterator3" usually return "self" at runtime. Consider using "typing_extensions.Self" in "BadIterator3.__iter__", e.g. "def __iter__(self) -> Self: ..."
| BadIterator3 |
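A hedged sketch of the fix the Y034 message suggests (typing_extensions assumed; on Python 3.11+ typing.Self also works): annotate __iter__ with Self so subclasses keep their own type.

import collections.abc

from typing_extensions import Self

class GoodIterator(collections.abc.Iterator[int]):
    def __init__(self, stop: int) -> None:
        self.i, self.stop = 0, stop

    def __iter__(self) -> Self:   # returns self at runtime, typed as Self
        return self

    def __next__(self) -> int:
        if self.i >= self.stop:
            raise StopIteration
        self.i += 1
        return self.i

print(list(GoodIterator(3)))  # [1, 2, 3]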
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 18895,
"end": 19326
} | class ____(BaseModel):
"""
Pool serializer for patch bodies.
"""
model_config = ConfigDict(
extra="forbid",
)
pool: Annotated[str | None, Field(title="Pool")] = None
slots: Annotated[int | None, Field(title="Slots")] = None
description: Annotated[str | None, Field(title="Description")] = None
include_deferred: Annotated[bool | None, Field(title="Include Deferred")] = None
| PoolPatchBody |
python | kamyu104__LeetCode-Solutions | Python/jump-game-ix.py | {
"start": 581,
"end": 1166
} | class ____(object):
def maxValue(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
suffix = [float("inf")]*(len(nums)+1)
for i in reversed(xrange(len(nums))):
suffix[i] = min(suffix[i+1], nums[i])
result = [0]*len(nums)
mx = left = 0
for right in xrange(len(nums)):
mx = max(mx, nums[right])
if mx > suffix[right+1]:
continue
while left <= right:
result[left] = mx
left += 1
return result
| Solution2 |
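A Python 3 restatement of the suffix-minimum technique above, since the original targets Python 2's xrange (the input is illustrative, not from the problem set):

def max_value(nums):
    n = len(nums)
    suffix = [float("inf")] * (n + 1)   # suffix[i] == min(nums[i:])
    for i in reversed(range(n)):
        suffix[i] = min(suffix[i + 1], nums[i])
    result, mx, left = [0] * n, 0, 0
    for right in range(n):
        mx = max(mx, nums[right])
        if mx > suffix[right + 1]:      # current block is still open
            continue
        while left <= right:            # block closed: flush it with its max
            result[left] = mx
            left += 1
    return result

print(max_value([2, 1, 3, 3, 2, 4]))    # [2, 2, 3, 3, 3, 4]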
python | django__django | django/core/files/images.py | {
"start": 151,
"end": 2643
} | class ____(File):
"""
A mixin for use alongside django.core.files.base.File, which provides
additional features for dealing with images.
"""
@property
def width(self):
return self._get_image_dimensions()[0]
@property
def height(self):
return self._get_image_dimensions()[1]
def _get_image_dimensions(self):
if not hasattr(self, "_dimensions_cache"):
close = self.closed
self.open()
self._dimensions_cache = get_image_dimensions(self, close=close)
return self._dimensions_cache
def get_image_dimensions(file_or_path, close=False):
"""
Return the (width, height) of an image, given an open file or a path. Set
'close' to True to close the file at the end if it is initially in an open
state.
"""
from PIL import ImageFile as PillowImageFile
p = PillowImageFile.Parser()
if hasattr(file_or_path, "read"):
file = file_or_path
file_pos = file.tell()
file.seek(0)
else:
try:
file = open(file_or_path, "rb")
except OSError:
return (None, None)
close = True
try:
# Most of the time Pillow only needs a small chunk to parse the image
# and get the dimensions, but with some TIFF files Pillow needs to
# parse the whole file.
chunk_size = 1024
while 1:
data = file.read(chunk_size)
if not data:
break
try:
p.feed(data)
except zlib.error as e:
# ignore zlib complaining on truncated stream, just feed more
# data to parser (ticket #19457).
if e.args[0].startswith("Error -5"):
pass
else:
raise
except struct.error:
# Ignore PIL failing on a too short buffer when reads return
# less bytes than expected. Skip and feed more data to the
# parser (ticket #24544).
pass
except RuntimeError:
# e.g. "RuntimeError: could not create decoder object" for
# WebP files. A different chunk_size may work.
pass
if p.image:
return p.image.size
chunk_size *= 2
return (None, None)
finally:
if close:
file.close()
else:
file.seek(file_pos)
| ImageFile |
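A hedged usage sketch (Django plus Pillow assumed; the file path is illustrative). Both entry points funnel into the incremental-parse loop above:

from django.core.files.images import ImageFile, get_image_dimensions

print(get_image_dimensions("/tmp/example.png"))  # (width, height), or (None, None)

with open("/tmp/example.png", "rb") as fh:
    img = ImageFile(fh)
    print(img.width, img.height)                 # cached after the first read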
python | wandb__wandb | tests/unit_tests/test_step_upload.py | {
"start": 7192,
"end": 16455
} | class ____:
def test_upload(self, tmp_path: Path):
api = make_api()
cmd = make_request_upload(make_tmp_file(tmp_path))
run_step_upload([cmd], api=api)
api.upload_file_retry.assert_called_once()
assert api.upload_file_retry.call_args[0][0] == get_upload_url(cmd.save_name)
def test_reuploads_if_event_during_upload(self, tmp_path: Path):
f = make_tmp_file(tmp_path)
api = UploadBlockingMockApi()
q = queue.Queue()
q.put(make_request_upload(f))
step_upload = make_step_upload(api=api, event_queue=q)
step_upload.start()
unblock = api.wait_for_upload(2)
q.put(make_request_upload(f))
# TODO(spencerpearson): if we RequestUpload _several_ more times,
# it seems like we should still only reupload once?
# But as of 2022-12-15, the behavior is to reupload several more times,
# the not-yet-actionable requests not being deduped against each other.
time.sleep(0.1) # TODO: better way to wait for the message to be processed
assert api.upload_file_retry.call_count == 1
unblock()
unblock = api.wait_for_upload(2)
assert unblock
unblock()
finish_and_wait(q)
assert api.upload_file_retry.call_count == 2
@pytest.mark.parametrize("copied", [True, False])
def test_deletes_after_upload_iff_copied(self, tmp_path: Path, copied: bool):
f = make_tmp_file(tmp_path)
api = UploadBlockingMockApi()
q = queue.Queue()
q.put(make_request_upload(f, copied=copied))
step_upload = make_step_upload(api=api, event_queue=q)
step_upload.start()
unblock = api.wait_for_upload(2)
assert f.exists()
unblock()
finish_and_wait(q)
if copied:
assert not f.exists()
else:
assert f.exists()
class TestErrorDoesntStopFutureUploads:
def test_nonexistent_file_upload(self, tmp_path: Path):
api = make_api()
good_cmd = make_request_upload(make_tmp_file(tmp_path))
run_step_upload(
[make_request_upload(tmp_path / "nonexistent-file.txt"), good_cmd],
api=api,
max_threads=1,
)
good_url = get_upload_url(good_cmd.save_name)
assert api.upload_file_retry.call_args[0][0] == good_url
def test_upload_urls_err(self, tmp_path: Path):
api = make_api(
upload_urls=Mock(
wraps=mock_upload_urls,
side_effect=[Exception("upload_urls failed"), DEFAULT],
)
)
good_cmd = make_request_upload(make_tmp_file(tmp_path))
run_step_upload(
[make_request_upload(make_tmp_file(tmp_path)), good_cmd],
api=api,
max_threads=1,
)
good_url = get_upload_url(good_cmd.save_name)
assert api.upload_file_retry.call_args[0][0] == good_url
def test_upload_file_retry_err(self, tmp_path: Path):
api = make_api(
upload_file_retry=Mock(
wraps=mock_upload_file_retry,
side_effect=[Exception("upload_file_retry failed"), DEFAULT],
),
)
good_cmd = make_request_upload(make_tmp_file(tmp_path))
run_step_upload(
[make_request_upload(make_tmp_file(tmp_path)), good_cmd],
api=api,
max_threads=1,
)
good_url = get_upload_url(good_cmd.save_name)
assert api.upload_file_retry.call_args[0][0] == good_url
def test_save_fn_err(self, tmp_path: Path):
api = make_api()
good_cmd = make_request_upload(make_tmp_file(tmp_path))
run_step_upload(
[
make_request_upload(
make_tmp_file(tmp_path),
save_fn=Mock(side_effect=Exception("save_fn failed")),
),
good_cmd,
],
api=api,
max_threads=1,
)
good_url = get_upload_url(good_cmd.save_name)
assert api.upload_file_retry.call_args[0][0] == good_url
class TestStats:
def test_updates_on_read_without_save_fn(self, tmp_path: Path):
f = make_tmp_file(tmp_path)
mock_stats = Mock(spec=stats.Stats)
run_step_upload([make_request_upload(f)], stats=mock_stats)
mock_stats.update_uploaded_file.assert_called_with(str(f), f.stat().st_size)
def test_updates_on_read_with_save_fn(self, tmp_path: Path):
f = make_tmp_file(tmp_path)
size = f.stat().st_size
mock_stats = Mock(spec=stats.Stats)
run_step_upload(
[make_request_upload(f, save_fn=lambda progress: progress(size, size))],
stats=mock_stats,
)
mock_stats.update_uploaded_file.assert_called_with(str(f), f.stat().st_size)
@pytest.mark.parametrize(
"save_fn",
[
None,
Mock(side_effect=Exception("save_fn failed")),
],
)
def test_updates_on_failure(
self,
tmp_path: Path,
save_fn: Optional[Callable[[int, int], None]],
):
f = make_tmp_file(tmp_path)
api = make_api(
upload_file_retry=Mock(
side_effect=Exception("upload_file_retry failed")
),
)
mock_stats = Mock(spec=stats.Stats)
run_step_upload(
[make_request_upload(f, save_fn=save_fn)],
api=api,
stats=mock_stats,
)
mock_stats.update_failed_file.assert_called_once_with(str(f))
@pytest.mark.parametrize("deduped", [True, False])
def test_update_on_deduped(
self,
tmp_path: Path,
deduped: bool,
):
f = make_tmp_file(tmp_path)
mock_stats = Mock(spec=stats.Stats)
run_step_upload(
[make_request_upload(f, save_fn=Mock(return_value=deduped))],
stats=mock_stats,
)
if deduped:
mock_stats.set_file_deduped.assert_called_once_with(str(f))
else:
mock_stats.set_file_deduped.assert_not_called()
class TestNotifiesFileStreamOnSuccess:
class TestWithoutSaveFn:
def test_notifies_on_success(self, tmp_path: Path):
api = make_api()
cmd = make_request_upload(make_tmp_file(tmp_path))
mock_file_stream = Mock(spec=file_stream.FileStreamApi)
run_step_upload(
[cmd],
api=api,
file_stream=mock_file_stream,
)
mock_file_stream.push_success.assert_called_once_with(
cmd.artifact_id, cmd.save_name
)
def test_no_notify_on_upload_urls_err(self, tmp_path: Path):
api = make_api(upload_urls=Mock(side_effect=Exception()))
cmd = make_request_upload(make_tmp_file(tmp_path))
mock_file_stream = Mock(spec=file_stream.FileStreamApi)
run_step_upload([cmd], api=api, file_stream=mock_file_stream)
api.upload_urls.assert_called_once()
mock_file_stream.push_success.assert_not_called()
def test_no_notify_on_upload_file_err(self, tmp_path: Path):
api = make_api(upload_file_retry=Mock(side_effect=Exception()))
cmd = make_request_upload(make_tmp_file(tmp_path))
mock_file_stream = Mock(spec=file_stream.FileStreamApi)
run_step_upload([cmd], api=api, file_stream=mock_file_stream)
api.upload_file_retry.assert_called_once()
mock_file_stream.push_success.assert_not_called()
class TestWithSaveFn:
@pytest.mark.parametrize(
"deduped",
[True, False],
)
def test_notifies_on_success(self, tmp_path: Path, deduped: bool):
cmd = make_request_upload(
make_tmp_file(tmp_path), save_fn=Mock(return_value=deduped)
)
mock_file_stream = Mock(spec=file_stream.FileStreamApi)
run_step_upload([cmd], file_stream=mock_file_stream)
mock_file_stream.push_success.assert_called_once_with(
cmd.artifact_id, cmd.save_name
)
def test_no_notify_on_err(self, tmp_path: Path):
cmd = make_request_upload(
make_tmp_file(tmp_path), save_fn=Mock(side_effect=Exception())
)
mock_file_stream = Mock(spec=file_stream.FileStreamApi)
run_step_upload([cmd], file_stream=mock_file_stream)
mock_file_stream.push_success.assert_not_called()
| TestUpload |
python | viewflow__viewflow | viewflow/views/list.py | {
"start": 10795,
"end": 11168
} | class ____(object):
bulk_actions = None
def get_bulk_actions(self, *actions):
if self.viewset is not None and hasattr(self.viewset, "get_list_bulk_actions"):
actions = self.viewset.get_list_bulk_actions(self.request) + actions
if self.bulk_actions:
actions = self.bulk_actions + actions
return actions
| BulkActionsMixin |
python | rushter__MLAlgorithms | mla/ensemble/gbm.py | {
"start": 2008,
"end": 4058
} | class ____(BaseEstimator):
"""Gradient boosting trees with Taylor's expansion approximation (as in xgboost)."""
def __init__(
self,
n_estimators,
learning_rate=0.1,
max_features=10,
max_depth=2,
min_samples_split=10,
):
self.min_samples_split = min_samples_split
self.learning_rate = learning_rate
self.max_depth = max_depth
self.max_features = max_features
self.n_estimators = n_estimators
self.trees = []
self.loss = None
def fit(self, X, y=None):
self._setup_input(X, y)
self.y_mean = np.mean(y)
self._train()
def _train(self):
# Initialize model with zeros
y_pred = np.zeros(self.n_samples, np.float32)
# Or mean
# y_pred = np.full(self.n_samples, self.y_mean)
for n in range(self.n_estimators):
residuals = self.loss.grad(self.y, y_pred)
tree = Tree(regression=True, criterion=mse_criterion)
# Pass multiple target values to the tree learner
targets = {
# Residual values
"y": residuals,
# Actual target values
"actual": self.y,
# Predictions from previous step
"y_pred": y_pred,
}
tree.train(
self.X,
targets,
max_features=self.max_features,
min_samples_split=self.min_samples_split,
max_depth=self.max_depth,
loss=self.loss,
)
predictions = tree.predict(self.X)
y_pred += self.learning_rate * predictions
self.trees.append(tree)
def _predict(self, X=None):
y_pred = np.zeros(X.shape[0], np.float32)
for i, tree in enumerate(self.trees):
y_pred += self.learning_rate * tree.predict(X)
return y_pred
def predict(self, X=None):
return self.loss.transform(self._predict(X))
| GradientBoosting |
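A tiny NumPy illustration of the residual-fitting loop in _train(), with a constant "stump" standing in for the tree learner (illustrative only, not the class above):

import numpy as np

y = np.array([3.0, 5.0, 7.0, 9.0])
y_pred = np.zeros_like(y)            # initialize the model with zeros
learning_rate = 0.5

for step in range(10):
    residuals = y - y_pred           # pseudo-residuals for squared loss
    stump = residuals.mean()         # weakest possible learner: one constant
    y_pred += learning_rate * stump  # shrunken correction, as in _train()

print(np.round(y_pred, 3))           # approaches mean(y) == 6.0 everywhere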
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 32125,
"end": 33747
} | class ____(GeneratedAirbyteSource):
class OAuth20:
@public
def __init__(
self,
client_id: str,
client_secret: str,
refresh_token: str,
auth_method: Optional[str] = None,
):
self.auth_method = check.opt_str_param(auth_method, "auth_method")
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
class AccessToken:
@public
def __init__(self, access_token: str, auth_method: Optional[str] = None):
self.auth_method = check.opt_str_param(auth_method, "auth_method")
self.access_token = check.str_param(access_token, "access_token")
@public
def __init__(
self,
name: str,
org_id: int,
credentials: Union["LinkedinPagesSource.OAuth20", "LinkedinPagesSource.AccessToken"],
):
"""Airbyte Source for Linkedin Pages.
Documentation can be found at https://docs.airbyte.com/integrations/sources/linkedin-pages/
Args:
name (str): The name of the destination.
org_id (int): Specify the Organization ID
"""
self.org_id = check.int_param(org_id, "org_id")
self.credentials = check.inst_param(
credentials,
"credentials",
(LinkedinPagesSource.OAuth20, LinkedinPagesSource.AccessToken),
)
super().__init__("Linkedin Pages", name)
| LinkedinPagesSource |
python | getsentry__sentry | src/sentry/analytics/events/sentryapp_issue_webhooks.py | {
"start": 582,
"end": 697
} | class ____(SentryAppIssueEvent):
pass
@analytics.eventclass("sentry_app.issue.unresolved")
| SentryAppIssueResolved |
python | huggingface__transformers | src/transformers/models/florence2/modeling_florence2.py | {
"start": 10390,
"end": 12712
} | class ____(nn.Module):
def __init__(
self,
config: Florence2VisionConfig,
stage_idx: int,
drop_path_rate: float,
):
super().__init__()
self.config = config
dim_in = config.embed_dim[stage_idx]
self.conv1 = nn.Conv2d(
dim_in,
dim_in,
kernel_size=3,
padding=1,
groups=dim_in,
)
self.norm1 = nn.LayerNorm(config.embed_dim[stage_idx])
self.channel_attn = Florence2VisionChannelAttention(config=config, stage_idx=stage_idx)
self.drop_path1 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
self.conv2 = nn.Conv2d(
dim_in,
dim_in,
kernel_size=3,
padding=1,
groups=dim_in,
)
self.norm2 = nn.LayerNorm(config.embed_dim[stage_idx])
self.ffn = Florence2VisionMLP(config=config, stage_idx=stage_idx)
self.drop_path2 = Florence2VisionDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
def forward(self, hidden_states: torch.Tensor):
batch_size, embed_dim, height, width = hidden_states.shape
# First channel block: Depthwise Conv + Channel Attention
hidden_states = self.conv1(hidden_states) + hidden_states
hidden_states = hidden_states.flatten(2).transpose(1, 2)
residual = hidden_states
# Channel group attention self-attention mechanism
hidden_states = self.norm1(hidden_states)
hidden_states = self.channel_attn(hidden_states)
hidden_states = residual + self.drop_path1(hidden_states)
hidden_states = hidden_states.transpose(1, 2).view(batch_size, embed_dim, height, width)
# Second channel block: Depthwise Conv + FFN
hidden_states = self.conv2(hidden_states) + hidden_states
hidden_states = hidden_states.flatten(2).transpose(1, 2)
residual = hidden_states
# FFN
hidden_states = self.norm2(hidden_states)
hidden_states = self.ffn(hidden_states)
hidden_states = residual + self.drop_path2(hidden_states)
hidden_states = hidden_states.transpose(1, 2).view(batch_size, embed_dim, height, width)
return hidden_states
| Florence2VisionChannelBlock |
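The non-obvious part of the forward pass above is the repeated shape round-trip: the depthwise convolutions need channels-first (B, C, H, W) while LayerNorm and the channel attention need tokens-last (B, H*W, C). A small self-contained check of that round-trip:
import torch
batch_size, embed_dim, height, width = 2, 8, 4, 4
x = torch.randn(batch_size, embed_dim, height, width)
tokens = x.flatten(2).transpose(1, 2)  # (B, H*W, C) for LayerNorm/attention
assert tokens.shape == (batch_size, height * width, embed_dim)
restored = tokens.transpose(1, 2).view(batch_size, embed_dim, height, width)
assert torch.equal(restored, x)  # the reshape is lossless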
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_boolean_trap/FBT.py | {
"start": 1951,
"end": 2807
} | class ____:
def __init__(self) -> None:
self._switches = [False] * len(Switch)
# FBT001: Boolean positional arg in function definition
def __setitem__(self, switch: Switch, value: bool) -> None:
self._switches[switch.value] = value
@foo.setter
def foo(self, value: bool) -> None:
pass
# FBT001: Boolean positional arg in function definition
def foo(self, value: bool) -> None:
pass
def foo(self) -> None:
object.__setattr__(self, "flag", True)
from typing import Optional, Union, Self
def func(x: Union[list, Optional[int | str | float | bool]]):
pass
def func(x: bool | str):
pass
def func(x: int | str):
pass
from typing import override
@override
def func(x: bool):
pass
settings(True)
from dataclasses import dataclass, InitVar
@dataclass
| Registry |
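The fixture above exercises ruff's flake8-boolean-trap checks (FBT001 flags boolean positional parameters). The conventional fix, which is not part of the fixture, is to make such flags keyword-only:
def set_flag(name: str, *, value: bool) -> None:
    # `value` is keyword-only, so every call site spells out the intent.
    print(f"{name} -> {value}")
set_flag("debug", value=True)  # OK
# set_flag("debug", True)      # TypeError: value is keyword-only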
python | ansible__ansible | test/units/module_utils/facts/test_collectors.py | {
"start": 11897,
"end": 12490
} | class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'pkg_mgr']
valid_subsets = ['pkg_mgr']
fact_namespace = 'ansible_pkgmgr'
collector_class = OpenBSDPkgMgrFactCollector
def test_collect(self):
module = self._mock_module()
fact_collector = self.collector_class()
facts_dict = fact_collector.collect(module=module, collected_facts=self.collected_facts)
self.assertIsInstance(facts_dict, dict)
self.assertIn('pkg_mgr', facts_dict)
self.assertEqual(facts_dict['pkg_mgr'], 'openbsd_pkg')
| TestOpenBSDPkgMgrFacts |
python | paramiko__paramiko | paramiko/proxy.py | {
"start": 1240,
"end": 4648
} | class ____(ClosingContextManager):
"""
Wraps a subprocess running ProxyCommand-driven programs.
    This class implements the socket-like interface needed by the
    `.Transport` and `.Packetizer` classes. Using this class instead of a
    regular socket makes it possible to talk with a Popen'd command that will
    proxy traffic between the client and a server hosted on another machine.
Instances of this class may be used as context managers.
"""
def __init__(self, command_line):
"""
        Create a new ProxyCommand instance. The instance created by this
class can be passed as an argument to the `.Transport` class.
:param str command_line:
the command that should be executed and used as the proxy.
"""
if subprocess is None:
raise subprocess_import_error
self.cmd = shlex.split(command_line)
self.process = subprocess.Popen(
self.cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=0,
)
self.timeout = None
def send(self, content):
"""
Write the content received from the SSH client to the standard
input of the forked command.
:param str content: string to be sent to the forked command
"""
try:
self.process.stdin.write(content)
except IOError as e:
# There was a problem with the child process. It probably
# died and we can't proceed. The best option here is to
# raise an exception informing the user that the informed
# ProxyCommand is not working.
raise ProxyCommandFailure(" ".join(self.cmd), e.strerror)
return len(content)
def recv(self, size):
"""
Read from the standard output of the forked program.
        :param int size: how many bytes should be read
:return: the string of bytes read, which may be shorter than requested
"""
try:
buffer = b""
start = time.time()
while len(buffer) < size:
select_timeout = None
if self.timeout is not None:
elapsed = time.time() - start
if elapsed >= self.timeout:
raise socket.timeout()
select_timeout = self.timeout - elapsed
r, w, x = select([self.process.stdout], [], [], select_timeout)
if r and r[0] == self.process.stdout:
buffer += os.read(
self.process.stdout.fileno(), size - len(buffer)
)
return buffer
except socket.timeout:
if buffer:
# Don't raise socket.timeout, return partial result instead
return buffer
raise # socket.timeout is a subclass of IOError
except IOError as e:
raise ProxyCommandFailure(" ".join(self.cmd), e.strerror)
def close(self):
os.kill(self.process.pid, signal.SIGTERM)
@property
def closed(self):
return self.process.returncode is not None
@property
def _closed(self):
# Concession to Python 3 socket-like API
return self.closed
def settimeout(self, timeout):
self.timeout = timeout
| ProxyCommand |
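A minimal usage sketch: a `ProxyCommand` instance is passed where `.Transport` expects a socket. The jump-host command line and the credentials are placeholders, not values taken from the class above.
import paramiko
sock = paramiko.ProxyCommand("ssh -W target.example.com:22 jumphost")
transport = paramiko.Transport(sock)  # uses the socket-like interface above
try:
    transport.connect(username="user", password="secret")
finally:
    transport.close()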
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/multi_mesh_test.py | {
"start": 2600,
"end": 28241
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super(MultiMeshTest, self).setUp()
self.first_mesh = _ONE_D_CPU_MESH
if test_util.is_tpu_present():
self.second_mesh = _ONE_D_TPU_MESH
elif test_util.is_gpu_present():
self.second_mesh = _ONE_D_GPU_MESH
else:
self.second_mesh = _ONE_D_CPU_MESH_Y
device_type = config.preferred_device_type()
if device_type != 'TPU':
test_util.reset_logical_devices(device_type, 2)
accelerator_util.initialize_accelerator_system(device_type)
def testBasicCopyToMesh(self):
target_layout = Layout.replicated(self.first_mesh, rank=1)
numpy_value = np.zeros([3], dtype=np.int32)
dtensor_copy_from_numpy = api.copy_to_mesh(numpy_value, target_layout)
self.assertDTensorEqual(numpy_value, target_layout, dtensor_copy_from_numpy)
numpy_value = np.ones([3], dtype=np.int32)
src_mesh = api.copy_to_mesh(
numpy_value, Layout.replicated(self.second_mesh, rank=1)
)
dtensor_from_another_mesh = api.copy_to_mesh(src_mesh, target_layout)
self.assertDTensorEqual(
numpy_value, target_layout, dtensor_from_another_mesh
)
@parameterized.named_parameters(
dict(testcase_name='Graph', is_eager=False),
dict(testcase_name='Eager', is_eager=True),
)
def testCopyToMeshOneToOneSharded(self, is_eager):
if not test_util.is_tpu_present():
self.skipForDeviceType(
['CPU'], 'Need at least one device mesh for this test.'
)
replicated_layout = Layout.replicated(self.first_mesh, rank=1)
first_layout = Layout([_MESH_DIM_X], self.first_mesh)
second_layout = Layout([_MESH_DIM_X], self.second_mesh)
numpy_value = np.zeros([8], dtype=np.int32)
dt_value = api.copy_to_mesh(numpy_value, replicated_layout)
self.assertDTensorEqual(numpy_value, replicated_layout, dt_value)
def fn(val):
dt_source = api.relayout(val, first_layout)
dt_target = api.copy_to_mesh(dt_source, second_layout)
return dt_source, dt_target
if not is_eager:
fn = polymorphic_function.function(fn)
dt_source, dt_target = fn(dt_value)
self.assertDTensorEqual(numpy_value, first_layout, dt_source)
self.assertDTensorEqual(numpy_value, second_layout, dt_target)
@parameterized.named_parameters(
dict(testcase_name='Graph', is_eager=False),
dict(testcase_name='Eager', is_eager=True),
)
def testCopyToMeshToShardedLayout(self, is_eager):
target_layout = Layout([_MESH_DIM_X], self.first_mesh)
a_np = array_ops.zeros([8], dtype=dtypes.int32)
def fn(val):
return api.copy_to_mesh(val, target_layout)
if not is_eager:
fn = polymorphic_function.function(fn)
with api.default_mesh(self.first_mesh):
dt_value = fn(a_np)
self.assertDTensorEqual(a_np, target_layout, dt_value)
def testNestedDefaultMesh(self):
@polymorphic_function.function
def func(a):
return a + 3.0
with api.default_mesh(self.first_mesh):
with api.default_mesh(self.second_mesh):
with api.default_mesh(self.first_mesh):
result = func(array_ops.ones(shape=()))
self.assertEqual(api.fetch_layout(result).mesh, self.first_mesh)
result = func(array_ops.ones(shape=()))
self.assertEqual(api.fetch_layout(result).mesh, self.second_mesh)
result = func(array_ops.ones(shape=()))
self.assertEqual(api.fetch_layout(result).mesh, self.first_mesh)
def testImplicitCopyToCPUMeshForStrings(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
host_device_id = test_util.create_device_ids_array((1,))
host_cpu_mesh = Mesh(
[_MESH_DIM_X],
host_device_id,
np.ravel(host_device_id).tolist(),
test_util.create_device_list((1,), 'CPU'),
)
replicated_layout_on_cpu = Layout.replicated(host_cpu_mesh, rank=0)
replicated_layout_on_tpu = Layout.replicated(self.second_mesh, rank=0)
string_tensor = constant_op.constant('hello')
@polymorphic_function.function
def f(tensor, dtensor_a, dtensor_b):
      # Return an identity op for all three inputs so that the linter does not
# complain about unused variables.
return tensor, dtensor_a, dtensor_b
cpu_dtensor = api.copy_to_mesh(
constant_op.constant(1), replicated_layout_on_cpu
)
tpu_dtensor = api.copy_to_mesh(
constant_op.constant(1), replicated_layout_on_tpu
)
string_dtensor, _, _ = f(string_tensor, cpu_dtensor, tpu_dtensor)
# Regular string tensor should be implicitly copied onto the CPU mesh,
# not the TPU mesh.
self.assertEqual(api.fetch_layout(string_dtensor), replicated_layout_on_cpu)
def testMultiMeshBroadcast(self):
first_mesh_a = api.copy_to_mesh(
np.zeros([3], dtype=np.int32),
Layout.replicated(self.first_mesh, rank=1),
)
second_mesh_a = api.copy_to_mesh(
np.ones([3], dtype=np.int32),
Layout.replicated(self.second_mesh, rank=1),
)
self.assertDTensorEqual(
np.asarray([1, 1, 1], dtype=np.int32),
Layout.replicated(self.first_mesh, rank=1), first_mesh_a + 1)
# Run an add with small constant - the constant should be broadcasted
# onto the second mesh rather than the first.
self.assertDTensorEqual(
np.asarray([2, 2, 2], dtype=np.int32),
Layout.replicated(self.second_mesh, rank=1), second_mesh_a + 1)
def testMultiMeshAdd(self):
a = constant_op.constant(1, dtype=dtypes.int32)
b = constant_op.constant(2, dtype=dtypes.int32)
with ops.device_v2(api.device_name()):
first_mesh_a = api.copy_to_mesh(
a, Layout.replicated(self.first_mesh, rank=0)
)
first_mesh_b = api.copy_to_mesh(
b, Layout.replicated(self.first_mesh, rank=0)
)
      # Copy-to-mesh doesn't work with multi-mesh as we always broadcast to
      # the default mesh.
# TODO(hthu): Use copy-to-mesh after the generic Relayout CL is in.
second_mesh_a = api.copy_to_mesh(
np.ones([3], dtype=np.int32),
Layout.replicated(self.second_mesh, rank=1),
)
second_mesh_b = api.copy_to_mesh(
np.zeros([3], dtype=np.int32),
Layout.replicated(self.second_mesh, rank=1),
)
first_mesh_result = first_mesh_a + first_mesh_b
second_mesh_result = second_mesh_a + second_mesh_b
self.assertDTensorEqual(
np.asarray(3, dtype=np.int32),
Layout.replicated(self.first_mesh, rank=0), first_mesh_result)
self.assertDTensorEqual(
np.ones([3], dtype=np.int32),
Layout.replicated(self.second_mesh, rank=1), second_mesh_result)
def testMultiMeshFunc(self):
a = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
with ops.device_v2(api.device_name()):
first_mesh_a = api.copy_to_mesh(
a, Layout.replicated(self.first_mesh, rank=1)
)
second_mesh_a = api.copy_to_mesh(
np.ones([4], dtype=np.int32),
Layout.replicated(self.second_mesh, rank=1),
)
with self.assertRaises(errors_impl.UnknownError):
# fails mesh propagation as it requires all inputs to be on the same
# mesh.
# pylint: disable=pointless-statement
first_mesh_a + second_mesh_a
# pylint: enable=pointless-statement
def testMultiMeshInSideFunctionLayoutV2(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
replicated_layout_on_tpu = Layout.replicated(self.second_mesh, rank=1)
host_device_id = test_util.create_device_ids_array((1,))
host_cpu_mesh = Mesh(
[_MESH_DIM_X],
host_device_id,
np.ravel(host_device_id).tolist(),
test_util.create_device_list((1,), 'CPU'),
)
replicated_layout_on_cpu = Layout.replicated(host_cpu_mesh, rank=0)
a = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
def func(t):
t = math_ops.cast(t, dtypes.float32)
t = math_ops.reduce_sum(t)
return math_ops.sqrt(t)
golden_result = func(a)
a = api.copy_to_mesh(a, replicated_layout_on_tpu)
@polymorphic_function.function
def cpu_func(t):
return math_ops.sqrt(t)
@polymorphic_function.function
def tpu_func(t):
t = math_ops.cast(t, dtypes.float32)
t = math_ops.reduce_sum(t)
cpu_tensor = api.copy_to_mesh(t, replicated_layout_on_cpu)
return cpu_func(cpu_tensor)
with ops.device_v2(api.device_name()):
output = tpu_func(a)
self.assertDTensorEqual(golden_result, replicated_layout_on_cpu, output)
def testMultiMeshCancellation(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
host_device_id = test_util.create_device_ids_array((1,))
host_cpu_mesh = Mesh(
[_MESH_DIM_X],
host_device_id,
np.ravel(host_device_id).tolist(),
test_util.create_device_list((1,), 'CPU'),
)
replicated_layout_on_cpu = Layout([UNSHARDED], host_cpu_mesh)
replicated_layout_on_tpu = Layout([UNSHARDED], self.second_mesh)
@polymorphic_function.function
def cpu_func(x):
# Integer division by 0, which returns a bad status.
x = math_ops.cast(gen_math_ops.div(x=x, y=x), dtypes.float32)
return math_ops.cast(x, dtypes.float32)
@polymorphic_function.function
def tpu_func(cpu_tensor):
cpu_result = cpu_func(cpu_tensor)
tpu_tensor = api.copy_to_mesh(cpu_result, replicated_layout_on_tpu)
# A reduction on the TPU mesh which must be cancelled in response to the
# CPU mesh's failure.
return math_ops.reduce_sum(tpu_tensor)
cpu_tensor = api.copy_to_mesh(
constant_op.constant([0, 1]), replicated_layout_on_cpu
)
with self.assertRaisesRegex(Exception, 'Integer division by zero'):
# Ensure any errors are raised at end of scope.
with context.async_scope():
with ops.device_v2(api.device_name()):
tpu_func(cpu_tensor)
def testMultiMeshCPUToTPUTransfer(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
multiple_host_device_id = test_util.create_device_ids_array((2,))
host_multi_cpu_mesh = Mesh(
[_MESH_DIM_X],
multiple_host_device_id,
np.ravel(multiple_host_device_id).tolist(),
test_util.create_device_list((2,), 'CPU'),
)
replicated_layout_on_cpu = Layout.replicated(host_multi_cpu_mesh, rank=1)
sharded_layout_on_tpu_r1 = Layout([_MESH_DIM_X], self.second_mesh)
replicated_layout_on_tpu_r1 = Layout.replicated(self.second_mesh, rank=1)
a = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
a = api.copy_to_mesh(a, replicated_layout_on_cpu)
@polymorphic_function.function
def tpu_func(t):
return api.relayout(t, sharded_layout_on_tpu_r1)
@polymorphic_function.function
def cpu_func(t):
t = math_ops.cast(t, dtypes.float32)
tpu_tensor = api.copy_to_mesh(t, replicated_layout_on_tpu_r1)
return tpu_func(tpu_tensor)
with ops.device_v2(api.device_name()):
output = cpu_func(a)
api.check_layout(output, sharded_layout_on_tpu_r1)
def testMultiMeshUnsupportedTypes(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
host_device_id = test_util.create_device_ids_array((1,))
host_cpu_mesh = Mesh(
[_MESH_DIM_X],
host_device_id,
np.ravel(host_device_id).tolist(),
test_util.create_device_list((1,), 'CPU'),
)
replicated_layout_on_cpu = Layout.replicated(host_cpu_mesh, rank=1)
replicated_layout_on_tpu_r1 = Layout.replicated(self.second_mesh, rank=1)
s = constant_op.constant(['a', 'b', 'c'], dtype=dtypes.string)
s = api.copy_to_mesh(s, replicated_layout_on_cpu)
@polymorphic_function.function
def tpu_func(t):
return array_ops.identity(t)
@polymorphic_function.function
def cpu_func(t):
t = array_ops.identity(t)
tpu_tensor = api.copy_to_mesh(t, replicated_layout_on_tpu_r1)
return tpu_func(tpu_tensor)
with self.assertRaises(errors_impl.UnknownError) as ex:
with ops.device_v2(api.device_name()):
_ = str(cpu_func(s))
self.assertIn('unsupported output type', ex.exception.message)
def testMultiMeshCPUToCPUTransfer(self):
send_device_id = test_util.create_device_ids_array((1,))
send_cpu_mesh = Mesh(
[_MESH_DIM_X],
send_device_id,
np.ravel(send_device_id).tolist(),
test_util.create_device_list((1,), 'CPU'),
)
recv_cpu_mesh = Mesh.from_string(
'|x=1|0|0|/job:localhost/replica:0/task:0/device:CPU:1'
)
replicated_layout_on_cpu_send = Layout.replicated(send_cpu_mesh, rank=1)
replicated_layout_on_cpu_recv = Layout.replicated(recv_cpu_mesh, rank=1)
replicated_layout_on_cpu_r0 = Layout.replicated(recv_cpu_mesh, rank=0)
def func(t):
t = math_ops.cast(t, dtypes.float32)
t = math_ops.reduce_sum(t)
return math_ops.sqrt(t)
@polymorphic_function.function
def cpu_recv_func(t):
t = math_ops.reduce_sum(t)
t = math_ops.sqrt(t)
return t
@polymorphic_function.function
def cpu_send_func(t):
t = math_ops.cast(t, dtypes.float32)
cpu_recv_tensor = api.copy_to_mesh(t, replicated_layout_on_cpu_recv)
t = cpu_recv_func(cpu_recv_tensor)
return t
a = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
golden_result = func(a)
a = api.copy_to_mesh(a, replicated_layout_on_cpu_send)
with ops.device_v2(api.device_name()):
output = cpu_send_func(a)
self.assertDTensorEqual(golden_result, replicated_layout_on_cpu_r0,
output)
def testMultiMeshCPUTest(self):
device_ids = test_util.create_device_ids_array((2,))
cpu_mesh_a = Mesh(
['x'],
device_ids,
np.ravel(device_ids).tolist(),
test_util.create_device_list((2,), 'CPU'),
)
cpu_mesh_b = Mesh(
['y'],
device_ids,
np.ravel(device_ids).tolist(),
test_util.create_device_list((2,), 'CPU'),
)
replicated_layout_on_a = Layout.replicated(cpu_mesh_a, rank=1)
replicated_layout_on_b = Layout.replicated(cpu_mesh_b, rank=1)
x = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
y = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
a = api.copy_to_mesh(x, replicated_layout_on_a)
b = api.copy_to_mesh(y, replicated_layout_on_b)
@polymorphic_function.function
def func2(t1, t2):
t1 = math_ops.cast(t1, dtypes.float32)
t1 = t1 * t1
t2 = math_ops.cast(t2, dtypes.float32)
t2 = math_ops.sqrt(t2)
return t1, t2
with ops.device_v2(api.device_name()):
output1, output2 = func2(a, b)
api.check_layout(output1, replicated_layout_on_a)
api.check_layout(output2, replicated_layout_on_b)
def testFunctionWithMultiMeshInputOutputs(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
host_device_id = test_util.create_device_ids_array((1,))
host_cpu_mesh = Mesh(
[_MESH_DIM_X],
host_device_id,
np.ravel(host_device_id).tolist(),
test_util.create_device_list((1,), 'CPU'),
)
replicated_layout_on_cpu = Layout.replicated(host_cpu_mesh, rank=1)
replicated_layout_on_cpu_r0 = Layout.replicated(host_cpu_mesh, rank=0)
replicated_layout_on_tpu_r0 = Layout.replicated(self.second_mesh, rank=0)
replicated_layout_on_tpu = Layout.replicated(self.second_mesh, rank=1)
a = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
b = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
def golden_func(t1, t2):
t1 = math_ops.cast(t1, dtypes.float32)
t1 = t1 * t1
t2 = math_ops.cast(t2, dtypes.float32)
t2 = math_ops.reduce_sum(t2)
out1 = gen_math_ops.neg(t2)
t2 = t2 + t1
out0 = math_ops.sqrt(t2)
return out0, out1
golden_result0, golden_result1 = golden_func(a, b)
cpu_dtensor = api.copy_to_mesh(a, replicated_layout_on_cpu)
tpu_dtensor = api.copy_to_mesh(b, replicated_layout_on_tpu)
@polymorphic_function.function
def cpu_func(t1, t2):
t2 = t2 + t1
return math_ops.sqrt(t2)
@polymorphic_function.function
def func(tpu_input, cpu_input):
cpu_input = math_ops.cast(cpu_input, dtypes.float32)
cpu_input = cpu_input * cpu_input
tpu_input = math_ops.cast(tpu_input, dtypes.float32)
tpu_input = math_ops.reduce_sum(tpu_input)
tpu_output = gen_math_ops.neg(tpu_input)
cpu_tensor = api.copy_to_mesh(tpu_input, replicated_layout_on_cpu_r0)
cpu_output = cpu_func(cpu_tensor, cpu_input)
return cpu_output, tpu_output
with ops.device_v2(api.device_name()):
output0, output1 = func(tpu_dtensor, cpu_dtensor)
self.assertDTensorEqual(golden_result0, replicated_layout_on_cpu, output0)
self.assertDTensorEqual(golden_result1, replicated_layout_on_tpu_r0,
output1)
def testMultiMeshWithResourceOps(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
host_device_id = test_util.create_device_ids_array((1,))
host_cpu_mesh = Mesh(
[_MESH_DIM_X],
host_device_id,
np.ravel(host_device_id).tolist(),
test_util.create_device_list((1,), 'CPU'),
)
replicated_layout_on_cpu = Layout.replicated(host_cpu_mesh, rank=0)
replicated_layout_on_tpu = Layout.replicated(self.second_mesh, rank=1)
a = constant_op.constant(
[1, 2, 3, 4], dtype=dtypes.int64
) # NOTE(b/274627284): Variable of int32 type on GPU doesn't work.
def func(t):
t = math_ops.cast(t, dtypes.float32)
t = math_ops.reduce_sum(t)
return math_ops.sqrt(t)
golden_result = func(a)
@polymorphic_function.function
def cpu_func(t):
return math_ops.sqrt(t)
@polymorphic_function.function
def tpu_func(t):
t = math_ops.cast(t, dtypes.float32)
t = math_ops.reduce_sum(t)
cpu_tensor = api.copy_to_mesh(t, replicated_layout_on_cpu)
return cpu_func(cpu_tensor)
with ops.device_v2(api.device_name()):
v = api.copy_to_mesh(a, replicated_layout_on_tpu)
w = d_variable.DVariable(v)
output = tpu_func(w)
self.assertDTensorEqual(golden_result, replicated_layout_on_cpu, output)
@parameterized.named_parameters(
('_host_to_dev_sharded_i32', True, True, dtypes.int32),
('_dev_to_host_sharded_i32', False, True, dtypes.int32),
('_host_to_dev_replicated_i32', True, False, dtypes.int32),
('_dev_to_host_replicated_i32', False, False, dtypes.int32),
('_host_to_dev_sharded_bf16', True, True, dtypes.bfloat16),
('_dev_to_host_sharded_bf16', False, True, dtypes.bfloat16),
('_host_to_dev_replicated_bf16', True, False, dtypes.bfloat16),
('_dev_to_host_replicated_bf16', False, False, dtypes.bfloat16),
('_host_to_dev_sharded_f32', True, True, dtypes.float32),
('_dev_to_host_sharded_f32', False, True, dtypes.float32),
('_host_to_dev_replicated_f32', True, False, dtypes.float32),
('_dev_to_host_replicated_f32', False, False, dtypes.float32),
('_host_to_dev_sharded_f64', True, True, dtypes.float64),
('_dev_to_host_sharded_f64', False, True, dtypes.float64),
('_host_to_dev_replicated_f64', True, False, dtypes.float64),
('_dev_to_host_replicated_f64', False, False, dtypes.float64),
)
def testMultiMeshHostDeviceTransfer(self, host_to_dev, sharded, dtype):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
def run_copy_to_mesh(data, src_layout, dst_layout):
@polymorphic_function.function
def func(x):
return api.copy_to_mesh(x, dst_layout)
if src_layout.is_fully_replicated():
src_data = api.copy_to_mesh(data, src_layout)
else:
src_data = api.copy_to_mesh(
data, Layout.replicated(src_layout.mesh, rank=len(data.shape))
)
src_data = api.relayout(src_data, src_layout)
dst_data = func(src_data)
return (src_data, dst_data)
dev_mesh = self.first_mesh
cpu_mesh = self.second_mesh
if host_to_dev:
src_mesh, dst_mesh = cpu_mesh, dev_mesh
else:
src_mesh, dst_mesh = dev_mesh, cpu_mesh
if sharded:
src_layout = Layout.batch_sharded(src_mesh, src_mesh.dim_names[0], rank=2)
dst_layout = Layout.batch_sharded(dst_mesh, dst_mesh.dim_names[0], rank=2)
else:
src_layout = Layout.replicated(src_mesh, rank=2)
dst_layout = Layout.replicated(dst_mesh, rank=2)
data = array_ops.ones([8, 8], dtype=dtype)
src, dst = run_copy_to_mesh(data, src_layout, dst_layout)
self.assertDTensorEqual(data, src_layout, src)
self.assertDTensorEqual(data, dst_layout, dst)
@parameterized.named_parameters(('_host_to_tpu', True),
('_tpu_to_host', False))
def testMultiMeshWithHostMesh(self, host_to_tpu):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
sharded_layout_on_tpu = Layout([_MESH_DIM_X], self.second_mesh)
host_layout = Layout(sharded_layout_on_tpu.sharding_specs,
sharded_layout_on_tpu.mesh.host_mesh())
if host_to_tpu:
source_layout = host_layout
target_layout = sharded_layout_on_tpu
else:
source_layout = sharded_layout_on_tpu
target_layout = host_layout
numpy_a = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
# TODO(b/193443769): switch to a single copy_to_mesh when this is supported.
replicated_layout = Layout.replicated(source_layout.mesh,
source_layout.rank)
a = api.copy_to_mesh(numpy_a, replicated_layout)
a = api.relayout(a, source_layout)
@polymorphic_function.function
def func(t):
target_tensor = api.copy_to_mesh(t, target_layout)
return array_ops.identity(target_tensor)
with ops.device_v2(api.device_name()):
dtensor_output = func(a)
self.assertDTensorEqual(numpy_a, target_layout, dtensor_output)
def testMultiMeshBackward(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
replicated_layout_on_tpu = Layout.replicated(self.second_mesh, rank=1)
host_layout = Layout.replicated(self.second_mesh.host_mesh(), rank=1)
source_layout = host_layout
target_layout = replicated_layout_on_tpu
@polymorphic_function.function
def func(x):
with backprop.GradientTape() as tape:
tape.watch(x)
x = x * 4.0
t = api.copy_to_mesh(x, target_layout)
sqrt = math_ops.sqrt(t)
sqrt_grad = tape.gradient(sqrt, x)
return sqrt_grad
@polymorphic_function.function
def second(x):
with backprop.GradientTape() as tape:
tape.watch(x)
sqrt_grad = func(x)
sqrt_grad_grad = tape.gradient(sqrt_grad, x)
return sqrt_grad_grad
numpy_a = constant_op.constant([1, 4, 16, 64], dtype=dtypes.float32)
a = api.copy_to_mesh(numpy_a, source_layout)
with ops.device_v2(api.device_name()):
a_grad = func(a)
self.assertDTensorEqual(0.5 * 0.5 * (1 / numpy_a)**0.5, host_layout, a_grad)
with ops.device_v2(api.device_name()):
a_grad_grad = second(a)
self.assertDTensorEqual(-0.5 * 0.5 * 0.5 * (1 / numpy_a)**1.5, host_layout,
a_grad_grad)
def testMultiMeshMultipleCopyToMesh(self):
self.skipForDeviceType(
['CPU'],
        'Skipping test as only CPU mesh is available for multi-mesh test.',
)
sharded_layout_on_tpu = Layout([_MESH_DIM_X], self.second_mesh)
host_layout = Layout(
sharded_layout_on_tpu.sharding_specs,
sharded_layout_on_tpu.mesh.host_mesh(),
)
source_layout = host_layout
target_layout = sharded_layout_on_tpu
numpy_a = constant_op.constant([1, 2, 3, 4], dtype=dtypes.int32)
numpy_b = constant_op.constant([2, 2, 3, 4], dtype=dtypes.int32)
# TODO(b/193443769): switch to a single copy_to_mesh when this is supported.
replicated_layout = Layout.replicated(
source_layout.mesh, source_layout.rank
)
a = api.copy_to_mesh(numpy_a, replicated_layout)
b = api.copy_to_mesh(numpy_b, replicated_layout)
a = api.relayout(a, source_layout)
b = api.relayout(b, source_layout)
@polymorphic_function.function
def func(a, b):
a = api.copy_to_mesh(a, target_layout)
b = api.copy_to_mesh(b, target_layout)
return array_ops.identity(a), array_ops.identity(b)
with ops.device_v2(api.device_name()):
dtensor_a, dtensor_b = func(a, b)
self.assertDTensorEqual(numpy_a, target_layout, dtensor_a)
self.assertDTensorEqual(numpy_b, target_layout, dtensor_b)
def testDVariableDefaultMesh(self):
other_layout = Layout.replicated(_OTHER_CPU_MESH, rank=0)
first_layout = Layout.replicated(_ONE_D_CPU_MESH, rank=0)
_ = api.copy_to_mesh(1.0, other_layout)
init_value = api.copy_to_mesh(1.0, first_layout)
_ = d_variable.DVariable(init_value)
if __name__ == '__main__':
test.main()
| MultiMeshTest |
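The replicate-then-relayout idiom that the TODO(b/193443769) comments above keep repeating, distilled against the public tf.experimental.dtensor API; configuring two logical CPU devices is an assumption made so the sketch runs on a single machine.
import tensorflow as tf
from tensorflow.experimental import dtensor
cpu = tf.config.list_physical_devices("CPU")[0]
tf.config.set_logical_device_configuration(
    cpu, [tf.config.LogicalDeviceConfiguration()] * 2
)
mesh = dtensor.create_mesh([("x", 2)], device_type="CPU")
value = tf.constant([1, 2, 3, 4], dtype=tf.int32)
replicated = dtensor.copy_to_mesh(value, dtensor.Layout.replicated(mesh, rank=1))
sharded = dtensor.relayout(replicated, dtensor.Layout.batch_sharded(mesh, "x", rank=1))  # broadcast first, then shard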
python | pola-rs__polars | py-polars/src/polars/dataframe/group_by.py | {
"start": 27435,
"end": 33201
} | class ____:
"""
A rolling grouper.
This has an `.agg` method which will allow you to run all polars expressions in a
group by context.
"""
def __init__(
self,
df: DataFrame,
index_column: IntoExpr,
*,
period: str | timedelta,
offset: str | timedelta | None,
closed: ClosedInterval,
group_by: IntoExpr | Iterable[IntoExpr] | None,
predicates: Iterable[Any] | None,
) -> None:
period = parse_as_duration_string(period)
offset = parse_as_duration_string(offset)
self.df = df
self.time_column = index_column
self.period = period
self.offset = offset
self.closed = closed
self.group_by = group_by
self.predicates = predicates
def __iter__(self) -> Self:
from polars.lazyframe.opt_flags import QueryOptFlags
temp_col = "__POLARS_GB_GROUP_INDICES"
groups_df = (
self.df.lazy()
.with_row_index("__POLARS_GB_ROW_INDEX")
.rolling(
index_column=self.time_column,
period=self.period,
offset=self.offset,
closed=self.closed,
group_by=self.group_by,
)
.agg(F.first().alias(temp_col))
.collect(optimizations=QueryOptFlags.none())
)
self._group_names = groups_df.select(F.all().exclude(temp_col)).iter_rows()
self._group_indices = groups_df.select(temp_col).to_series()
self._current_index = 0
return self
def __next__(self) -> tuple[tuple[object, ...], DataFrame]:
if self._current_index >= len(self._group_indices):
raise StopIteration
group_name = next(self._group_names)
group_data = self.df[self._group_indices[self._current_index], :]
self._current_index += 1
return group_name, group_data
def having(self, *predicates: IntoExpr | Iterable[IntoExpr]) -> RollingGroupBy:
"""
Filter groups with a list of predicates after aggregation.
Using this method is equivalent to adding the predicates to the aggregation and
filtering afterwards.
This method can be chained and all conditions will be combined using `&`.
Parameters
----------
*predicates
Expressions that evaluate to a boolean value for each group. Typically, this
requires the use of an aggregation function. Multiple predicates are
combined using `&`.
"""
return RollingGroupBy(
self.df,
self.time_column,
period=self.period,
offset=self.offset,
closed=self.closed,
group_by=self.group_by,
predicates=_chain_predicates(self.predicates, predicates),
)
def agg(
self,
*aggs: IntoExpr | Iterable[IntoExpr],
**named_aggs: IntoExpr,
) -> DataFrame:
"""
Compute aggregations for each group of a group by operation.
Parameters
----------
*aggs
Aggregations to compute for each group of the group by operation,
specified as positional arguments.
Accepts expression input. Strings are parsed as column names.
**named_aggs
Additional aggregations, specified as keyword arguments.
The resulting columns will be renamed to the keyword used.
"""
from polars.lazyframe.opt_flags import QueryOptFlags
group_by = self.df.lazy().rolling(
index_column=self.time_column,
period=self.period,
offset=self.offset,
closed=self.closed,
group_by=self.group_by,
)
if self.predicates:
group_by = group_by.having(self.predicates)
return group_by.agg(*aggs, **named_aggs).collect(
optimizations=QueryOptFlags.none()
)
def map_groups(
self,
function: Callable[[DataFrame], DataFrame],
schema: SchemaDict | None,
) -> DataFrame:
"""
Apply a custom/user-defined function (UDF) over the groups as a new DataFrame.
Using this is considered an anti-pattern as it will be very slow because:
- it forces the engine to materialize the whole `DataFrames` for the groups.
- it is not parallelized.
- it blocks optimizations as the passed python function is opaque to the
optimizer.
The idiomatic way to apply custom functions over multiple columns is using:
`pl.struct([my_columns]).map_elements(lambda struct_series: ..)`
Parameters
----------
function
Function to apply over each group of the `LazyFrame`; it receives
a DataFrame and should return a DataFrame.
schema
Schema of the output function. This has to be known statically. If the
given schema is incorrect, this is a bug in the caller's query and may
lead to errors. If set to None, polars assumes the schema is unchanged.
"""
from polars.lazyframe.opt_flags import QueryOptFlags
if self.predicates:
msg = "cannot call `map_groups` when filtering groups with `having`"
raise TypeError(msg)
return (
self.df.lazy()
.rolling(
index_column=self.time_column,
period=self.period,
offset=self.offset,
closed=self.closed,
group_by=self.group_by,
)
.map_groups(function, schema)
.collect(optimizations=QueryOptFlags.none())
)
| RollingGroupBy |
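The usual entry point for the grouper above is `DataFrame.rolling(...).agg(...)`; a small self-contained example (the data and column names are illustrative):
from datetime import datetime
import polars as pl
df = pl.DataFrame(
    {
        "dt": [datetime(2024, 1, d) for d in (1, 2, 3, 4)],
        "value": [1, 2, 3, 4],
    }
)
out = df.rolling(index_column="dt", period="2d").agg(
    pl.col("value").sum().alias("value_2d_sum")
)
print(out)  # per row: the sum over the 2-day window ending at that timestamp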
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/dagster/dagster_pipes/dagster_pipes_details_and_customization/custom_context_injector.py | {
"start": 280,
"end": 1049
} | class ____(PipesContextInjector):
# Note that `PipesContextData` corresponds to what this document
    # calls the "context payload" -- a JSON-serializable dictionary with context info.
@contextmanager
def inject_context(self, context_data: "PipesContextData") -> Iterator[PipesParams]:
key = "".join(random.choices(string.ascii_letters, k=30))
cloud_service.write(key, json.dumps(context_data))
yield {"key": key}
def no_messages_debug_text(self) -> str:
return (
"Attempted to inject context using a `cloud_service`. Expected"
" `MyCustomCloudServiceContextLoader` to be explicitly passed to `open_dagster_pipes`"
" in the external process."
)
| MyCustomCloudServiceContextInjector |
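A hedged sketch of the loader counterpart named in `no_messages_debug_text` above, for use inside the external process; `cloud_service.read` is the assumed mirror of the `cloud_service.write` call in the injector, and the exact `PipesContextLoader` interface should be checked against `dagster_pipes`.
import json
from contextlib import contextmanager
from typing import Iterator
from dagster_pipes import PipesContextData, PipesContextLoader, PipesParams
class MyCustomCloudServiceContextLoader(PipesContextLoader):
    @contextmanager
    def load_context(self, params: PipesParams) -> Iterator[PipesContextData]:
        # Fetch the context payload written under the random key by the
        # injector above; `cloud_service` is the same assumed client object.
        yield json.loads(cloud_service.read(params["key"]))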
python | jina-ai__jina | tests/integration/docarray_v2/test_streaming.py | {
"start": 6049,
"end": 7265
} | class ____(Executor):
@requests(on='/non_generator')
def non_generator(self, docs: DocList[BaseDoc], **kwargs):
return docs
@requests(on='/generator')
def generator(self, doc: MyDocument, **kwargs):
yield MyDocument(text='new document')
@pytest.mark.asyncio
@pytest.mark.parametrize(
'executor,expected',
[
('Executor1', {'/default': True, '/non_generator': False}),
('Executor2', {'/default': False, '/generator': True}),
('Executor3', {'/generator': True, '/non_generator': False}),
],
)
async def test_endpoint_discovery(executor, expected):
from google.protobuf import json_format
from jina.logging.logger import JinaLogger
from jina.parsers import set_pod_parser
from jina.serve.runtimes.worker.request_handling import WorkerRequestHandler
args = set_pod_parser().parse_args(['--uses', executor])
handler = WorkerRequestHandler(args, JinaLogger('data request handler'))
res = await handler.endpoint_discovery(None, None)
for endpoint, is_generator in expected.items():
assert (
json_format.MessageToDict(res.schemas)[endpoint]['is_generator']
== is_generator
)
| Executor3 |
python | getsentry__sentry | src/sentry/api/endpoints/email_capture.py | {
"start": 697,
"end": 1604
} | class ____(Endpoint):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.TELEMETRY_EXPERIENCE
# Disable authentication and permission requirements.
permission_classes = (IsAuthenticated,)
def post(self, request: Request) -> Response:
if not is_demo_mode_enabled():
return Response(status=404)
serializer = EmailCaptureSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = serializer.validated_data["email"]
# Include other fields in the request and send them to Marketo together.
        # There are an undetermined number of optional fields in request.data, and we don't validate them.
# Only the email field is required.
form = request.data
form["email"] = email
client.submit_form(form)
return Response(status=200)
| EmailCaptureEndpoint |
python | anthropics__anthropic-sdk-python | src/anthropic/types/completion_create_params.py | {
"start": 3662,
"end": 3936
} | class ____(CompletionCreateParamsBase, total=False):
stream: Literal[False]
"""Whether to incrementally stream the response using server-sent events.
See [streaming](https://docs.claude.com/en/api/streaming) for details.
"""
| CompletionCreateParamsNonStreaming |
python | doocs__leetcode | solution/1500-1599/1562.Find Latest Group of Size M/Solution2.py | {
"start": 0,
"end": 404
} | from typing import List
class ____:
def findLatestStep(self, arr: List[int], m: int) -> int:
n = len(arr)
if m == n:
return n
cnt = [0] * (n + 2)
ans = -1
        for i, v in enumerate(arr):
            v -= 1
            # lengths of the 1-runs immediately to the left and right of v
            l, r = cnt[v - 1], cnt[v + 1]
            if l == m or r == m:
                # a run of exactly m ones still existed after the first i steps
                ans = i
            # record the merged run's length at its two endpoints only
            cnt[v - l] = cnt[v + r] = l + r + 1
return ans
| Solution |
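The trick above is that `cnt` stores each 1-run's length only at its two endpoints, so placing `arr[i]` needs just the two neighbor cells; `ans` records the last 0-based step at which a neighboring run of exactly `m` still existed. A short check, assuming the masked class is named `Solution` per the record's target:
sol = Solution()
print(sol.findLatestStep([3, 5, 1, 2, 4], m=1))  # 4: after step 4, bit 5 is a group of size 1
print(sol.findLatestStep([3, 1, 5, 4, 2], m=2))  # -1: no group of size 2 ever exists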
python | pypa__pip | src/pip/_internal/models/direct_url.py | {
"start": 467,
"end": 1829
} | class ____(Exception):
pass
def _get(
d: dict[str, Any], expected_type: type[T], key: str, default: T | None = None
) -> T | None:
"""Get value from dictionary and verify expected type."""
if key not in d:
return default
value = d[key]
if not isinstance(value, expected_type):
raise DirectUrlValidationError(
f"{value!r} has unexpected type for {key} (expected {expected_type})"
)
return value
def _get_required(
d: dict[str, Any], expected_type: type[T], key: str, default: T | None = None
) -> T:
value = _get(d, expected_type, key, default)
if value is None:
raise DirectUrlValidationError(f"{key} must have a value")
return value
def _exactly_one_of(infos: Iterable[InfoType | None]) -> InfoType:
infos = [info for info in infos if info is not None]
if not infos:
raise DirectUrlValidationError(
"missing one of archive_info, dir_info, vcs_info"
)
if len(infos) > 1:
raise DirectUrlValidationError(
"more than one of archive_info, dir_info, vcs_info"
)
assert infos[0] is not None
return infos[0]
def _filter_none(**kwargs: Any) -> dict[str, Any]:
"""Make dict excluding None values."""
return {k: v for k, v in kwargs.items() if v is not None}
@dataclass
| DirectUrlValidationError |
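A small illustration of the typed getters above on a PEP 610-style dict; the dict literal is made up, and the module's own imports (e.g. the `T` TypeVar) are assumed to be in scope.
info = {"url": "https://example.com/pkg-1.0.tar.gz"}
url = _get_required(info, str, "url")           # -> the URL string
editable = _get(info, bool, "editable", False)  # missing key -> the default
try:
    _get(info, int, "url")                      # wrong expected type for the key
except DirectUrlValidationError as exc:
    print(exc)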
python | kubernetes-client__python | kubernetes/client/rest.py | {
"start": 1154,
"end": 13121
} | class ____(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
            # if no CA certificate file is set, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if configuration.retries is not None:
addition_pool_args['retries'] = configuration.retries
if configuration.tls_server_name:
addition_pool_args['server_hostname'] = configuration.tls_server_name
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy and not should_bypass_proxies(configuration.host, no_proxy=configuration.no_proxy or ''):
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ApiValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if (re.search('json', headers['Content-Type'], re.IGNORECASE) or
headers['Content-Type'] == 'application/apply-patch+yaml'):
if headers['Content-Type'] == 'application/json-patch+json':
if not isinstance(body, list):
headers['Content-Type'] = \
'application/strategic-merge-patch+json'
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type generated by urllib3 will be
                    # overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
                # Pass a `string` parameter directly in the body to support
                # content types other than JSON when the `body` argument is
                # provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# In the python 3, the response.data is bytes.
# we need to decode it to string.
if six.PY3:
r.data = r.data.decode('utf8')
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
| RESTClientObject |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 26261,
"end": 26997
} | class ____(AsyncHTTPSTestCase, HandlerBaseTestCase):
def get_app(self):
return Application([("/", XHeaderTest.Handler)])
def get_httpserver_options(self):
output = super().get_httpserver_options()
output["xheaders"] = True
return output
def test_request_without_xprotocol(self):
self.assertEqual(self.fetch_json("/")["remote_protocol"], "https")
http_scheme = {"X-Scheme": "http"}
self.assertEqual(
self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http"
)
bad_scheme = {"X-Scheme": "unknown"}
self.assertEqual(
self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https"
)
| SSLXHeaderTest |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_output_config_param.py | {
"start": 239,
"end": 385
} | class ____(TypedDict, total=False):
effort: Optional[Literal["low", "medium", "high"]]
"""All possible effort levels."""
| BetaOutputConfigParam |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/sagemaker.py | {
"start": 89295,
"end": 91251
} | class ____(AwsBaseOperator[SageMakerHook]):
"""
Start a notebook instance.
    .. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SageMakerStartNotebookOperator`
:param instance_name: The name of the notebook instance to start.
    :param wait_for_completion: Whether or not to wait for the notebook to be InService before returning
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
"""
template_fields: Sequence[str] = aws_template_fields("instance_name", "wait_for_completion")
aws_hook_class = SageMakerHook
ui_color = "#ff7300"
def __init__(
self,
instance_name: str,
wait_for_completion: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.instance_name = instance_name
self.wait_for_completion = wait_for_completion
def execute(self, context):
self.log.info("Starting SageMaker notebook %s....", self.instance_name)
self.hook.conn.start_notebook_instance(NotebookInstanceName=self.instance_name)
if self.wait_for_completion:
self.log.info("Waiting for SageMaker notebook %s to start...", self.instance_name)
self.hook.conn.get_waiter("notebook_instance_in_service").wait(
NotebookInstanceName=self.instance_name
)
| SageMakerStartNoteBookOperator |
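A minimal DAG sketch wiring up the operator above; the DAG id, start date, and instance name are placeholders.
from datetime import datetime
from airflow import DAG
from airflow.providers.amazon.aws.operators.sagemaker import (
    SageMakerStartNoteBookOperator,
)
with DAG("sagemaker_notebook_example", start_date=datetime(2024, 1, 1), schedule=None):
    SageMakerStartNoteBookOperator(
        task_id="start_notebook",
        instance_name="my-notebook-instance",
        wait_for_completion=True,
    )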
python | ipython__ipython | IPython/core/magics/extension.py | {
"start": 925,
"end": 2477
} | class ____(Magics):
"""Magics to manage the IPython extensions system."""
@line_magic
def load_ext(self, module_str):
"""Load an IPython extension by its module name."""
if not module_str:
raise UsageError('Missing module name.')
res = self.shell.extension_manager.load_extension(module_str)
if res == 'already loaded':
print("The %s extension is already loaded. To reload it, use:" % module_str)
print(" %reload_ext", module_str)
elif res == 'no load function':
print("The %s module is not an IPython extension." % module_str)
@line_magic
def unload_ext(self, module_str):
"""Unload an IPython extension by its module name.
Not all extensions can be unloaded, only those which define an
``unload_ipython_extension`` function.
"""
if not module_str:
raise UsageError('Missing module name.')
res = self.shell.extension_manager.unload_extension(module_str)
if res == 'no unload function':
print("The %s extension doesn't define how to unload it." % module_str)
elif res == "not loaded":
print("The %s extension is not loaded." % module_str)
@line_magic
def reload_ext(self, module_str):
"""Reload an IPython extension by its module name."""
if not module_str:
raise UsageError('Missing module name.')
self.shell.extension_manager.reload_extension(module_str)
| ExtensionMagics |
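For reference, the protocol these magics drive: `%load_ext mymodule` imports the module and calls its `load_ipython_extension`, and `%unload_ext` requires an `unload_ipython_extension` to exist. A minimal extension module (the file name is illustrative, e.g. `myext.py` somewhere on `sys.path`):
def load_ipython_extension(ipython):
    # Called by %load_ext / %reload_ext; `ipython` is the active InteractiveShell.
    print("myext loaded")
def unload_ipython_extension(ipython):
    # Without this, %unload_ext reports the extension doesn't define how to unload.
    print("myext unloaded")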
python | pytorch__pytorch | torch/distributed/checkpoint/_experimental/checkpoint_reader.py | {
"start": 476,
"end": 8695
} | class ____:
"""
Handles reading state dictionaries from storage.
This class is responsible for reading model state dictionaries from storage according
to the specified checkpoint layout. It supports synchronization barriers to ensure
all ranks in a distributed setting complete their checkpoint operations.
"""
def __init__(
self,
rank_info: RankInfo,
):
"""
Initialize a CheckpointReader.
Args:
rank_info: Information about the current rank in a distributed setting.
"""
self._rank_info = rank_info
def read(
self,
path: str,
state_dict: Optional[STATE_DICT] = None,
*,
map_location: Any = None,
**kwargs: dict[str, Any],
) -> tuple[STATE_DICT, list[str]]:
"""
Reads a state dictionary from storage.
Args:
            path (str): The path from which to read the checkpoint.
            state_dict (Optional[STATE_DICT]): If provided, only the keys present in
                this dictionary are read from the checkpoint (see _partial_read).
            map_location (Any): Device mapping function or device name for relocating tensors.
**kwargs: Additional keyword arguments passed to torch.load.
Returns:
STATE_DICT: The loaded state dictionary.
list[str]: List of missing keys.
"""
logger.debug(
"Reading checkpoint from %s for rank %s",
path,
self._rank_info.global_rank,
)
dir_path = Path(path)
file_path = dir_path / f"checkpoint_{self._rank_info.global_rank}.pt"
# Check if the file exists
if not os.path.exists(file_path):
logger.error("Checkpoint file not found at %s", file_path)
raise FileNotFoundError(f"Checkpoint file not found at {file_path}")
if state_dict is None:
result: tuple[STATE_DICT, list[str]] = (
torch.load(file_path, map_location=map_location),
[],
)
else:
result = self._partial_read(
file_path, state_dict, map_location=map_location, **kwargs
)
logger.debug("Successfully read checkpoint file from %s", file_path)
return result
def _partial_read(
self,
file_path: Path,
state_dict: STATE_DICT,
*,
map_location: Any = None,
**kwargs: dict[str, Any],
) -> tuple[STATE_DICT, list[str]]:
"""
Reads only the keys present in state_dict from the checkpoint file.
This method optimizes checkpoint loading by only loading the tensors that
are actually needed, based on the keys present in the input state_dict.
This can significantly reduce memory usage and loading time for large checkpoints
when only a subset of the model needs to be loaded.
Args:
file_path (str): The path to the checkpoint file.
state_dict (STATE_DICT): The state dictionary containing keys to load.
map_location (Any): Device mapping function or device name for relocating tensors.
**kwargs: Additional keyword arguments passed to torch.load.
Returns:
tuple[STATE_DICT, list[str]]: The updated state dictionary with loaded values and a list of missing keys.
"""
with FakeTensorMode():
metadata_dict = torch.load(file_path, map_location=map_location)
missing_keys = []
with open(file_path, "rb") as file:
# Helper function to load tensor data from file
def load_tensor(
target: Optional[torch.Tensor], source: torch.Tensor, full_key: str
) -> torch.Tensor:
if target is not None and (
target.size() != source.size() or target.dtype != source.dtype
):
raise RuntimeError(
f"Target tensor size={target.size()} dtype={target.dtype} does not match "
f"source tensor size={source.size()} dtype={source.dtype} for key {full_key}"
)
tensor_offset = source.untyped_storage()._checkpoint_offset
if tensor_offset is None:
raise AssertionError(
"checkpoint_offset for tensor in torch serialized file is not set. This could "
"happen if the checkpoint was saved with a older version of Pytorch. "
"Please make sure that the checkpoint was saved with Pytorch 2.7 or later."
)
tensor_len = source.nelement() * source.element_size()
file.seek(
tensor_offset + source.element_size() * int(source.storage_offset())
)
if target is None:
target = torch.empty(
source.size(), dtype=source.dtype, device=source.device
)
buffer = file.read(tensor_len)
cpu_tensor = torch.frombuffer(buffer, dtype=source.dtype)
tensor = cpu_tensor.view(source.size())
target.copy_(tensor)
return target
# Helper function to recursively process nested structures
def process_value(
target_value: Any, source_value: Any, key_path: str
) -> Any:
source_type = type(source_value)
if source_type is torch._subclasses.fake_tensor.FakeTensor:
source_type = torch.Tensor
if target_value is not None and not isinstance(
target_value, source_type
):
raise RuntimeError(
f"Target value {key_path} is set to {type(target_value)}, but source value is {type(source_value)}"
)
if isinstance(source_value, torch.Tensor):
return load_tensor(target_value, source_value, key_path)
elif isinstance(source_value, dict):
if target_value is None:
# create a new map with all the keys present in source_value
target_value = dict.fromkeys(source_value.keys())
# pyrefly: ignore [missing-attribute]
for key in list(target_value.keys()):
current_path = f"{key_path}.{key}" if key_path else key
if key in source_value:
target_value[key] = process_value(
target_value[key], source_value[key], current_path
)
else:
missing_keys.append(current_path)
return target_value
elif isinstance(source_value, list):
if target_value is None:
target_value = [None] * len(source_value)
result = []
for i, (target_item, source_item) in enumerate(
zip_longest(target_value, source_value, fillvalue=None)
):
current_path = f"{key_path}[{i}]" if key_path else f"[{i}]"
result.append(
process_value(target_item, source_item, current_path)
)
return result
else:
return source_value
# Start recursive processing from the root of the state dictionary
updated_state_dict = process_value(state_dict, metadata_dict, "")
if missing_keys:
if len(missing_keys) > 10:
logger.warning(
"Missing %s keys from checkpoint: %s... (and %s more)",
len(missing_keys),
missing_keys[:10],
len(missing_keys) - 10,
)
else:
logger.warning(
"Missing %s keys from checkpoint: %s",
len(missing_keys),
missing_keys,
)
return updated_state_dict, missing_keys
| CheckpointReader |
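A usage sketch for the partial read above; the checkpoint path and the `rank_info` construction are placeholders, and since the module is experimental (per its file path) the exact call shape is an assumption.
import torch
# Template: only these keys are read from the file; tensor values are copied
# into the pre-allocated buffers.
template = {"model": {"weight": torch.empty(4, 4)}}
reader = CheckpointReader(rank_info)  # rank_info: a RankInfo for this rank
loaded, missing = reader.read(
    "/tmp/ckpt", state_dict=template, map_location="cpu"
)
print(missing)  # keys requested in the template but absent from the file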
python | readthedocs__readthedocs.org | readthedocs/oauth/services/base.py | {
"start": 5087,
"end": 12510
} | class ____(Service):
"""
Subclass of Service that interacts with a VCS provider using the user's OAuth token.
:param user: User to use in token lookup and session creation
:param account: :py:class:`SocialAccount` instance for user
"""
def __init__(self, user, account):
self.user = user
self.account = account
# Cache organizations to avoid multiple DB hits
# when syncing repositories that belong to the same organization.
# Used by `create_organization` method in subclasses.
self._organizations_cache = {}
structlog.contextvars.bind_contextvars(
user_username=self.user.username,
social_provider=self.allauth_provider.id,
social_account_id=self.account.pk,
)
@classmethod
def for_project(cls, project):
users = AdminPermission.admins(project)
for user in users:
yield from cls.for_user(user)
@classmethod
def for_user(cls, user):
accounts = SocialAccount.objects.filter(
user=user,
provider=cls.allauth_provider.id,
)
for account in accounts:
yield cls(user=user, account=account)
@classmethod
def sync_user_access(cls, user):
"""
Sync the user's access to the provider repositories and organizations.
Since UserService makes use of the user's OAuth token,
we can just sync the user's repositories in order to
update the user access to repositories and organizations.
:raises SyncServiceError: if the access token is invalid or revoked
"""
has_error = False
for service in cls.for_user(user):
try:
service.sync()
except SyncServiceError:
# Don't stop the sync if one service account fails,
# as we should try to sync all accounts.
has_error = True
if has_error:
raise SyncServiceError()
@cached_property
def session(self):
return get_oauth2_client(self.account)
def paginate(self, url, **kwargs) -> Iterator[dict]:
"""
Recursively combine results from service's pagination.
:param url: start url to get the data from.
:type url: unicode
:param kwargs: optional parameters passed to .get() method
:type kwargs: dict
"""
resp = None
try:
resp = self.session.get(url, params=kwargs)
# TODO: this check of the status_code would be better in the
# ``create_session`` method since it could be used from outside, but
# I didn't find a generic way to make a test request to each
# provider.
if resp.status_code in [401, 403]:
                # Bad credentials: the token we have in our database is not
                # valid. Probably the user has revoked our app's access and
                # needs to reconnect their account.
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.allauth_provider.name
)
)
next_url = self.get_next_url_to_paginate(resp)
yield from self.get_paginated_results(resp)
if next_url:
yield from self.paginate(next_url)
# Catch specific exception related to OAuth
except InvalidClientIdError:
log.warning("access_token or refresh_token failed.", url=url)
raise SyncServiceError(
SyncServiceError.INVALID_OR_REVOKED_ACCESS_TOKEN.format(
provider=self.allauth_provider.name
)
)
# Catch exceptions with request or deserializing JSON
except (RequestException, ValueError):
            # Response data should always be JSON; still try to log the raw
            # content if it is not.
try:
debug_data = resp.json() if resp else {}
except ValueError:
debug_data = resp.content
log.debug(
"Paginate failed at URL.",
url=url,
debug_data=debug_data,
)
return []
def sync(self):
"""
Sync repositories (RemoteRepository) and organizations (RemoteOrganization).
- creates a new RemoteRepository/Organization per new repository
- updates fields for existing RemoteRepository/Organization
- deletes old RemoteRepository/Organization that are not present
for this user in the current provider
"""
repository_remote_ids = self.sync_repositories()
(
organization_remote_ids,
organization_repositories_remote_ids,
) = self.sync_organizations()
# Delete RemoteRepository where the user doesn't have access anymore
# (skip RemoteRepository tied to a Project on this user)
repository_remote_ids += organization_repositories_remote_ids
(
self.user.remote_repository_relations.filter(
account=self.account,
remote_repository__vcs_provider=self.vcs_provider_slug,
)
.exclude(
remote_repository__remote_id__in=repository_remote_ids,
)
.delete()
)
# Delete RemoteOrganization where the user doesn't have access anymore
(
self.user.remote_organization_relations.filter(
account=self.account,
remote_organization__vcs_provider=self.vcs_provider_slug,
)
.exclude(
remote_organization__remote_id__in=organization_remote_ids,
)
.delete()
)
def get_next_url_to_paginate(self, response):
"""
Return the next url to feed the `paginate` method.
:param response: response from where to get the `next_url` attribute
:type response: requests.Response
"""
raise NotImplementedError
def get_paginated_results(self, response):
"""
Return the results for the current response/page.
:param response: response from where to get the results.
:type response: requests.Response
"""
raise NotImplementedError
def get_webhook_url(self, project, integration):
"""Get the webhook URL for the project's integration."""
return "{base_url}{path}".format(
base_url=settings.PUBLIC_API_URL,
path=reverse(
"api_webhook",
kwargs={
"project_slug": project.slug,
"integration_pk": integration.pk,
},
),
)
def get_provider_data(self, project, integration):
"""
        Get provider data from the Git provider's webhooks API.
:param project: project
:type project: Project
:param integration: Integration for the project
:type integration: Integration
:returns: Dictionary containing provider data from the API or None
:rtype: dict
"""
raise NotImplementedError
def sync_repositories(self):
raise NotImplementedError
def sync_organizations(self):
raise NotImplementedError
| UserService |
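UserService leaves the provider-specific pagination hooks abstract; `paginate` only needs a way to pull results out of a page and find the next page URL. A hypothetical subclass for a GitHub-style API might fill them in roughly like this (a sketch under assumed response shapes, not the actual Read the Docs implementation, which also implements sync_repositories and sync_organizations):

class ExampleGitHubService(UserService):
    """Illustrative only: assumes a GitHub-style Link header and JSON array body."""

    vcs_provider_slug = "github"

    def get_next_url_to_paginate(self, response):
        # requests exposes parsed Link headers as response.links
        return response.links.get("next", {}).get("url")

    def get_paginated_results(self, response):
        # GitHub-style list endpoints return one JSON array per page
        return response.json()

# for service in ExampleGitHubService.for_user(user):
#     repos = list(service.paginate("https://api.example.com/user/repos"))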
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_massachusetts_zip.py | {
"start": 1791,
"end": 4174
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Massachusetts zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_massachusetts_zip": ["01001", "01349", "01721", "02113"],
"invalid_massachusetts_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_massachusetts_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_massachusetts_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_massachusetts_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidMassachusettsZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidMassachusettsZip |
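The map metric this expectation names would test each value against the `zipcodes` package's Massachusetts entries. A minimal sketch of the per-value check (assuming the `zipcodes` package; this is not the contrib package's actual metric code):

import zipcodes  # pip install zipcodes

def is_valid_massachusetts_zip(value: str) -> bool:
    """True when `value` is a real US zipcode registered in Massachusetts."""
    try:
        matches = zipcodes.matching(value)
    except (TypeError, ValueError):  # malformed input such as "-10000"
        return False
    return any(entry["state"] == "MA" for entry in matches)

assert is_valid_massachusetts_zip("01001")      # Agawam, MA
assert not is_valid_massachusetts_zip("25487")  # valid format, not in MA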
python | getsentry__sentry | tests/sentry/issues/endpoints/test_organization_group_suspect_flags.py | {
"start": 197,
"end": 4099
} | class ____(APITestCase, SnubaTestCase):
endpoint = "sentry-api-0-organization-group-suspect-flags"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
@property
def features(self) -> dict[str, bool]:
return {"organizations:feature-flag-suspect-flags": True}
def test_get(self) -> None:
today = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(minutes=5)
group = self.create_group(
first_seen=today - datetime.timedelta(hours=1),
last_seen=today + datetime.timedelta(hours=1),
)
self._mock_event(
today,
hash="a" * 32,
flags=[
{"flag": "key", "result": True},
{"flag": "other", "result": False},
],
group_id=group.id,
project_id=self.project.id,
)
self._mock_event(
today,
hash="a" * 32,
flags=[
{"flag": "key", "result": False},
{"flag": "other", "result": False},
],
group_id=2,
project_id=self.project.id,
)
with self.feature(self.features):
response = self.client.get(f"/api/0/issues/{group.id}/suspect/flags/")
assert response.status_code == 200
assert response.json() == {
"data": [
{
"flag": "key",
"score": 0.01634056054997356,
"baseline_percent": 0.5,
"distribution": {
"baseline": {
"false": 1,
"true": 1,
},
"outliers": {
"true": 1,
},
},
"is_filtered": True,
},
{
"flag": "other",
"score": 0.016181914331041776,
"baseline_percent": 0,
"distribution": {
"baseline": {
"false": 2,
},
"outliers": {
"false": 1,
},
},
"is_filtered": True,
},
]
}
def test_get_no_flag_access(self) -> None:
"""Does not have feature-flag access."""
group = self.create_group()
response = self.client.get(f"/api/0/issues/{group.id}/suspect/flags/")
assert response.status_code == 404
def test_get_no_group(self) -> None:
"""Group not found."""
with self.feature(self.features):
response = self.client.get("/api/0/issues/22/suspect/flags/")
assert response.status_code == 404
def _mock_event(
self,
ts: datetime.datetime,
hash: str = "a" * 32,
group_id: int | None = None,
project_id: int = 1,
flags: list[_FlagResult] | None = None,
) -> None:
self.snuba_insert(
(
2,
"insert",
{
"event_id": uuid.uuid4().hex,
"primary_hash": hash,
"group_id": group_id if group_id else int(hash[:16], 16),
"project_id": project_id,
"message": "message",
"platform": "python",
"datetime": ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"data": {
"received": time.mktime(ts.timetuple()),
"contexts": {"flags": {"values": flags or []}},
},
},
{},
)
)
| OrganizationGroupSuspectFlagsTestCase |
python | fluentpython__example-code-2e | 24-class-metaprog/persistent/persistlib.py | {
"start": 1304,
"end": 1800
} | class ____:
def __init__(self, name: str, py_type: type) -> None:
self.name = name
self.type = py_type
def __set__(self, instance: 'Persistent', value: Any) -> None:
try:
value = self.type(value)
except (TypeError, ValueError) as e:
type_name = self.type.__name__
msg = f'{value!r} is not compatible with {self.name}:{type_name}.'
raise TypeError(msg) from e
instance.__dict__[self.name] = value
| Field |
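Field is a data descriptor: `__set__` coerces the assigned value to the declared type and stores it in the instance `__dict__` under the same name, so later reads bypass the descriptor (it defines no `__get__`). A small usage sketch reusing the Field class above, with a stand-in owner class since `Persistent` lives elsewhere in persistlib:

class Movie:  # stand-in for a Persistent subclass
    title = Field('title', str)
    year = Field('year', int)

movie = Movie()
movie.year = '1995'        # coerced to int by Field.__set__
assert movie.year == 1995  # read comes straight from movie.__dict__

try:
    movie.year = 'MCMXCV'
except TypeError as exc:
    print(exc)             # 'MCMXCV' is not compatible with year:int.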
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 52572,
"end": 53003
} | class ____(TestOnlySetsInBinaryOps, TestCase):
def setUp(self):
super().setUp()
def gen():
for i in range(0, 10, 2): # noqa: UP028
yield i
self.OrderedSet = OrderedSet((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
del TestOnlySetsInBinaryOps
# ==============================================================================
| TestOnlySetsGenerator |
python | bokeh__bokeh | src/bokeh/core/property/instance.py | {
"start": 4156,
"end": 4579
} | class ____(Object[S]):
""" Accept values that are instances of serializable types (e.g. |HasProps|). """
@staticmethod
def _assert_type(instance_type: type[Any]) -> None:
if not (isinstance(instance_type, type) and issubclass(instance_type, Serializable)):
raise ValueError(f"expected a subclass of Serializable (e.g. HasProps), got {instance_type}")
I = TypeVar("I", bound=HasProps)
| Instance |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/tf_record_writer_test.py | {
"start": 1448,
"end": 5778
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(TFRecordWriterTest, self).setUp()
self._num_records = 8
def writer_fn(self, filename, compression_type=""):
input_dataset = readers.TFRecordDataset([filename], compression_type)
return writers.TFRecordWriter(self._outputFilename(),
compression_type).write(input_dataset)
def _record(self, i):
return compat.as_bytes("Record %d" % (i))
def _createFile(self, options=None):
filename = self._inputFilename()
writer = python_io.TFRecordWriter(filename, options)
for i in range(self._num_records):
writer.write(self._record(i))
writer.close()
return filename
def _inputFilename(self):
return os.path.join(self.get_temp_dir(), "tf_record.in.txt")
def _outputFilename(self):
return os.path.join(self.get_temp_dir(), "tf_record.out.txt")
@combinations.generate(test_base.default_test_combinations())
def testWrite(self):
self.evaluate(self.writer_fn(self._createFile()))
for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
self.assertAllEqual(self._record(i), r)
@combinations.generate(test_base.default_test_combinations())
def testWriteZLIB(self):
options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.ZLIB)
self.evaluate(
self.writer_fn(self._createFile(options), compression_type="ZLIB"))
for i, r in enumerate(
tf_record.tf_record_iterator(self._outputFilename(), options=options)):
self.assertAllEqual(self._record(i), r)
@combinations.generate(test_base.default_test_combinations())
def testWriteGZIP(self):
options = tf_record.TFRecordOptions(tf_record.TFRecordCompressionType.GZIP)
self.evaluate(
self.writer_fn(self._createFile(options), compression_type="GZIP"))
for i, r in enumerate(
tf_record.tf_record_iterator(self._outputFilename(), options=options)):
self.assertAllEqual(self._record(i), r)
@combinations.generate(test_base.default_test_combinations())
def testFailDataset(self):
with self.assertRaises(TypeError):
writers.TFRecordWriter(self._outputFilename(), "").write("whoops")
@combinations.generate(test_base.default_test_combinations())
def testFailDType(self):
input_dataset = dataset_ops.Dataset.from_tensors(10)
with self.assertRaises(TypeError):
writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset)
@combinations.generate(test_base.default_test_combinations())
def testFailShape(self):
input_dataset = dataset_ops.Dataset.from_tensors([["hello"], ["world"]])
with self.assertRaises(TypeError):
writers.TFRecordWriter(self._outputFilename(), "").write(input_dataset)
@combinations.generate(test_base.default_test_combinations())
def testSideEffect(self):
def writer_fn():
input_dataset = readers.TFRecordDataset(self._createFile())
return writers.TFRecordWriter(self._outputFilename()).write(input_dataset)
@def_function.function
def fn():
_ = writer_fn()
return "hello"
self.assertEqual(self.evaluate(fn()), b"hello")
for i, r in enumerate(tf_record.tf_record_iterator(self._outputFilename())):
self.assertAllEqual(self._record(i), r)
@combinations.generate(test_base.default_test_combinations())
def testShard(self):
filename = self._createFile()
dataset = readers.TFRecordDataset([filename])
def reduce_func(key, dataset):
shard_filename = string_ops.string_join(
[filename, string_ops.as_string(key)])
writer = writers.TFRecordWriter(shard_filename)
writer.write(dataset.map(lambda _, x: x))
return dataset_ops.Dataset.from_tensors(shard_filename)
dataset = dataset.enumerate()
dataset = dataset.apply(
grouping.group_by_window(lambda i, _: i % 2, reduce_func,
dtypes.int64.max))
get_next = self.getNext(dataset)
for i in range(2):
shard_filename = (filename + str(i)).encode()
self.assertEqual(self.evaluate(get_next()), shard_filename)
for j, r in enumerate(tf_record.tf_record_iterator(shard_filename)):
self.assertAllEqual(self._record(i + 2*j), r)
if __name__ == "__main__":
test.main()
| TFRecordWriterTest |
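Outside the test harness, the write/read round trip these tests exercise reduces to a few lines. A sketch in eager mode (the /tmp path is illustrative):

import tensorflow as tf

# Write a dataset of scalar byte strings to a TFRecord file, then read it back.
records = tf.data.Dataset.from_tensor_slices([b"Record 0", b"Record 1"])
tf.data.experimental.TFRecordWriter("/tmp/demo.tfrecord").write(records)

readback = tf.data.TFRecordDataset(["/tmp/demo.tfrecord"])
print(list(readback.as_numpy_iterator()))  # [b'Record 0', b'Record 1']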
python | gevent__gevent | src/gevent/_fileobjectcommon.py | {
"start": 3045,
"end": 3111
} | class ____(WriteIsWriteallMixin, io.FileIO):
pass
| WriteallFileIO |
python | pallets__flask | src/flask/templating.py | {
"start": 846,
"end": 1333
} | class ____(BaseEnvironment):
"""Works like a regular Jinja environment but has some additional
knowledge of how Flask's blueprint works so that it can prepend the
name of the blueprint to referenced templates if necessary.
"""
def __init__(self, app: App, **options: t.Any) -> None:
if "loader" not in options:
options["loader"] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
| Environment |
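The only wiring here is the loader default: constructing the environment without an explicit loader falls back to the app's global Jinja loader, which knows how to search blueprint template folders. A minimal sketch:

from flask import Flask
from flask.templating import Environment

app = Flask(__name__)
env = Environment(app)  # no loader given: app.create_global_jinja_loader() is used
print(type(env.loader).__name__)  # DispatchingJinjaLoader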
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0020_migrate_null_hidden_field.py | {
"start": 316,
"end": 549
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0019_migrate_protected_versions_to_hidden"),
]
operations = [
migrations.RunPython(forwards_func),
]
| Migration |
python | falconry__falcon | falcon/errors.py | {
"start": 72063,
"end": 74325
} | class ____(HTTPError):
"""502 Bad Gateway.
The server, while acting as a gateway or proxy, received an invalid
response from an inbound server it accessed while attempting to
fulfill the request.
(See also: RFC 7231, Section 6.6.3)
All the arguments are defined as keyword-only.
Keyword Args:
title (str): Error title (default '502 Bad Gateway').
description (str): Human-friendly description of the error, along with
a helpful suggestion or two.
headers (dict or list): A ``dict`` of header names and values
to set, or a ``list`` of (*name*, *value*) tuples. Both *name* and
*value* must be of type ``str`` or ``StringType``, and only
character values 0x00 through 0xFF may be used on platforms that
use wide characters.
Note:
The Content-Type header, if present, will be overridden. If
you wish to return custom error messages, you can create
your own HTTP error class, and install an error handler
to convert it into an appropriate HTTP response for the
client
Note:
Falcon can process a list of ``tuple`` slightly faster
than a ``dict``.
href (str): A URL someone can visit to find out more information
(default ``None``). Unicode characters are percent-encoded.
href_text (str): If href is given, use this as the friendly
title/description for the link (default 'API documentation
for this error').
code (int): An internal code that customers can reference in their
support request or to help them when searching for knowledge
base articles related to this error (default ``None``).
"""
def __init__(
self,
*,
title: str | None = None,
description: str | None = None,
headers: HeaderArg | None = None,
**kwargs: HTTPErrorKeywordArguments,
):
super().__init__(
status.HTTP_502,
title=title,
description=description,
headers=headers,
**kwargs, # type: ignore[arg-type]
)
| HTTPBadGateway |
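Since every argument is keyword-only, raising this error from a responder looks like the following (a usage sketch; the upstream health check is a hypothetical stand-in):

import falcon

class ProxyResource:
    def on_get(self, req, resp):
        upstream_ok = False  # stand-in for a real upstream response check
        if not upstream_ok:
            raise falcon.HTTPBadGateway(
                title='Upstream unavailable',
                description='The inbound server returned an invalid response.',
                headers=[('Retry-After', '30')],  # list of tuples, per the note above
            )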
python | huggingface__transformers | src/transformers/models/blt/configuration_blt.py | {
"start": 6541,
"end": 10654
} | class ____(PreTrainedConfig):
r"""
Configuration class for the Blt Patcher/Entropy model component.
Args:
vocab_size (`int`, *optional*, defaults to 260):
Vocabulary size of the Blt patcher model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling the patcher model.
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 14):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
max_position_embeddings (`int`, *optional*, defaults to 8192):
The maximum sequence length that this model might ever be used with.
rms_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimension of the MLP representations.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
model_type = "blt_patcher"
def __init__(
self,
vocab_size: Optional[int] = 260,
hidden_size: Optional[int] = 768,
num_hidden_layers: Optional[int] = 14,
num_attention_heads: Optional[int] = 12,
num_key_value_heads: Optional[int] = None,
max_position_embeddings: Optional[int] = 8192,
rms_norm_eps: Optional[float] = 1e-5,
dropout: Optional[float] = 0.0,
intermediate_size: Optional[int] = 2048,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
initializer_range: Optional[float] = 0.02,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.head_dim = hidden_size // num_attention_heads
self.num_key_value_heads = num_key_value_heads if num_key_value_heads is not None else num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.rms_norm_eps = rms_norm_eps
self.dropout = dropout
self.hidden_act = "silu" # Blt uses silu activation
self.intermediate_size = intermediate_size or int(8 * self.hidden_size / 3)
self.initializer_range = initializer_range
self.rope_parameters = rope_parameters
# Remove tie_word_embeddings from kwargs to avoid duplicate parameter error
kwargs.pop("tie_word_embeddings", None)
super().__init__(**kwargs, tie_word_embeddings=False)
| BltPatcherConfig |
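Given the defaults above, constructing the patcher config only requires overriding what differs; derived fields such as head_dim follow from hidden_size and num_attention_heads. A quick sketch using the module path from this row:

from transformers.models.blt.configuration_blt import BltPatcherConfig

config = BltPatcherConfig(hidden_size=512, num_attention_heads=8)
assert config.head_dim == 512 // 8      # 64, derived in __init__
assert config.num_key_value_heads == 8  # defaults to num_attention_heads (MHA)
assert config.hidden_act == "silu"      # hard-coded: Blt uses silu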
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 72704,
"end": 97903
} | class ____:
@pytest.fixture
def _organization_invite(self, pyramid_user):
self.initiator_user = pyramid_user
self.user = UserFactory.create()
EmailFactory.create(user=self.user, verified=True)
self.desired_role = "Manager"
self.organization_name = "example"
self.message = "test message"
self.email_token = "token"
self.token_age = 72 * 60 * 60
@pytest.mark.usefixtures("_organization_invite")
def test_send_organization_member_invited_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"organization-member-invited"
)
result = email.send_organization_member_invited_email(
db_request,
self.initiator_user,
user=self.user,
desired_role=self.desired_role,
initiator_username=self.initiator_user.username,
organization_name=self.organization_name,
email_token=self.email_token,
token_age=self.token_age,
)
assert result == {
"username": self.user.username,
"desired_role": self.desired_role,
"initiator_username": self.initiator_user.username,
"n_hours": self.token_age // 60 // 60,
"organization_name": self.organization_name,
"token": self.email_token,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.initiator_user.name} <{self.initiator_user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.initiator_user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.initiator_user.email,
"subject": subject_renderer.string_response,
"redact_ip": False,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_organization_role_verification_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"verify-organization-role"
)
result = email.send_organization_role_verification_email(
db_request,
self.user,
desired_role=self.desired_role,
initiator_username=self.initiator_user.username,
organization_name=self.organization_name,
email_token=self.email_token,
token_age=self.token_age,
)
assert result == {
"username": self.user.username,
"desired_role": self.desired_role,
"initiator_username": self.initiator_user.username,
"n_hours": self.token_age // 60 // 60,
"organization_name": self.organization_name,
"token": self.email_token,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_organization_member_invite_canceled_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"organization-member-invite-canceled"
)
result = email.send_organization_member_invite_canceled_email(
db_request,
self.initiator_user,
user=self.user,
organization_name=self.organization_name,
)
assert result == {
"username": self.user.username,
"organization_name": self.organization_name,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.initiator_user.name} <{self.initiator_user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.initiator_user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.initiator_user.email,
"subject": subject_renderer.string_response,
"redact_ip": False,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_canceled_as_invited_organization_member_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"canceled-as-invited-organization-member"
)
result = email.send_canceled_as_invited_organization_member_email(
db_request,
self.user,
organization_name=self.organization_name,
)
assert result == {
"username": self.user.username,
"organization_name": self.organization_name,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_organization_member_invite_declined_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"organization-member-invite-declined"
)
result = email.send_organization_member_invite_declined_email(
db_request,
self.initiator_user,
user=self.user,
organization_name=self.organization_name,
message=self.message,
)
assert result == {
"username": self.user.username,
"organization_name": self.organization_name,
"message": self.message,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.initiator_user.name} <{self.initiator_user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.initiator_user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.initiator_user.email,
"subject": subject_renderer.string_response,
"redact_ip": False,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_declined_as_invited_organization_member_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"declined-as-invited-organization-member"
)
result = email.send_declined_as_invited_organization_member_email(
db_request,
self.user,
organization_name=self.organization_name,
)
assert result == {
"username": self.user.username,
"organization_name": self.organization_name,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_organization_member_added_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"organization-member-added"
)
result = email.send_organization_member_added_email(
db_request,
self.initiator_user,
user=self.user,
submitter=self.initiator_user,
organization_name=self.organization_name,
role=self.desired_role,
)
assert result == {
"username": self.user.username,
"submitter": self.initiator_user.username,
"organization_name": self.organization_name,
"role": self.desired_role,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.initiator_user.name} <{self.initiator_user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.initiator_user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.initiator_user.email,
"subject": subject_renderer.string_response,
"redact_ip": False,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_added_as_organization_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"added-as-organization-member"
)
result = email.send_added_as_organization_member_email(
db_request,
self.user,
submitter=self.initiator_user,
organization_name=self.organization_name,
role=self.desired_role,
)
assert result == {
"username": self.user.username,
"submitter": self.initiator_user.username,
"organization_name": self.organization_name,
"role": self.desired_role,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_organization_member_removed_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"organization-member-removed"
)
result = email.send_organization_member_removed_email(
db_request,
self.initiator_user,
user=self.user,
submitter=self.initiator_user,
organization_name=self.organization_name,
)
assert result == {
"username": self.user.username,
"submitter": self.initiator_user.username,
"organization_name": self.organization_name,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.initiator_user.name} <{self.initiator_user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.initiator_user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.initiator_user.email,
"subject": subject_renderer.string_response,
"redact_ip": False,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_removed_as_organization_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"removed-as-organization-member"
)
result = email.send_removed_as_organization_member_email(
db_request,
self.user,
submitter=self.initiator_user,
organization_name=self.organization_name,
)
assert result == {
"username": self.user.username,
"submitter": self.initiator_user.username,
"organization_name": self.organization_name,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_organization_member_role_changed_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"organization-member-role-changed"
)
result = email.send_organization_member_role_changed_email(
db_request,
self.initiator_user,
user=self.user,
submitter=self.initiator_user,
organization_name=self.organization_name,
role=self.desired_role,
)
assert result == {
"username": self.user.username,
"submitter": self.initiator_user.username,
"organization_name": self.organization_name,
"role": self.desired_role,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.initiator_user.name} <{self.initiator_user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.initiator_user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.initiator_user.email,
"subject": subject_renderer.string_response,
"redact_ip": False,
},
},
)
]
@pytest.mark.usefixtures("_organization_invite")
def test_send_role_changed_as_organization_email(
self,
db_request,
make_email_renderers,
send_email,
):
subject_renderer, body_renderer, html_renderer = make_email_renderers(
"role-changed-as-organization-member"
)
result = email.send_role_changed_as_organization_member_email(
db_request,
self.user,
submitter=self.initiator_user,
organization_name=self.organization_name,
role=self.desired_role,
)
assert result == {
"username": self.user.username,
"submitter": self.initiator_user.username,
"organization_name": self.organization_name,
"role": self.desired_role,
}
subject_renderer.assert_(**result)
body_renderer.assert_(**result)
html_renderer.assert_(**result)
assert db_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{self.user.name} <{self.user.email}>",
{
"sender": None,
"subject": subject_renderer.string_response,
"body_text": body_renderer.string_response,
"body_html": (
f"<html>\n"
f"<head></head>\n"
f"<body><p>{html_renderer.string_response}</p></body>\n"
f"</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": self.user.id,
"additional": {
"from_": db_request.registry.settings["mail.sender"],
"to": self.user.email,
"subject": subject_renderer.string_response,
"redact_ip": True,
},
},
)
]
| TestOrganizationMemberEmails |
python | redis__redis-py | redis/asyncio/cluster.py | {
"start": 84697,
"end": 98042
} | class ____(AbstractStrategy):
NO_SLOTS_COMMANDS = {"UNWATCH"}
IMMEDIATE_EXECUTE_COMMANDS = {"WATCH", "UNWATCH"}
UNWATCH_COMMANDS = {"DISCARD", "EXEC", "UNWATCH"}
SLOT_REDIRECT_ERRORS = (AskError, MovedError)
CONNECTION_ERRORS = (
ConnectionError,
OSError,
ClusterDownError,
SlotNotCoveredError,
)
def __init__(self, pipe: ClusterPipeline) -> None:
super().__init__(pipe)
self._explicit_transaction = False
self._watching = False
self._pipeline_slots: Set[int] = set()
self._transaction_node: Optional[ClusterNode] = None
self._transaction_connection: Optional[Connection] = None
self._executing = False
self._retry = copy(self._pipe.cluster_client.retry)
self._retry.update_supported_errors(
RedisCluster.ERRORS_ALLOW_RETRY + self.SLOT_REDIRECT_ERRORS
)
def _get_client_and_connection_for_transaction(
self,
) -> Tuple[ClusterNode, Connection]:
"""
Find a connection for a pipeline transaction.
        For an atomic transaction, watched keys only guarantee that their
        contents have not been altered if the WATCH commands for those keys
        were sent over the same connection. So once we start watching a key,
        we fetch a connection to the node that owns that slot and reuse it.
"""
if not self._pipeline_slots:
raise RedisClusterException(
"At least a command with a key is needed to identify a node"
)
node: ClusterNode = self._pipe.cluster_client.nodes_manager.get_node_from_slot(
list(self._pipeline_slots)[0], False
)
self._transaction_node = node
if not self._transaction_connection:
connection: Connection = self._transaction_node.acquire_connection()
self._transaction_connection = connection
return self._transaction_node, self._transaction_connection
def execute_command(self, *args: Union[KeyT, EncodableT], **kwargs: Any) -> "Any":
# Given the limitation of ClusterPipeline sync API, we have to run it in thread.
response = None
error = None
def runner():
nonlocal response
nonlocal error
try:
response = asyncio.run(self._execute_command(*args, **kwargs))
except Exception as e:
error = e
thread = threading.Thread(target=runner)
thread.start()
thread.join()
if error:
raise error
return response
async def _execute_command(
self, *args: Union[KeyT, EncodableT], **kwargs: Any
) -> Any:
if self._pipe.cluster_client._initialize:
await self._pipe.cluster_client.initialize()
slot_number: Optional[int] = None
if args[0] not in self.NO_SLOTS_COMMANDS:
slot_number = await self._pipe.cluster_client._determine_slot(*args)
if (
self._watching or args[0] in self.IMMEDIATE_EXECUTE_COMMANDS
) and not self._explicit_transaction:
if args[0] == "WATCH":
self._validate_watch()
if slot_number is not None:
if self._pipeline_slots and slot_number not in self._pipeline_slots:
raise CrossSlotTransactionError(
"Cannot watch or send commands on different slots"
)
self._pipeline_slots.add(slot_number)
elif args[0] not in self.NO_SLOTS_COMMANDS:
raise RedisClusterException(
f"Cannot identify slot number for command: {args[0]},"
"it cannot be triggered in a transaction"
)
return self._immediate_execute_command(*args, **kwargs)
else:
if slot_number is not None:
self._pipeline_slots.add(slot_number)
return super().execute_command(*args, **kwargs)
def _validate_watch(self):
if self._explicit_transaction:
raise RedisError("Cannot issue a WATCH after a MULTI")
self._watching = True
async def _immediate_execute_command(self, *args, **options):
return await self._retry.call_with_retry(
lambda: self._get_connection_and_send_command(*args, **options),
self._reinitialize_on_error,
)
async def _get_connection_and_send_command(self, *args, **options):
redis_node, connection = self._get_client_and_connection_for_transaction()
return await self._send_command_parse_response(
connection, redis_node, args[0], *args, **options
)
async def _send_command_parse_response(
self,
connection: Connection,
redis_node: ClusterNode,
command_name,
*args,
**options,
):
"""
Send a command and parse the response
"""
await connection.send_command(*args)
output = await redis_node.parse_response(connection, command_name, **options)
if command_name in self.UNWATCH_COMMANDS:
self._watching = False
return output
async def _reinitialize_on_error(self, error):
if self._watching:
if type(error) in self.SLOT_REDIRECT_ERRORS and self._executing:
raise WatchError("Slot rebalancing occurred while watching keys")
if (
type(error) in self.SLOT_REDIRECT_ERRORS
or type(error) in self.CONNECTION_ERRORS
):
if self._transaction_connection:
self._transaction_connection = None
self._pipe.cluster_client.reinitialize_counter += 1
if (
self._pipe.cluster_client.reinitialize_steps
and self._pipe.cluster_client.reinitialize_counter
% self._pipe.cluster_client.reinitialize_steps
== 0
):
await self._pipe.cluster_client.nodes_manager.initialize()
                self._pipe.cluster_client.reinitialize_counter = 0
else:
if isinstance(error, AskError):
self._pipe.cluster_client.nodes_manager.update_moved_exception(
error
)
self._executing = False
def _raise_first_error(self, responses, stack):
"""
Raise the first exception on the stack
"""
for r, cmd in zip(responses, stack):
if isinstance(r, Exception):
self._annotate_exception(r, cmd.position + 1, cmd.args)
raise r
def mset_nonatomic(
self, mapping: Mapping[AnyKeyT, EncodableT]
) -> "ClusterPipeline":
raise NotImplementedError("Method is not supported in transactional context.")
async def execute(
self, raise_on_error: bool = True, allow_redirections: bool = True
) -> List[Any]:
stack = self._command_queue
if not stack and (not self._watching or not self._pipeline_slots):
return []
return await self._execute_transaction_with_retries(stack, raise_on_error)
async def _execute_transaction_with_retries(
self, stack: List["PipelineCommand"], raise_on_error: bool
):
return await self._retry.call_with_retry(
lambda: self._execute_transaction(stack, raise_on_error),
self._reinitialize_on_error,
)
async def _execute_transaction(
self, stack: List["PipelineCommand"], raise_on_error: bool
):
if len(self._pipeline_slots) > 1:
raise CrossSlotTransactionError(
"All keys involved in a cluster transaction must map to the same slot"
)
self._executing = True
redis_node, connection = self._get_client_and_connection_for_transaction()
stack = chain(
[PipelineCommand(0, "MULTI")],
stack,
[PipelineCommand(0, "EXEC")],
)
commands = [c.args for c in stack if EMPTY_RESPONSE not in c.kwargs]
packed_commands = connection.pack_commands(commands)
await connection.send_packed_command(packed_commands)
errors = []
# parse off the response for MULTI
# NOTE: we need to handle ResponseErrors here and continue
# so that we read all the additional command messages from
# the socket
try:
await redis_node.parse_response(connection, "MULTI")
except ResponseError as e:
self._annotate_exception(e, 0, "MULTI")
errors.append(e)
except self.CONNECTION_ERRORS as cluster_error:
self._annotate_exception(cluster_error, 0, "MULTI")
raise
# and all the other commands
for i, command in enumerate(self._command_queue):
if EMPTY_RESPONSE in command.kwargs:
errors.append((i, command.kwargs[EMPTY_RESPONSE]))
else:
try:
_ = await redis_node.parse_response(connection, "_")
except self.SLOT_REDIRECT_ERRORS as slot_error:
self._annotate_exception(slot_error, i + 1, command.args)
errors.append(slot_error)
except self.CONNECTION_ERRORS as cluster_error:
self._annotate_exception(cluster_error, i + 1, command.args)
raise
except ResponseError as e:
self._annotate_exception(e, i + 1, command.args)
errors.append(e)
response = None
# parse the EXEC.
try:
response = await redis_node.parse_response(connection, "EXEC")
except ExecAbortError:
if errors:
raise errors[0]
raise
self._executing = False
# EXEC clears any watched keys
self._watching = False
if response is None:
raise WatchError("Watched variable changed.")
# put any parse errors into the response
for i, e in errors:
response.insert(i, e)
if len(response) != len(self._command_queue):
raise InvalidPipelineStack(
"Unexpected response length for cluster pipeline EXEC."
" Command stack was {} but response had length {}".format(
[c.args[0] for c in self._command_queue], len(response)
)
)
# find any errors in the response and raise if necessary
if raise_on_error or len(errors) > 0:
self._raise_first_error(
response,
self._command_queue,
)
# We have to run response callbacks manually
data = []
for r, cmd in zip(response, self._command_queue):
if not isinstance(r, Exception):
command_name = cmd.args[0]
if command_name in self._pipe.cluster_client.response_callbacks:
r = self._pipe.cluster_client.response_callbacks[command_name](
r, **cmd.kwargs
)
data.append(r)
return data
async def reset(self):
self._command_queue = []
# make sure to reset the connection state in the event that we were
# watching something
if self._transaction_connection:
try:
if self._watching:
# call this manually since our unwatch or
# immediate_execute_command methods can call reset()
await self._transaction_connection.send_command("UNWATCH")
await self._transaction_connection.read_response()
# we can safely return the connection to the pool here since we're
# sure we're no longer WATCHing anything
self._transaction_node.release(self._transaction_connection)
self._transaction_connection = None
except self.CONNECTION_ERRORS:
# disconnect will also remove any previous WATCHes
if self._transaction_connection:
await self._transaction_connection.disconnect()
# clean up the other instance attributes
self._transaction_node = None
self._watching = False
self._explicit_transaction = False
self._pipeline_slots = set()
self._executing = False
def multi(self):
if self._explicit_transaction:
raise RedisError("Cannot issue nested calls to MULTI")
if self._command_queue:
raise RedisError(
"Commands without an initial WATCH have already been issued"
)
self._explicit_transaction = True
async def watch(self, *names):
if self._explicit_transaction:
raise RedisError("Cannot issue a WATCH after a MULTI")
return await self.execute_command("WATCH", *names)
async def unwatch(self):
if self._watching:
return await self.execute_command("UNWATCH")
return True
async def discard(self):
await self.reset()
async def unlink(self, *names):
return self.execute_command("UNLINK", *names)
| TransactionStrategy |
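What this strategy enables on a cluster is the standard redis-py optimistic-locking loop, with the added constraints that all keys map to one slot and WATCH state stays pinned to one connection. The generic pattern, sketched against a single-node async client for clarity (assumes a reachable Redis server when actually run):

import redis.asyncio as redis
from redis.exceptions import WatchError

async def incr_with_watch(r: redis.Redis, key: str) -> int:
    """Optimistic read-modify-write: retry whenever the watched key changes."""
    async with r.pipeline(transaction=True) as pipe:
        while True:
            try:
                await pipe.watch(key)                  # immediate-execution mode
                current = int(await pipe.get(key) or 0)
                pipe.multi()                           # start buffering commands
                pipe.set(key, current + 1)
                await pipe.execute()                   # MULTI ... EXEC on the wire
                return current + 1
            except WatchError:
                continue  # another client touched `key`; start the attempt over

# asyncio.run(incr_with_watch(redis.Redis(), "counter"))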
python | tiangolo__fastapi | docs_src/security/tutorial004_an_py310.py | {
"start": 1101,
"end": 4217
} | class ____(User):
hashed_password: str
password_hash = PasswordHash.recommended()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
app = FastAPI()
def verify_password(plain_password, hashed_password):
return password_hash.verify(plain_password, hashed_password)
def get_password_hash(password):
return password_hash.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: timedelta | None = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: Annotated[str, Depends(oauth2_scheme)]):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except InvalidTokenError:
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(
current_user: Annotated[User, Depends(get_current_user)],
):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: Annotated[OAuth2PasswordRequestForm, Depends()],
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Bearer"},
)
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username}, expires_delta=access_token_expires
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return current_user
@app.get("/users/me/items/")
async def read_own_items(
current_user: Annotated[User, Depends(get_current_active_user)],
):
return [{"item_id": "Foo", "owner": current_user.username}]
| UserInDB |
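End to end, the flow above is: POST credentials as form data to /token, receive a bearer JWT, then present it on the protected routes. A quick check with FastAPI's TestClient (the import path is this row's module; `johndoe`/`secret` are assumed to exist in `fake_users_db`, as in the docs tutorial):

from fastapi.testclient import TestClient

from docs_src.security.tutorial004_an_py310 import app

client = TestClient(app)

# OAuth2 password flow: credentials go in the form body, not JSON.
token = client.post(
    "/token", data={"username": "johndoe", "password": "secret"}
).json()["access_token"]

# The bearer token authorizes the protected endpoints.
me = client.get("/users/me/", headers={"Authorization": f"Bearer {token}"})
assert me.status_code == 200 and me.json()["username"] == "johndoe"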
python | google__jax | tests/pallas/pallas_test.py | {
"start": 3415,
"end": 27860
} | class ____(PallasBaseTest):
def test_add_one(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), floatx))
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1.
x = 0.
self.assertEqual(add_one(x), 1.)
def test_add_singleton_vector(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((1,), jnp.float32),
)
def add_one(x_ref, o_ref):
o_ref[0] = x_ref[0] + 1.
x = jnp.array([0.], jnp.float32)
np.testing.assert_allclose(add_one(x), jnp.array([1.], jnp.float32))
def test_add_vector_block_spec(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((8,), intx),
in_specs=[pl.BlockSpec((1,), lambda i: i)],
out_specs=pl.BlockSpec((1,), lambda i: i),
grid=8,
)
def add_one(x_ref, o_ref):
o_ref[0] = x_ref[0] + 1
np.testing.assert_allclose(add_one(jnp.arange(8)), jnp.arange(8) + 1)
def test_add_matrix_block_spec(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((8, 8), intx),
in_specs=[pl.BlockSpec((2, 2), lambda i, j: (i, j))],
out_specs=pl.BlockSpec((2, 2), lambda i, j: (i, j)),
grid=(4, 4),
)
def add_one(x_ref, o_ref):
o_ref[:, :] = x_ref[:, :] + 1
x = jnp.arange(64).reshape((8, 8))
np.testing.assert_allclose(add_one(x), x + 1)
def test_bool_array(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.bool_))
def logical_and(x_ref, o_ref):
o_ref[()] = jnp.logical_and(x_ref[()], True)
x = jnp.array(True)
self.assertTrue(jnp.all(logical_and(x)))
def test_vector_indexing(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), floatx),
)
def index(x_ref, i_ref, o_ref):
o_ref[()] = x_ref[i_ref[()]]
x = jnp.arange(5.)
for i in range(5):
np.testing.assert_allclose(index(x, i), x[i])
def test_pallas_call_no_outputs(self):
a = np.arange(256, dtype=np.int32)
f = self.pallas_call(lambda x_ref: None, ())
self.assertAllClose((), f(a))
def test_pallas_call_out_shape_is_singleton_tuple(self):
a = np.arange(1024, dtype=np.int32).reshape((8, 128))
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=(a,))
res = f(a)
self.assertIsInstance(res, tuple)
self.assertLen(res, 1)
def test_pallas_call_out_shape_is_list(self):
a = np.arange(1024, dtype=np.int32).reshape((8, 128))
f = self.pallas_call(lambda x_ref, o1_ref: None,
out_shape=[a])
res = f(a)
# TODO(necula): we normalize out_shape to a tuple, we shouldn't.
self.assertIsInstance(res, tuple)
@jtu.skip_on_devices("gpu") # TODO: RET_CHECK failure
def test_block_spec_with_padding(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
def f(*, shape, block_shape):
def kernel(o1_ref):
assert o1_ref.shape == block_shape
o1_ref[...] = jnp.full(o1_ref.shape, pl.program_id(0))
return self.pallas_call(kernel,
jax.ShapeDtypeStruct(shape, dtype=np.int32),
grid=((shape[0] + block_shape[0] - 1) // block_shape[0],),
out_specs=pl.BlockSpec(block_shape, lambda i: i))()
# No padding
pids = f(shape=(8,), block_shape=(2,))
self.assertAllClose(pids,
np.array([0, 0, 1, 1, 2, 2, 3, 3], dtype=np.int32))
# Pad the last block
pids = f(shape=(8,), block_shape=(3,))
self.assertAllClose(pids,
np.array([0, 0, 0, 1, 1, 1, 2, 2], dtype=np.int32))
# Works even if the shape is smaller than 1 block
pids = f(shape=(3,), block_shape=(8,))
self.assertAllClose(pids,
np.array([0, 0, 0], dtype=np.int32))
@parameterized.parameters("int32", "float32")
def test_block_spec_padding_is_nan(self, dtype_name):
if not self.INTERPRET:
self.skipTest("Only applicable for the interpret mode")
dtype = np.dtype(dtype_name)
def copy_kernel(x_ref, o_ref):
o_ref[...] = x_ref[...]
res = self.pallas_call(copy_kernel,
jax.ShapeDtypeStruct((6,), dtype=dtype),
grid=(1,),
in_specs=[pl.BlockSpec((6,), lambda i: 0)])(
np.full((3,), 42, dtype=dtype)
)
expected_pad = {"int32": jnp.iinfo(np.int32).min,
"float32": np.nan}[dtype_name]
self.assertAllClose(res,
np.array([42, 42, 42, expected_pad, expected_pad, expected_pad],
dtype=dtype))
def test_block_spec_mapped_dimension(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((4,), jnp.float32),
in_specs=[
pl.BlockSpec((None, 4), lambda _: (0, 0)),
pl.BlockSpec((None, 4), lambda _: (1, 0)),
],
grid=1,
)
def add_vectors(x_ref, y_ref, o_ref):
o_ref[:] = x_ref[:] + y_ref[:]
xy = jnp.arange(8., dtype=np.float32).reshape((2, 4))
out = add_vectors(xy, xy)
out_ref = xy[0] + xy[1]
np.testing.assert_allclose(out, out_ref)
@jtu.parameterized_filterable(
kwargs=[
dict(shape=(), block_shape=()),
dict(shape=(2,), block_shape=(2,)),
dict(shape=(128,), block_shape=(128,)),
dict(shape=(128,), block_shape=(64,), dtype=np.int16),
dict(shape=(128,), block_shape=(128,), dtype=np.int16),
dict(shape=(1024,), block_shape=(128,), dtype=np.int16),
dict(shape=(1024,), block_shape=(256,), dtype=np.int16),
dict(shape=(128,), block_shape=(64,)),
dict(shape=(2, 2), block_shape=(2, 2)),
dict(shape=(3, 3), block_shape=(3, 3)),
dict(shape=(4, 2), block_shape=(2, 2)),
dict(shape=(6, 2, 2), block_shape=(2, 2, 2)),
dict(shape=(6, 2, 2), block_shape=(3, 2, 2)),
dict(shape=(16, 128), block_shape=(8, 128)),
dict(shape=(6, 16, 128), block_shape=(2, 8, 128)),
dict(shape=(6, 16, 128), block_shape=(3, 8, 128)),
dict(shape=(16, 64), block_shape=(8, 64)),
dict(shape=(16, 128), block_shape=(4, 128)),
dict(shape=(16, 128), block_shape=(2, 128)),
dict(shape=(16, 128), block_shape=(8, 64)),
          # Blocks larger than the number of lanes and sublanes.
dict(shape=(9, 128), block_shape=(9, 64)),
dict(shape=(9, 128), block_shape=(9, 128)),
dict(shape=(18, 128), block_shape=(9, 128)),
dict(shape=(8, 129), block_shape=(8, 129)),
dict(shape=(9, 129), block_shape=(8, 129)),
dict(shape=(9, 129), block_shape=(9, 129)),
# Tiling of small arrays
dict(shape=(1, 128), block_shape=(4, 128)),
dict(shape=(2, 128), block_shape=(4, 128)),
dict(shape=(3, 128), block_shape=(4, 128)),
dict(shape=(5, 128), block_shape=(8, 128)),
]
)
def test_block_spec_valid_block_shapes(self, *,
shape, block_shape,
dtype=np.int32):
if np.iinfo(dtype).bits == 16:
self.skipTest("TODO(necula): test fails with Mosaic unimplemented for np.int16")
rank = len(shape)
assert rank == len(block_shape)
def copy_kernel(x_ref, o_ref):
o_ref[...] = x_ref[...]
grid = [(sd + bd - 1) // bd for sd, bd in zip(shape, block_shape)]
x = np.arange(math.prod(shape), dtype=dtype).reshape(shape)
test_context = contextlib.nullcontext()
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
if rank < 1:
test_context = self.assertRaisesRegex(
ValueError,
"TPU lowering currently supports only blocks of rank >= 1")
if rank >= 1:
bs0, as0 = block_shape[-1], shape[-1]
if rank >= 2:
bs1, as1 = block_shape[-2], shape[-2]
else:
bs1, as1 = 1, 1
evenly_divisible = (
(bs0 == as0 or bs0 % 128 == 0) and
(bs1 == as1 or bs1 % 8 == 0))
if not evenly_divisible:
if rank == 1:
test_context = self.assertRaisesRegex(
ValueError,
r"the first \(and only\) dimension of the block shape is a"
" multiple of the tiling size",
)
else:
test_context = self.assertRaisesRegex(
ValueError,
"last two dimensions of your block shape are divisible by 8"
" and 128",
)
elif jtu.test_device_matches(["gpu"]) and not self.INTERPRET:
block_size = math.prod(block_shape)
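# Bit trick: x & (x - 1) clears the lowest set bit, so the result is 0 exactly
# for powers of two, e.g. 64 & 63 == 0 while 48 & 47 == 32.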
block_size_is_power_2 = 0 == (block_size & (block_size - 1))
if not block_size_is_power_2:
test_context = self.assertRaisesRegex(
Exception,
"array arguments and results whose size is a power of 2")
with test_context:
res = self.pallas_call(
copy_kernel,
jax.ShapeDtypeStruct(x.shape, x.dtype),
grid=grid,
in_specs=[pl.BlockSpec(block_shape, lambda *indices: indices)],
out_specs=pl.BlockSpec(block_shape, lambda *indices: indices),
)(x)
self.assertAllClose(res, x)
def test_pallas_call_no_grid(self):
o_ref_shape = None
def kernel(o_ref):
nonlocal o_ref_shape
o_ref_shape = o_ref.shape
o_ref[...] = jnp.full(o_ref.shape, 42, dtype=np.int32)
pids = self.pallas_call(kernel,
jax.ShapeDtypeStruct((8, 128), dtype=np.int32))()
self.assertAllClose(pids, np.full((8, 128), 42, dtype=np.int32))
self.assertEqual(o_ref_shape, (8, 128))
def test_pallas_call_no_block_spec(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
o_ref_shape = None
def kernel(o_ref):
nonlocal o_ref_shape
o_ref_shape = o_ref.shape
o_ref[...] = jnp.full(o_ref.shape, pl.program_id(0))
pids = self.pallas_call(kernel,
jax.ShapeDtypeStruct((8,), dtype=np.int32),
grid=(1,))()
self.assertEqual(o_ref_shape, (8,))
self.assertAllClose(pids, np.array([0] * 8, dtype=np.int32))
def test_block_spec_no_block_shape_and_no_index_map(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
o_ref_shape = None
def kernel(o_ref):
nonlocal o_ref_shape
o_ref_shape = o_ref.shape
o_ref[...] = jnp.full(o_ref.shape, pl.program_id(0))
pids = self.pallas_call(kernel,
jax.ShapeDtypeStruct((8,), dtype=np.int32),
out_specs=pl.BlockSpec(),
grid=(1,))()
self.assertEqual(o_ref_shape, (8,))
self.assertAllClose(pids, np.array([0] * 8, dtype=np.int32))
def test_block_spec_no_block_shape(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
o_ref_shape = None
def kernel(o_ref):
nonlocal o_ref_shape
o_ref_shape = o_ref.shape
o_ref[...] = jnp.full(o_ref.shape, pl.program_id(0))
pids = self.pallas_call(kernel,
jax.ShapeDtypeStruct((8,), dtype=np.int32),
out_specs=pl.BlockSpec(None, lambda i: i),
grid=(1,))()
self.assertEqual(o_ref_shape, (8,))
self.assertAllClose(pids, np.array([0] * 8, dtype=np.int32))
def test_block_spec_no_index_map(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
o_ref_shape = None
def kernel(o_ref):
nonlocal o_ref_shape
o_ref_shape = o_ref.shape
o_ref[...] = jnp.full(o_ref.shape, pl.program_id(0))
pids = self.pallas_call(kernel,
jax.ShapeDtypeStruct((8,), dtype=np.int32),
out_specs=pl.BlockSpec((4,)),
grid=(1,))()
self.assertEqual(o_ref_shape, (4,))
self.assertAllClose(pids[0:4], np.array([0] * 4, dtype=np.int32))
def test_const_args(self):
if config.use_simplified_jaxpr_constants.value:
self.skipTest("TODO: decide if we want to keep these errors")
# See https://github.com/jax-ml/jax/issues/21557.
# to_store will be hoisted as a constant. Choose distinct shapes from in/outs.
to_store = np.arange(128, dtype=np.float32).reshape((1, 128))
x = np.arange(16 * 128, dtype=np.float32).reshape((16, 128))
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((64, 128), x.dtype),
grid=(2,),
in_specs=[pl.BlockSpec((8, 128), lambda i: (i, 0))],
out_specs=pl.BlockSpec((32, 128), lambda i: (i, 0)),
)
def kernel(src, dst):
dst[0:1] = to_store
with self.assertRaisesRegex(
ValueError,
"The kernel function .* captures constants"):
kernel(x)
def test_vector_slicing(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), floatx),
)
def index(x_ref, idx_ref, o_ref):
idx = idx_ref[()]
o_ref[:] = x_ref[idx]
x = jnp.arange(5.)
for i in range(4):
idx = jnp.arange(i, i + 2)
np.testing.assert_allclose(index(x, idx), x[idx])
@parameterized.named_parameters(*[
(f"m_{m}_n_{n}_k_{k}_dtype_{dtype}_bm_{block_size_m}_"
f"bn_{block_size_n}_bk_{block_size_k}", m, n, k, dtype,
block_size_m, block_size_n, block_size_k)
for m in [512, 1024]
for k in [512]
for n in [512, 1024]
for dtype in ["float32", "float16"]
for block_size_m in [64, 128]
for block_size_n in [64, 128]
for block_size_k in [32]
if block_size_m <= m and block_size_n <= n and block_size_k <= k
])
def test_matmul_block_spec(self, m, n, k, dtype, bm, bn, bk):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
k1, k2 = random.split(random.key(0))
x = random.normal(k1, (m, k), dtype=dtype)
y = random.normal(k2, (k, n), dtype=dtype)
out = matmul_block_spec(x, y, bm=bm, bn=bn, bk=bk,
interpret=self.INTERPRET)
expected = jnp.matmul(
x, y, preferred_element_type=jnp.float32).astype(dtype)
np.testing.assert_allclose(out, expected, atol=0.05, rtol=0.05)
def test_unused_ref(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
m, n = 16, 32
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
)
def dummy(_, o_ref):
o_ref[jnp.arange(m)[:, None], jnp.arange(n)[None, :]] = jnp.ones_like(
o_ref
)
key = random.key(0)
x = random.normal(key, (m, n))
np.testing.assert_allclose(dummy(x), jnp.ones_like(x), atol=1e-5, rtol=1e-5)
def test_using_pallas_slice(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
m, n = 32, 4
out_shape = jax.ShapeDtypeStruct((4, n), floatx)
@functools.partial(
self.pallas_call,
out_shape=out_shape,
)
def slice_kernel(x_ref, y_ref):
y_ref[:4, :4] = x_ref[:4, :4]
x = random.normal(random.key(0), (m, n))
y = slice_kernel(x)
y_ref = x[:4]
np.testing.assert_allclose(y, y_ref, atol=1e-2, rtol=1e-2)
def test_pallas_trace_cache(self):
if jtu.test_device_matches(["tpu"]) and not self.INTERPRET:
self.skipTest("On TPU the test works only in interpret mode")
trace_count = 0
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), jnp.float32),
)
def add_one(x_ref, o_ref):
nonlocal trace_count
o_ref[()] = x_ref[()] + 1.
trace_count += 1
@jax.jit
def f(x):
return add_one(add_one(x))
x = jnp.array(0., dtype=jnp.float32)
self.assertEqual(f(x), 2.)
self.assertEqual(trace_count, 1)
def test_pallas_call_under_disable_jit(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((8,), jnp.float32),
)
def add_one(x_ref, o_ref):
o_ref[...] = x_ref[...] + 1.
x = jnp.arange(8, dtype=jnp.float32)
result = add_one(x)
np.testing.assert_array_equal(result, x + 1.)
with jax.disable_jit():
result = add_one(x)
np.testing.assert_array_equal(result, x + 1.)
@parameterized.parameters(
("float32", None),
("float32", jax.lax.Precision.DEFAULT),
("float32", jax.lax.Precision.HIGH),
("float32", jax.lax.Precision.HIGHEST),
("float32", jax.lax.DotAlgorithmPreset.DEFAULT),
("float32", jax.lax.DotAlgorithmPreset.F16_F16_F32),
("float32", jax.lax.DotAlgorithmPreset.BF16_BF16_F32),
("float32", jax.lax.DotAlgorithmPreset.BF16_BF16_F32_X3),
("float32", jax.lax.DotAlgorithmPreset.BF16_BF16_F32_X6),
("float32", jax.lax.DotAlgorithmPreset.BF16_BF16_F32_X9),
("float32", jax.lax.DotAlgorithmPreset.TF32_TF32_F32),
("float32", jax.lax.DotAlgorithmPreset.TF32_TF32_F32_X3),
("float32", jax.lax.DotAlgorithmPreset.F32_F32_F32),
("bfloat16", None),
("bfloat16", jax.lax.Precision.DEFAULT),
("bfloat16", jax.lax.Precision.HIGHEST),
("bfloat16", jax.lax.DotAlgorithmPreset.DEFAULT),
("bfloat16", jax.lax.DotAlgorithmPreset.BF16_BF16_F32),
)
def test_dot_precision(self, dtype, precision):
if not jtu.test_device_matches(["gpu"]):
self.skipTest("`DotAlgorithmPreset` only supported on GPU.")
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((32, 64), jnp.float32),
)
def dot_kernel(x_ref, y_ref, o_ref):
o_ref[()] = pl.dot(x_ref[()], y_ref[()], precision=precision)
key0, key1 = random.split(random.key(0))
x = random.normal(key0, (32, 16), dtype=dtype)
y = random.normal(key1, (16, 64), dtype=dtype)
expected = jnp.dot(
x,
y,
precision=jax.lax.Precision.HIGHEST,
preferred_element_type=jnp.float32,
)
if dtype == "bfloat16" or precision in (
jax.lax.Precision.HIGHEST,
jax.lax.DotAlgorithmPreset.F32_F32_F32,
jax.lax.DotAlgorithmPreset.BF16_BF16_F32_X6,
jax.lax.DotAlgorithmPreset.BF16_BF16_F32_X9,
):
atol = 5e-6
elif precision in (
jax.lax.DotAlgorithmPreset.BF16_BF16_F32_X3,
jax.lax.DotAlgorithmPreset.TF32_TF32_F32_X3,
):
atol = 5e-4
else:
atol = 5e-2
self.assertAllClose(dot_kernel(x, y), expected, atol=atol, rtol=atol / 10)
@parameterized.parameters(jnp.int8, jnp.uint8)
def test_integer_dot(self, dtype):
if jtu.test_device_matches(["tpu"]) and not jtu.is_device_tpu_at_least(5):
self.skipTest("`int8` dot is only supported on v5 TPUs and newer.")
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((32, 64), jnp.int32),
)
def dot_kernel(x_ref, y_ref, o_ref):
o_ref[()] = pl.dot(x_ref[()], y_ref[()])
key0, key1 = random.split(random.key(0))
# FIXME(cjfj): TPU fails with `uint8` values >= 128.
kwargs = dict(minval=jnp.iinfo(dtype).min, maxval=128, dtype=dtype)
# TODO(cjfj): Investigate why this fails on GPU with `k == 16`.
x = random.randint(key0, (32, 128), **kwargs)
y = random.randint(key1, (128, 64), **kwargs)
expected = jnp.dot(x, y, preferred_element_type=jnp.int32)
self.assertAllClose(dot_kernel(x, y), expected, atol=0.0, rtol=0.0)
def test_dot_with_vector(self):
if not jtu.test_device_matches(["gpu"]) or self.INTERPRET:
self.skipTest(
"jnp.dot is only restricted to 2D on GPU in non-interpret mode."
)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((32,), jnp.float32),
)
def dot_kernel(x_ref, y_ref, o_ref):
o_ref[()] = jnp.dot(x_ref[()], y_ref[()])
key0, key1 = random.split(random.key(0))
x = random.normal(key0, (32, 64), dtype=jnp.float32)
y = random.normal(key1, (64,), dtype=jnp.float32)
with self.assertRaisesRegex(Exception, "must be 2D"):
dot_kernel(x, y)
@parameterized.parameters(jnp.int4, jnp.uint4)
def test_subbyte_load(self, dtype):
if not jtu.test_device_matches(["gpu"]):
self.skipTest("`[u]int4` loads only supported on GPU.")
x = jnp.arange(-128, 128, dtype=jnp.int8)
@functools.partial(self.pallas_call, out_shape=x)
def copy_kernel(x_ref, o_ref):
o_ref[()] = x_ref[()].astype(jnp.int8)
expected = x.astype(dtype).astype(jnp.int8)
self.assertAllClose(copy_kernel(x.astype(dtype)), expected)
@parameterized.parameters(jnp.int4, jnp.uint4)
def test_subbyte_load_non_contiguous(self, dtype):
if not jtu.test_device_matches(["gpu"]):
self.skipTest("`[u]int4` loads only supported on GPU.")
x = jnp.arange(-128, 64, dtype=jnp.int8)
expected = x.astype(dtype).astype(jnp.int8)[::3]
@functools.partial(self.pallas_call, out_shape=expected)
def copy_kernel(x_ref, o_ref):
o_ref[()] = x_ref[::3].astype(jnp.int8)
self.assertAllClose(copy_kernel(x.astype(dtype)), expected)
@parameterized.parameters(True, False)
def test_float8_e4m3b11fnuz_dot(self, transpose):
if not jtu.test_device_matches(["tpu"]) or not jtu.is_device_tpu_at_least(5):
self.skipTest("`float8_e4m3b11fnuz` dot only supported on TPU.")
if jtu.is_device_tpu(7, "x"):
self.skipTest("Unsupported type for matmul.")
dtype = jnp.float8_e4m3b11fnuz
x = jax.random.normal(jax.random.key(0), (2048, 1024), dtype=jnp.bfloat16)
y = jax.random.normal(jax.random.key(1), (1024, 1024), dtype=dtype)
if transpose:
expected = x @ y.T.astype(jnp.bfloat16)
else:
expected = x @ y.astype(jnp.bfloat16)
@functools.partial(
self.pallas_call,
in_specs=(pl.BlockSpec(), pl.BlockSpec()),
out_shape=expected,
)
def dot_kernel(x_ref, y_ref, o_ref):
o_ref[...] = pl.dot(
x_ref[...], y_ref[...], trans_b=transpose
).astype(o_ref.dtype)
self.assertAllClose(dot_kernel(x, y), expected)
@parameterized.parameters(
((32,), 2, 0), ((32, 64), 4, 0), ((32, 16), 8, 1), ((32, 16, 2), 16, 1)
)
def test_split(self, shape, num_parts, axis):
if jtu.test_device_matches(["tpu"]) and shape[axis] == num_parts:
self.skipTest("TPU doesn't support fully split axis.")
x = jax.random.normal(jax.random.key(0), shape)
expected = jnp.split(x, num_parts, axis)
@functools.partial(self.pallas_call, out_shape=expected)
def kernel(x_ref, *o_ref):
x_parts = jnp.split(x_ref[()], num_parts, axis)
for o_ref, x_part in zip(o_ref, x_parts):
o_ref[...] = x_part
self.assertAllClose(kernel(x), expected)
| PallasCallTest |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 30873,
"end": 31074
} | class ____(BaseModel):
"""Get the response content part of a Human-in-the-loop response."""
ti_id: UUID
type: Literal["GetHITLDetailResponse"] = "GetHITLDetailResponse"
| GetHITLDetailResponse |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_commits.py | {
"start": 332,
"end": 3911
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-commits"
def test_simple(self) -> None:
project = self.create_project(name="komal")
version = "1.1"
repo = Repository.objects.create(organization_id=project.organization_id, name=project.name)
release = Release.objects.create(organization_id=project.organization_id, version=version)
commit = self.create_commit(repo=repo, project=project, key="a" * 40, release=release)
ReleaseProject.objects.create(project=project, release=release)
self.login_as(user=self.user)
response = self.get_success_response(project.organization.slug, project.slug)
assert [r["id"] for r in response.data] == [commit.key]
def test_duplicate_released_commits(self) -> None:
project = self.create_project(name="komal")
repo = Repository.objects.create(organization_id=project.organization_id, name=project.name)
release = Release.objects.create(organization_id=project.organization_id, version="1.1")
release2 = Release.objects.create(organization_id=project.organization_id, version="1.2")
ReleaseProject.objects.create(project=project, release=release)
commit = Commit.objects.create(
organization_id=project.organization_id, repository_id=repo.id, key="a" * 40
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
release=release,
commit=commit,
order=0,
project_id=project.id,
)
ReleaseCommit.objects.create(
organization_id=project.organization_id,
release=release2,
commit=commit,
order=0,
project_id=project.id,
)
self.login_as(user=self.user)
response = self.get_success_response(project.organization.slug, project.slug)
assert len(response.data) == 1
def test_query_filter(self) -> None:
project = self.create_project(name="komal")
version = "1.1"
repo = Repository.objects.create(organization_id=project.organization_id, name=project.name)
release = Release.objects.create(organization_id=project.organization_id, version=version)
self.create_commit(repo=repo, project=project, key="foobar", release=release)
ReleaseProject.objects.create(project=project, release=release)
self.login_as(user=self.user)
url = reverse(
"sentry-api-0-project-commits",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.get(url + "?query=foobar", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
response = self.client.get(url + "?query=random", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 0
response = self.client.get(url + "?query=foob", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
response = self.client.get(url + "?query=f", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 1
response = self.client.get(url + "?query=ooba", format="json")
assert response.status_code == 200, response.content
assert len(response.data) == 0
| ProjectCommitListTest |
python | pytorch__pytorch | torch/_inductor/wrapper_benchmark.py | {
"start": 415,
"end": 5256
} | class ____(Protocol):
def __call__(self, times: int, repeat: int) -> float: ...
_kernel_category_choices = [
"foreach",
"persistent_reduction",
"pointwise",
"reduction",
"split_scan",
"template",
]
def get_kernel_category_by_source_code(src_code: str) -> str:
"""
Similar to get_kernel_category but uses the source code. Call this API
if we have not compiled the src_code to a module yet.
"""
choices = [
ch for ch in _kernel_category_choices if f"@triton_heuristics.{ch}" in src_code
]
if len(choices) == 1:
return choices[0]
else:
return "unknown"
def get_kernel_category(kernel_mod: ModuleType) -> str:
"""
Given the module defining a triton kernel, return the category of the kernel.
Category is one of the entries in _kernel_category_choices, e.g.:
- pointwise
- reduction
- persistent_reduction
Currently we simply decide the category depending on what decorator is imported
by the kernel.
"""
choices = [ch for ch in _kernel_category_choices if ch in kernel_mod.__dict__]
if len(choices) == 1:
return choices[0]
else:
return "unknown"
def get_triton_kernel(mod: ModuleType): # type: ignore[no-untyped-def]
from torch._inductor.runtime.triton_heuristics import CachingAutotuner
cand_list = [
v
for k, v in mod.__dict__.items()
if k.startswith("triton_") and isinstance(v, CachingAutotuner)
]
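# Each compiled module is expected to define exactly one autotuned Triton
# kernel, hence the assert below.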
assert len(cand_list) == 1
return cand_list[0]
def benchmark_all_kernels(
benchmark_name: str, benchmark_all_configs: Optional[dict[Any, Any]]
) -> None:
"""
An experimental API used only when config.benchmark_kernel is true.
Run the kernel benchmarks for all the kernels cached in PyCodeCache.
Used in the compiled modules.
For convenience, this method lives here rather than being codegen'd, since its
implementation does not change across the graph modules being compiled.
"""
from torch._inductor.codecache import PyCodeCache
nfound = 0
for kernel_mod in PyCodeCache.modules:
kernel_key = kernel_mod.key
if not hasattr(kernel_mod, "get_args") or not hasattr(kernel_mod, "call"):
continue
triton_kernel = get_triton_kernel(kernel_mod)
device_type = triton_kernel.device_props.type
kernel_category = get_kernel_category(kernel_mod)
args = kernel_mod.get_args()
num_in_out_ptrs = len(
[
arg_name
for arg_name in triton_kernel.fn.arg_names
if arg_name.startswith("in_out_ptr")
]
)
num_gb = triton_kernel.inductor_meta.get("kernel_num_gb", None)
if num_gb is None:
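# Estimate memory traffic in GB from the argument sizes; num_in_out_args tells
# get_num_bytes which arguments are both read and written (and so, presumably,
# count twice).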
num_gb = get_num_bytes(*args, num_in_out_args=num_in_out_ptrs) / 1e9
def get_info_str(
ms: float,
n_regs: Optional[Any],
n_spills: Optional[Any],
shared: Optional[Any],
prefix: str = "",
) -> str:
if not any(x is None for x in [n_regs, n_spills, shared]):
kernel_detail_str = (
f" {n_regs:3} regs {n_spills:3} spills {shared:8} shared mem"
)
else:
kernel_detail_str = ""
gb_per_s = num_gb / (ms / 1e3)
return create_bandwidth_info_str(
ms, num_gb, gb_per_s, prefix=prefix, suffix=kernel_detail_str
)
kernel_desc = (
f"{benchmark_name:20} {kernel_category[:3].upper()} {kernel_key[:10]}"
)
if benchmark_all_configs:
assert hasattr(kernel_mod, "benchmark_all_configs")
bench_result = kernel_mod.benchmark_all_configs(args)
print(kernel_desc)
for launcher, ms in bench_result.items():
print(
f" {get_info_str(ms, launcher.n_regs, launcher.n_spills, launcher.shared)} @ {launcher.config}"
)
else:
ms = benchmarker.benchmark(
lambda: kernel_mod.call(args),
device=device_type,
rep=40,
)
assert len(triton_kernel.launchers) == 1, (
"Autotuner should have selected the best config"
)
launcher = triton_kernel.launchers[0]
print(
get_info_str(
ms,
launcher.n_regs,
launcher.n_spills,
launcher.shared,
prefix=f"{kernel_desc} ",
)
)
nfound += 1
if nfound == 0:
print(
"No kernel with benchmark functionality found. Make sure you run inductor with config.benchmark_kernel being True"
)
@dataclass
| BenchmarkCallableType |
python | neetcode-gh__leetcode | python/2390-removing-stars-from-a-string.py | {
"start": 85,
"end": 314
} | class ____(object) :
def removeStars(self, s) :
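# Stack-based scan: push ordinary characters; each '*' removes the closest
# not-yet-removed character to its left.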
res = []
for c in s :
if res and c == '*':
res.pop()
else:
res.append(c)
return ''.join(res)
| Solution |
python | huggingface__transformers | src/transformers/models/afmoe/modular_afmoe.py | {
"start": 1694,
"end": 1740
} | class ____(GptOssRMSNorm):
pass
| AfmoeRMSNorm |
python | readthedocs__readthedocs.org | readthedocs/proxito/views/hosting.py | {
"start": 28342,
"end": 28442
} | class ____(SettingsOverrideObject):
_default_class = BaseReadTheDocsConfigJson
| ReadTheDocsConfigJson |
python | huggingface__transformers | tests/test_backbone_common.py | {
"start": 799,
"end": 10283
} | class ____:
all_model_classes = ()
has_attentions = True
def test_config(self):
config_class = self.config_class
# test default config
config = config_class()
self.assertIsNotNone(config)
num_stages = len(config.depths) if hasattr(config, "depths") else config.num_hidden_layers
expected_stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, num_stages + 1)]
self.assertEqual(config.stage_names, expected_stage_names)
self.assertTrue(set(config.out_features).issubset(set(config.stage_names)))
# Test out_features and out_indices are correctly set
# out_features and out_indices both None
config = config_class(out_features=None, out_indices=None)
self.assertEqual(config.out_features, [config.stage_names[-1]])
self.assertEqual(config.out_indices, [len(config.stage_names) - 1])
# out_features and out_indices both set
config = config_class(out_features=["stem", "stage1"], out_indices=[0, 1])
self.assertEqual(config.out_features, ["stem", "stage1"])
self.assertEqual(config.out_indices, [0, 1])
# Only out_features set
config = config_class(out_features=["stage1", "stage3"])
self.assertEqual(config.out_features, ["stage1", "stage3"])
self.assertEqual(config.out_indices, [1, 3])
# Only out_indices set
config = config_class(out_indices=[0, 2])
self.assertEqual(config.out_features, [config.stage_names[0], config.stage_names[2]])
self.assertEqual(config.out_indices, [0, 2])
# Error raised when out_indices do not correspond to out_features
with self.assertRaises(ValueError):
config = config_class(out_features=["stage1", "stage2"], out_indices=[0, 2])
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_config_save_pretrained(self):
config_class = self.config_class
config_first = config_class(out_indices=[0, 1, 2, 3])
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(tmpdirname)
config_second = self.config_class.from_pretrained(tmpdirname)
self.assertEqual(config_second.to_dict(), config_first.to_dict())
def test_channels(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertEqual(len(model.channels), len(config.out_features))
num_features = model.num_features
out_indices = [config.stage_names.index(feat) for feat in config.out_features]
out_channels = [num_features[idx] for idx in out_indices]
self.assertListEqual(model.channels, out_channels)
new_config = copy.deepcopy(config)
new_config.out_features = None
model = model_class(new_config)
self.assertEqual(len(model.channels), 1)
self.assertListEqual(model.channels, [num_features[-1]])
new_config = copy.deepcopy(config)
new_config.out_indices = None
model = model_class(new_config)
self.assertEqual(len(model.channels), 1)
self.assertListEqual(model.channels, [num_features[-1]])
def test_create_from_modified_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
result = model(**inputs_dict)
self.assertEqual(len(result.feature_maps), len(config.out_features))
self.assertEqual(len(model.channels), len(config.out_features))
self.assertEqual(len(result.feature_maps), len(config.out_indices))
self.assertEqual(len(model.channels), len(config.out_indices))
# Check output of last stage is taken if out_features=None, out_indices=None
modified_config = copy.deepcopy(config)
modified_config.out_features = None
model = model_class(modified_config)
model.to(torch_device)
model.eval()
result = model(**inputs_dict)
self.assertEqual(len(result.feature_maps), 1)
self.assertEqual(len(model.channels), 1)
modified_config = copy.deepcopy(config)
modified_config.out_indices = None
model = model_class(modified_config)
model.to(torch_device)
model.eval()
result = model(**inputs_dict)
self.assertEqual(len(result.feature_maps), 1)
self.assertEqual(len(model.channels), 1)
# Check backbone can be initialized with fresh weights
modified_config = copy.deepcopy(config)
modified_config.use_pretrained_backbone = False
model = model_class(modified_config)
model.to(torch_device)
model.eval()
result = model(**inputs_dict)
def test_backbone_common_attributes(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for backbone_class in self.all_model_classes:
backbone = backbone_class(config)
self.assertTrue(hasattr(backbone, "backbone_type"))
self.assertTrue(hasattr(backbone, "stage_names"))
self.assertTrue(hasattr(backbone, "num_features"))
self.assertTrue(hasattr(backbone, "out_indices"))
self.assertTrue(hasattr(backbone, "out_features"))
self.assertTrue(hasattr(backbone, "out_feature_channels"))
self.assertTrue(hasattr(backbone, "channels"))
self.assertIsInstance(backbone.backbone_type, BackboneType)
# Verify num_features has been initialized in the backbone init
self.assertIsNotNone(backbone.num_features)
self.assertTrue(len(backbone.channels) == len(backbone.out_indices))
self.assertTrue(len(backbone.stage_names) == len(backbone.num_features))
self.assertTrue(len(backbone.channels) <= len(backbone.num_features))
self.assertTrue(len(backbone.out_feature_channels) == len(backbone.stage_names))
def test_backbone_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config)
backbone.to(torch_device)
backbone.eval()
outputs = backbone(**inputs_dict)
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps, tuple)
self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
self.assertIsNone(outputs.hidden_states)
self.assertIsNone(outputs.attentions)
# Test output_hidden_states=True
outputs = backbone(**inputs_dict, output_hidden_states=True)
self.assertIsNotNone(outputs.hidden_states)
self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
for hidden_state, n_channels in zip(outputs.hidden_states, backbone.channels):
self.assertTrue(hidden_state.shape[:2], (batch_size, n_channels))
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict, output_attentions=True)
self.assertIsNotNone(outputs.attentions)
def test_backbone_stage_selection(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
config.out_indices = [-2, -1]
backbone = backbone_class(config)
backbone.to(torch_device)
backbone.eval()
outputs = backbone(**inputs_dict)
# Test number of feature maps returned
self.assertIsInstance(outputs.feature_maps, tuple)
self.assertTrue(len(outputs.feature_maps) == 2)
# Order of channels returned is same as order of channels iterating over stage names
channels_from_stage_names = [
backbone.out_feature_channels[name] for name in backbone.stage_names if name in backbone.out_features
]
self.assertEqual(backbone.channels, channels_from_stage_names)
for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
| BackboneTesterMixin |
python | fsspec__filesystem_spec | fsspec/implementations/tests/local/local_test.py | {
"start": 267,
"end": 338
} | class ____(abstract.AbstractPutTests, LocalFixtures):
pass
| TestLocalPut |
python | getsentry__sentry | src/sentry/middleware/subdomain.py | {
"start": 346,
"end": 1860
} | class ____:
"""
Extracts any subdomain from request.get_host() relative to the `system.base-hostname` option, and attaches it to
the request object under request.subdomain.
If no subdomain is extracted, then request.subdomain is None.
"""
def __init__(self, get_response: Callable[[HttpRequest], HttpResponseBase]):
self.base_hostname = options.get("system.base-hostname")
if self.base_hostname:
self.base_hostname = self.base_hostname.rstrip("/")
self.get_response = get_response
def __call__(self, request: HttpRequest) -> HttpResponseBase:
request.subdomain = None
if not self.base_hostname:
return self.get_response(request)
try:
host = request.get_host().lower()
except DisallowedHost:
url_prefix = options.get("system.url-prefix")
logger.info(
"subdomain.disallowed_host",
extra={
"location": url_prefix,
"host": request.META.get("HTTP_HOST", "<unknown>"),
"path": request.path,
},
)
return HttpResponseRedirect(url_prefix)
if not host.endswith(f".{self.base_hostname}"):
return self.get_response(request)
subdomain = host[: -len(self.base_hostname)].rstrip(".")
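# e.g. with base_hostname "example.com" (illustrative value), a host of
# "acme.example.com" yields the subdomain "acme".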
if len(subdomain) > 0:
request.subdomain = subdomain
return self.get_response(request)
| SubdomainMiddleware |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 75506,
"end": 75897
} | class ____(SimpleHandlerTestCase):
class Handler(RequestHandler):
def get(self):
1 / 0
def log_exception(self, typ, value, tb):
1 / 0
def test_buggy_log_exception(self):
# Something gets logged even though the application's
# logger is broken.
with ExpectLog(app_log, ".*"):
self.fetch("/")
| BuggyLoggingTest |
python | huggingface__transformers | src/transformers/models/splinter/modeling_splinter.py | {
"start": 4368,
"end": 6845
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.attention_dropout = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
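# Split the hidden size into heads: view to (batch, seq, heads, head_dim),
# then transpose to (batch, heads, seq, head_dim) for the attention op.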
query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter
| SplinterSelfAttention |
python | getsentry__sentry | tests/sentry/workflow_engine/tasks/test_delayed_workflows.py | {
"start": 7159,
"end": 8772
} | class ____(TestDelayedWorkflowTaskBase):
@override_options({"delayed_processing.batch_size": 1})
@patch("sentry.workflow_engine.tasks.delayed_workflows.process_delayed_workflows.apply_async")
def test_batched_cleanup(self, mock_process_delayed: MagicMock) -> None:
self._push_base_events()
project_client = self.batch_client.for_project(self.project.id)
all_data = project_client.get_hash_data(batch_key=None)
process_in_batches(project_client)
batch_one_key = mock_process_delayed.call_args_list[0][1]["kwargs"]["batch_key"]
batch_two_key = mock_process_delayed.call_args_list[1][1]["kwargs"]["batch_key"]
# Verify we removed the data from the buffer
data = project_client.get_hash_data(batch_key=None)
assert data == {}
first_batch = project_client.get_hash_data(batch_key=batch_one_key)
event_data = EventRedisData.from_redis_data(first_batch, continue_on_error=False)
from sentry.workflow_engine.processors.delayed_workflow import cleanup_redis_buffer
cleanup_redis_buffer(
project_client,
event_data.events.keys(),
batch_one_key,
)
# Verify the batch we "executed" is removed
data = project_client.get_hash_data(batch_key=batch_one_key)
assert data == {}
# Verify the batch we didn't execute is still in redis
data = project_client.get_hash_data(batch_key=batch_two_key)
for key in first_batch.keys():
all_data.pop(key)
assert data == all_data
| TestDelayedWorkflowTaskIntegration |
python | kamyu104__LeetCode-Solutions | Python/number-of-self-divisible-permutations.py | {
"start": 93,
"end": 730
} | class ____(object):
def selfDivisiblePermutationCount(self, n):
"""
:type n: int
:rtype: int
"""
def popcount(x):
return bin(x).count('1')
def gcd(a, b):
while b:
a, b = b, a%b
return a
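# lookup[i][j]: True when value j+1 may occupy position i+1 (here: when the
# two are coprime).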
lookup = [[gcd(i+1, j+1) == 1 for j in xrange(n)] for i in xrange(n)]
dp = [0]*(1<<n)
dp[0] = 1
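# Bitmask DP over the set of used values: dp[mask] counts the valid ways to
# fill positions 1..popcount(mask) with exactly the values in mask.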
for mask in xrange(1<<n):
i = popcount(mask)
for j in xrange(n):
if mask&(1<<j) == 0 and lookup[i][j]:
dp[mask|(1<<j)] += dp[mask]
return dp[-1]
| Solution |
python | doocs__leetcode | solution/0200-0299/0250.Count Univalue Subtrees/Solution.py | {
"start": 192,
"end": 795
} | class ____:
def countUnivalSubtrees(self, root: Optional[TreeNode]) -> int:
def dfs(root):
if root is None:
return True
l, r = dfs(root.left), dfs(root.right)
if not l or not r:
return False
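# A missing child imposes no constraint: substitute the parent's own value so
# the equality check below treats the absent side as matching.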
a = root.val if root.left is None else root.left.val
b = root.val if root.right is None else root.right.val
if a == b == root.val:
nonlocal ans
ans += 1
return True
return False
ans = 0
dfs(root)
return ans
| Solution |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/events.py | {
"start": 125363,
"end": 131316
} | class ____(event.Events["registry"]):
"""Define events specific to :class:`_orm.registry` lifecycle.
The :class:`_orm.RegistryEvents` class defines events that are specific
to the lifecycle and operation of the :class:`_orm.registry` object.
e.g.::
from typing import Any
from sqlalchemy import event
from sqlalchemy.orm import registry
from sqlalchemy.orm import TypeResolve
from sqlalchemy.types import TypeEngine
reg = registry()
@event.listens_for(reg, "resolve_type_annotation")
def resolve_custom_type(
resolve_type: TypeResolve,
) -> TypeEngine[Any] | None:
if resolve_type.resolved_type is MyCustomType:
return MyCustomSQLType()
return None
The events defined by :class:`_orm.RegistryEvents` include
:meth:`_orm.RegistryEvents.resolve_type_annotation`,
:meth:`_orm.RegistryEvents.before_configured`, and
:meth:`_orm.RegistryEvents.after_configured`. These events may be
applied to a :class:`_orm.registry` object as shown in the preceding
example, as well as to a declarative base class directly, which will
automtically locate the registry for the event to be applied::
from typing import Any
from sqlalchemy import event
from sqlalchemy.orm import DeclarativeBase
from sqlalchemy.orm import registry as RegistryType
from sqlalchemy.orm import TypeResolve
from sqlalchemy.types import TypeEngine
class Base(DeclarativeBase):
pass
@event.listens_for(Base, "resolve_type_annotation")
def resolve_custom_type(
resolve_type: TypeResolve,
) -> TypeEngine[Any] | None:
if resolve_type.resolved_type is MyCustomType:
return MyCustomSQLType()
else:
return None
@event.listens_for(Base, "after_configured")
def after_base_configured(registry: RegistryType) -> None:
print(f"Registry {registry} fully configured")
.. versionadded:: 2.1
"""
_target_class_doc = "SomeRegistry"
_dispatch_target = decl_api.registry
@classmethod
def _accept_with(
cls,
target: Any,
identifier: str,
) -> Any:
if isinstance(target, decl_api.registry):
return target
elif (
isinstance(target, type)
and "_sa_registry" in target.__dict__
and isinstance(target.__dict__["_sa_registry"], decl_api.registry)
):
return target._sa_registry # type: ignore[attr-defined]
else:
return None
@classmethod
def _listen(
cls,
event_key: _EventKey["registry"],
**kw: Any,
) -> None:
identifier = event_key.identifier
# Only resolve_type_annotation needs retval=True
if identifier == "resolve_type_annotation":
kw["retval"] = True
event_key.base_listen(**kw)
def resolve_type_annotation(
self, resolve_type: decl_api.TypeResolve
) -> Optional[Any]:
"""Intercept and customize type annotation resolution.
This event is fired when the :class:`_orm.registry` attempts to
resolve a Python type annotation to a SQLAlchemy type. This is
particularly useful for handling advanced typing scenarios such as
nested :pep:`695` type aliases.
The :meth:`.RegistryEvents.resolve_type_annotation` event automatically
sets up ``retval=True`` when the event is set up, so that implementing
functions may return a resolved type, or ``None`` to indicate no type
was resolved, and the default resolution for the type should proceed.
:param resolve_type: A :class:`_orm.TypeResolve` object which contains
all the relevant information about the type, including a link to the
registry and its resolver function.
:return: A SQLAlchemy type to use for the given Python type. If
``None`` is returned, the default resolution behavior will proceed
from there.
.. versionadded:: 2.1
.. seealso::
:ref:`orm_declarative_resolve_type_event`
"""
def before_configured(self, registry: "registry") -> None:
"""Called before a series of mappers in this registry are configured.
This event is invoked each time the :func:`_orm.configure_mappers`
function is invoked and this registry has mappers that are part of
the configuration process.
Compared to the :meth:`.MapperEvents.before_configured` event hook,
this event is local to the mappers within a specific
:class:`_orm.registry` and not for all :class:`.Mapper` objects
globally.
:param registry: The :class:`_orm.registry` instance.
.. versionadded:: 2.1
.. seealso::
:meth:`.RegistryEvents.after_configured`
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
"""
def after_configured(self, registry: "registry") -> None:
"""Called after a series of mappers in this registry are configured.
This event is invoked each time the :func:`_orm.configure_mappers`
function completes and this registry had mappers that were part of
the configuration process.
Compared to the :meth:`.MapperEvents.after_configured` event hook, this
event is local to the mappers within a specific :class:`_orm.registry`
and not for all :class:`.Mapper` objects globally.
:param registry: The :class:`_orm.registry` instance.
.. versionadded:: 2.1
.. seealso::
:meth:`.RegistryEvents.before_configured`
:meth:`.MapperEvents.before_configured`
:meth:`.MapperEvents.after_configured`
"""
| RegistryEvents |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 5689,
"end": 5781
} | class ____(BaseModel):
"""Container for label nodes."""
nodes: list[LabelNode]
| Labels |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 100793,
"end": 119613
} | class ____(SpeechT5PreTrainedModel):
input_modalities = ("text",)
main_input_name = "input_ids"
def __init__(self, config: SpeechT5Config):
super().__init__(config)
if config.vocab_size is None:
raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that does not define the"
" vocabulary size of the language model head. Please instantiate the model as follows:"
" `SpeechT5ForTextToSpeech.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of"
" your model's configuration."
)
text_encoder = SpeechT5EncoderWithTextPrenet(config)
speech_decoder = SpeechT5DecoderWithSpeechPrenet(config)
self.speecht5 = SpeechT5Model(config, text_encoder, speech_decoder)
self.speech_decoder_postnet = SpeechT5SpeechDecoderPostnet(config)
# Initialize weights and apply final processing
self.post_init()
@classmethod
def can_generate(cls) -> bool:
# Speecht5 has a unique model structure, where the external class (`SpeechT5ForTextToSpeech`) doesn't need to inherit from
# `GenerationMixin` (it has a non-standard generation method). This means that the base `can_generate()` will return `False`,
# but we need to override it so as to do `GenerationConfig` handling in multiple parts of the codebase.
return True
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
decoder_input_values: Optional[torch.FloatTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
speaker_embeddings: Optional[torch.FloatTensor] = None,
labels: Optional[torch.FloatTensor] = None,
stop_labels: Optional[torch.Tensor] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple, Seq2SeqSpectrogramOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
[`~PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
decoder_input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`):
Float values of input mel spectrogram.
SpeechT5 uses an all-zero spectrum as the starting token for `decoder_input_values` generation. If
`past_key_values` is used, optionally only the last `decoder_input_values` have to be input (see
`past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_values`. Causal mask will
also be used by default.
If you want to change padding behavior, you should read [`SpeechT5Decoder._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
information on the default strategy.
speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
Tensor containing the speaker embeddings.
labels (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_mel_bins)`, *optional*):
Float values of target mel spectrogram. Timesteps set to `-100.0` are ignored (masked) for the loss
computation. Spectrograms can be obtained using [`SpeechT5Processor`]. See [`SpeechT5Processor.__call__`]
for details.
stop_labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Binary tensor indicating the position of the stop token in the sequence.
Example:
```python
>>> from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan, set_seed
>>> import torch
>>> processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
>>> model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
>>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
>>> inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
>>> speaker_embeddings = torch.zeros((1, 512)) # or load xvectors from a file
>>> set_seed(555) # make deterministic
>>> # generate speech
>>> speech = model.generate(inputs["input_ids"], speaker_embeddings=speaker_embeddings, vocoder=vocoder)
>>> speech.shape
torch.Size([15872])
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_values is None:
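# Teacher forcing: derive the decoder inputs from the target spectrogram,
# shifted right by one (reduced) frame.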
decoder_input_values, decoder_attention_mask = shift_spectrograms_right(
labels, self.config.reduction_factor, decoder_attention_mask
)
if self.config.use_guided_attention_loss:
output_attentions = True
outputs = self.speecht5(
input_values=input_ids,
attention_mask=attention_mask,
decoder_input_values=decoder_input_values,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
use_cache=use_cache,
speaker_embeddings=speaker_embeddings,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
)
outputs_before_postnet, outputs_after_postnet, logits = self.speech_decoder_postnet(outputs[0])
loss = None
if labels is not None:
criterion = SpeechT5SpectrogramLoss(self.config)
loss = criterion(
attention_mask,
outputs_before_postnet,
outputs_after_postnet,
logits,
labels,
outputs.cross_attentions,
)
if not return_dict:
output = (outputs_after_postnet,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return Seq2SeqSpectrogramOutput(
loss=loss,
spectrogram=outputs_after_postnet,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
@torch.no_grad()
def generate(
self,
input_ids: torch.LongTensor,
attention_mask: Optional[torch.LongTensor] = None,
speaker_embeddings: Optional[torch.FloatTensor] = None,
threshold: float = 0.5,
minlenratio: float = 0.0,
maxlenratio: float = 20.0,
vocoder: Optional[nn.Module] = None,
output_cross_attentions: bool = False,
return_output_lengths: bool = False,
**kwargs,
) -> Union[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor]]:
r"""
Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a
speech waveform using a vocoder.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
[`~PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Attention mask from the tokenizer, required for batched inference to signal to the model where to
ignore padded tokens from the input_ids.
speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
Tensor containing the speaker embeddings.
threshold (`float`, *optional*, defaults to 0.5):
The generated sequence ends when the predicted stop token probability exceeds this value.
minlenratio (`float`, *optional*, defaults to 0.0):
Used to calculate the minimum required length for the output sequence.
maxlenratio (`float`, *optional*, defaults to 20.0):
Used to calculate the maximum allowed length for the output sequence.
vocoder (`nn.Module`, *optional*):
The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
spectrogram.
output_cross_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of the decoder's cross-attention layers.
return_output_lengths (`bool`, *optional*, defaults to `False`):
Whether or not to return the concrete spectrogram/waveform lengths.
Returns:
`tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
- when `return_output_lengths` is False
- **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
`(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
- **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
`(num_frames,)` -- The predicted speech waveform.
- **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
`torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads,
output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
- when `return_output_lengths` is True
- **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
`(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that
are padded to the maximum length.
- **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `list[Int]` -- A list of
all the concrete lengths for each spectrogram.
- **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
`(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length.
- **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `list[Int]` -- A list of all
the concrete lengths for each waveform.
- **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads,
output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
"""
if speaker_embeddings is not None:
batch_size = input_ids.size(0)
if speaker_embeddings.size(0) != batch_size:
if speaker_embeddings.size(0) == 1:
speaker_embeddings = speaker_embeddings.repeat(batch_size, 1)
else:
raise ValueError(
"The first dimension of speaker_embeddings must be either 1 or the same as batch_size."
)
return _generate_speech(
self,
input_ids,
speaker_embeddings,
attention_mask,
threshold,
minlenratio,
maxlenratio,
vocoder,
output_cross_attentions,
return_output_lengths,
)
@torch.no_grad()
def generate_speech(
self,
input_ids: torch.LongTensor,
speaker_embeddings: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
threshold: float = 0.5,
minlenratio: float = 0.0,
maxlenratio: float = 20.0,
vocoder: Optional[nn.Module] = None,
output_cross_attentions: bool = False,
return_output_lengths: bool = False,
) -> Union[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor]]:
r"""
Converts a sequence of input tokens into a sequence of mel spectrograms, which are subsequently turned into a
speech waveform using a vocoder.
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SpeechT5Tokenizer`]. See [`~PreTrainedTokenizer.encode`] and
[`~PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
speaker_embeddings (`torch.FloatTensor` of shape `(batch_size, config.speaker_embedding_dim)`, *optional*):
Tensor containing the speaker embeddings.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
`[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
threshold (`float`, *optional*, defaults to 0.5):
The generated sequence ends when the predicted stop token probability exceeds this value.
minlenratio (`float`, *optional*, defaults to 0.0):
Used to calculate the minimum required length for the output sequence.
maxlenratio (`float`, *optional*, defaults to 20.0):
Used to calculate the maximum allowed length for the output sequence.
vocoder (`nn.Module`, *optional*, defaults to `None`):
The vocoder that converts the mel spectrogram into a speech waveform. If `None`, the output is the mel
spectrogram.
output_cross_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of the decoder's cross-attention layers.
return_output_lengths (`bool`, *optional*, defaults to `False`):
Whether or not to return the concrete spectrogram/waveform lengths.
Returns:
`tuple(torch.FloatTensor)` comprising various elements depending on the inputs:
- when `return_output_lengths` is False
- **spectrogram** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
`(output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrogram.
- **waveform** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
`(num_frames,)` -- The predicted speech waveform.
- **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
`torch.FloatTensor` of shape `(config.decoder_layers, config.decoder_attention_heads,
output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
- when `return_output_lengths` is True
- **spectrograms** (*optional*, returned when no `vocoder` is provided) `torch.FloatTensor` of shape
`(batch_size, output_sequence_length, config.num_mel_bins)` -- The predicted log-mel spectrograms that
are padded to the maximum length.
- **spectrogram_lengths** (*optional*, returned when no `vocoder` is provided) `list[Int]` -- A list of
all the concrete lengths for each spectrogram.
- **waveforms** (*optional*, returned when a `vocoder` is provided) `torch.FloatTensor` of shape
`(batch_size, num_frames)` -- The predicted speech waveforms that are padded to the maximum length.
- **waveform_lengths** (*optional*, returned when a `vocoder` is provided) `list[Int]` -- A list of all
the concrete lengths for each waveform.
- **cross_attentions** (*optional*, returned when `output_cross_attentions` is `True`)
`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, config.decoder_attention_heads,
output_sequence_length, input_sequence_length)` -- The outputs of the decoder's cross-attention layers.
"""
if speaker_embeddings is not None:
batch_size = input_ids.size(0)
if speaker_embeddings.size(0) != batch_size:
if speaker_embeddings.size(0) == 1:
speaker_embeddings = speaker_embeddings.repeat(batch_size, 1)
else:
raise ValueError(
"The first dimension of speaker_embeddings must be either 1 or the same as batch size."
)
return _generate_speech(
self,
input_ids,
speaker_embeddings,
attention_mask,
threshold,
minlenratio,
maxlenratio,
vocoder,
output_cross_attentions,
return_output_lengths,
)
@auto_docstring(
custom_intro="""
SpeechT5 Model with a speech encoder and a speech decoder.
"""
)
| SpeechT5ForTextToSpeech |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 185363,
"end": 189086
} | class ____(Response):
"""
Response of events.scalar_metrics_iter_raw endpoint.
:param variants: Raw data points for each variant
:type variants: dict
:param total: Total data points count. If count_total is false, null is
returned
:type total: int
:param returned: Number of data points returned in this call. If 0 results were
returned, no more results are available
:type returned: int
:param scroll_id: Scroll ID. Use to get more data points when calling this
endpoint again
:type scroll_id: str
"""
_service = "events"
_action = "scalar_metrics_iter_raw"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"returned": {
"description": "Number of data points returned in this call. If 0 results were returned, no more results are available",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID. Use to get more data points when calling this endpoint again",
"type": ["string", "null"],
},
"total": {
"description": "Total data points count. If count_total is false, null is returned",
"type": ["integer", "null"],
},
"variants": {
"additionalProperties": True,
"description": "Raw data points for each variant",
"type": ["object", "null"],
},
},
"type": "object",
}
def __init__(
self,
variants: Optional[dict] = None,
total: Optional[int] = None,
returned: Optional[int] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(ScalarMetricsIterRawResponse, self).__init__(**kwargs)
self.variants = variants
self.total = total
self.returned = returned
self.scroll_id = scroll_id
@schema_property("variants")
def variants(self) -> Optional[dict]:
return self._property_variants
@variants.setter
def variants(self, value: Optional[dict]) -> None:
if value is None:
self._property_variants = None
return
self.assert_isinstance(value, "variants", (dict,))
self._property_variants = value
@schema_property("total")
def total(self) -> Optional[int]:
return self._property_total
@total.setter
def total(self, value: Optional[int]) -> None:
if value is None:
self._property_total = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "total", six.integer_types)
self._property_total = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| ScalarMetricsIterRawResponse |
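The `scroll_id`/`returned` pair above implies the usual cursor loop: re-issue the call with the last scroll ID until a page comes back empty. A hedged sketch; `fetch` is a hypothetical stand-in for whatever issues the actual `events.scalar_metrics_iter_raw` request, which this row does not show.

def iter_raw_pages(fetch):
    """Yield the variants dict of each page until the server is exhausted."""
    scroll_id = None
    while True:
        resp = fetch(scroll_id)  # -> a ScalarMetricsIterRawResponse
        yield resp.variants or {}
        if not resp.returned:    # returned == 0 means no more results
            break
        scroll_id = resp.scroll_id  # pass the cursor back on the next call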
python | davidhalter__jedi | test/completion/django.py | {
"start": 4537,
"end": 6505
} | class ____(BusinessModel):
text_field = models.IntegerField()
new_field = models.FloatField()
inherited = Inherited()
#? int()
inherited.text_field
#? str()
inherited.char_field
#? float()
inherited.new_field
#?
Inherited.category_fk2.category_name
#? str()
inherited.category_fk2.category_name
#? str()
Inherited.objects.get().char_field
#? int()
Inherited.objects.get().text_field
#? float()
Inherited.objects.get().new_field
# -----------------
# Model methods
# -----------------
#? ['from_db']
Inherited.from_db
#? ['validate_unique']
Inherited.validate_uniqu
#? ['validate_unique']
Inherited().validate_unique
# -----------------
# Django Auth
# -----------------
#? str()
User().email
#? str()
User.objects.get().email
# -----------------
# values & values_list (dave is too lazy to implement it)
# -----------------
#?
BusinessModel.objects.values_list('char_field')[0]
#? dict()
BusinessModel.objects.values('char_field')[0]
#?
BusinessModel.objects.values('char_field')[0]['char_field']
# -----------------
# Completion
# -----------------
#? 19 ['text_field=']
Inherited(text_fiel)
#? 18 ['new_field=']
Inherited(new_fiel)
#? 19 ['char_field=']
Inherited(char_fiel)
#? 19 ['email_field=']
Inherited(email_fie)
#? 19 []
Inherited(unidentif)
#? 21 ['category_fk=', 'category_fk2=', 'category_fk3=', 'category_fk4=', 'category_fk5=']
Inherited(category_fk)
#? 21 ['attached_o2o=']
Inherited(attached_o2)
#? 18 ['tags_m2m=']
Inherited(tags_m2m)
#? 32 ['tags_m2m=']
Inherited.objects.create(tags_m2)
#? 32 ['tags_m2m=']
Inherited.objects.filter(tags_m2)
#? 35 ['char_field=']
Inherited.objects.exclude(char_fiel)
#? 34 ['char_field=']
Inherited.objects.update(char_fiel)
#? 32 ['email_field=']
Inherited.objects.get(email_fiel)
#? 44 ['category_fk2=']
Inherited.objects.get_or_create(category_fk2)
#? 44 ['uuid_field=']
Inherited.objects.update_or_create(uuid_fiel)
#? 48 ['char_field=']
Inherited.objects.exclude(pk=3).filter(char_fiel)
| Inherited |
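For context, the `#?` comments above are jedi's integration-test markers: `#? int()` asserts the inferred type of the next line, and `#? 19 ['text_field=']` asserts the completion list with the cursor at column 19. The fields referenced imply a base model roughly like the hypothetical sketch below; the real `BusinessModel` is defined earlier in the test file and is not shown in this row.

from django.db import models

# Hypothetical reconstruction, only to make the assertions above readable;
# field names come from the tests, everything else is assumed.
class BusinessModel(models.Model):
    char_field = models.CharField(max_length=100)  # inferred as str()
    email_field = models.EmailField()              # completes as email_field=
    category_fk2 = models.ForeignKey("Category", on_delete=models.CASCADE)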
python | TheAlgorithms__Python | web_programming/emails_from_url.py | {
"start": 430,
"end": 3424
} | class ____(HTMLParser):
def __init__(self, domain: str) -> None:
super().__init__()
self.urls: list[str] = []
self.domain = domain
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
"""
Parse HTML and collect URLs from anchor-tag href attributes.
"""
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# Keep href values that are defined, non-empty, not "#", and not already collected.
if name == "href" and value not in (*self.urls, "", "#"):
url = parse.urljoin(self.domain, value)
self.urls.append(url)
# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
"""
Get the main domain name (e.g. example.com).
>>> get_domain_name("https://a.b.c.d/e/f?g=h,i=j#k")
'c.d'
>>> get_domain_name("Not a URL!")
''
"""
return ".".join(get_sub_domain_name(url).split(".")[-2:])
# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
"""
>>> get_sub_domain_name("https://a.b.c.d/e/f?g=h,i=j#k")
'a.b.c.d'
>>> get_sub_domain_name("Not a URL!")
''
"""
return parse.urlparse(url).netloc
def emails_from_url(url: str = "https://github.com") -> list[str]:
"""
Take a URL and return a sorted list of the valid email addresses found on it.
"""
# Get the base domain from the url
domain = get_domain_name(url)
# Initialize the parser
parser = Parser(domain)
try:
# Open URL
r = httpx.get(url, timeout=10, follow_redirects=True)
# pass the raw HTML to the parser to get links
parser.feed(r.text)
# Get links and loop through
valid_emails = set()
for link in parser.urls:
# Check if the link is already absolute
if not link.startswith("http://") and not link.startswith("https://"):
# Prepend protocol only if link starts with domain, normalize otherwise
if link.startswith(domain):
link = f"https://{link}"
else:
link = parse.urljoin(f"https://{domain}", link)
try:
read = httpx.get(link, timeout=10, follow_redirects=True)
# Get the valid email.
emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
# If not in list then append it.
for email in emails:
valid_emails.add(email)
except ValueError:
pass
except ValueError:
raise SystemExit(1)
# Finally return a sorted list of email addresses with no duplicates.
return sorted(valid_emails)
if __name__ == "__main__":
emails = emails_from_url("https://github.com")
print(f"{len(emails)} emails found:")
print("\n".join(sorted(emails)))
| Parser |
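The two domain helpers above are thin wrappers over `urllib.parse`; here is a standalone, runnable check of the same logic using only the stdlib:

from urllib import parse

url = "https://a.b.c.d/e/f?g=h"
netloc = parse.urlparse(url).netloc        # 'a.b.c.d'  (sub-domain name)
domain = ".".join(netloc.split(".")[-2:])  # 'c.d'      (main domain name)
assert (netloc, domain) == ("a.b.c.d", "c.d")

# urljoin() is what normalizes relative hrefs in handle_starttag():
assert parse.urljoin("https://c.d", "/about") == "https://c.d/about"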
python | pandas-dev__pandas | pandas/tseries/frequencies.py | {
"start": 4838,
"end": 13031
} | class ____:
"""
Not sure if I can avoid the state machine here
"""
def __init__(self, index) -> None:
self.index = index
self.i8values = index.asi8
# For get_unit_from_dtype we need the dtype to the underlying ndarray,
# which for tz-aware is not the same as index.dtype
if isinstance(index, ABCIndex):
# error: Item "ndarray[Any, Any]" of "Union[ExtensionArray,
# ndarray[Any, Any]]" has no attribute "_ndarray"
self._creso = get_unit_from_dtype(
index._data._ndarray.dtype # type: ignore[union-attr]
)
else:
# otherwise we have DTA/TDA
self._creso = get_unit_from_dtype(index._ndarray.dtype)
# This moves the values, which are implicitly in UTC, to the
# timezone so they are in local time
if hasattr(index, "tz"):
if index.tz is not None:
self.i8values = tz_convert_from_utc(
self.i8values, index.tz, reso=self._creso
)
if len(index) < 3:
raise ValueError("Need at least 3 dates to infer frequency")
self.is_monotonic = (
self.index._is_monotonic_increasing or self.index._is_monotonic_decreasing
)
@cache_readonly
def deltas(self) -> npt.NDArray[np.int64]:
return unique_deltas(self.i8values)
@cache_readonly
def deltas_asi8(self) -> npt.NDArray[np.int64]:
# NB: we cannot use self.i8values here because we may have converted
# the tz in __init__
return unique_deltas(self.index.asi8)
@cache_readonly
def is_unique(self) -> bool:
return len(self.deltas) == 1
@cache_readonly
def is_unique_asi8(self) -> bool:
return len(self.deltas_asi8) == 1
def get_freq(self) -> str | None:
"""
Find the appropriate frequency string to describe the inferred
frequency of self.i8values
Returns
-------
str or None
"""
if not self.is_monotonic or not self.index._is_unique:
return None
delta = self.deltas[0]
ppd = periods_per_day(self._creso)
if delta and _is_multiple(delta, ppd):
return self._infer_daily_rule()
# Business hourly, maybe. 17: one day / 65: one weekend
if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
return "bh"
# Possibly intraday frequency. Here we use the
# original .asi8 values as the modified values
# will not work around DST transitions. See #8772
if not self.is_unique_asi8:
return None
delta = self.deltas_asi8[0]
pph = ppd // 24
ppm = pph // 60
pps = ppm // 60
if _is_multiple(delta, pph):
# Hours
return _maybe_add_count("h", delta / pph)
elif _is_multiple(delta, ppm):
# Minutes
return _maybe_add_count("min", delta / ppm)
elif _is_multiple(delta, pps):
# Seconds
return _maybe_add_count("s", delta / pps)
elif _is_multiple(delta, (pps // 1000)):
# Milliseconds
return _maybe_add_count("ms", delta / (pps // 1000))
elif _is_multiple(delta, (pps // 1_000_000)):
# Microseconds
return _maybe_add_count("us", delta / (pps // 1_000_000))
else:
# Nanoseconds
return _maybe_add_count("ns", delta)
@cache_readonly
def day_deltas(self) -> list[int]:
ppd = periods_per_day(self._creso)
return [x / ppd for x in self.deltas]
@cache_readonly
def hour_deltas(self) -> list[int]:
pph = periods_per_day(self._creso) // 24
return [x / pph for x in self.deltas]
@cache_readonly
def fields(self) -> np.ndarray: # structured array of fields
return build_field_sarray(self.i8values, reso=self._creso)
@cache_readonly
def rep_stamp(self) -> Timestamp:
return Timestamp(self.i8values[0], unit=self.index.unit)
def month_position_check(self) -> str | None:
return month_position_check(self.fields, self.index.dayofweek)
@cache_readonly
def mdiffs(self) -> npt.NDArray[np.int64]:
nmonths = self.fields["Y"] * 12 + self.fields["M"]
return unique_deltas(nmonths.astype("i8"))
@cache_readonly
def ydiffs(self) -> npt.NDArray[np.int64]:
return unique_deltas(self.fields["Y"].astype("i8"))
def _infer_daily_rule(self) -> str | None:
annual_rule = self._get_annual_rule()
if annual_rule:
nyears = self.ydiffs[0]
month = MONTH_ALIASES[self.rep_stamp.month]
alias = f"{annual_rule}-{month}"
return _maybe_add_count(alias, nyears)
quarterly_rule = self._get_quarterly_rule()
if quarterly_rule:
nquarters = self.mdiffs[0] / 3
mod_dict = {0: 12, 2: 11, 1: 10}
month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]]
alias = f"{quarterly_rule}-{month}"
return _maybe_add_count(alias, nquarters)
monthly_rule = self._get_monthly_rule()
if monthly_rule:
return _maybe_add_count(monthly_rule, self.mdiffs[0])
if self.is_unique:
return self._get_daily_rule()
if self._is_business_daily():
return "B"
wom_rule = self._get_wom_rule()
if wom_rule:
return wom_rule
return None
def _get_daily_rule(self) -> str | None:
ppd = periods_per_day(self._creso)
days = self.deltas[0] / ppd
if days % 7 == 0:
# Weekly
wd = int_to_weekday[self.rep_stamp.weekday()]
alias = f"W-{wd}"
return _maybe_add_count(alias, days / 7)
else:
return _maybe_add_count("D", days)
def _get_annual_rule(self) -> str | None:
if len(self.ydiffs) > 1:
return None
if len(unique(self.fields["M"])) > 1:
return None
pos_check = self.month_position_check()
if pos_check is None:
return None
else:
return {"cs": "YS", "bs": "BYS", "ce": "YE", "be": "BYE"}.get(pos_check)
def _get_quarterly_rule(self) -> str | None:
if len(self.mdiffs) > 1:
return None
if not self.mdiffs[0] % 3 == 0:
return None
pos_check = self.month_position_check()
if pos_check is None:
return None
else:
return {"cs": "QS", "bs": "BQS", "ce": "QE", "be": "BQE"}.get(pos_check)
def _get_monthly_rule(self) -> str | None:
if len(self.mdiffs) > 1:
return None
pos_check = self.month_position_check()
if pos_check is None:
return None
else:
return {"cs": "MS", "bs": "BMS", "ce": "ME", "be": "BME"}.get(pos_check)
def _is_business_daily(self) -> bool:
# quick check: cannot be business daily
if self.day_deltas != [1, 3]:
return False
# probably business daily, but need to confirm
first_weekday = self.index[0].weekday()
shifts = np.diff(self.i8values)
ppd = periods_per_day(self._creso)
shifts = np.floor_divide(shifts, ppd)
weekdays = np.mod(first_weekday + np.cumsum(shifts), 7)
return bool(
np.all(
((weekdays == 0) & (shifts == 3))
| ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))
)
)
def _get_wom_rule(self) -> str | None:
weekdays = unique(self.index.weekday)
if len(weekdays) > 1:
return None
week_of_months = unique((self.index.day - 1) // 7)
# Only attempt to infer up to WOM-4. See #9425
week_of_months = week_of_months[week_of_months < 4]
if len(week_of_months) == 0 or len(week_of_months) > 1:
return None
# get which week
week = week_of_months[0] + 1
wd = int_to_weekday[weekdays[0]]
return f"WOM-{week}{wd}"
| _FrequencyInferer |
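This class backs pandas' public `pd.infer_freq`, so the rules above can be checked from the outside. A minimal sketch; the lowercase `h` alias assumes a recent pandas (2.2+), earlier versions report `H`:

import pandas as pd

print(pd.infer_freq(pd.date_range("2024-01-01", periods=5, freq="B")))  # 'B'
print(pd.infer_freq(pd.date_range("2024-01-01", periods=5, freq="W")))  # 'W-SUN'
print(pd.infer_freq(pd.date_range("2024-01-01", periods=5, freq="h")))  # 'h'

# Fewer than three timestamps trips the guard in __init__ above:
# pd.infer_freq(...) raises "Need at least 3 dates to infer frequency".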
python | bottlepy__bottle | test/test_environ.py | {
"start": 369,
"end": 19717
} | class ____(unittest.TestCase):
def test_app_property(self):
e = {}
r = BaseRequest(e)
self.assertRaises(RuntimeError, lambda: r.app)
e.update({'bottle.app': 5})
self.assertEqual(r.app, 5)
def test_route_property(self):
e = {'bottle.route': 5}
r = BaseRequest(e)
self.assertEqual(r.route, 5)
def test_url_for_property(self):
e = {}
r = BaseRequest(e)
self.assertRaises(RuntimeError, lambda: r.url_args)
e.update({'route.url_args': {'a': 5}})
self.assertEqual(r.url_args, {'a': 5})
def test_path(self):
""" PATH_INFO normalization. """
# Legal paths
tests = [('', '/'), ('x','/x'), ('x/', '/x/'), ('/x', '/x'), ('/x/', '/x/')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'PATH_INFO': raw}).path)
# Strange paths
tests = [('///', '/'), ('//x','/x')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'PATH_INFO': raw}).path)
# No path at all
self.assertEqual('/', BaseRequest({}).path)
def test_method(self):
self.assertEqual(BaseRequest({}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'GET'}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'GeT'}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'get'}).method, 'GET')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'POst'}).method, 'POST')
self.assertEqual(BaseRequest({'REQUEST_METHOD':'FanTASY'}).method, 'FANTASY')
def test_script_name(self):
""" SCRIPT_NAME normalization. """
# Legal paths
tests = [('', '/'), ('x','/x/'), ('x/', '/x/'), ('/x', '/x/'), ('/x/', '/x/')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'SCRIPT_NAME': raw}).script_name)
# Strange paths
tests = [('///', '/'), ('///x///','/x/')]
for raw, norm in tests:
self.assertEqual(norm, BaseRequest({'SCRIPT_NAME': raw}).script_name)
# No path at all
self.assertEqual('/', BaseRequest({}).script_name)
def test_pathshift(self):
""" Request.path_shift() """
def test_shift(s, p, c):
request = BaseRequest({'SCRIPT_NAME': s, 'PATH_INFO': p})
request.path_shift(c)
return [request['SCRIPT_NAME'], request.path]
self.assertEqual(['/a/b', '/c/d'], test_shift('/a/b', '/c/d', 0))
self.assertEqual(['/a/b', '/c/d/'], test_shift('/a/b', '/c/d/', 0))
self.assertEqual(['/a/b/c', '/d'], test_shift('/a/b', '/c/d', 1))
self.assertEqual(['/a', '/b/c/d'], test_shift('/a/b', '/c/d', -1))
self.assertEqual(['/a/b/c', '/d/'], test_shift('/a/b', '/c/d/', 1))
self.assertEqual(['/a', '/b/c/d/'], test_shift('/a/b', '/c/d/', -1))
self.assertEqual(['/a/b/c', '/d/'], test_shift('/a/b/', '/c/d/', 1))
self.assertEqual(['/a', '/b/c/d/'], test_shift('/a/b/', '/c/d/', -1))
self.assertEqual(['/a/b/c/d', '/'], test_shift('/', '/a/b/c/d', 4))
self.assertEqual(['/', '/a/b/c/d/'], test_shift('/a/b/c/d', '/', -4))
self.assertRaises(AssertionError, test_shift, '/a/b', '/c/d', 3)
self.assertRaises(AssertionError, test_shift, '/a/b', '/c/d', -3)
def test_url(self):
""" Environ: URL building """
request = BaseRequest({'HTTP_HOST':'example.com'})
self.assertEqual('http://example.com/', request.url)
request = BaseRequest({'SERVER_NAME':'example.com'})
self.assertEqual('http://example.com/', request.url)
request = BaseRequest({'SERVER_NAME':'example.com', 'SERVER_PORT':'81'})
self.assertEqual('http://example.com:81/', request.url)
request = BaseRequest({'wsgi.url_scheme':'https', 'SERVER_NAME':'example.com'})
self.assertEqual('https://example.com/', request.url)
request = BaseRequest({'HTTP_HOST':'example.com', 'PATH_INFO':'/path',
'QUERY_STRING':'1=b&c=d', 'SCRIPT_NAME':'/sp'})
self.assertEqual('http://example.com/sp/path?1=b&c=d', request.url)
request = BaseRequest({'HTTP_HOST':'example.com', 'PATH_INFO':'/pa th',
'SCRIPT_NAME':'/s p'})
self.assertEqual('http://example.com/s%20p/pa%20th', request.url)
def test_dict_access(self):
""" Environ: request objects are environment dicts """
e = {}
wsgiref.util.setup_testing_defaults(e)
request = BaseRequest(e)
self.assertEqual(list(request), list(e.keys()))
self.assertEqual(len(request), len(e))
for k, v in e.items():
self.assertTrue(k in request)
self.assertEqual(request[k], v)
request[k] = 'test'
self.assertEqual(request[k], 'test')
del request['PATH_INFO']
self.assertTrue('PATH_INFO' not in request)
def test_readonly_environ(self):
request = BaseRequest({'bottle.request.readonly':True})
def test(): request['x']='y'
self.assertRaises(KeyError, test)
def test_header_access(self):
""" Environ: Request objects decode headers """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['HTTP_SOME_HEADER'] = 'some value'
request = BaseRequest(e)
request['HTTP_SOME_OTHER_HEADER'] = 'some other value'
self.assertTrue('Some-Header' in request.headers)
self.assertTrue(request.headers['Some-Header'] == 'some value')
self.assertTrue(request.headers['Some-Other-Header'] == 'some other value')
def test_header_access_special(self):
e = {}
wsgiref.util.setup_testing_defaults(e)
request = BaseRequest(e)
request['CONTENT_TYPE'] = 'test'
request['CONTENT_LENGTH'] = '123'
self.assertEqual(request.headers['Content-Type'], 'test')
self.assertEqual(request.headers['Content-Length'], '123')
def test_cookie_dict(self):
""" Environ: Cookie dict """
t = dict()
t['a=a'] = {'a': 'a'}
t['a=a; b=b'] = {'a': 'a', 'b':'b'}
t['a=a; a=b'] = {'a': 'b'}
for k, v in t.items():
request = BaseRequest({'HTTP_COOKIE': k})
for n in v:
self.assertEqual(v[n], request.cookies[n])
self.assertEqual(v[n], request.get_cookie(n))
def test_get(self):
""" Environ: GET data """
qs = touni(tob('a=a&a=1&b=b&c=c&cn=%e7%93%b6'), 'latin1')
request = BaseRequest({'QUERY_STRING':qs})
self.assertTrue('a' in request.query)
self.assertTrue('b' in request.query)
self.assertEqual(['a','1'], request.query.getall('a'))
self.assertEqual(['b'], request.query.getall('b'))
self.assertEqual('1', request.query['a'])
self.assertEqual('b', request.query['b'])
self.assertEqual('瓶', request.query['cn'])
self.assertEqual('瓶', request.query.cn)
def test_post(self):
""" Environ: POST data """
sq = tob('a=a&a=1&b=b&c=&d&cn=%e7%93%b6')
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(sq)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(sq))
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertTrue('a' in request.POST)
self.assertTrue('b' in request.POST)
self.assertEqual(['a','1'], request.POST.getall('a'))
self.assertEqual(['b'], request.POST.getall('b'))
self.assertEqual('1', request.POST['a'])
self.assertEqual('b', request.POST['b'])
self.assertEqual('', request.POST['c'])
self.assertEqual('', request.POST['d'])
self.assertEqual('瓶', request.POST['cn'])
self.assertEqual('瓶', request.POST.cn)
def test_bodypost(self):
sq = tob('foobar')
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(sq)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(sq))
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual('', request.POST['foobar'])
def test_body_noclose(self):
""" Test that the body file handler is not closed after request.POST """
sq = tob('a=a&a=1&b=b&c=&d')
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(sq)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(sq))
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual(sq, request.body.read())
request.POST # This caused a body.close() with Python 3.x
self.assertEqual(sq, request.body.read())
def test_params(self):
""" Environ: GET and POST are combined in request.param """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('b=b&c=p'))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = '7'
e['QUERY_STRING'] = 'a=a&c=g'
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual(['a','b','c'], sorted(request.params.keys()))
self.assertEqual('p', request.params['c'])
def test_getpostleak(self):
""" Environ: GET and POST should not leak into each other """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('b=b'))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = '3'
e['QUERY_STRING'] = 'a=a'
e['REQUEST_METHOD'] = "POST"
request = BaseRequest(e)
self.assertEqual(['a'], list(request.GET.keys()))
self.assertEqual(['b'], list(request.POST.keys()))
def test_body(self):
""" Environ: Request.body should behave like a file object factory """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('abc'))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(3)
request = BaseRequest(e)
self.assertEqual(tob('abc'), request.body.read())
self.assertEqual(tob('abc'), request.body.read(3))
self.assertEqual(tob('abc'), request.body.readline())
self.assertEqual(tob('abc'), request.body.readline(3))
def test_bigbody(self):
""" Environ: Request.body should handle big uploads using files """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('x')*1024*1000)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(1024*1000)
request = BaseRequest(e)
self.assertTrue(hasattr(request.body, 'fileno'))
self.assertEqual(1024*1000, len(request.body.read()))
self.assertEqual(1024, len(request.body.read(1024)))
self.assertEqual(1024*1000, len(request.body.readline()))
self.assertEqual(1024, len(request.body.readline(1024)))
def test_tobigbody(self):
""" Environ: Request.body should truncate to Content-Length bytes """
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob('x')*1024)
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = '42'
request = BaseRequest(e)
self.assertEqual(42, len(request.body.read()))
self.assertEqual(42, len(request.body.read(1024)))
self.assertEqual(42, len(request.body.readline()))
self.assertEqual(42, len(request.body.readline(1024)))
def _test_chunked(self, body, expect):
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(body))
e['wsgi.input'].seek(0)
e['HTTP_TRANSFER_ENCODING'] = 'chunked'
if isinstance(expect, str):
self.assertEqual(tob(expect), BaseRequest(e).body.read())
else:
self.assertRaises(expect, lambda: BaseRequest(e).body)
def test_chunked(self):
self._test_chunked('1\r\nx\r\nff\r\n' + 'y'*255 + '\r\n0\r\n',
'x' + 'y'*255)
self._test_chunked('8\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
self._test_chunked('0\r\n', '')
def test_chunked_meta_fields(self):
self._test_chunked('8 ; foo\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
self._test_chunked('8;foo\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
self._test_chunked('8;foo=bar\r\nxxxxxxxx\r\n0\r\n','xxxxxxxx')
def test_chunked_not_terminated(self):
self._test_chunked('1\r\nx\r\n', HTTPError)
def test_chunked_wrong_size(self):
self._test_chunked('2\r\nx\r\n', HTTPError)
def test_chunked_illegal_size(self):
self._test_chunked('x\r\nx\r\n', HTTPError)
def test_chunked_not_chunked_at_all(self):
self._test_chunked('abcdef', HTTPError)
def test_multipart(self):
""" Environ: POST (multipart files and multible values per key) """
fields = [('field1','value1'), ('field2','value2'), ('field2','万难')]
files = [('file1','filename1.txt','content1'), ('万难','万难foo.py', 'ä\nö\rü')]
e = tools.multipart_environ(fields=fields, files=files)
request = BaseRequest(e)
# File content
self.assertTrue('file1' in request.POST)
self.assertTrue('file1' in request.files)
self.assertTrue('file1' not in request.forms)
cmp = tob('content1') if sys.version_info >= (3,2,0) else 'content1'
self.assertEqual(cmp, request.POST['file1'].file.read())
# File name and meta data
self.assertTrue('万难' in request.POST)
self.assertTrue('万难' in request.files)
self.assertTrue('万难' not in request.forms)
self.assertEqual('foo.py', request.POST['万难'].filename)
self.assertTrue(request.files['万难'])
self.assertFalse(request.files.file77)
# UTF-8 files
x = request.POST['万难'].file.read()
if (3,2,0) > sys.version_info >= (3,0,0):
x = x.encode('utf8')
self.assertEqual(tob('ä\nö\rü'), x)
# No file
self.assertTrue('file3' not in request.POST)
self.assertTrue('file3' not in request.files)
self.assertTrue('file3' not in request.forms)
# Field (single)
self.assertEqual('value1', request.POST['field1'])
self.assertTrue('field1' not in request.files)
self.assertEqual('value1', request.forms['field1'])
self.assertEqual('万难', request.forms['field2'])
self.assertEqual(touni('万难'), request.forms.field2)
# Field (multi)
self.assertEqual(2, len(request.POST.getall('field2')))
self.assertEqual(['value2', '万难'], request.POST.getall('field2'))
self.assertEqual(['value2', '万难'], request.forms.getall('field2'))
self.assertTrue('field2' not in request.files)
def test_json_empty(self):
""" Environ: Request.json property with empty body. """
self.assertEqual(BaseRequest({}).json, None)
def test_json_noheader(self):
""" Environ: Request.json property with missing content-type header. """
test = dict(a=5, b='test', c=[1,2,3])
e = {}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertEqual(BaseRequest(e).json, None)
def test_json_tobig(self):
""" Environ: Request.json property with huge body. """
test = dict(a=5, tobig='x' * bottle.BaseRequest.MEMFILE_MAX)
e = {'CONTENT_TYPE': 'application/json'}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertRaises(HTTPError, lambda: BaseRequest(e).json)
def test_json_valid(self):
""" Environ: Request.json property. """
test = dict(a=5, b='test', c=[1,2,3])
e = {'CONTENT_TYPE': 'application/json; charset=UTF-8'}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertEqual(BaseRequest(e).json, test)
def test_json_forged_header_issue616(self):
test = dict(a=5, b='test', c=[1,2,3])
e = {'CONTENT_TYPE': 'text/plain;application/json'}
wsgiref.util.setup_testing_defaults(e)
e['wsgi.input'].write(tob(json_dumps(test)))
e['wsgi.input'].seek(0)
e['CONTENT_LENGTH'] = str(len(json_dumps(test)))
self.assertEqual(BaseRequest(e).json, None)
def test_json_header_empty_body(self):
"""Request Content-Type is application/json but body is empty"""
e = {'CONTENT_TYPE': 'application/json'}
wsgiref.util.setup_testing_defaults(e)
e['CONTENT_LENGTH'] = "0"
self.assertEqual(BaseRequest(e).json, None)
def test_isajax(self):
e = {}
wsgiref.util.setup_testing_defaults(e)
self.assertFalse(BaseRequest(e.copy()).is_ajax)
e['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
self.assertTrue(BaseRequest(e.copy()).is_ajax)
def test_auth(self):
user, pwd = 'marc', 'secret'
basic = touni(base64.b64encode(tob('%s:%s' % (user, pwd))))
r = BaseRequest({})
self.assertEqual(r.auth, None)
r.environ['HTTP_AUTHORIZATION'] = 'basic %s' % basic
self.assertEqual(r.auth, (user, pwd))
r.environ['REMOTE_USER'] = user
self.assertEqual(r.auth, (user, pwd))
del r.environ['HTTP_AUTHORIZATION']
self.assertEqual(r.auth, (user, None))
def test_remote_route(self):
ips = ['1.2.3.4', '2.3.4.5', '3.4.5.6']
r = BaseRequest({})
self.assertEqual(r.remote_route, [])
r.environ['HTTP_X_FORWARDED_FOR'] = ', '.join(ips)
self.assertEqual(r.remote_route, ips)
r.environ['REMOTE_ADDR'] = ips[1]
self.assertEqual(r.remote_route, ips)
del r.environ['HTTP_X_FORWARDED_FOR']
self.assertEqual(r.remote_route, [ips[1]])
def test_remote_addr(self):
ips = ['1.2.3.4', '2.3.4.5', '3.4.5.6']
r = BaseRequest({})
self.assertEqual(r.remote_addr, None)
r.environ['HTTP_X_FORWARDED_FOR'] = ', '.join(ips)
self.assertEqual(r.remote_addr, ips[0])
r.environ['REMOTE_ADDR'] = ips[1]
self.assertEqual(r.remote_addr, ips[0])
del r.environ['HTTP_X_FORWARDED_FOR']
self.assertEqual(r.remote_addr, ips[1])
def test_user_defined_attributes(self):
for cls in (BaseRequest, LocalRequest):
r = cls()
# New attributes go to the environ dict.
r.foo = 'somevalue'
self.assertEqual(r.foo, 'somevalue')
self.assertTrue('somevalue' in r.environ.values())
# Attributes are read-only once set.
self.assertRaises(AttributeError, setattr, r, 'foo', 'x')
# Properties raise AttributeError.
self.assertRaises(AttributeError, setattr, r, 'body', 'x')
# Unknown attributes raise AttributeError.
self.assertRaises(AttributeError, getattr, r, 'somevalue')
| TestRequest |
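The environ-dict pattern the tests rely on works the same outside the suite; a minimal runnable sketch, assuming bottle is installed:

import wsgiref.util
from bottle import BaseRequest

e = {"QUERY_STRING": "a=1&b=two"}
wsgiref.util.setup_testing_defaults(e)  # fills in PATH_INFO, REQUEST_METHOD, ...

r = BaseRequest(e)
assert r.method == "GET" and r.path == "/"
assert r.query["a"] == "1" and r.query.b == "two"  # dict or attribute access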
python | doocs__leetcode | solution/1500-1599/1529.Minimum Suffix Flips/Solution.py | {
"start": 0,
"end": 180
class ____:
    def minFlips(self, target: str) -> int:
        # ans counts suffix flips so far; its parity (ans & 1) is the current
        # value of every bit not yet fixed, so flip when the target bit differs.
        ans = 0
        for v in target:
            if (ans & 1) ^ int(v):
                ans += 1
        return ans
| Solution |
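A quick hand-trace on LeetCode's sample (`target = "10111"`): the string starts as all zeros, and each counted operation flips the whole suffix, so `(ans & 1)` tracks the current value of every bit not yet fixed. Three flips (at indices 0, 1, 2) reproduce the target, matching the published answer. A usage check, run alongside the class above:

assert Solution().minFlips("10111") == 3  # flips at indices 0, 1, 2
assert Solution().minFlips("101") == 3
assert Solution().minFlips("00000") == 0  # already all zeros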