language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 964354,
"end": 964634
} | class ____(sgqlc.types.Type):
"""A GitHub Security Advisory Reference"""
__schema__ = github_schema
__field_names__ = ("url",)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""A publicly accessible reference"""
| SecurityAdvisoryReference |
python | celery__celery | t/unit/backends/test_base.py | {
"start": 1584,
"end": 1744
} | class ____:
def test_nulldict(self):
x = _nulldict()
x['foo'] = 1
x.update(foo=1, bar=2)
x.setdefault('foo', 3)
| test_nulldict |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 122128,
"end": 122296
} | class ____:
xlThemeFontMajor = 1 # from enum XlThemeFont
xlThemeFontMinor = 2 # from enum XlThemeFont
xlThemeFontNone = 0 # from enum XlThemeFont
| ThemeFont |
python | keras-team__keras | keras/src/saving/saving_api_test.py | {
"start": 4262,
"end": 7837
} | class ____(test_case.TestCase):
def get_model(self, dtype=None):
return Sequential(
[
layers.Dense(5, input_shape=(3,), dtype=dtype),
layers.Softmax(),
]
)
@parameterized.named_parameters(
[
{"testcase_name": "bfloat16", "dtype": "bfloat16"},
{"testcase_name": "float16", "dtype": "float16"},
{"testcase_name": "float32", "dtype": "float32"},
{"testcase_name": "float64", "dtype": "float64"},
]
)
def test_basic_load(self, dtype):
"""Test basic model loading."""
model = self.get_model(dtype)
filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
saving_api.save_model(model, filepath)
loaded_model = saving_api.load_model(filepath)
x = np.random.uniform(size=(10, 3))
self.assertEqual(loaded_model.weights[0].dtype, dtype)
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
def test_load_unsupported_format(self):
"""Test loading model with unsupported format."""
with self.assertRaisesRegex(ValueError, "File format not supported"):
saving_api.load_model("model.pkl")
def test_load_keras_not_zip(self):
"""Test loading keras file that's not a zip."""
with self.assertRaisesRegex(ValueError, "File not found"):
saving_api.load_model("not_a_zip.keras")
def test_load_h5_format(self):
"""Test loading model in h5 format."""
model = self.get_model()
filepath_h5 = os.path.join(self.get_temp_dir(), "test_model.h5")
saving_api.save_model(model, filepath_h5)
loaded_model = saving_api.load_model(filepath_h5)
x = np.random.uniform(size=(10, 3))
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
os.remove(filepath_h5)
def test_load_model_with_custom_objects(self):
"""Test loading model with custom objects."""
class CustomLayer(layers.Layer):
def call(self, inputs):
return inputs
model = Sequential([CustomLayer(input_shape=(3,))])
filepath = os.path.join(self.get_temp_dir(), "custom_model.keras")
model.save(filepath)
loaded_model = saving_api.load_model(
filepath, custom_objects={"CustomLayer": CustomLayer}
)
self.assertIsInstance(loaded_model.layers[0], CustomLayer)
os.remove(filepath)
def test_save_unzipped(self):
"""Test saving/loading an unzipped model dir."""
model = self.get_model()
# Test error with keras extension
bad_filepath = os.path.join(self.get_temp_dir(), "test_model.keras")
with self.assertRaisesRegex(ValueError, "should not end in"):
saving_api.save_model(model, bad_filepath, zipped=False)
filepath = os.path.join(self.get_temp_dir(), "test_model_dir")
saving_api.save_model(model, filepath, zipped=False)
self.assertTrue(os.path.exists(filepath))
self.assertTrue(os.path.isdir(filepath))
config_filepath = os.path.join(filepath, "config.json")
weights_filepath = os.path.join(filepath, "model.weights.h5")
self.assertTrue(os.path.exists(config_filepath))
self.assertTrue(os.path.exists(weights_filepath))
loaded_model = saving_api.load_model(filepath)
x = np.random.uniform(size=(10, 3))
self.assertTrue(np.allclose(model.predict(x), loaded_model.predict(x)))
| LoadModelTests |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/tumblr_oauth2/tests.py | {
"start": 253,
"end": 1121
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = TumblrOAuth2Provider.id
def get_mocked_response(self):
return [
MockedResponse(
HTTPStatus.OK,
"""
{
"meta": {
"status": 200,
"msg": "OK"
},
"response": {
"user": {
"following": 263,
"default_post_format": "html",
"name": "derekg",
"likes": 606,
"blogs": [
{
"name": "derekg",
"title": "Derek Gottfrid",
"url": "http://derekg.org/",
"tweet": "auto",
"primary": true,
"followers": 33004929
},
{
"name": "ihatehipstrz",
"title": "I Hate Hipstrz"
}
]
}
} }
""",
)
]
def get_expected_to_str(self):
return "derekg"
| TumblrTests |
python | wandb__wandb | wandb/sdk/lib/retry.py | {
"start": 10980,
"end": 12026
} | class ____(Backoff):
"""Jittered exponential backoff: sleep times increase ~exponentially up to some limit."""
def __init__(
self,
initial_sleep: datetime.timedelta,
max_sleep: datetime.timedelta,
max_retries: Optional[int] = None,
timeout_at: Optional[datetime.datetime] = None,
) -> None:
self._next_sleep = min(max_sleep, initial_sleep)
self._max_sleep = max_sleep
self._remaining_retries = max_retries
self._timeout_at = timeout_at
def next_sleep_or_reraise(self, exc: Exception) -> datetime.timedelta:
if self._remaining_retries is not None:
if self._remaining_retries <= 0:
raise exc
self._remaining_retries -= 1
if self._timeout_at is not None and NOW_FN() > self._timeout_at:
raise exc
result, self._next_sleep = (
self._next_sleep,
min(self._max_sleep, self._next_sleep * (1 + random.random())),
)
return result
| ExponentialBackoff |
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 4590,
"end": 4740
} | class ____(BaseScal):
blas_func = fblas.zscal
dtype = complex128
##################################################
# Test blas ?copy
| TestZscal |
python | django__django | tests/annotations/models.py | {
"start": 2029,
"end": 2233
} | class ____(models.Model):
data = models.JSONField(default=dict, blank=True)
id = models.IntegerField(primary_key=True)
class Meta:
required_db_features = {"supports_json_field"}
| JsonModel |
python | kamyu104__LeetCode-Solutions | Python/minimum-sum-of-four-digit-number-after-splitting-digits.py | {
"start": 1141,
"end": 1419
} | class ____(object):
def minimumSum(self, num):
"""
:type num: int
:rtype: int
"""
nums = sorted(map(int, list(str(num))))
a = b = 0
for x in nums:
a = a*10+x
a, b = b, a
return a+b
| Solution2 |
python | scipy__scipy | benchmarks/benchmarks/test_functions.py | {
"start": 294,
"end": 517
} | class ____:
def fun(self, x):
return np.dot(x, x) + x[0]
def der(self, x):
d = 2. * x
d[0] += 1
return d
def hess(self, x):
return 2. * np.eye(x.size)
| AsymmetricQuadratic |
python | dagster-io__dagster | python_modules/automation/python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/test_ai_review_cache_comprehensive.py | {
"start": 180,
"end": 16108
} | class ____:
"""Comprehensive test coverage for ai-review-cache command."""
def test_import_and_basic_structure(self):
"""Test that command can be imported and has expected structure."""
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
assert ai_review_cache is not None
assert ai_review_cache.name == "ai-review-cache"
assert callable(ai_review_cache)
def test_help_command(self):
"""Test that help command works and contains expected content."""
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--help"])
assert result.exit_code == 0
assert "ai-review-cache" in result.output
assert "--action" in result.output
assert "--format" in result.output
assert "status" in result.output
assert "clear" in result.output
assert "json" in result.output
assert "human" in result.output
def test_default_parameters(self):
"""Test command with default parameters (action=status, format=human)."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": False,
"size_bytes": 0,
"entries": 0,
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, [])
assert result.exit_code == 0
assert "📊 AI Review Cache Status" in result.output
assert "No cache found" in result.output
def test_status_action_no_cache_human_format(self):
"""Test status action when no cache exists with human format."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": False,
"size_bytes": 0,
"entries": 0,
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status", "--format", "human"])
assert result.exit_code == 0
assert "📊 AI Review Cache Status" in result.output
assert "No cache found" in result.output
def test_status_action_no_cache_json_format(self):
"""Test status action when no cache exists with JSON format."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
status_data = {"exists": False, "size_bytes": 0, "entries": 0}
mock_instance.get_cache_status.return_value = status_data
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status", "--format", "json"])
assert result.exit_code == 0
# Verify JSON output
output_json = json.loads(result.output.strip())
assert output_json == status_data
def test_status_action_with_valid_cache_human_format(self):
"""Test status action when cache exists and is valid with human format."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": True,
"size_bytes": 1024,
"entries": 1,
"last_analysis": 1640995200.0, # 2022-01-01 00:00:00 UTC
"cached_commit": "abc123def456",
"cached_branch": "main",
"is_valid": True,
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 0
assert "📊 AI Review Cache Status" in result.output
assert "Cache size: 1,024 bytes" in result.output
assert "Entries: 1" in result.output
assert "Last analysis:" in result.output
assert "Cached commit: abc123def456" in result.output
assert "Cached branch: main" in result.output
assert "✅ Yes" in result.output
def test_status_action_with_stale_cache_human_format(self):
"""Test status action when cache exists but is stale with human format."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": True,
"size_bytes": 2048,
"entries": 1,
"last_analysis": 1640995200.0,
"cached_commit": "old123commit",
"cached_branch": "feature",
"is_valid": False,
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 0
assert "Cache size: 2,048 bytes" in result.output
assert "❌ No (stale)" in result.output
def test_status_action_with_corrupted_cache_human_format(self):
"""Test status action when cache exists but is corrupted with human format."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": True,
"size_bytes": 512,
"entries": 0,
"error": "Corrupted cache file",
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 0
assert "❌ Error: Corrupted cache file" in result.output
def test_clear_action_success(self):
"""Test clear action when cache clearing succeeds."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.clear_cache.return_value = True
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "clear"])
assert result.exit_code == 0
assert "✅ Cache cleared successfully" in result.output
mock_instance.clear_cache.assert_called_once()
def test_clear_action_failure(self):
"""Test clear action when cache clearing fails."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.clear_cache.return_value = False
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "clear"])
assert result.exit_code == 1
assert "❌ Failed to clear cache" in result.output
def test_git_repository_error(self):
"""Test error handling when not in a git repository."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_cache_manager.side_effect = ValueError(
"Not in a git repository - cannot create cache"
)
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 1
assert "❌ Error: Must be run from within a git repository" in result.output
def test_other_value_error(self):
"""Test error handling for other ValueError scenarios."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_cache_manager.side_effect = ValueError("Some other error")
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 1
assert "❌ Error: Some other error" in result.output
def test_unexpected_exception(self):
"""Test error handling for unexpected exceptions."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_cache_manager.side_effect = RuntimeError("Unexpected runtime error")
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 1
assert "❌ Unexpected error: Unexpected runtime error" in result.output
def test_exception_during_status_operation(self):
"""Test error handling when exception occurs during status operation."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.side_effect = RuntimeError("Status check failed")
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 1
assert "❌ Unexpected error: Status check failed" in result.output
def test_exception_during_clear_operation(self):
"""Test error handling when exception occurs during clear operation."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.clear_cache.side_effect = RuntimeError("Clear operation failed")
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "clear"])
assert result.exit_code == 1
assert "❌ Unexpected error: Clear operation failed" in result.output
def test_invalid_action_parameter(self):
"""Test that invalid action parameter is rejected."""
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "invalid"])
assert result.exit_code != 0
assert "Invalid value for '--action'" in result.output
def test_invalid_format_parameter(self):
"""Test that invalid format parameter is rejected."""
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--format", "invalid"])
assert result.exit_code != 0
assert "Invalid value for '--format'" in result.output
def test_case_insensitive_parameters(self):
"""Test that parameters are case insensitive."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": False,
"size_bytes": 0,
"entries": 0,
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
# Test uppercase action
result = runner.invoke(ai_review_cache, ["--action", "STATUS"])
assert result.exit_code == 0
# Test uppercase format
result = runner.invoke(ai_review_cache, ["--format", "JSON"])
assert result.exit_code == 0
@patch("time.strftime")
def test_time_formatting_in_status_display(self, mock_strftime):
"""Test that time formatting works correctly in status display."""
mock_strftime.return_value = "2022-01-01 12:00:00"
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": True,
"size_bytes": 1024,
"entries": 1,
"last_analysis": 1640995200.0,
"cached_commit": "abc123",
"cached_branch": "main",
"is_valid": True,
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 0
assert "Last analysis: 2022-01-01 12:00:00" in result.output
mock_strftime.assert_called_once_with("%Y-%m-%d %H:%M:%S", time.localtime(1640995200.0))
def test_large_cache_size_formatting(self):
"""Test formatting of large cache sizes with comma separators."""
with patch(
"automation.dagster_dev.commands.ai_review_cache.CacheManager"
) as mock_cache_manager:
mock_instance = Mock()
mock_instance.get_cache_status.return_value = {
"exists": True,
"size_bytes": 1234567,
"entries": 1,
"last_analysis": 1640995200.0,
"cached_commit": "abc123",
"cached_branch": "main",
"is_valid": True,
}
mock_cache_manager.return_value = mock_instance
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--action", "status"])
assert result.exit_code == 0
assert "Cache size: 1,234,567 bytes" in result.output
| TestAiReviewCacheComprehensive |
python | python-openxml__python-docx | src/docx/oxml/text/font.py | {
"start": 726,
"end": 1007
} | class ____(BaseOxmlElement):
"""`w:color` element, specifying the color of a font and perhaps other objects."""
val: RGBColor | str = RequiredAttribute("w:val", ST_HexColor)
themeColor: MSO_THEME_COLOR | None = OptionalAttribute("w:themeColor", MSO_THEME_COLOR)
| CT_Color |
python | tensorflow__tensorflow | tensorflow/python/ops/weak_tensor_math_ops_test.py | {
"start": 11963,
"end": 12766
} | class ____(test_util.TensorFlowTestCase):
allowed_dtypes = [dtypes.float32, dtypes.float64, dtypes.complex128]
def testBasic(self):
for dtype in self.allowed_dtypes:
x = _get_weak_tensor([1.0, 2.0, 0.0, 4.0], dtype=dtype)
y = math_ops.reciprocal_no_nan(x)
target = _get_weak_tensor([1.0, 0.5, 0.0, 0.25], dtype=dtype)
self.assertAllEqual(y, target)
self.assertEqual(y.dtype.base_dtype, target.dtype.base_dtype)
def testInverse(self):
for dtype in self.allowed_dtypes:
x = np.random.choice([0, 1, 2, 4, 5], size=(5, 5, 5))
x = _get_weak_tensor(x, dtype=dtype)
y = math_ops.reciprocal_no_nan(math_ops.reciprocal_no_nan(x))
self.assertAllClose(y, x)
self.assertEqual(y.dtype.base_dtype, x.dtype.base_dtype)
| ReciprocalNoNanTest |
python | realpython__materials | python-inherit-list-userlist/custom_list1.py | {
"start": 0,
"end": 384
} | class ____(list):
def join(self, separator=" "):
return separator.join(str(item) for item in self)
def map(self, action):
return type(self)(action(item) for item in self)
def filter(self, predicate):
return type(self)(item for item in self if predicate(item))
def for_each(self, func):
for item in self:
func(item)
| CustomList |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 183157,
"end": 183737
} | class ____(Operation):
def call(self, x):
return backend.numpy.sign(x)
def compute_output_spec(self, x):
sparse = getattr(x, "sparse", False)
return KerasTensor(x.shape, dtype=x.dtype, sparse=sparse)
@keras_export(["keras.ops.sign", "keras.ops.numpy.sign"])
def sign(x):
"""Returns a tensor with the signs of the elements of `x`.
Args:
x: Input tensor.
Returns:
Output tensor of same shape as `x`.
"""
if any_symbolic_tensors((x,)):
return Sign().symbolic_call(x)
return backend.numpy.sign(x)
| Sign |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 126925,
"end": 127785
} | class ____:
def setup_method(self):
self.rng = np.random.default_rng(2444103536)
def test_rvs(self):
vals = stats.zipf.rvs(1.5, size=(2, 50), random_state=self.rng)
assert np.all(vals >= 1)
assert np.shape(vals) == (2, 50)
assert vals.dtype.char in typecodes['AllInteger']
val = stats.zipf.rvs(1.5, random_state=self.rng)
assert isinstance(val, int)
val = stats.zipf(1.5).rvs(3, random_state=self.rng)
assert isinstance(val, np.ndarray)
assert val.dtype.char in typecodes['AllInteger']
def test_moments(self):
# n-th moment is finite iff a > n + 1
m, v = stats.zipf.stats(a=2.8)
assert_(np.isfinite(m))
assert_equal(v, np.inf)
s, k = stats.zipf.stats(a=4.8, moments='sk')
assert_(not np.isfinite([s, k]).all())
| TestZipf |
python | doocs__leetcode | solution/1600-1699/1616.Split Two Strings to Make Palindrome/Solution.py | {
"start": 0,
"end": 451
} | class ____:
def checkPalindromeFormation(self, a: str, b: str) -> bool:
def check1(a: str, b: str) -> bool:
i, j = 0, len(b) - 1
while i < j and a[i] == b[j]:
i, j = i + 1, j - 1
return i >= j or check2(a, i, j) or check2(b, i, j)
def check2(a: str, i: int, j: int) -> bool:
return a[i : j + 1] == a[i : j + 1][::-1]
return check1(a, b) or check1(b, a)
| Solution |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 42313,
"end": 43095
} | class ____(Operation):
def call(self, x):
return backend.numpy.bartlett(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype=backend.floatx())
@keras_export(["keras.ops.bartlett", "keras.ops.numpy.bartlett"])
def bartlett(x):
"""Bartlett window function.
The Bartlett window is a triangular window that rises then falls linearly.
Args:
x: Scalar or 1D Tensor. Window length.
Returns:
A 1D tensor containing the Bartlett window values.
Example:
>>> x = keras.ops.convert_to_tensor(5)
>>> keras.ops.bartlett(x)
array([0. , 0.5, 1. , 0.5, 0. ], dtype=float32)
"""
if any_symbolic_tensors((x,)):
return Bartlett().symbolic_call(x)
return backend.numpy.bartlett(x)
| Bartlett |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/libtool_deletion/package.py | {
"start": 221,
"end": 637
} | class ____(autotools.AutotoolsPackage):
"""Mock AutotoolsPackage to check proper deletion
of libtool archives.
"""
homepage = "https://www.gnu.org/software/make/"
url = "http://www.example.com/libtool-deletion-1.0.tar.gz"
version("4.2.1", sha256="e40b8f018c1da64edd1cc9a6fce5fa63b2e707e404e20cad91fbae337c98a5b7")
def do_stage(self):
mkdirp(self.stage.source_path)
| LibtoolDeletion |
python | pytorch__pytorch | test/test_meta.py | {
"start": 37979,
"end": 44568
} | class ____(torch.utils._python_dispatch.TorchDispatchMode):
test_case: TestCase
device: torch.device
dtype: torch.dtype
aten_olp_no_out_overload: set = set()
def __init__(self, test_case, *, device, dtype, symbolic_meta: bool, inplace: bool, supports_out: bool):
self.test_case = test_case
# save TLS
self.precision = test_case.precision
self.rel_tol = test_case.rel_tol
self.device_type = torch.device(device).type
self.dtype = dtype
self.symbolic_meta = symbolic_meta
self.inplace = inplace
self.supports_out = supports_out
@staticmethod
def try_resolve_aten_out_overload(ol, args, kwargs, num_outputs):
ol_args = ol._schema.arguments
olp: OpOverloadPacket = ol._overloadpacket
if olp in MetaCrossRefDispatchMode.aten_olp_no_out_overload:
return (None, None, None)
candidate_ols = []
for candidate_ol_name in olp.overloads():
candidate_ol = getattr(olp, candidate_ol_name)
if any(arg.is_out for arg in candidate_ol._schema.arguments):
candidate_ols.append(candidate_ol)
if not candidate_ols:
MetaCrossRefDispatchMode.aten_olp_no_out_overload.add(olp)
return (None, None, None)
# Now match based on args, kwargs and number of required outputs
candidate_ol: OpOverload = None
for candidate_ol in candidate_ols:
candidate_ol_args = candidate_ol._schema.arguments
if (len(args) >= len(candidate_ol_args)):
continue
# Positional arguments must have the same type
if not all(
ol_args[pos_arg_ind].type == candidate_ol_args[pos_arg_ind].type
for pos_arg_ind in range(len(args))
):
continue
# Number of outputs must match
candidate_out_names = [out_arg.name for out_arg in candidate_ol_args[-num_outputs:] if out_arg.is_out]
if len(candidate_out_names) != num_outputs:
continue
# Now try and match kwargs. Just need to ensure that the
# remaining kwargs allow an out overload to be called. For example
# we can throw away parameters like `dtype` that may be passed to the
# functional version of the op since the `dtype` will already be present
# in the `out` argument
new_kwargs = {}
kwargs_match = True
for arg in candidate_ol_args[len(args):-num_outputs]:
if arg.name not in kwargs:
if arg.has_default_value():
new_kwargs[arg.name] = arg.default_value
elif isinstance(arg.type, torch.OptionalType):
if isinstance(arg.type.getElementType(), torch.BoolType):
new_kwargs[arg.name] = False
else:
new_kwargs[arg.name] = None
else:
kwargs_match = False
break
else:
new_kwargs[arg.name] = kwargs[arg.name]
if kwargs_match:
return candidate_ol, candidate_out_names, new_kwargs
return None, None, None
def _get_expected_test_result(self, func: OpOverload):
if self.dtype in meta_dispatch_skips.get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_device_skips[self.device_type].get(func, set()):
test_expect = TestExpect.SKIP
elif self.dtype in meta_dispatch_expected_failures.get(func, set()):
test_expect = TestExpect.XFAILURE
elif self.dtype in meta_dispatch_device_expected_failures[self.device_type].get(func, set()):
test_expect = TestExpect.XFAILURE
else:
test_expect = TestExpect.SUCCESS
return test_expect
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
kwargs = kwargs or {}
self.test_case.precision = self.precision
self.test_case.rel_tol = self.rel_tol
test_expect = self._get_expected_test_result(func)
expected = run_meta_crossref(
self.test_case,
test_expect,
func,
args,
kwargs,
dtype=self.dtype,
device_type=self.device_type,
run_symbolic_meta=self.symbolic_meta,
)
# This is to test torch ops that do not have an out parameter but have
# aten op overloads that have out parameters. Additionally, Python decompositions
# may register OpOverloadPacket's so decompositions need to be tested
# to ensure all OpOverloads still function for the Meta key (e.g. if a python decomposition
# is registered for an aten op aten.foo with overloads [default, out], the python
# function needs to support receiving `out` arguments)
if (
not self.inplace and
not self.supports_out and
test_expect == TestExpect.SUCCESS and
(torch.is_tensor(expected) or isinstance(expected, Iterable))
):
# check to see if there is a potential out overload
num_outputs = 1 if torch.is_tensor(expected) else len(expected)
func_out_overload, out_param_names, kwargs = self.try_resolve_aten_out_overload(func, args, kwargs, num_outputs)
if func_out_overload:
if num_outputs == 1:
kwargs[out_param_names[0]] = expected
else:
for ind, out_param_name in enumerate(out_param_names):
kwargs[out_param_name] = expected[ind]
test_expect = self._get_expected_test_result(func_out_overload)
run_meta_crossref(
self.test_case,
test_expect,
func_out_overload,
args,
kwargs,
dtype=self.dtype,
device_type=self.device_type,
run_symbolic_meta=self.symbolic_meta,
)
return expected
# NB: we're running these tests only on CUDA because there are some
# inconsistencies between CUDA and CPU, and running on CUDA makes it easier
# to ignore the CPU case when inconsistencies arise. Ideally we deal
# with the inconsistencies but this takes time.
@unMarkDynamoStrictTest
| MetaCrossRefDispatchMode |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/chains/test_base.py | {
"start": 1048,
"end": 7090
} | class ____(Chain):
"""Fake chain class for testing purposes."""
be_correct: bool = True
the_input_keys: list[str] = ["foo"]
the_output_keys: list[str] = ["bar"]
@property
def input_keys(self) -> list[str]:
"""Input keys."""
return self.the_input_keys
@property
def output_keys(self) -> list[str]:
"""Output key of bar."""
return self.the_output_keys
@override
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
if self.be_correct:
return {"bar": "baz"}
return {"baz": "bar"}
def test_bad_inputs() -> None:
"""Test errors are raised if input keys are not found."""
chain = FakeChain()
with pytest.raises(ValueError, match=re.escape("Missing some input keys: {'foo'}")):
chain({"foobar": "baz"})
def test_bad_outputs() -> None:
"""Test errors are raised if outputs keys are not found."""
chain = FakeChain(be_correct=False)
with pytest.raises(
ValueError, match=re.escape("Missing some output keys: {'bar'}")
):
chain({"foo": "baz"})
def test_run_info() -> None:
"""Test that run_info is returned properly when specified."""
chain = FakeChain()
output = chain({"foo": "bar"}, include_run_info=True)
assert "foo" in output
assert "bar" in output
assert RUN_KEY in output
def test_correct_call() -> None:
"""Test correct call of fake chain."""
chain = FakeChain()
output = chain({"foo": "bar"})
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_correct() -> None:
"""Test passing single input works."""
chain = FakeChain()
output = chain("bar")
assert output == {"foo": "bar", "bar": "baz"}
def test_single_input_error() -> None:
"""Test passing single input errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError, match="Missing some input keys:"):
chain("bar")
def test_run_single_arg() -> None:
"""Test run method with single arg."""
chain = FakeChain()
output = chain.run("bar")
assert output == "baz"
def test_run_multiple_args_error() -> None:
"""Test run method with multiple args errors as expected."""
chain = FakeChain()
with pytest.raises(ValueError, match="`run` supports only one positional argument"):
chain.run("bar", "foo")
def test_run_kwargs() -> None:
"""Test run method with kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
output = chain.run(foo="bar", bar="foo")
assert output == "baz"
def test_run_kwargs_error() -> None:
"""Test run method with kwargs errors as expected."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(ValueError, match=re.escape("Missing some input keys: {'bar'}")):
chain.run(foo="bar", baz="foo")
def test_run_args_and_kwargs_error() -> None:
"""Test run method with args and kwargs."""
chain = FakeChain(the_input_keys=["foo", "bar"])
with pytest.raises(
ValueError,
match="`run` supported with either positional arguments "
"or keyword arguments but not both",
):
chain.run("bar", foo="bar")
def test_multiple_output_keys_error() -> None:
"""Test run with multiple output keys errors as expected."""
chain = FakeChain(the_output_keys=["foo", "bar"])
with pytest.raises(
ValueError,
match="`run` not supported when there is not exactly one output key",
):
chain.run("bar")
def test_run_arg_with_memory() -> None:
"""Test run method works when arg is passed."""
chain = FakeChain(the_input_keys=["foo", "baz"], memory=FakeMemory())
chain.run("bar")
def test_run_with_callback() -> None:
"""Test run method works when callback manager is passed."""
handler = FakeCallbackHandler()
chain = FakeChain(
callbacks=[handler],
)
output = chain.run("bar")
assert output == "baz"
assert handler.starts == 1
assert handler.ends == 1
assert handler.errors == 0
def test_run_with_callback_and_input_error() -> None:
"""Test callback manager catches run validation input error."""
handler = FakeCallbackHandler()
chain = FakeChain(
the_input_keys=["foo", "bar"],
callbacks=[handler],
)
with pytest.raises(ValueError, match=re.escape("Missing some input keys: {'foo'}")):
chain({"bar": "foo"})
assert handler.starts == 1
assert handler.ends == 0
assert handler.errors == 1
def test_manually_specify_rid() -> None:
chain = FakeChain()
run_id = uuid.uuid4()
with collect_runs() as cb:
chain.invoke({"foo": "bar"}, {"run_id": run_id})
run = cb.traced_runs[0]
assert run.id == run_id
run_id2 = uuid.uuid4()
with collect_runs() as cb:
list(chain.stream({"foo": "bar"}, {"run_id": run_id2}))
run = cb.traced_runs[0]
assert run.id == run_id2
async def test_manually_specify_rid_async() -> None:
chain = FakeChain()
run_id = uuid.uuid4()
with collect_runs() as cb:
await chain.ainvoke({"foo": "bar"}, {"run_id": run_id})
run = cb.traced_runs[0]
assert run.id == run_id
run_id2 = uuid.uuid4()
with collect_runs() as cb:
res = chain.astream({"foo": "bar"}, {"run_id": run_id2})
async for _ in res:
pass
run = cb.traced_runs[0]
assert run.id == run_id2
def test_run_with_callback_and_output_error() -> None:
"""Test callback manager catches run validation output error."""
handler = FakeCallbackHandler()
chain = FakeChain(
the_output_keys=["foo", "bar"],
callbacks=[handler],
)
with pytest.raises(
ValueError, match=re.escape("Missing some output keys: {'foo'}")
):
chain("foo")
assert handler.starts == 1
assert handler.ends == 0
assert handler.errors == 1
| FakeChain |
python | streamlit__streamlit | lib/tests/streamlit/runtime/state/session_state_test.py | {
"start": 34126,
"end": 35691
} | class ____(unittest.TestCase):
def test_is_stale_widget_metadata_is_None(self):
assert _is_stale_widget(None, {}, {})
def test_is_stale_widget_active_id(self):
metadata = WidgetMetadata(
id="widget_id_1",
deserializer=lambda x: str(x),
serializer=lambda x: int(x),
value_type="int_value",
)
assert not _is_stale_widget(metadata, {"widget_id_1"}, {})
def test_is_stale_widget_unrelated_fragment(self):
metadata = WidgetMetadata(
id="widget_id_1",
deserializer=lambda x: str(x),
serializer=lambda x: int(x),
value_type="int_value",
fragment_id="my_fragment",
)
assert not _is_stale_widget(metadata, {"widget_id_2"}, {"some_other_fragment"})
def test_is_stale_widget_actually_stale_fragment(self):
metadata = WidgetMetadata(
id="widget_id_1",
deserializer=lambda x: str(x),
serializer=lambda x: int(x),
value_type="int_value",
fragment_id="my_fragment",
)
assert _is_stale_widget(metadata, {"widget_id_2"}, {"my_fragment"})
def test_is_stale_widget_actually_stale_no_fragment(self):
metadata = WidgetMetadata(
id="widget_id_1",
deserializer=lambda x: str(x),
serializer=lambda x: int(x),
value_type="int_value",
fragment_id="my_fragment",
)
assert _is_stale_widget(metadata, {"widget_id_2"}, {})
| IsStaleWidgetTests |
python | pytorch__pytorch | test/dynamo/test_aot_autograd_cache.py | {
"start": 84419,
"end": 93464
} | class ____(torch._dynamo.test_case.TestCase):
@property
def device_type(self) -> str:
return "cuda" if torch.cuda.is_available() else "cpu"
def default_config(self):
return AOTConfig(
fw_compiler=None,
bw_compiler=None,
inference_compiler=None,
partition_fn=None,
decompositions={},
num_params_buffers=0,
aot_id=0,
keep_inference_input_mutations=False,
dynamic_shapes=True,
aot_autograd_arg_pos_to_source=None,
is_export=False,
no_tangents=False,
enable_log=False,
precompile_backend_id=None,
)
def _get_dynamo_output(self, fn, *args, **kwargs):
# Reset dynamo between runs
torch._dynamo.reset()
fx_graph = None
example_inputs = None
def compiler(gm, inputs, **kwargs):
nonlocal fx_graph
nonlocal example_inputs
fx_graph = gm
example_inputs = inputs
return gm
g = torch.compile(fn, backend=compiler, fullgraph=True)
result = g(*args, **kwargs)
return (result, fx_graph, example_inputs)
def gen_cache_key(self, f, config, inputs=None):
if inputs is None:
inputs = [torch.ones(3)]
_, fx_g, example_inputs = self._get_dynamo_output(f, *inputs)
shape_env = ShapeEnv()
ctx = TracingContext(FakeTensorMode(shape_env=shape_env))
# Needs a shape env for FxGraphCache.check_can_cache to pass.
# Not needed for actual key calculation.
with torch._guards.tracing(ctx):
with sanitize_gm_for_cache(fx_g):
return autograd_cache_key(fx_g, example_inputs, config, {})
def test_basic_hash_key(self):
def fn(x):
return x.sin().cos()
config = self.default_config()
# Check hash is stable on multiple runs
c1 = self.gen_cache_key(fn, config)
c2 = self.gen_cache_key(fn, config)
self.assertEqual(c1, c2)
def test_identical_graphs_and_configs(self):
def fn(x):
return x.sin().cos()
def fn2(x): # noqa: F841
y = x.sin()
z = y.cos()
return z
# Make the id different, but otherwise identical
config = self.default_config()
config2 = self.default_config()
config2.aot_id = 1
c1 = self.gen_cache_key(fn, config)
c2 = self.gen_cache_key(fn, config2)
self.assertEqual(c1, c2)
def test_different_graphs(self):
def fn(x):
return x.cos().sin()
def fn2(x):
return x.sin().cos()
config = self.default_config()
c1 = self.gen_cache_key(fn, config)
c2 = self.gen_cache_key(fn2, config)
self.assertNotEqual(c1, c2)
def test_different_configs(self):
def fn(x):
return x.cos().sin()
config = self.default_config()
config2 = self.default_config()
config2.dynamic_shapes = False
c1 = self.gen_cache_key(fn, config)
c2 = self.gen_cache_key(fn, config2)
self.assertNotEqual(c1, c2)
def test_different_inputs(self):
def fn(x):
return x.cos().sin()
config = self.default_config()
c1 = self.gen_cache_key(fn, config, inputs=[torch.ones(3)])
c2 = self.gen_cache_key(fn, config, inputs=[torch.ones(2)])
self.assertNotEqual(c1, c2)
def test_different_global_configs(self):
def fn(x):
return x.cos().sin()
config = self.default_config()
c1 = self.gen_cache_key(fn, config)
c2 = self.gen_cache_key(fn, config)
self.assertEqual(c1, c2)
c1 = self.gen_cache_key(fn, config)
# Change functorch config
with functorch_config.patch(
{"debug_assert": not functorch_config.debug_assert}
):
c2 = self.gen_cache_key(fn, config)
self.assertNotEqual(c1, c2)
c1 = self.gen_cache_key(fn, config)
# Change inductor config
with inductor_config.patch({"debug": not inductor_config.debug}):
c2 = self.gen_cache_key(fn, config)
self.assertNotEqual(c1, c2)
c1 = self.gen_cache_key(fn, config)
# Change torch grad enabled
with torch.no_grad():
c2 = self.gen_cache_key(fn, config)
self.assertNotEqual(c1, c2)
def test_incompatible_function(self):
@torch._dynamo.allow_in_graph
class AllowInGraphFunc(torch.autograd.Function):
@staticmethod
def forward(_, x):
torch._dynamo.graph_break()
return x.sin()
def fn(x):
return AllowInGraphFunc.apply(x)
config = self.default_config()
self.assertRaises(
BypassAOTAutogradCache, lambda: self.gen_cache_key(fn, config)
)
def test_private_namespace(self):
# TODO: anyone who monkeypatches a **public** function into torch namespace with @allow_in_graph
# could still break our sanity check and cache something bad. But that's an edge case we'll take the risk on.
# Monkeypatch some random private function into torch, see that it fails
@torch._dynamo.allow_in_graph
def my_private_fun(x):
return x.sin()
with patch("torch._my_priv", new=my_private_fun, create=True):
def fn(x):
return torch._my_priv(x)
config = self.default_config()
self.assertRaises(
BypassAOTAutogradCache, lambda: self.gen_cache_key(fn, config)
)
@torch._inductor.config.patch({"freezing": True})
def test_freezing(self):
def fn(x):
return x.cos().sin()
config = self.default_config()
self.assertRaises(
BypassAOTAutogradCache, lambda: self.gen_cache_key(fn, config)
)
def test_private_builtin(self):
# _foreach_add is a private torch function, but
# it's also a builtin_function_or_method, so it should be allowed to be cached
# since dynamo allows it in the graph
def fn(x, b):
y = (x, x)
return torch._foreach_add(y, b)
config = self.default_config()
r1 = self.gen_cache_key(fn, config, inputs=[torch.ones(3), 1])
r2 = self.gen_cache_key(fn, config, inputs=[torch.ones(3), 2])
self.assertNotEqual(r1, r2)
def test_nn_module_with_params(self):
class MyMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.seq = torch.nn.Parameter(torch.ones((3, 3)))
def forward(self, x):
return self.seq + x
config = self.default_config()
# Different inputs and parameters, but all the same size
c1 = self.gen_cache_key(MyMod(), config, inputs=[torch.ones((3, 3))])
c2 = self.gen_cache_key(MyMod(), config, inputs=[torch.ones((3, 3))])
self.assertEqual(c1, c2)
def test_normal_torch_function(self):
@torch._dynamo.allow_in_graph
def fn(x):
y = torch.sin(x)
z = torch.cos(x)
w = y + z
w.abs()
return w
config = self.default_config()
self.gen_cache_key(fn, config)
def test_safe_torchfunction(self):
def fn(x):
a = x.size()
b = torch.Size([3, 3])
c = a == b
x = torch.sym_int(9)
y = torch.sym_float(x)
z = torch.sym_int(torch.sym_sqrt(y))
result = torch.sym_sum([x, y, z])
return (c, result)
config = self.default_config()
self.gen_cache_key(fn, config, inputs=[torch.ones((3, 3))])
def test_sanitize_gm_for_cache(self):
def fn(x):
y = torch.sin(x)
z = torch.cos(x)
w = y + z
w.abs()
return w
_, fx_g, example_inputs = self._get_dynamo_output(fn, torch.ones(3))
ctx = TracingContext(FakeTensorMode(shape_env=ShapeEnv()))
with torch._guards.tracing(ctx):
fx_g.meta = {"foo": "bar"}
fx_g.compile_subgraph_reason = "Blah"
config = self.default_config()
with sanitize_gm_for_cache(fx_g):
c1 = autograd_cache_key(fx_g, example_inputs, config, {})
c3 = autograd_cache_key(fx_g, example_inputs, config, {})
fx_g.meta = {"foo": "baz"}
fx_g.compile_subgraph_reason = None
with sanitize_gm_for_cache(fx_g):
c2 = autograd_cache_key(fx_g, example_inputs, config, {})
c4 = autograd_cache_key(fx_g, example_inputs, config, {})
self.assertEqual(c1, c2)
self.assertNotEqual(c3, c4)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| AOTAutogradCachePicklerTests |
python | google__jax | jaxlib/mosaic/python/tpu.py | {
"start": 1151,
"end": 1580
} | class ____(_tpu_gen.TraceOp): # noqa: F405
"""An extension to the automatically generated TraceOp bindings."""
def __init__(self, results, message, level, *, loc=None, ip=None):
super().__init__(results, message, level, loc=loc, ip=ip)
self.regions[0].blocks.append(*[]) # Append the block.
@property
def body(self):
return self.regions[0].blocks[0]
@_cext.register_operation(_Dialect, replace=True)
| TraceOp |
python | hynek__structlog | tests/processors/test_renderers.py | {
"start": 3354,
"end": 8660
} | class ____:
def test_sort_keys(self, event_dict):
"""
Keys are sorted if sort_keys is set.
"""
rv = LogfmtRenderer(sort_keys=True)(None, None, event_dict)
assert r'a=<A(\o/)> b="[3, 4]" x=7 y=test z="(1, 2)"' == rv
def test_order_complete(self, event_dict):
"""
Orders keys according to key_order.
"""
rv = LogfmtRenderer(key_order=["y", "b", "a", "z", "x"])(
None, None, event_dict
)
assert r'y=test b="[3, 4]" a=<A(\o/)> z="(1, 2)" x=7' == rv
def test_order_missing(self, event_dict):
"""
Missing keys get rendered as None.
"""
rv = LogfmtRenderer(key_order=["c", "y", "b", "a", "z", "x"])(
None, None, event_dict
)
assert r'c= y=test b="[3, 4]" a=<A(\o/)> z="(1, 2)" x=7' == rv
def test_order_missing_dropped(self, event_dict):
"""
Missing keys get dropped
"""
rv = LogfmtRenderer(
key_order=["c", "y", "b", "a", "z", "x"], drop_missing=True
)(None, None, event_dict)
assert r'y=test b="[3, 4]" a=<A(\o/)> z="(1, 2)" x=7' == rv
def test_order_extra(self, event_dict):
"""
Extra keys get sorted if sort_keys=True.
"""
event_dict["B"] = "B"
event_dict["A"] = "A"
rv = LogfmtRenderer(
key_order=["c", "y", "b", "a", "z", "x"], sort_keys=True
)(None, None, event_dict)
assert (
r'c= y=test b="[3, 4]" a=<A(\o/)> z="(1, 2)" x=7 A=A B=B'
) == rv
def test_order_sorted_missing_dropped(self, event_dict):
"""
Keys get sorted if sort_keys=True and extras get dropped.
"""
event_dict["B"] = "B"
event_dict["A"] = "A"
rv = LogfmtRenderer(
key_order=["c", "y", "b", "a", "z", "x"],
sort_keys=True,
drop_missing=True,
)(None, None, event_dict)
assert r'y=test b="[3, 4]" a=<A(\o/)> z="(1, 2)" x=7 A=A B=B' == rv
def test_random_order(self, event_dict):
"""
No special ordering doesn't blow up.
"""
rv = LogfmtRenderer()(None, None, event_dict)
assert isinstance(rv, str)
def test_empty_event_dict(self):
"""
Empty event dict renders as empty string.
"""
rv = LogfmtRenderer()(None, None, {})
assert "" == rv
def test_bool_as_flag(self):
"""
If activated, render ``{"a": True}`` as ``a`` instead of ``a=true``.
"""
event_dict = {"a": True, "b": False}
rv_abbrev = LogfmtRenderer(bool_as_flag=True)(None, None, event_dict)
assert r"a b=false" == rv_abbrev
rv_no_abbrev = LogfmtRenderer(bool_as_flag=False)(
None, None, event_dict
)
assert r"a=true b=false" == rv_no_abbrev
def test_reference_format(self):
"""
Test rendering according to example at
https://pkg.go.dev/github.com/kr/logfmt
"""
event_dict = {
"foo": "bar",
"a": 14,
"baz": "hello kitty",
"cool%story": "bro",
"f": True,
"%^asdf": True,
}
rv = LogfmtRenderer()(None, None, event_dict)
assert 'foo=bar a=14 baz="hello kitty" cool%story=bro f %^asdf' == rv
def test_equal_sign_or_space_in_value(self):
"""
Values with equal signs are always quoted.
"""
event_dict = {
"without": "somevalue",
"withequal": "some=value",
"withspace": "some value",
}
rv = LogfmtRenderer()(None, None, event_dict)
assert (
r'without=somevalue withequal="some=value" withspace="some value"'
== rv
)
def test_invalid_key(self):
"""
Keys cannot contain space characters.
"""
event_dict = {"invalid key": "somevalue"}
with pytest.raises(ValueError, match='Invalid key: "invalid key"'):
LogfmtRenderer()(None, None, event_dict)
def test_newline_in_value(self):
"""
Newlines in values are escaped.
"""
event_dict = {"with_newline": "some\nvalue"}
rv = LogfmtRenderer()(None, None, event_dict)
assert r"with_newline=some\nvalue" == rv
@pytest.mark.parametrize(
("raw", "escaped"),
[
# Slash by itself does not need to be escaped.
(r"a\slash", r"a\slash"),
# A quote requires quoting, and escaping the quote.
('a"quote', r'"a\"quote"'),
# If anything triggers quoting of the string, then the slash must
# be escaped.
(
r'a\slash with space or a"quote',
r'"a\\slash with space or a\"quote"',
),
(
r"I want to render this \"string\" with logfmtrenderer",
r'"I want to render this \\\"string\\\" with logfmtrenderer"',
),
],
)
def test_escaping(self, raw, escaped):
"""
Backslashes and quotes are escaped.
"""
rv = LogfmtRenderer()(None, None, {"key": raw})
assert f"key={escaped}" == rv
| TestLogfmtRenderer |
python | Textualize__textual | src/textual/_segment_tools.py | {
"start": 624,
"end": 8708
} | class ____(Exception):
pass
def index_to_cell_position(segments: Iterable[Segment], index: int) -> int:
"""Given a character index, return the cell position of that character within
an Iterable of Segments. This is the sum of the cell lengths of all the characters
*before* the character at `index`.
Args:
segments: The segments to find the cell position within.
index: The index to convert into a cell position.
Returns:
The cell position of the character at `index`.
Raises:
NoCellPositionForIndex: If the supplied index doesn't fall within the given segments.
"""
if not segments:
raise NoCellPositionForIndex
if index == 0:
return 0
cell_position_end = 0
segment_length = 0
segment_end_index = 0
segment_cell_length = 0
text = ""
iter_segments = iter(segments)
try:
while segment_end_index < index:
segment = next(iter_segments)
text = segment.text
segment_length = len(text)
segment_cell_length = cell_len(text)
cell_position_end += segment_cell_length
segment_end_index += segment_length
except StopIteration:
raise NoCellPositionForIndex
# Check how far into this segment the target index is
segment_index_start = segment_end_index - segment_length
index_within_segment = index - segment_index_start
segment_cell_start = cell_position_end - segment_cell_length
return segment_cell_start + cell_len(text[:index_within_segment])
def line_crop(
segments: list[Segment], start: int, end: int, total: int
) -> list[Segment]:
"""Crops a list of segments between two cell offsets.
Args:
segments: A list of Segments for a line.
start: Start offset (cells)
end: End offset (cells, exclusive)
total: Total cell length of segments.
Returns:
A new shorter list of segments
"""
# This is essentially a specialized version of Segment.divide
# The following line has equivalent functionality (but a little slower)
# return list(Segment.divide(segments, [start, end]))[1]
_cell_len = cell_len
pos = 0
output_segments: list[Segment] = []
add_segment = output_segments.append
iter_segments = iter(segments)
segment: Segment | None = None
for segment in iter_segments:
end_pos = pos + _cell_len(segment.text)
if end_pos > start:
segment = segment.split_cells(start - pos)[1]
break
pos = end_pos
else:
return []
if end >= total:
# The end crop is the end of the segments, so we can collect all remaining segments
if segment:
add_segment(segment)
output_segments.extend(iter_segments)
return output_segments
pos = start
while segment is not None:
end_pos = pos + _cell_len(segment.text)
if end_pos < end:
add_segment(segment)
else:
add_segment(segment.split_cells(end - pos)[0])
break
pos = end_pos
segment = next(iter_segments, None)
return output_segments
def line_trim(segments: list[Segment], start: bool, end: bool) -> list[Segment]:
"""Optionally remove a cell from the start and / or end of a list of segments.
Args:
segments: A line (list of Segments)
start: Remove cell from start.
end: Remove cell from end.
Returns:
A new list of segments.
"""
segments = segments.copy()
if segments and start:
_, first_segment = segments[0].split_cells(1)
if first_segment.text:
segments[0] = first_segment
else:
segments.pop(0)
if segments and end:
last_segment = segments[-1]
last_segment, _ = last_segment.split_cells(len(last_segment.text) - 1)
if last_segment.text:
segments[-1] = last_segment
else:
segments.pop()
return segments
def line_pad(
segments: Iterable[Segment], pad_left: int, pad_right: int, style: Style
) -> list[Segment]:
"""Adds padding to the left and / or right of a list of segments.
Args:
segments: A line of segments.
pad_left: Cells to pad on the left.
pad_right: Cells to pad on the right.
style: Style of padded cells.
Returns:
A new line with padding.
"""
if pad_left and pad_right:
return [
make_blank(pad_left, style),
*segments,
make_blank(pad_right, style),
]
elif pad_left:
return [
make_blank(pad_left, style),
*segments,
]
elif pad_right:
return [
*segments,
make_blank(pad_right, style),
]
return list(segments)
def align_lines(
lines: list[list[Segment]],
style: Style,
size: Size,
horizontal: AlignHorizontal,
vertical: AlignVertical,
) -> Iterable[list[Segment]]:
"""Align lines.
Args:
lines: A list of lines.
style: Background style.
size: Size of container.
horizontal: Horizontal alignment.
vertical: Vertical alignment.
Returns:
Aligned lines.
"""
if not lines:
return
width, height = size
get_line_length = Segment.get_line_length
line_lengths = [get_line_length(line) for line in lines]
shape_width = max(line_lengths)
shape_height = len(line_lengths)
def blank_lines(count: int) -> list[list[Segment]]:
"""Create blank lines.
Args:
count: Desired number of blank lines.
Returns:
A list of blank lines.
"""
return [[make_blank(width, style)]] * count
top_blank_lines = bottom_blank_lines = 0
vertical_excess_space = max(0, height - shape_height)
if vertical == "top":
bottom_blank_lines = vertical_excess_space
elif vertical == "middle":
top_blank_lines = vertical_excess_space // 2
bottom_blank_lines = vertical_excess_space - top_blank_lines
elif vertical == "bottom":
top_blank_lines = vertical_excess_space
if top_blank_lines:
yield from blank_lines(top_blank_lines)
if horizontal == "left":
for cell_length, line in zip(line_lengths, lines):
if cell_length == width:
yield line
else:
yield line_pad(line, 0, width - cell_length, style)
elif horizontal == "center":
left_space = max(0, width - shape_width) // 2
for cell_length, line in zip(line_lengths, lines):
if cell_length == width:
yield line
else:
yield line_pad(
line, left_space, width - cell_length - left_space, style
)
elif horizontal == "right":
for cell_length, line in zip(line_lengths, lines):
if width == cell_length:
yield line
else:
yield line_pad(line, width - cell_length, 0, style)
if bottom_blank_lines:
yield from blank_lines(bottom_blank_lines)
_re_spaces = re.compile(r"(\s+|\S+)")
def apply_hatch(
segments: Iterable[Segment],
character: str,
hatch_style: Style,
_split=_re_spaces.split,
) -> Iterable[Segment]:
"""Replace run of spaces with another character + style.
Args:
segments: Segments to process.
character: Character to replace spaces.
hatch_style: Style of replacement characters.
Yields:
Segments.
"""
_Segment = Segment
for segment in segments:
if " " not in segment.text:
yield segment
else:
text, style, _ = segment
for token in _split(text):
if token:
if token.isspace():
yield _Segment(character * len(token), hatch_style)
else:
yield _Segment(token, style)
| NoCellPositionForIndex |
python | scikit-learn__scikit-learn | sklearn/utils/_param_validation.py | {
"start": 18670,
"end": 19373
} | class ____(_Constraint):
"""Constraint representing random states.
Convenience class for
[Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None]
"""
def __init__(self):
super().__init__()
self._constraints = [
Interval(Integral, 0, 2**32 - 1, closed="both"),
_InstancesOf(np.random.RandomState),
_NoneConstraint(),
]
def is_satisfied_by(self, val):
return any(c.is_satisfied_by(val) for c in self._constraints)
def __str__(self):
return (
f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
f" {self._constraints[-1]}"
)
| _RandomStates |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 1173,
"end": 1364
} | class ____(Generic[_T]):
value: _T
def __get__(self, instance: object | None, cls: type[object]) -> _T: ...
def __set__(self, instance: object, value: _T) -> None: ...
| DescriptorD |
python | django__django | tests/delete_regress/models.py | {
"start": 2051,
"end": 2133
} | class ____(models.Model):
policy_number = models.CharField(max_length=10)
| Policy |
python | python__mypy | mypy/suggestions.py | {
"start": 30892,
"end": 34297
} | class ____(TypeStrVisitor):
"""Visitor used to format types"""
# TODO: Probably a lot
def __init__(self, module: str | None, graph: Graph, options: Options) -> None:
super().__init__(options=options)
self.module = module
self.graph = graph
def visit_any(self, t: AnyType) -> str:
if t.missing_import_name:
return t.missing_import_name
else:
return "Any"
def visit_instance(self, t: Instance) -> str:
s = t.type.fullname or t.type.name or None
if s is None:
return "<???>"
mod_obj = split_target(self.graph, s)
assert mod_obj
mod, obj = mod_obj
# If a class is imported into the current module, rewrite the reference
# to point to the current module. This helps the annotation tool avoid
# inserting redundant imports when a type has been reexported.
if self.module:
parts = obj.split(".") # need to split the object part if it is a nested class
tree = self.graph[self.module].tree
if tree and parts[0] in tree.names and mod not in tree.names:
mod = self.module
if (mod, obj) == ("builtins", "tuple"):
mod, obj = "typing", "Tuple[" + t.args[0].accept(self) + ", ...]"
elif t.args:
obj += f"[{self.list_str(t.args)}]"
if mod_obj == ("builtins", "unicode"):
return "Text"
elif mod == "builtins":
return obj
else:
delim = "." if "." not in obj else ":"
return mod + delim + obj
def visit_tuple_type(self, t: TupleType) -> str:
if t.partial_fallback and t.partial_fallback.type:
fallback_name = t.partial_fallback.type.fullname
if fallback_name != "builtins.tuple":
return t.partial_fallback.accept(self)
s = self.list_str(t.items)
return f"Tuple[{s}]"
def visit_uninhabited_type(self, t: UninhabitedType) -> str:
return "Any"
def visit_typeddict_type(self, t: TypedDictType) -> str:
return t.fallback.accept(self)
def visit_union_type(self, t: UnionType) -> str:
if len(t.items) == 2 and is_overlapping_none(t):
s = remove_optional(t).accept(self)
return f"{s} | None" if self.options.use_or_syntax() else f"Optional[{s}]"
else:
return super().visit_union_type(t)
def visit_callable_type(self, t: CallableType) -> str:
# TODO: use extended callables?
if is_tricky_callable(t):
arg_str = "..."
else:
# Note: for default arguments, we just assume that they
# are required. This isn't right, but neither is the
# other thing, and I suspect this will produce more better
# results than falling back to `...`
args = [typ.accept(self) for typ in t.arg_types]
arg_str = f"[{', '.join(args)}]"
return f"Callable[{arg_str}, {t.ret_type.accept(self)}]"
TType = TypeVar("TType", bound=Type)
def make_suggestion_anys(t: TType) -> TType:
"""Make all anys in the type as coming from the suggestion engine.
This keeps those Anys from influencing constraint generation,
which allows us to do better when refining types.
"""
return cast(TType, t.accept(MakeSuggestionAny()))
| TypeFormatter |
python | pytorch__pytorch | torch/nn/modules/activation.py | {
"start": 29560,
"end": 52432
} | class ____(Module):
r"""Allows the model to jointly attend to information from different representation subspaces.
This MultiheadAttention layer implements the original architecture described
in the `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_ paper. The
intent of this layer is as a reference implementation for foundational understanding
and thus it contains only limited features relative to newer architectures.
Given the fast pace of innovation in transformer-like architectures, we recommend
exploring this `tutorial <https://pytorch.org/tutorials/intermediate/transformer_building_blocks.html>`_
to build efficient layers from building blocks in core or using higher
level libraries from the `PyTorch Ecosystem <https://landscape.pytorch.org/>`_.
Multi-Head Attention is defined as:
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(\text{head}_1,\dots,\text{head}_h)W^O
where :math:`\text{head}_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
``nn.MultiheadAttention`` will use the optimized implementations of
``scaled_dot_product_attention()`` when possible.
In addition to support for the new ``scaled_dot_product_attention()``
function, for speeding up Inference, MHA will use
fastpath inference with support for Nested Tensors, iff:
- self attention is being computed (i.e., ``query``, ``key``, and ``value`` are the same tensor).
- inputs are batched (3D) with ``batch_first==True``
- Either autograd is disabled (using ``torch.inference_mode`` or ``torch.no_grad``) or no tensor argument ``requires_grad``
- training is disabled (using ``.eval()``)
- ``add_bias_kv`` is ``False``
- ``add_zero_attn`` is ``False``
- ``kdim`` and ``vdim`` are equal to ``embed_dim``
- if a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ is passed, neither ``key_padding_mask``
nor ``attn_mask`` is passed
- autocast is disabled
If the optimized inference fastpath implementation is in use, a
`NestedTensor <https://pytorch.org/docs/stable/nested.html>`_ can be passed for
``query``/``key``/``value`` to represent padding more efficiently than using a
padding mask. In this case, a `NestedTensor <https://pytorch.org/docs/stable/nested.html>`_
will be returned, and an additional speedup proportional to the fraction of the input
that is padding can be expected.
Args:
embed_dim: Total dimension of the model.
num_heads: Number of parallel attention heads. Note that ``embed_dim`` will be split
across ``num_heads`` (i.e. each head will have dimension ``embed_dim // num_heads``).
dropout: Dropout probability on ``attn_output_weights``. Default: ``0.0`` (no dropout).
bias: If specified, adds bias to input / output projection layers. Default: ``True``.
add_bias_kv: If specified, adds bias to the key and value sequences at dim=0. Default: ``False``.
add_zero_attn: If specified, adds a new batch of zeros to the key and value sequences at dim=1.
Default: ``False``.
kdim: Total number of features for keys. Default: ``None`` (uses ``kdim=embed_dim``).
vdim: Total number of features for values. Default: ``None`` (uses ``vdim=embed_dim``).
batch_first: If ``True``, then the input and output tensors are provided
as (batch, seq, feature). Default: ``False`` (seq, batch, feature).
Examples::
>>> # xdoctest: +SKIP
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
.. _`FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness`:
https://arxiv.org/abs/2205.14135
"""
__constants__ = ["batch_first"]
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
) -> None:
if embed_dim <= 0 or num_heads <= 0:
raise ValueError(
f"embed_dim and num_heads must be greater than 0,"
f" got embed_dim={embed_dim} and num_heads={num_heads} instead"
)
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, (
"embed_dim must be divisible by num_heads"
)
if not self._qkv_same_embed_dim:
self.q_proj_weight = Parameter(
torch.empty((embed_dim, embed_dim), **factory_kwargs)
)
self.k_proj_weight = Parameter(
torch.empty((embed_dim, self.kdim), **factory_kwargs)
)
self.v_proj_weight = Parameter(
torch.empty((embed_dim, self.vdim), **factory_kwargs)
)
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(
torch.empty((3 * embed_dim, embed_dim), **factory_kwargs)
)
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim, **factory_kwargs))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = NonDynamicallyQuantizableLinear(
embed_dim, embed_dim, bias=bias, **factory_kwargs
)
if add_bias_kv:
self.bias_k = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
self.bias_v = Parameter(torch.empty((1, 1, embed_dim), **factory_kwargs))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self) -> None:
    """Xavier-initialize projection weights and zero all biases."""
    if self._qkv_same_embed_dim:
        xavier_uniform_(self.in_proj_weight)
    else:
        for weight in (
            self.q_proj_weight,
            self.k_proj_weight,
            self.v_proj_weight,
        ):
            xavier_uniform_(weight)

    if self.in_proj_bias is not None:
        constant_(self.in_proj_bias, 0.0)
        constant_(self.out_proj.bias, 0.0)

    for kv_bias in (self.bias_k, self.bias_v):
        if kv_bias is not None:
            xavier_normal_(kv_bias)
def __setstate__(self, state):
    # Support loading old MultiheadAttention checkpoints generated by v1.1.0.
    # Those predate the ``_qkv_same_embed_dim`` attribute; at that time only
    # the packed-projection layout existed, so default it to True.
    if "_qkv_same_embed_dim" not in state:
        state["_qkv_same_embed_dim"] = True

    super().__setstate__(state)
def forward(
    self,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    average_attn_weights: bool = True,
    is_causal: bool = False,
) -> tuple[Tensor, Optional[Tensor]]:
    r"""Compute attention outputs using query, key, and value embeddings.

    Supports optional parameters for padding, masks and attention weights.

    Args:
        query: Query embeddings of shape :math:`(L, E_q)` for unbatched input, :math:`(L, N, E_q)` when ``batch_first=False``
            or :math:`(N, L, E_q)` when ``batch_first=True``, where :math:`L` is the target sequence length,
            :math:`N` is the batch size, and :math:`E_q` is the query embedding dimension ``embed_dim``.
            Queries are compared against key-value pairs to produce the output.
            See "Attention Is All You Need" for more details.
        key: Key embeddings of shape :math:`(S, E_k)` for unbatched input, :math:`(S, N, E_k)` when ``batch_first=False``
            or :math:`(N, S, E_k)` when ``batch_first=True``, where :math:`S` is the source sequence length,
            :math:`N` is the batch size, and :math:`E_k` is the key embedding dimension ``kdim``.
            See "Attention Is All You Need" for more details.
        value: Value embeddings of shape :math:`(S, E_v)` for unbatched input, :math:`(S, N, E_v)` when
            ``batch_first=False`` or :math:`(N, S, E_v)` when ``batch_first=True``, where :math:`S` is the source
            sequence length, :math:`N` is the batch size, and :math:`E_v` is the value embedding dimension ``vdim``.
            See "Attention Is All You Need" for more details.
        key_padding_mask: If specified, a mask of shape :math:`(N, S)` indicating which elements within ``key``
            to ignore for the purpose of attention (i.e. treat as "padding"). For unbatched `query`, shape should be :math:`(S)`.
            Binary and float masks are supported.
            For a binary mask, a ``True`` value indicates that the corresponding ``key`` value will be ignored for
            the purpose of attention. For a float mask, it will be directly added to the corresponding ``key`` value.
        need_weights: If specified, returns ``attn_output_weights`` in addition to ``attn_outputs``.
            Set ``need_weights=False`` to use the optimized ``scaled_dot_product_attention``
            and achieve the best performance for MHA.
            Default: ``True``.
        attn_mask: If specified, a 2D or 3D mask preventing attention to certain positions. Must be of shape
            :math:`(L, S)` or :math:`(N\cdot\text{num\_heads}, L, S)`, where :math:`N` is the batch size,
            :math:`L` is the target sequence length, and :math:`S` is the source sequence length. A 2D mask will be
            broadcasted across the batch while a 3D mask allows for a different mask for each entry in the batch.
            Binary and float masks are supported. For a binary mask, a ``True`` value indicates that the
            corresponding position is not allowed to attend. For a float mask, the mask values will be added to
            the attention weight.
            If both attn_mask and key_padding_mask are supplied, their types should match.
        average_attn_weights: If true, indicates that the returned ``attn_weights`` should be averaged across
            heads. Otherwise, ``attn_weights`` are provided separately per head. Note that this flag only has an
            effect when ``need_weights=True``. Default: ``True`` (i.e. average weights across heads)
        is_causal: If specified, applies a causal mask as attention mask.
            Default: ``False``.
            Warning:
            ``is_causal`` provides a hint that ``attn_mask`` is the
            causal mask. Providing incorrect hints can result in
            incorrect execution, including forward and backward
            compatibility.

    Outputs:
        - **attn_output** - Attention outputs of shape :math:`(L, E)` when input is unbatched,
          :math:`(L, N, E)` when ``batch_first=False`` or :math:`(N, L, E)` when ``batch_first=True``,
          where :math:`L` is the target sequence length, :math:`N` is the batch size, and :math:`E` is the
          embedding dimension ``embed_dim``.
        - **attn_output_weights** - Only returned when ``need_weights=True``. If ``average_attn_weights=True``,
          returns attention weights averaged across heads of shape :math:`(L, S)` when input is unbatched or
          :math:`(N, L, S)`, where :math:`N` is the batch size, :math:`L` is the target sequence length, and
          :math:`S` is the source sequence length. If ``average_attn_weights=False``, returns attention weights per
          head of shape :math:`(\text{num\_heads}, L, S)` when input is unbatched or :math:`(N, \text{num\_heads}, L, S)`.

    .. note::
        `batch_first` argument is ignored for unbatched inputs.
    """  # noqa: B950
    # Fast-path gate: each failed precondition records a human-readable
    # reason; the string is also embedded in the nested-tensor assert below.
    why_not_fast_path = ""
    if (
        (attn_mask is not None and torch.is_floating_point(attn_mask))
        or (key_padding_mask is not None)
        and torch.is_floating_point(key_padding_mask)
    ):
        # Note the precedence: this reads as
        # (float attn_mask) or (key_padding_mask present and float).
        why_not_fast_path = "floating-point masks are not supported for fast path."

    is_batched = query.dim() == 3

    # Normalize both masks to a canonical dtype/representation (boolean
    # masks become float masks when mixed with float masks, etc.).
    key_padding_mask = F._canonical_mask(
        mask=key_padding_mask,
        mask_name="key_padding_mask",
        other_type=F._none_or_dtype(attn_mask),
        other_name="attn_mask",
        target_type=query.dtype,
    )

    attn_mask = F._canonical_mask(
        mask=attn_mask,
        mask_name="attn_mask",
        other_type=None,
        other_name="",
        target_type=query.dtype,
        check_other=False,
    )

    is_fastpath_enabled = torch.backends.mha.get_fastpath_enabled()

    # Remaining fast-path eligibility checks; only the last failing reason
    # is kept, which is fine since any single failure disables the path.
    if not is_fastpath_enabled:
        why_not_fast_path = "torch.backends.mha.get_fastpath_enabled() was not True"
    elif not is_batched:
        why_not_fast_path = (
            f"input not batched; expected query.dim() of 3 but got {query.dim()}"
        )
    elif query is not key or key is not value:
        # When lifting this restriction, don't forget to either
        # enforce that the dtypes all match or test cases where
        # they don't!
        why_not_fast_path = "non-self attention was used (query, key, and value are not the same Tensor)"
    elif self.in_proj_bias is not None and query.dtype != self.in_proj_bias.dtype:
        why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_bias ({self.in_proj_bias.dtype}) don't match"
    elif self.in_proj_weight is None:
        why_not_fast_path = "in_proj_weight was None"
    elif query.dtype != self.in_proj_weight.dtype:
        # this case will fail anyway, but at least they'll get a useful error message.
        why_not_fast_path = f"dtypes of query ({query.dtype}) and self.in_proj_weight ({self.in_proj_weight.dtype}) don't match"
    elif self.training:
        why_not_fast_path = "training is enabled"
    elif (self.num_heads % 2) != 0:
        why_not_fast_path = "self.num_heads is not even"
    elif not self.batch_first:
        why_not_fast_path = "batch_first was not True"
    elif self.bias_k is not None:
        why_not_fast_path = "self.bias_k was not None"
    elif self.bias_v is not None:
        why_not_fast_path = "self.bias_v was not None"
    elif self.add_zero_attn:
        why_not_fast_path = "add_zero_attn was enabled"
    elif not self._qkv_same_embed_dim:
        why_not_fast_path = "_qkv_same_embed_dim was not True"
    elif query.is_nested and (
        key_padding_mask is not None or attn_mask is not None
    ):
        why_not_fast_path = (
            "supplying both src_key_padding_mask and src_mask at the same time \
is not supported with NestedTensor input"
        )
    elif torch.is_autocast_enabled():
        why_not_fast_path = "autocast is enabled"

    if not why_not_fast_path:
        tensor_args = (
            query,
            key,
            value,
            self.in_proj_weight,
            self.in_proj_bias,
            self.out_proj.weight,
            self.out_proj.bias,
        )
        # We have to use list comprehensions below because TorchScript does not support
        # generator expressions.
        if torch.overrides.has_torch_function(tensor_args):
            why_not_fast_path = "some Tensor argument has_torch_function"
        elif _is_make_fx_tracing():
            why_not_fast_path = "we are running make_fx tracing"
        elif not all(_check_arg_device(x) for x in tensor_args):
            why_not_fast_path = (
                "some Tensor argument's device is neither one of "
                f"cpu, cuda or {torch.utils.backend_registration._privateuse1_backend_name}"
            )
        elif torch.is_grad_enabled() and any(
            _arg_requires_grad(x) for x in tensor_args
        ):
            why_not_fast_path = (
                "grad is enabled and at least one of query or the "
                "input/output projection weights or biases requires_grad"
            )
        if not why_not_fast_path:
            # All checks passed: dispatch to the fused native kernel.
            merged_mask, mask_type = self.merge_masks(
                attn_mask, key_padding_mask, query
            )

            if self.in_proj_bias is not None and self.in_proj_weight is not None:
                return torch._native_multi_head_attention(
                    query,
                    key,
                    value,
                    self.embed_dim,
                    self.num_heads,
                    self.in_proj_weight,
                    self.in_proj_bias,
                    self.out_proj.weight,
                    self.out_proj.bias,
                    merged_mask,
                    need_weights,
                    average_attn_weights,
                    mask_type,
                )

    any_nested = query.is_nested or key.is_nested or value.is_nested
    assert not any_nested, (
        "MultiheadAttention does not support NestedTensor outside of its fast path. "
        + f"The fast path was not hit because {why_not_fast_path}"
    )

    if self.batch_first and is_batched:
        # The math path expects (seq, batch, feature).
        # make sure that the transpose op does not affect the "is" property
        if key is value:
            if query is key:
                query = key = value = query.transpose(1, 0)
            else:
                query, key = (x.transpose(1, 0) for x in (query, key))
                value = key
        else:
            query, key, value = (x.transpose(1, 0) for x in (query, key, value))

    if not self._qkv_same_embed_dim:
        # Separate q/k/v projection weights.
        attn_output, attn_output_weights = F.multi_head_attention_forward(
            query,
            key,
            value,
            self.embed_dim,
            self.num_heads,
            self.in_proj_weight,
            self.in_proj_bias,
            self.bias_k,
            self.bias_v,
            self.add_zero_attn,
            self.dropout,
            self.out_proj.weight,
            self.out_proj.bias,
            training=self.training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            use_separate_proj_weight=True,
            q_proj_weight=self.q_proj_weight,
            k_proj_weight=self.k_proj_weight,
            v_proj_weight=self.v_proj_weight,
            average_attn_weights=average_attn_weights,
            is_causal=is_causal,
        )
    else:
        # Packed in_proj_weight covering q, k and v.
        attn_output, attn_output_weights = F.multi_head_attention_forward(
            query,
            key,
            value,
            self.embed_dim,
            self.num_heads,
            self.in_proj_weight,
            self.in_proj_bias,
            self.bias_k,
            self.bias_v,
            self.add_zero_attn,
            self.dropout,
            self.out_proj.weight,
            self.out_proj.bias,
            training=self.training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            average_attn_weights=average_attn_weights,
            is_causal=is_causal,
        )
    if self.batch_first and is_batched:
        # Transpose back to (batch, seq, feature) for the caller.
        return attn_output.transpose(1, 0), attn_output_weights
    else:
        return attn_output, attn_output_weights
def merge_masks(
    self,
    attn_mask: Optional[Tensor],
    key_padding_mask: Optional[Tensor],
    query: Tensor,
) -> tuple[Optional[Tensor], Optional[int]]:
    r"""Determine mask type and combine masks if necessary.

    If only ``key_padding_mask`` is given, it is returned unchanged with
    mask type 1. If ``attn_mask`` is given, it is expanded to shape
    ``(batch_size, num_heads, seq_len, seq_len)`` with mask type 2; when
    ``key_padding_mask`` is also present the two expanded masks are summed.
    With neither mask, ``(None, None)`` is returned.

    Args:
        attn_mask: attention mask of shape ``(seq_len, seq_len)``
        key_padding_mask: padding mask of shape ``(batch_size, seq_len)``
        query: query embeddings of shape ``(batch_size, seq_len, embed_dim)``

    Returns:
        merged_mask: merged mask
        mask_type: merged mask type (0, 1, or 2)
    """
    merged: Optional[Tensor] = None
    kind: Optional[int] = None

    if key_padding_mask is not None:
        kind = 1
        merged = key_padding_mask

    if attn_mask is not None:
        # In this branch query can't be a nested tensor, so it has a shape.
        n_batch, n_seq, _ = query.shape
        kind = 2

        # Promote attn_mask to 4D regardless of its original rank.
        if attn_mask.dim() == 3:
            attn_4d = attn_mask.view(n_batch, -1, n_seq, n_seq)
        else:  # attn_mask.dim() == 2
            attn_4d = attn_mask.view(1, 1, n_seq, n_seq).expand(
                n_batch, self.num_heads, -1, -1
            )
        merged = attn_4d

        if key_padding_mask is not None:
            padding_4d = key_padding_mask.view(n_batch, 1, 1, n_seq).expand(
                -1, self.num_heads, -1, -1
            )
            merged = attn_4d + padding_4d

    # no attn_mask and no key_padding_mask, returns None, None
    return merged, kind
| MultiheadAttention |
python | spyder-ide__spyder | external-deps/spyder-kernels/spyder_kernels/utils/iofuncs.py | {
"start": 901,
"end": 17648
} | class ____(dict):
"""
Matlab style struct, enhanced.
Supports dictionary and attribute style access. Can be pickled,
and supports code completion in a REPL.
Examples
========
>>> from spyder_kernels.utils.iofuncs import MatlabStruct
>>> a = MatlabStruct()
>>> a.b = 'spam' # a["b"] == 'spam'
>>> a.c["d"] = 'eggs' # a.c.d == 'eggs'
>>> print(a)
{'c': {'d': 'eggs'}, 'b': 'spam'}
"""
def __getattr__(self, attr):
    """Access the dictionary keys for unknown attributes."""
    try:
        return self[attr]
    except KeyError:
        # Re-raise as AttributeError so hasattr()/getattr() behave normally.
        msg = "'MatlabStruct' object has no attribute %s" % attr
        raise AttributeError(msg)
def __getitem__(self, attr):
    """
    Get a dict value; create a MatlabStruct if requesting a submember.

    Do not create a key if the attribute starts with an underscore.
    """
    if attr in self.keys() or attr.startswith('_'):
        return dict.__getitem__(self, attr)
    frame = inspect.currentframe()
    # step into the function that called us
    # Auto-create a nested struct only when the calling bytecode looks like
    # an attribute store (e.g. ``a.b.c = x``) -- see _is_allowed.
    if frame.f_back.f_back and self._is_allowed(frame.f_back.f_back):
        dict.__setitem__(self, attr, MatlabStruct())
    elif self._is_allowed(frame.f_back):
        dict.__setitem__(self, attr, MatlabStruct())
    return dict.__getitem__(self, attr)
def _is_allowed(self, frame):
    """Check for allowed op code in the calling frame"""
    allowed = [dis.opmap['STORE_ATTR'], dis.opmap['LOAD_CONST'],
               dis.opmap.get('STOP_CODE', 0)]
    bytecode = frame.f_code.co_code
    # NOTE(review): the fixed ``+ 3`` offset assumes a particular bytecode
    # instruction width; CPython 3.6+ uses 2-byte instructions, so confirm
    # this still lands on the intended opcode on supported interpreters.
    instruction = bytecode[frame.f_lasti + 3]
    return instruction in allowed
# Attribute writes/deletes delegate straight to the dict protocol, so
# ``s.x = 1`` is the same as ``s['x'] = 1``.
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__

@property
def __dict__(self):
    """Allow for code completion in a REPL"""
    return self.copy()
def get_matlab_value(val):
    """
    Extract a value from a Matlab file

    From the oct2py project, see
    https://pythonhosted.org/oct2py/conversions.html
    """
    # Extract each item of a list.
    if isinstance(val, list):
        return [get_matlab_value(v) for v in val]

    # Ignore leaf objects.
    if not isinstance(val, np.ndarray):
        return val

    # Convert user defined classes.
    # NOTE: branch order matters -- the classname check must come before the
    # generic struct branch, since class records also expose dtype.names.
    if hasattr(val, 'classname'):
        out = dict()
        for name in val.dtype.names:
            out[name] = get_matlab_value(val[name].squeeze().tolist())
        cls = type(val.classname, (object,), out)
        return cls()

    # Extract struct data.
    elif val.dtype.names:
        out = MatlabStruct()
        for name in val.dtype.names:
            out[name] = get_matlab_value(val[name].squeeze().tolist())
        val = out

    # Extract cells.
    elif val.dtype.kind == 'O':
        val = val.squeeze().tolist()
        if not isinstance(val, list):
            val = [val]
        val = get_matlab_value(val)

    # Compress singleton values.
    elif val.size == 1:
        val = val.item()

    # Compress empty values.
    elif val.size == 0:
        # 'U'/'S' are string dtypes -> empty string; anything else -> [].
        if val.dtype.kind in 'US':
            val = ''
        else:
            val = []

    return val
def load_matlab(filename):
    """Read a .mat file into a dict of Python values.

    Returns ``(data, None)`` on success, ``(None, error_message)`` on
    failure, and ``(None, '')`` when scipy.io is unavailable.
    """
    if sp.io is FakeObject:
        return None, ''
    try:
        raw = sp.io.loadmat(filename, struct_as_record=True)
        return {key: get_matlab_value(value) for key, value in raw.items()}, None
    except Exception as exc:
        return None, str(exc)
def save_matlab(data, filename):
    """Write *data* to a .mat file; return an error string on failure."""
    if sp.io is FakeObject:
        return
    try:
        # 1-D arrays are stored as row vectors, matching Matlab conventions.
        sp.io.savemat(filename, data, oned_as='row')
    except Exception as exc:
        return str(exc)
# ---- For arrays
# -----------------------------------------------------------------------------
def load_array(filename):
    """Load a NumPy ``.npy``/``.npz`` file.

    Returns ``(data, None)`` on success, ``(None, error_message)`` on
    failure, and ``(None, '')`` when numpy is unavailable.
    """
    if np.load is FakeObject:
        return None, ''
    try:
        contents = np.load(filename)
        if isinstance(contents, np.lib.npyio.NpzFile):
            # .npz archive: expose it as a plain dict of arrays.
            return dict(contents), None
        if hasattr(contents, 'keys'):
            return contents, None
        # Single array: key it by the file's base name.
        stem = osp.splitext(osp.basename(filename))[0]
        return {stem: contents}, None
    except Exception as exc:
        return None, str(exc)
def __save_array(data, basename, index):
    """Dump *data* to ``<basename>_<index:04d>.npy`` and return the name."""
    fname = f"{basename}_{index:04d}.npy"
    np.save(fname, data)
    return fname
# ---- For PIL images
# -----------------------------------------------------------------------------
# Byte-order prefix for numpy dtype strings, matching the host endianness.
if sys.byteorder == 'little':
    _ENDIAN = '<'
else:
    _ENDIAN = '>'

# Map of PIL image mode -> (numpy dtype string, extra channel count).
# ``None`` as the second item means a single-band (2D) image.
DTYPES = {
    "1": ('|b1', None),
    "L": ('|u1', None),
    "I": ('%si4' % _ENDIAN, None),
    "F": ('%sf4' % _ENDIAN, None),
    "I;16": ('|u2', None),
    "I;16S": ('%si2' % _ENDIAN, None),
    "P": ('|u1', None),
    "RGB": ('|u1', 3),
    "RGBX": ('|u1', 4),
    "RGBA": ('|u1', 4),
    "CMYK": ('|u1', 4),
    "YCbCr": ('|u1', 4),
}
def __image_to_array(filename):
    """Read an image file into a numpy array via PIL.

    Raises RuntimeError for PIL modes without a DTYPES mapping.
    """
    img = PIL.Image.open(filename)
    try:
        dtype, extra = DTYPES[img.mode]
    except KeyError:
        raise RuntimeError("%s mode is not supported" % img.mode)
    # PIL reports (width, height); numpy wants (rows, cols) = (height, width).
    shape = (img.size[1], img.size[0])
    if extra is not None:
        shape += (extra,)
    return np.array(img.getdata(), dtype=np.dtype(dtype)).reshape(shape)
def load_image(filename):
    """Load an image file as ``{basename: ndarray}``; return (data, error)."""
    if PIL.Image is FakeObject or np.array is FakeObject:
        return None, ''
    try:
        key = osp.splitext(osp.basename(filename))[0]
        return {key: __image_to_array(filename)}, None
    except Exception as exc:
        return None, str(exc)
# ---- For misc formats
# -----------------------------------------------------------------------------
def load_pickle(filename):
    """Load a pickle file as a dictionary.

    Prefers pandas' reader (which handles legacy pandas pickles) and falls
    back to the stdlib pickle module when pandas is unavailable.
    """
    try:
        if pd.read_pickle is not FakeObject:
            return pd.read_pickle(filename), None
        with open(filename, 'rb') as fid:
            return pickle.load(fid), None
    except Exception as exc:
        return None, str(exc)
def load_json(filename):
    """Load a json file as a dictionary.

    Returns:
        (data, None) on success, or (None, error_message) on failure.
    """
    try:
        # JSON is UTF-8 per RFC 8259; don't rely on the platform default
        # encoding, which breaks non-ASCII files on e.g. Windows/cp1252.
        with open(filename, 'r', encoding='utf-8') as fid:
            return json.load(fid), None
    except Exception as err:
        return None, str(err)
# ---- For Spydata files
# -----------------------------------------------------------------------------
def save_dictionary(data, filename):
    """Save dictionary in a single file .spydata file

    A .spydata file is a tar archive holding a pickle of the dict plus one
    .npy file per non-empty numpy array (saved separately so large arrays
    survive even when pickling is problematic).

    Returns an error message string, or None on full success.
    """
    filename = osp.abspath(filename)
    old_cwd = os.getcwd()
    # chdir so tar members are added with relative names.
    os.chdir(osp.dirname(filename))
    error_message = None
    skipped_keys = []
    data_copy = {}

    try:
        # Copy dictionary before modifying it to fix #6689
        for obj_name, obj_value in data.items():
            # Skip modules, since they can't be pickled, users virtually never
            # would want them to be and so they don't show up in the skip list.
            # Skip callables, since they are only pickled by reference and thus
            # must already be present in the user's environment anyway.
            if not (callable(obj_value) or isinstance(obj_value,
                                                      types.ModuleType)):
                # If an object cannot be deepcopied, then it cannot be pickled.
                # Ergo, we skip it and list it later.
                try:
                    data_copy[obj_name] = copy.deepcopy(obj_value)
                except Exception:
                    skipped_keys.append(obj_name)
        data = data_copy
        if not data:
            raise RuntimeError('No supported objects to save')

        # Maps (variable name, index-or-None) -> .npy basename.
        saved_arrays = {}
        if np.ndarray is not FakeObject:
            # Saving numpy arrays with np.save
            arr_fname = osp.splitext(filename)[0]
            for name in list(data.keys()):
                try:
                    if (isinstance(data[name], np.ndarray) and
                            data[name].size > 0):
                        # Save arrays at data root
                        fname = __save_array(data[name], arr_fname,
                                             len(saved_arrays))
                        saved_arrays[(name, None)] = osp.basename(fname)
                        data.pop(name)
                    elif isinstance(data[name], (list, dict)):
                        # Save arrays nested in lists or dictionaries
                        if isinstance(data[name], list):
                            iterator = enumerate(data[name])
                        else:
                            iterator = iter(list(data[name].items()))
                        to_remove = []
                        for index, value in iterator:
                            if (isinstance(value, np.ndarray) and
                                    value.size > 0):
                                fname = __save_array(value, arr_fname,
                                                     len(saved_arrays))
                                saved_arrays[(name, index)] = (
                                    osp.basename(fname))
                                to_remove.append(index)
                        # Remove in reverse so list indices stay valid.
                        for index in sorted(to_remove, reverse=True):
                            data[name].pop(index)
                except (RuntimeError, pickle.PicklingError, TypeError,
                        AttributeError, IndexError):
                    # If an array can't be saved with numpy for some reason,
                    # leave the object intact and try to save it normally.
                    pass
            if saved_arrays:
                data['__saved_arrays__'] = saved_arrays

        pickle_filename = osp.splitext(filename)[0] + '.pickle'
        # Attempt to pickle everything.
        # If pickling fails, iterate through to eliminate problem objs & retry.
        with open(pickle_filename, 'w+b') as fdesc:
            try:
                pickle.dump(data, fdesc, protocol=2)
            except (pickle.PicklingError, AttributeError, TypeError,
                    ImportError, IndexError, RuntimeError):
                data_filtered = {}
                for obj_name, obj_value in data.items():
                    try:
                        pickle.dumps(obj_value, protocol=2)
                    except Exception:
                        skipped_keys.append(obj_name)
                    else:
                        data_filtered[obj_name] = obj_value
                if not data_filtered:
                    raise RuntimeError('No supported objects to save')
                pickle.dump(data_filtered, fdesc, protocol=2)

        # Use PAX (POSIX.1-2001) format instead of default GNU.
        # This improves interoperability and UTF-8/long variable name support.
        with tarfile.open(filename, "w", format=tarfile.PAX_FORMAT) as tar:
            for fname in ([pickle_filename]
                          + [fn for fn in list(saved_arrays.values())]):
                tar.add(osp.basename(fname))
                os.remove(fname)
    except (RuntimeError, pickle.PicklingError, TypeError) as error:
        error_message = str(error)
    else:
        if skipped_keys:
            skipped_keys.sort()
            error_message = ('Some objects could not be saved: '
                            + ', '.join(skipped_keys))
    finally:
        # Always restore the caller's working directory.
        os.chdir(old_cwd)
    return error_message
def is_within_directory(directory, target):
    """Return True if *target* resolves to a path inside *directory*.

    Uses ``os.path.commonpath`` instead of ``os.path.commonprefix``: the
    latter compares character-wise, so it would wrongly accept e.g.
    ``/tmp/foobar`` as being inside ``/tmp/foo`` — a hole in the tar
    path-traversal guard used by ``safe_extract``.
    """
    abs_directory = os.path.abspath(directory)
    abs_target = os.path.abspath(target)
    try:
        return os.path.commonpath([abs_directory, abs_target]) == abs_directory
    except ValueError:
        # Different drives (Windows) or otherwise incomparable paths.
        return False
def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
    """Safely extract a tar file, refusing members that escape *path*."""
    for member in tar.getmembers():
        member_path = os.path.join(path, member.name)
        # Reject entries like "../../etc/passwd" (tar path traversal).
        if not is_within_directory(path, member_path):
            raise Exception(
                f"Attempted path traversal in tar file {tar.name!r}"
            )

    tar.extractall(path, members, numeric_owner=numeric_owner)
def load_dictionary(filename):
    """Load dictionary from .spydata file

    Extracts the archive into a temporary directory, unpickles the payload
    and re-attaches any numpy arrays that were saved as separate .npy files.

    Returns (data, error_message); data is None when loading failed.
    """
    filename = osp.abspath(filename)
    old_cwd = os.getcwd()
    tmp_folder = tempfile.mkdtemp()
    os.chdir(tmp_folder)
    data = None
    error_message = None
    try:
        with tarfile.open(filename, "r") as tar:
            safe_extract(tar)

        pickle_filename = glob.glob('*.pickle')[0]
        # 'New' format (Spyder >=2.2)
        with open(pickle_filename, 'rb') as fdesc:
            data = pickle.loads(fdesc.read())

        saved_arrays = {}
        if np.load is not FakeObject:
            # Loading numpy arrays saved with np.save
            try:
                saved_arrays = data.pop('__saved_arrays__')
                for (name, index), fname in list(saved_arrays.items()):
                    # allow_pickle is needed for object arrays.
                    arr = np.load(osp.join(tmp_folder, fname), allow_pickle=True)
                    if index is None:
                        data[name] = arr
                    elif isinstance(data[name], dict):
                        data[name][index] = arr
                    else:
                        # List entries were removed on save; re-insert in place.
                        data[name].insert(index, arr)
            except KeyError:
                pass
    # Except AttributeError from e.g. trying to load function no longer present
    except (AttributeError, EOFError, ValueError) as error:
        error_message = str(error)
    # To ensure working dir gets changed back and temp dir wiped no matter what
    finally:
        os.chdir(old_cwd)
        try:
            shutil.rmtree(tmp_folder)
        except OSError as error:
            error_message = str(error)
    return data, error_message
# ---- For HDF5 files
# -----------------------------------------------------------------------------
def load_hdf5(filename):
    """
    Load an hdf5 file.

    Returns (contents, None) on success or (None, error_message) on failure.

    Notes
    -----
    - This is a fairly dumb implementation which reads the whole HDF5 file into
      Spyder's variable explorer.  Since HDF5 files are designed for storing
      very large data-sets, it may be much better to work directly with the
      HDF5 objects, thus keeping the data on disk.  Nonetheless, this gives
      quick and dirty but convenient access to them.
    - There is no support for creating files with compression, chunking etc,
      although these can be read without problem.
    - When reading an HDF5 file with sub-groups, groups in the file will
      correspond to dictionaries with the same layout.
    """
    def get_group(group):
        """Recursively convert an h5py Group into a nested dict of arrays."""
        contents = {}
        for name, obj in list(group.items()):
            if isinstance(obj, h5py.Dataset):
                contents[name] = np.array(obj)
            elif isinstance(obj, h5py.Group):
                # it is a group, so call self recursively
                contents[name] = get_group(obj)
            # other objects such as links are ignored
        return contents

    try:
        import h5py

        # Context manager guarantees the handle is released even if the
        # conversion raises (the original leaked the open file on error).
        with h5py.File(filename, 'r') as f:
            contents = get_group(f)
        return contents, None
    except Exception as error:
        return None, str(error)
def save_hdf5(data, filename):
    """
    Save an hdf5 file.

    Returns an error message string on failure, or None on success.

    Notes
    -----
    - All datatypes to be saved must be convertible to a numpy array, otherwise
      an exception will be raised.
    - Data attributes are currently ignored.
    - When saving data after reading it with load_hdf5, dictionaries are not
      turned into HDF5 groups.
    """
    try:
        import h5py

        # Context manager closes the file even when np.array(value) raises
        # (the original left the handle open in that case).
        with h5py.File(filename, 'w') as f:
            for key, value in list(data.items()):
                f[key] = np.array(value)
    except Exception as error:
        return str(error)
# ---- For DICOM files
# -----------------------------------------------------------------------------
def load_dicom(filename):
    """Load a DICOM file's pixel data as {basename: array}.

    Returns (data, None) on success or (None, error_message) on failure.
    """
    try:
        from pydicom import dicomio

        name = osp.splitext(osp.basename(filename))[0]
        try:
            # For Pydicom 3/Python 3.10+ (read_file was removed).
            try:
                data = dicomio.dcmread(filename, force=True)
            except TypeError:
                # Older dcmread signatures without the ``force`` kwarg.
                data = dicomio.dcmread(filename)
        except AttributeError:
            # For Pydicom 2/Python 3.9- (dcmread not available).
            try:
                data = dicomio.read_file(filename, force=True)
            except TypeError:
                data = dicomio.read_file(filename)
        arr = data.pixel_array
        return {name: arr}, None
    except Exception as error:
        return None, str(error)
# ---- Class to group all IO functionality
# -----------------------------------------------------------------------------
| MatlabStruct |
python | getsentry__sentry | fixtures/safe_migrations_apps/safe_run_sql_app/migrations/0001_initial.py | {
"start": 153,
"end": 659
} | class ____(CheckedMigration):
# First migration of the app: creates the TestTable model with only an
# auto-generated primary key.
initial = True

dependencies = []

operations = [
    migrations.CreateModel(
        name="TestTable",
        fields=[
            (
                "id",
                models.AutoField(
                    auto_created=True,
                    primary_key=True,
                    serialize=False,
                    verbose_name="ID",
                ),
            ),
        ],
    ),
]
| Migration |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 8998,
"end": 9151
} | class ____(EllipticCurve):
# NIST P-224 curve parameters (SEC 2 "secp224r1" naming).
name = "secp224r1"
key_size = 224
group_order = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFF16A2E0B8F03E13DD29455C5C2A3D
| SECP224R1 |
python | google__pytype | pytype/pytd/pytd_visitors.py | {
"start": 2634,
"end": 3042
} | class ____(base_visitor.Visitor):
"""Visitor that accumulates type parameters in its "params" attribute."""
def __init__(self):
super().__init__()
self._seen = set()
self.params = []
def EnterTypeParameter(self, p):
if p.name not in self._seen:
self.params.append(p)
self._seen.add(p.name)
def EnterParamSpec(self, p):
self.EnterTypeParameter(p)
| CollectTypeParameters |
python | pytorch__pytorch | test/jit/test_tracer.py | {
"start": 1176,
"end": 72485
} | class ____(JitTestCase):
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_large_nbr_kernel_args(self):
    # Tracing a module whose loop unrolls into many kernel arguments
    # (seq_len=130 slices concatenated) must not crash on CUDA.
    class Recurrence(nn.Module):
        def __init__(self, seq_len):
            super().__init__()
            self.seq_len = seq_len

        def forward(self, input):
            input = input.transpose(0, 1)

            # Main loop
            output = []
            for i in range(self.seq_len):
                b = input[i] * 2
                output.append(b)

            output = torch.cat(output, 0).view(input.size(0), *output[0].size())
            output = output.transpose(0, 1)
            return output

    input_size = 8
    batch_size = 2
    seq_len = 130

    rec = Recurrence(seq_len)
    input = torch.rand(batch_size, seq_len, input_size)

    torch.cuda.set_device(0)
    rec = rec.cuda()
    input = input.cuda()

    traced_rec = torch.jit.trace(rec, (input))

def test_trace_legacy_ctor(self):
    # Legacy tensor constructors (torch.FloatTensor) must be traceable.
    class MyModule(nn.Module):
        def forward(self, x):
            return (x + 1, torch.FloatTensor([0]))

    traced_rec = torch.jit.trace(MyModule(), torch.randn(2, 2))

def test_simple(self):
    # Smoke test: a traced elementwise expression matches eager execution.
    x = torch.tensor([0.4], requires_grad=True)
    y = torch.tensor([0.7], requires_grad=True)

    def f(x, y):
        return torch.sigmoid(torch.tanh(x * (x + y)))

    self.checkTrace(f, (x, y))
def test_trace_checking_with_global_name(self):
    # Trace checking must handle example inputs that live in globals().
    class MyClass(torch.nn.Module):
        def forward(self, xs: List[Tensor]):
            y = torch.cat(xs, dim=0)
            return y

    model = MyClass()
    # Simulate these inputs being in the globals, like they would be if,
    # e.g. they were defined outermost scope of a script
    global input1, input2
    input1 = torch.ones(2, 2)
    input2 = torch.ones(2, 2)
    m2 = torch.jit.trace(model, ((input1, input2),))

def test_trace_aliased_parameter(self):
    # Passing the module's own parameter as the example input must still
    # produce a trace that works on fresh tensors.
    class M(nn.Module):
        def __init__(self, x):
            super().__init__()
            self.x = nn.Parameter(x)

        def forward(self, y):
            return self.x + y

    m = M(torch.rand(3, 4))
    r = torch.jit.trace(m, m.x)
    t2 = torch.rand(3, 4)
    self.assertEqual(r(t2), m.x + t2)

def test_trace_nested_fn(self):
    # A scripted function with data-dependent control flow can be called
    # from inside a traced module.
    class TracedInlineDecision(torch.nn.Module):
        def forward(self, x, flag):
            @torch.jit.script
            def make_decision(flag, x):
                if flag:
                    return x
                else:
                    return torch.zeros_like(x)

            x = torch.neg(x)
            return make_decision(flag, x)

    decision = TracedInlineDecision()
    torch.jit.trace(
        decision,
        (torch.rand(3, 4), torch.tensor([True], dtype=torch.bool)),
        check_trace=True,
    )

def test_trace_single_tuple(self):
    # A single-element tuple return must round-trip through tracing.
    x = torch.tensor(2.0)

    def f2(x):
        return (x,)

    jit_f2 = torch.jit.trace(f2, x)
    assert f2(x) == jit_f2(x)  # fails
def test_trace_out_operator_with_two_output(self):
    # out= variants that fill two output tensors must trace cleanly.
    example_input = torch.rand(2, 8)
    out_1, out_2 = torch.cummax(example_input, 1)

    def run_cummax(example_input, out_1, out_2):
        output_1, output_2 = torch.cummax(example_input, 1, out=(out_1, out_2))
        return output_1, output_2

    trace_model = torch.jit.trace(run_cummax, (example_input, out_1, out_2))

def test_trace_namedtuple(self):
    # Namedtuple example inputs are traceable and field access works.
    Point = namedtuple("point", ["x", "y"])

    def f(p):
        if type(p) is tuple:
            p = Point(*p)
        return p.x + p.y

    p = Point(torch.randn(1), torch.randn(1))
    traced = torch.jit.trace(f, (p,))
    self.assertEqual(f(p), traced(p))

def test_trace_topk(self):
    # topk with a tensor k must not bake the example value into the trace;
    # the traced function is re-run on different shapes/k values.
    class M(torch.nn.Module):
        def forward(self, x, y):
            return x.topk(y, dim=1)[1]

    mod = M()
    inputs = (torch.randint(0, 10, (20, 20)), torch.tensor(17))
    traced_func = torch.jit.trace(mod, inputs)

    test_inputs = (torch.randint(0, 9, (9, 9)), torch.tensor(8))
    eager_out = mod(*test_inputs)
    traced_out = traced_func(*test_inputs)
    self.assertNotWarn(
        lambda: traced_func(*test_inputs),
        "Shouldn't throw slicing related warn here",
    )
    self.assertEqual(eager_out, traced_out)

    test_inputs = (torch.randint(0, 50, (50, 50)), torch.tensor(12))
    eager_out = mod(*test_inputs)
    traced_out = traced_func(*test_inputs)
    self.assertNotWarn(
        lambda: traced_func(*test_inputs),
        "Shouldn't throw slicing related warn here",
    )
    self.assertEqual(eager_out, traced_out)
def test_typeas_trace_check(self):
    # type_as between two traced inputs must be representable in the graph.
    a = torch.tensor([0.4], requires_grad=True)
    b = torch.tensor([0.7], requires_grad=True)

    def f(x, y):
        return x.type_as(y)

    trace = torch.jit.trace(f, (a, b))

def test_trace_index(self):
    # Advanced indexing with a tensor index traces and matches eager.
    x = torch.tensor([0.4], requires_grad=True)
    y = torch.tensor([0], dtype=torch.int64)

    def fn(x, y):
        return x[y]

    fn_traced = torch.jit.trace(
        fn,
        (
            x,
            y,
        ),
    )

    self.assertEqual(fn(x, y), fn_traced(x, y))

# Backwards tracing was broken for indexing by a constant,
# because it's internally implemented using as_strided,
# and we attempted to trace its derivative (which is not
# currently supported.)  It currently works because
# slice() is now not marked as traceable.
def test_trace_index_constant(self):
    # Forward and backward of constant indexing must match eager.
    x = torch.tensor([0.4], requires_grad=True)

    def fn(x):
        return x[0]

    def run(f):
        y = f(x)
        grad = torch.autograd.grad(y, x)[0].clone()
        return y, grad

    traced_fn = torch.jit.trace(fn, torch.ones(1))
    self.assertEqual(run(fn), run(traced_fn))

def test_index_put(self):
    # Boolean-mask index_put_ inside a traced fn applies to new inputs too.
    ten = torch.zeros(3, 3)
    mask = torch.tensor(
        [[True, True, True], [True, False, False], [True, True, False]]
    )

    def test_fn(ten, mask):
        ten[mask] = torch.ones(6)
        return ten

    traced_test_fn = torch.jit.trace(test_fn, (ten, mask))

    ten = torch.rand(3, 3)
    self.assertEqual(test_fn(ten, mask), traced_test_fn(ten, mask))
def test_canonicalize_tensor_iterator(self):
    # Scalar operands of arithmetic ops should be captured as constants in
    # a canonical position in the traced graph.
    x = torch.randn(4, 4)

    def f(x):
        x = x + 2
        x = x - 4
        x = x * 6
        x = x / 8
        return x

    traced = torch.jit.trace(f, (x,))
    f(x)
    graph = traced.graph_for(x)
    # There should be 4 int constants for the right sides of operators, plus one
    # for the alpha argument for add and sub
    self.assertTrue(str(traced.graph_for(x)).count(": int = prim::Constant") == 5)

@suppress_warnings
def test_constant(self):
    # A constant tensor baked into the trace is reused for new inputs.
    x = torch.randn(2, 2, requires_grad=True)

    def f(x):
        return x.matmul(torch.diag(torch.tensor([2.0, 2.0])))

    self.checkTrace(f, (x,), (torch.ones(2, 2, requires_grad=True),))

def test_wrapped_number(self):
    # Scalar's get converted to 'wrapped' tensors of default tensor type.
    # Wrapped tensors behave differently in certain promotion operations:
    # float_tensor * double -> float but wrapped_float * double -> double.
    # This can cause issues in check-trace if not handled correctly in
    # `aten::isclose()`.
    def foobar():
        x = -10000.0
        result = x * torch.ones(1, dtype=torch.float)
        return result

    scripted = torch.jit.trace(foobar, (), check_trace=True)
# In-place ops applied to a clone must appear as aten::add_ nodes in the
# graph (not be folded away by DCE).
def test_inplace_transplant(self):
    x = torch.tensor([0.0], requires_grad=True)

    def fn(x):
        y = x.clone()
        y.add_(2)
        y.add_(3)
        return y

    g, _ = torch.jit._get_trace_graph(fn, (x,))
    self.run_pass("dce", g)
    FileCheck().check_count("aten::clone", 1, exactly=True).check_count(
        "aten::add_", 2, exactly=True
    ).check_next("return").run(str(g))
    self.assertExportImport(g, (x,))

# When tracing with _force_outplace, every node should carry an "inplace"
# attribute reflecting whether the original op mutated its input.
def test_inplace_flags(self):
    class InplaceFn(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.mark_dirty(x)
            return x.add_(1)

        @staticmethod
        def backward(ctx, go):
            return go

    class RegularFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x.add(1)

        @staticmethod
        def backward(ctx, go):
            return go

    x = torch.tensor([0.0], requires_grad=True)

    def fn(x):
        y = RegularFn.apply(x)
        y = InplaceFn.apply(y)
        y = InplaceFn.apply(y)
        y = RegularFn.apply(y)
        return y

    trace_graph, _ = torch.jit._get_trace_graph(fn, (x,), _force_outplace=True)
    self.run_pass("dce", trace_graph)
    ops = list(trace_graph.nodes())
    for op in ops:
        self.assertTrue(op.hasAttribute("inplace"))
    inplace_flags = [False, True, True, False]
    for op, is_inplace in zip(ops, inplace_flags):
        self.assertEqual(op.i("inplace"), is_inplace)

# A custom autograd.Function that mutates its input cannot be traced with
# _force_outplace; running the trace must raise.
def test_inplace_check(self):
    class MyInplaceFn(Function):
        @staticmethod
        def forward(self, x):
            x.add_(1)
            self.mark_dirty(x)
            return x

        @staticmethod
        def backward(self, grad):
            return grad

    def fn(x):
        return MyInplaceFn.apply(x)

    x = torch.randn(5, 5)
    ge = torch.jit.trace(fn, (x,), _force_outplace=True, check_trace=False)
    with self.assertRaisesRegex(RuntimeError, "inplace MyInplaceFn"):
        ge(x)

# fill_ on an uninitialized tensor must still give correct results when the
# tracer rewrites it to an out-of-place op.
def test_force_outplace_check_fill(self):
    def f(x):
        return torch.empty(x.shape).fill_(7)

    x = torch.randn(10, 15)
    ft = torch.jit.trace(f, x, _force_outplace=True)
    self.assertEqual(f(x), ft(x))

# Same as above but for zero_().
def test_force_outplace_check_zero(self):
    def f(x):
        return torch.empty(x.shape).zero_()

    x = torch.randn(10, 15)
    ft = torch.jit.trace(f, x, _force_outplace=True)
    self.assertEqual(f(x), ft(x))
# Helper: traces a view built from x.shape/x.size and checks the trace
# generalizes to a differently-shaped input (sizes recorded dynamically).
def do_trace_size(self, requires_grad):
    def fn(x):
        return x.view(x.shape[1] * 2, x.size(0), 2)

    x = torch.randn(5, 2, 4, requires_grad=requires_grad)
    y = torch.randn(4, 8, 4, requires_grad=requires_grad)

    # Check that it behaves as expected
    traced_fn = torch.jit.trace(fn, x)
    self.assertEqual(traced_fn(y), fn(y))
    self.assertEqual(traced_fn(x), fn(x))

def test_trace_size(self):
    self.do_trace_size(False)

# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_size_with_grad(self):
    self.do_trace_size(True)

# numel() must be recorded dynamically so the trace works on new shapes.
def test_trace_numel(self):
    def fn(x):
        return x.numel()

    x = torch.randn(2, 3, 4)
    y = torch.randn(4, 5, 6)

    traced_fn = torch.jit.trace(fn, x)
    self.assertEqual(traced_fn(y), fn(y))
    self.assertEqual(traced_fn(x), fn(x))

# Helper: torch.arange driven by input shapes must not bake the example
# input's sizes into the trace as constants.
def do_trace_arange(self, requires_grad):
    def arange(x):
        return torch.arange(x.shape[0])

    def arange_scalar(x):
        return torch.arange(12)

    def arange_start_end(x):
        return torch.arange(start=x.shape[0], end=x.shape[0] + 5)

    x = torch.randn(5, 3, 2, requires_grad=requires_grad)
    y = torch.randn(8, 2, 4, requires_grad=requires_grad)

    # Check that it behaves as expected
    traced_arange = torch.jit.trace(arange, x)
    self.assertEqual(traced_arange(y), arange(y))
    self.assertEqual(traced_arange(x), arange(x))

    traced_arange_scalar = torch.jit.trace(arange_scalar, x)
    self.assertEqual(traced_arange_scalar(y), arange_scalar(y))
    self.assertEqual(traced_arange_scalar(x), arange_scalar(x))

    traced_arange_start_end = torch.jit.trace(arange_start_end, x)
    self.assertEqual(traced_arange_start_end(y), arange_start_end(y))
    self.assertEqual(traced_arange_start_end(x), arange_start_end(x))

def test_trace_arange(self):
    self.do_trace_arange(False)

# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_arange_with_grad(self):
    self.do_trace_arange(True)
# Test that a trace of torch.full(x.shape) doesn't store the shape as a constant
def test_trace_full_dynamic_shape(self):
    def full_with_shape_like(x):
        return torch.full(x.shape, 2.0)

    x = torch.randn(3, 4)
    ge = torch.jit.trace(full_with_shape_like, example_inputs=x)
    y = torch.randn(2, 7)
    self.assertEqual(ge(y).shape, y.shape)
    self.assertEqual(ge(x).shape, x.shape)

# Test that the trace of setitem doesn't store shapes as constants
# Fix https://github.com/pytorch/pytorch/issues/43548
def test_trace_slice_setitem_dynamic_shape(self):
    def slice_setitem(x, y):
        x[:, 2] = y + 1
        return x

    x = torch.randn(3, 4)
    traced = torch.jit.trace(slice_setitem, (x, x[:, 0]))
    x = torch.randn(10, 5)
    self.assertEqual(traced(x.clone(), x[:, 0]), slice_setitem(x.clone(), x[:, 0]))

# Suppression: we are intentionally slicing a tensor, we don't care that it
# will be constantified
@suppress_warnings
def do_trace_slice(self, requires_grad):
    # Helper: slices driven by x.size() must replay correctly on a tensor of
    # different shape.
    def slice(x):
        results = []
        for i in range(4):
            results.append(x[: x.size(0) - i, i : x.size(2), i:3])
        return tuple(results)

    def slice_select(x):
        results = []
        for i in range(4):
            results.append(x[:, i:, x.size(2) - 5])
        return tuple(results)

    x = torch.randn(5, 6, 7, requires_grad=requires_grad)
    y = torch.randn(7, 8, 9, requires_grad=requires_grad)

    # Check that it behaves as expected
    traced_slice = torch.jit.trace(slice, x)
    self.assertEqual(traced_slice(y), slice(y))
    self.assertEqual(traced_slice(x), slice(x))

    traced_slice_select = torch.jit.trace(slice_select, x)
    self.assertEqual(traced_slice_select(y), slice_select(y))
    self.assertEqual(traced_slice_select(x), slice_select(x))

def test_trace_slice(self):
    self.do_trace_slice(False)

# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_slice_with_grad(self):
    self.do_trace_slice(True)
# Every dtype/device cast variant should be recorded as exactly one
# aten::to node in the traced graph.
def test_trace_casts(self):
    casts = [
        lambda x: x.byte(),
        lambda x: x.float(),
        lambda x: x.cpu(),
        lambda x: x.to(device="cpu"),
        lambda x: x.to(dtype=torch.int64),
        lambda x: x.to(device="cpu", dtype=torch.float),
        lambda x: x.to(x),
    ]

    def assertContainsCast(trace):
        # exactly one aten::to node per cast
        self.assertEqual(
            sum(n.kind() == "aten::to" for n in trace.graph.nodes()), 1
        )

    for cast in casts:
        trace = torch.jit.trace(cast, torch.randn(2, 2))
        assertContainsCast(trace)
        x = torch.randn(2, 2)
        self.assertEqual(trace(x), cast(x))

    # tensor-to-tensor cast (x.to(y)) gets the same treatment
    def to_tensor(x, y):
        return x.to(y)

    to_tensor_trace = torch.jit.trace(
        to_tensor, (torch.randn(2, 2), torch.randn(1, 8))
    )
    assertContainsCast(to_tensor_trace)
    x, y = torch.randn(2, 2), torch.randn(1, 10)
    self.assertEqual(to_tensor_trace(x, y), to_tensor(x, y))
@skipIfCompiledWithoutNumpy
@skipIfCrossRef
def test_trace_warn(self):
    # Each Python-side conversion of a traced tensor (int(), bool, float(),
    # tolist, numpy, iteration) must emit a TracerWarning with a message
    # naming the conversion.
    def fn(x):
        int(x)  # Warning 1.
        y = x * 1
        if y:  # Warning 2.
            pass
        q = [x, x * 4]
        z = q[y]
        float(z)  # Warning 3.
        z.tolist()  # Warning 4.
        z.numpy()  # Warning 5.
        for _ in torch.ones(4, 4):  # Warning 6.
            pass
        return z + 4

    with warnings.catch_warnings(record=True) as warns:
        traced_fn = torch.jit.trace(fn, torch.tensor([1]))
    for warn in warns:
        self.assertIs(warn.category, torch.jit.TracerWarning)
    warns = [str(w.message) for w in warns]
    self.assertIn("a Python integer", warns[0])
    self.assertIn("a Python boolean", warns[1])
    self.assertIn("a Python float", warns[2])
    self.assertIn("a Python list", warns[3])
    self.assertIn("a NumPy array", warns[4])
    self.assertIn("Iterating over", warns[5])
# Nested tuple outputs must preserve their structure in the traced graph.
def test_trace_tuple(self):
    def fn(x, y):
        return x, (x * y[1], x * y[0])

    x, y = torch.randn(2, 2), (torch.ones(2, 2), torch.randn(2, 2))
    traced_fn = torch.jit.trace(fn, (x, y))
    self.assertEqual(traced_fn(x, y), fn(x, y))
    # should be a tuple nested within another tuple
    FileCheck().check_count("prim::TupleConstruct", 2, exactly=True).check_next(
        "return"
    ).run(str(traced_fn.graph))
    self.assertExportImport(traced_fn.graph, (x, y))

# Random ops trace correctly: with the RNG state forked and restored, the
# traced function reproduces the eager result.
def test_trace_random(self):
    def f(mean, std):
        return torch.normal(mean, std)

    traced = torch.jit.trace(
        f, (torch.zeros(2, 3), torch.ones(2, 3)), check_trace=False
    )
    mean, std = torch.zeros(5, 5), torch.ones(5, 5)
    with torch.random.fork_rng(devices=[]):
        output = f(mean, std)
    traced_output = traced(mean, std)
    self.assertEqual(output, traced_output)
# torch.ones with various dtype/device/out kwargs must be recorded as a
# factory op in the graph, not folded into a constant.
def test_trace_tensor_factory(self):
    def run(**kwargs):
        inputs_require_grads = kwargs.pop("inputs_require_grads", True)

        def fn(x):
            return x + torch.ones(2, 3, **kwargs)

        input_kwargs = kwargs.copy()
        if "out" in input_kwargs:
            del input_kwargs["out"]
        input = torch.ones(2, 3, **input_kwargs)
        self.checkTrace(fn, (input,), inputs_require_grads=inputs_require_grads)
        # check we recorded 'ones' and did not just record a constant
        tfn = torch.jit.trace(fn, input)
        self.assertTrue("ones" in str(tfn.graph))

    run()
    run(dtype=torch.int, inputs_require_grads=False)
    run(out=torch.tensor([]))
    if RUN_CUDA:
        run(device="cuda:0")
    if RUN_CUDA_MULTI_GPU:
        run(device="cuda:1")
# x[0] = y style indexed assignment must be traceable (on a clone).
def test_trace_indexed_assignment(self):
    def stuff(x, y):
        x = x.clone()
        x[0] = y
        return x

    example = torch.rand(3, 4)
    self.checkTrace(stuff, (example, example[0] + 1))

# TODO: implement
@unittest.expectedFailure
def test_output_unflatten(self):
    """Check that outputs of traced functions retain the original structure and nesting"""

    def fn(x):
        return (
            x * 2,
            (
                x**2,
                x + 4,
                (x + 2,),
            ),
            x * 4,
        )

    self.checkTrace(fn, (torch.randn(2, 2),))

def test_input_flatten(self):
    """Check that inputs to traced functions are flattened"""

    def fn(x, t):
        y, z = t
        return x * y * z

    inputs = (torch.randn(1), (torch.randn(1), torch.randn(1)))
    self.checkTrace(fn, inputs)
# Tracing with an empty dict input is rejected.
def test_input_dict_empty(self):
    def test(d):
        pass

    with self.assertRaises(RuntimeError):
        self.checkTrace(test, {})

def test_input_dict_remembers_keys(self):
    """Check that the trace remembers which keys were in a dict input"""

    class TestModule(torch.nn.Module):
        def forward(self, dict_input):
            return dict_input["x"]

    input_1 = {"x": torch.tensor(1)}
    m = TestModule()
    m_traced = torch.jit.trace(m, (input_1,))
    self.assertEqual(m_traced(input_1), torch.tensor(1))

    # should work to change the values and not the keys
    input_same_key_different_value = {"x": torch.tensor(2)}
    self.assertEqual(m_traced(input_same_key_different_value), torch.tensor(2))

    # error to use something that doesn't have `x`
    input_different_key = {"y": torch.tensor(3)}
    with self.assertRaises(RuntimeError):
        m_traced(input_different_key)

    # it's okay to have additional elements in the dictionary, so long as 'x' is there
    input_additional_key = {"x": torch.tensor(4), "y": torch.tensor(3)}
    self.assertEqual(m_traced(input_additional_key), torch.tensor(4))

def test_input_dict_insertion_order(self):
    """Check that dictionary access doesn't care about insertion order"""

    class TestModule(torch.nn.Module):
        def forward(self, dict_input):
            return dict_input["x"], dict_input["y"]

    input_x_then_y = {}
    input_x_then_y["x"] = torch.tensor(1)
    input_x_then_y["y"] = torch.tensor(2)
    m = TestModule()
    m_traced = torch.jit.trace(m, (input_x_then_y,))
    self.assertEqual(m_traced(input_x_then_y), (torch.tensor(1), torch.tensor(2)))
    input_y_then_x = {}
    input_y_then_x["y"] = torch.tensor(4)
    input_y_then_x["x"] = torch.tensor(3)
    self.assertEqual(m_traced(input_y_then_x), (torch.tensor(3), torch.tensor(4)))

# Nested dict inputs (dict values that are themselves dicts) trace by key.
def test_input_dict_recursive(self):
    class TestModule(torch.nn.Module):
        def forward(self, dict_input):
            return dict_input["x"][1]

    input_1 = {"x": {1: torch.tensor(1)}}
    m = TestModule()
    m_traced = torch.jit.trace(m, (input_1,))
    input_2 = {"x": {1: torch.tensor(2)}}
    self.assertEqual(m_traced(input_2), torch.tensor(2))

# In-place mutation of a dict value inside the traced fn.
def test_input_dict_checkTrace_mut(self):
    def test(d):
        d["x"].tanh_()
        return d["x"]

    inputs = {"x": torch.rand(3, 4), "y": torch.rand(3, 4)}
    self.checkTrace(test, (inputs,), inputs_require_grads=False)

# Dict values of differing dtypes must be handled (type unification).
def test_input_dict_unify(self):
    def test(d):
        return d["int"], d["float"]

    inputs = {
        "int": torch.ones((2, 2), dtype=torch.int32),
        "float": torch.ones((2, 2), dtype=torch.float32),
    }
    self.checkTrace(test, (inputs,), inputs_require_grads=False)

def test_input_tuple_of_dicts(self):
    def test(t):
        d = t[0]
        return d["x"]["y"]

    inputs = {"x": {"y": torch.rand(2, 3)}}
    self.checkTrace(test, ((inputs, inputs),), allow_unused=True)

def test_input_dict_of_dicts(self):
    def test(d):
        return d["x"]["y"]

    # second entry forces unification of the nested dict value types
    nested_input = {"y": torch.rand(2, 3)}
    unified_nested = {"y": torch.rand(3, 2)}
    inputs = {"x": nested_input, "force_unify": unified_nested}
    self.checkTrace(test, (inputs,), allow_unused=True)

def test_input_dict_of_lists(self):
    def test(d):
        return d["x"][0]

    inputs = {"x": [torch.rand(3, 2)]}
    self.checkTrace(test, (inputs,))

# A top-level list of tensors is flattened into individual inputs.
def test_input_list_toplevel_flatten(self):
    def test(t1, t2):
        return torch.add(t1, t2)

    inputs = [torch.ones(2, 2), torch.rand(2, 2)]
    self.checkTrace(test, inputs)

def test_input_list_toplevel_flatten_direct(self):
    class Test(torch.nn.Module):
        def forward(self, t1, t2):
            return torch.add(t1, t2)

    inputs = [torch.ones(2, 2), torch.rand(2, 2)]
    torch.jit.trace(Test(), inputs)

def test_input_list_of_tuples(self):
    def test(l):
        return l[0][0]

    inputs = [(torch.ones(2, 2),)]
    self.checkTrace(test, (inputs,))

# An empty list inside a dict input cannot be typed and must error.
def test_input_dict_empty_list(self):
    def test(d):
        pass

    inputs = {1: []}
    with self.assertRaisesRegex(RuntimeError, "List trace"):
        self.checkTrace(test, (inputs,))

# Mixed element types inside a list input must be rejected as inconsistent.
def test_input_list_mixed_type(self):
    def test(d):
        pass

    inputs = [torch.rand(2, 3), (torch.ones(2), torch.ones(2))]
    with self.assertRaisesRegex(RuntimeError, "consistent"):
        self.checkTrace(test, (inputs,))
# A Conv2d module traces to a graph whose replay matches the eager output.
def test_conv(self):
    x = torch.ones(20, 16, 50, 40)
    g, outputs, inputs = torch.jit._get_trace_graph(
        nn.Conv2d(16, 13, 3, bias=False), x, return_inputs=True
    )
    m = self.createFunctionFromGraph(g)
    self.assertEqual(outputs, m(*inputs))

# max_pool2d should stay a single fused aten::max_pool2d call in the graph.
def test_max_pool(self):
    x = torch.rand(20, 16, 10, 10)

    def max_pool2d(x):
        return F.max_pool2d(x, 2) + 2

    trace = torch.jit.trace(max_pool2d, (x))
    graph = trace.graph_for(x)
    FileCheck().check("aten::max_pool2d(").run(graph)
    self.assertEqual(max_pool2d(x), trace(x))

# In-place functional ops (threshold with inplace=True) are recorded as the
# underscore variant and survive export/import.
def test_nested_inplace(self):
    x = torch.randn(2, 2)
    g, outputs, inputs = torch.jit._get_trace_graph(
        lambda x: F.threshold(x, 0, 0, inplace=True), (x,), return_inputs=True
    )
    m = self.createFunctionFromGraph(g)
    self.assertEqual(outputs, m(*inputs))
    FileCheck().check("threshold_").run(str(g))
    self.assertExportImport(g, (x,))

# Passing the same tensor for two parameters must still yield two distinct
# graph inputs.
def test_repeated_input(self):
    def fn(a, b):
        return a + b

    ge = self.checkTrace(fn, [torch.randn(2, 2)] * 2)
    inputs = set(ge.graph.inputs())
    # three instead of 2 because the export/import in checkTrace adds a
    # `self` module argument
    self.assertTrue(len(inputs) == 3)

# Returning the same value twice should reference one graph value in the
# output tuple, not duplicate the computation.
def test_repeated_output(self):
    def fn(a, b):
        z = a + b
        return z, z

    ge = self.checkTrace(fn, [torch.randn(2, 2) for _ in range(2)])
    tuple_output = list(ge.graph.outputs())[0]
    tuple_inputs = list(tuple_output.node().inputs())
    self.assertTrue(tuple_inputs[0] == tuple_inputs[1])
# copy_ into a fresh tensor traces and round-trips through export/import.
def test_inplace_copy(self):
    x = torch.randn(4, 4, requires_grad=True)

    def f(x):
        out = torch.zeros(x.size())
        out.copy_(x)
        return out

    g, outputs, inputs = torch.jit._get_trace_graph(f, (x,), return_inputs=True)
    self.run_pass("dce", g)
    m = self.createFunctionFromGraph(g)
    self.assertEqual(outputs, m(*inputs))
    self.assertExportImport(g, (x,))

# With _force_outplace, copy_ is rewritten via expand_as in the graph.
def test_inplace_copy_force_outplace(self):
    x = torch.randn(4, 4, requires_grad=True)

    def f(x):
        out = torch.zeros(x.size())
        out.copy_(x)
        return out

    g, outputs, inputs = torch.jit._get_trace_graph(
        f, (x,), return_inputs=True, _force_outplace=True
    )
    self.run_pass("dce", g)
    m = self.createFunctionFromGraph(g)
    self.assertEqual(outputs, m(*inputs))
    self.assertExportImport(g, (x,))
    FileCheck().check("expand_as").run(str(g))

# Two attributes aliasing one Parameter must produce a single graph input
# (plus the module self input), used for both reads.
def test_shared_param(self):
    class MyModule(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.b = self.a = nn.Parameter(torch.randn(2, 2))

        def forward(self, x):
            return x * self.a + self.b

    m = MyModule()
    g, _ = torch.jit._get_trace_graph(m, (torch.randn(2, 2),))
    self.run_pass("dce", g)
    self.assertEqual(len(list(g.inputs())), 2)
    FileCheck().check("mul").check("add").run(str(g))
# Shared driver for graph-executor tests: exercises checkTrace over a grab
# bag of small functions, optionally on CUDA and with optimization enabled.
def run_ge_tests(self, optimize, use_cuda):
    with enable_profiling_mode_for_profiling_tests():
        with torch.jit.optimized_execution(optimize):

            def rand(*args):
                t = torch.rand(*args).float()
                if use_cuda:
                    t = t.cuda()
                return t

            self.checkTrace(
                lambda a, b: a * b + b, [rand(1), rand(1)], [rand(2, 3), rand(2, 3)]
            )
            # trivial identity
            self.checkTrace(lambda a, b: (b, a), [rand(1), rand(1)])

            def foo(a):
                t = a * a
                return t * t, 4 * t

            self.checkTrace(foo, [rand(1)])
            # unused input
            self.checkTrace(
                lambda a, b: a * a, [rand(1), rand(1)], allow_unused=True
            )
            # test outputs that do not get used in grad
            self.checkTrace(foo, [rand(1)], drop=1)
            # test autograd fallback
            self.checkTrace(
                lambda a, b: a * b / (a - 2 * b) + b, [rand(1), rand(1)]
            )

def test_ge_unoptimized(self):
    self.run_ge_tests(False, False)

@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_ge_optimized(self):
    with enable_profiling_mode_for_profiling_tests():
        self.run_ge_tests(True, False)

@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_ge_cuda(self):
    self.run_ge_tests(True, True)

# more manual test of graph executor that can be used as a scratchpad
def test_ge(self):
    def foo(a, b):
        return a * b / (a - b) + b

    V = Variable
    a, b = V(torch.rand(1)), V(torch.rand(1))
    ge = torch.jit.trace(foo, (a, b))
    a, b = (
        V(torch.rand(1), requires_grad=True),
        V(torch.rand(1), requires_grad=True),
    )
    (r,) = ge(a, b)
    da, db = torch.autograd.grad(r + 3, [a, b], create_graph=True)
    l2 = da * db + db * db
    g2result = torch.autograd.grad(l2, [da, db])

    # compare first- and second-order gradients against eager execution
    r = foo(a, b)
    da2, db2 = torch.autograd.grad(r + 3, [a, b], create_graph=True)
    self.assertEqual(da, da2)
    self.assertEqual(db, db2)
    l3 = da2 * db2 + db2 * db2
    g2result2 = torch.autograd.grad(l3, [da2, db2])
    self.assertEqual(g2result, g2result2)
# The @_trace decorator should produce a callable equivalent to the
# original function.
def test_trace_annotation(self):
    @_trace(torch.rand(1))
    def foo(a):
        return a + a + a

    x = torch.randn(5, 5)
    self.assertEqual(foo(x), x + x + x)

@unittest.skipIf(not RUN_CUDA, "calls .cuda()")
# By default, on Ampere or later GPUs, nn.Linear computes float tensors at TF32 precision.
# We want float tensors to be computed at full precision in order to use the default precision
@with_tf32_off
def test_traced_module_cuda(self):
    # End-to-end checks on a traced nn.Module: attribute access rules,
    # device/dtype casts, and state_dict round-tripping.
    class Model(nn.Module):
        def __init__(self, num_features, num_layers):
            super().__init__()
            self.num_layers = num_layers
            layers = [
                [nn.Linear(num_features, num_features), nn.Sigmoid()]
                for _ in range(num_layers)
            ]
            self.submodule = nn.Sequential(*chain(*layers))

        def forward(self, x):
            for i in range(self.num_layers):
                x = self.submodule[i](x) + x
            return x

    model = Model(5, 3)
    x = torch.randn(2, 5)
    traced_model = torch.jit.trace(model, x)

    # We're missing some attributes these modules had initially. Make sure we can
    # still get the __repr__()
    model.__repr__()

    # XXX: indexing sequentials is broken
    linear_submodule = next(iter(traced_model.submodule._modules.values()))

    # All attributes that aren't parameters should raise
    with self.assertRaises(AttributeError):
        linear_submodule.in_features
    linear_submodule.weight
    linear_submodule.weight = nn.Parameter(
        torch.randn(linear_submodule.weight.shape)
    )
    with self.assertRaises(RuntimeError):
        del linear_submodule.weight

    # Submodules can't be called
    with self.assertRaises(RuntimeError):
        linear_submodule(x)

    # Type casts
    linear_submodule.cuda()
    traced_model.float().cuda()
    cuda_out = traced_model(x.float().cuda())
    traced_model.cpu()
    cpu_out = traced_model(x.float())
    self.assertEqual(cpu_out, cuda_out)
    traced_model.to("cuda")
    cuda_out = traced_model(x.float().cuda())
    traced_model.to("cpu")
    cpu_out = traced_model(x.float())
    self.assertEqual(cpu_out, cuda_out)
    traced_model.to(torch.get_default_dtype())

    # state_dict + load_state_dict
    state = {k: v.clone() for k, v in traced_model.state_dict().items()}
    new_state = {k: v.clone().fill_(1) for k, v in state.items()}
    out = traced_model(x)
    traced_model.load_state_dict(new_state)
    out_ones = traced_model(x)
    traced_model.load_state_dict(state)
    out_state = traced_model(x)
    self.assertEqual(out, out_state)
    self.assertNotEqual(out, out_ones)

@unittest.skipIf(not RUN_CUDA, "uses cuda")
def test_type_same_device(self):
    # Casting to the same-device dtype should not bake a device node into
    # the generated code.
    class Model(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.dtype = torch.float16

        def forward(self, x=None):
            h = x.type(self.dtype)
            return h

    a = Model()
    b = torch.jit.trace(
        a, example_inputs=(torch.ones([1], device=torch.device("cuda")),)
    )
    FileCheck().check_not("device").run(b.code)
# Export/import must not reorder the graph: outputs and gradients of the
# round-tripped module must match the original trace exactly.
def test_export_no_reorder(self):
    def func(a, b):
        return a * b / (a - 2 * b) + b

    recording_inputs = [
        torch.tensor(
            [0.55619788169860839844], dtype=torch.float32, requires_grad=True
        ),
        torch.tensor(
            [0.25947844982147216797], dtype=torch.float32, requires_grad=True
        ),
    ]

    ge1 = torch.jit.trace(func, recording_inputs)
    ge2 = self.getExportImportCopy(ge1)

    outputs_ge1 = ge1(*recording_inputs)
    outputs_ge2 = ge2(*recording_inputs)

    grad_ge1 = torch.autograd.grad(outputs_ge1, recording_inputs)
    grad_ge2 = torch.autograd.grad(outputs_ge2, recording_inputs)
    self.assertTrue(outputs_ge1 == outputs_ge2)
    self.assertTrue(grad_ge1 == grad_ge2)

# A custom autograd.Function with a single output can be called from a
# traced function.
def test_python_function(self):
    class MyFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x + 1

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output

    @_trace(torch.zeros(2))
    def fn(x):
        return MyFn.apply(x + 2) + 3

    x = torch.tensor([1.0, 2.0, 3.0])
    y = torch.randn(2, 2, requires_grad=True)
    fn(x)
    fn(y)

# Same, but the Function returns a tuple that the traced fn unpacks.
def test_python_function_tup(self):
    class MyFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x + 1, x - 1

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output

    @_trace(torch.zeros(2))
    def fn(x):
        a, b = MyFn.apply(x + 2)
        return a + b + 3

    x = torch.tensor([1.0, 2.0, 3.0])
    y = torch.randn(2, 2, requires_grad=True)
    fn(x)
    fn(y)
# detach() is recorded in the graph and the traced result is detached
# (no grad_fn, requires_grad False).
def test_trace_detach(self):
    def foo(x, w):
        return torch.matmul(x, w).detach()

    traced = torch.jit.trace(foo, (torch.rand(3, 4), torch.rand(4, 5)))

    FileCheck().check("matmul").check("detach").run(str(traced.graph))
    x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
    traced_result = traced(x, w)
    self.assertEqual(foo(x, w), traced_result)
    self.assertFalse(traced_result.requires_grad)
    self.assertIsNone(traced_result.grad_fn)

def test_trace_detach_redispatch(self):
    def foo(x, w):
        y = torch.matmul(x, w)
        assert y.requires_grad
        y = y.detach()
        # Make sure trace kernel redispatches to the right lower kernel.
        assert not y.requires_grad
        return y

    x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
    # With `check_trace=True` it will run with `@torch.no_grad()` and break assert.
    torch.jit.trace(foo, (x, w), check_trace=False)

# Same as test_trace_detach but for the in-place detach_().
def test_trace_detach_inplace(self):
    def foo(x, w):
        y = torch.matmul(x, w)
        y.detach_()
        return y

    traced = torch.jit.trace(foo, (torch.rand(3, 4), torch.rand(4, 5)))
    FileCheck().check("matmul").check("detach(").run(str(traced.graph))
    x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
    traced_result = traced(x, w)
    self.assertEqual(foo(x, w), traced_result)
    self.assertFalse(traced_result.requires_grad)
    self.assertIsNone(traced_result.grad_fn)

def test_trace_detach_inplace_redispatch(self):
    def foo(x, w):
        y = torch.matmul(x, w)
        assert y.requires_grad
        y.detach_()
        # Make sure trace kernel redispatches to the right lower kernel.
        assert not y.requires_grad
        return y

    x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
    # With `check_trace=True` it will run with `@torch.no_grad()` and break assert.
    torch.jit.trace(foo, (x, w), check_trace=False)
# A slice covering a full dimension must not pin the example's size, so the
# trace still works on a larger input.
def test_trace_slice_full_dim(self):
    def foo(x):
        return x[0:5, 0] + 1.0

    traced = torch.jit.trace(foo, (torch.rand(5, 4),))
    test_x = torch.rand(6, 3)
    self.assertEqual(foo(test_x), traced(test_x))

# Dict inputs passed between submodules trace correctly.
def test_trace_dict_input(self):
    class Bar(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.foo = Foo()

        def forward(self, a, b):
            return self.foo({"a": a, "b": b})["a"]

    class Foo(torch.nn.Module):
        def forward(self, x):
            return {"a": x["a"] * x["b"]}

    x = (torch.rand(3), torch.rand(3))
    model = Bar()
    self.checkTrace(model, x)

# Dict outputs are only allowed with strict=False; both str and tensor keys
# are supported.
def test_trace_dict_output(self):
    class TraceDictStrTensor(torch.nn.Module):
        def forward(self, a, b):
            return {"a": a, "b": b}

    class TraceDictTensorTensor(torch.nn.Module):
        def forward(self, a, b):
            return {a: b, b: a}

    x = (torch.rand(3), torch.rand(3))
    with self.assertRaisesRegex(RuntimeError, r"Encountering a dict at the output"):
        torch.jit.trace(TraceDictStrTensor(), x)

    traced_dict_str_mod = torch.jit.trace(TraceDictStrTensor(), x, strict=False)
    self.assertEqual(traced_dict_str_mod(*x), {"a": x[0], "b": x[1]})

    traced_dict_tensor_mod = torch.jit.trace(
        TraceDictTensorTensor(), x, strict=False
    )
    self.assertEqual(traced_dict_tensor_mod(*x), {x[0]: x[1], x[1]: x[0]})
# Returning a tensor list warns under strict tracing but succeeds with
# strict=False, matching eager output.
def test_trace_with_tensor_list_output(self):
    def f():
        return [torch.zeros(1), torch.zeros(5)]

    with self.assertWarnsRegex(
        torch.jit.TracerWarning, "cause the trace to be incorrect"
    ):
        torch.jit.trace(f, [])
    traced_non_strict_f = torch.jit.trace(f, [], strict=False)
    self.assertEqual(traced_non_strict_f(), f())
def test_trace_with_number_list_output(self):
def f():
return [1, 5]
with self.assertRaisesRegex(
RuntimeError, r"Only tensors.+can be output from traced functions"
):
traced_f = torch.jit.trace(f, [])
def test_trace_with_nested_tensor_list_output(self):
def f():
return [[torch.zeros(1)], [torch.zeros(5)]]
with self.assertRaisesRegex(
RuntimeError, r"Only tensors.+can be output from traced functions"
):
traced_f = torch.jit.trace(f, [])
# Nested tensors built inside a scripted helper can be returned through a
# trace and generalize to new lengths/offsets.
def test_trace_with_nested_strided_tensor_output(self):
    @torch.jit.script
    def nt_construct(values, kv_lengths):
        kv_lengths_list: List[int] = kv_lengths.tolist()
        return torch._nested_tensor_from_tensor_list(
            list(values.split(kv_lengths_list, dim=0)), None, None, None, None
        )

    def f(x, offsets):
        kv_lengths = offsets[1:] - offsets[:-1]
        return nt_construct(x, kv_lengths).cos()

    x = torch.rand(5, 4)
    offsets = torch.tensor([0, 2, 5])
    ref = f(x, offsets)
    f_t = torch.jit.trace(f, (x, offsets))
    res = f_t(x, offsets)
    self.assertEqual(ref, res)
    x2 = torch.rand((8, 4))
    offsets2 = torch.tensor([0, 2, 4, 8])
    self.assertEqual(f(x2, offsets2), f_t(x2, offsets2))

# Explicit Variable() construction inside a traced fn is a no-op wrapper.
def test_trace_variable_instantiation(self):
    def random_foo(x):
        return Variable(Variable(x) + 1.0)

    random_foo_traced = torch.jit.trace(random_foo, (torch.rand(3, 4),))

    x = torch.rand(5, 6)
    self.assertEqual(random_foo(x), random_foo_traced(x))

# Slicing the output of a traced fn from script requires the traced return
# to carry a complete type.
def test_trace_slice_expr_complete_type(self):
    def random_foo(x):
        return x + 1.0

    random_foo_traced = torch.jit.trace(random_foo, (torch.rand(3, 4),))

    @torch.jit.script
    def random_bar(x):
        return random_foo_traced(x)[0:1]

    x = torch.rand(3, 4)
    self.assertEqual(random_bar(x), (x + 1)[0:1])
def test_trace_inline_shape(self):
    # testing peephole optimization of size is turned into a constant
    # in script fn
    @torch.jit.script
    def tensor_size(x: torch.Tensor) -> torch.Tensor:
        return torch.tensor([x.size()[0]])

    self.assertEqual(
        tensor_size(
            torch.rand(
                15,
            )
        ),
        torch.tensor([15]),
    )

    # the trace must still report the runtime size, not the example's
    traced_tensor_size = torch.jit.trace(
        tensor_size,
        torch.rand(
            7,
        ),
    )

    self.assertEqual(
        traced_tensor_size(
            torch.rand(
                15,
            )
        ),
        torch.tensor([15]),
    )

    @torch.jit.script
    def use_device(x):
        return torch.zeros_like(x, device=x.device)

    def foo(x):
        return use_device(x)

    traced_tensor_size = torch.jit.trace(
        foo,
        torch.rand(
            7,
        ),
    )
    self.run_pass("inline", traced_tensor_size.graph)
    # device must be read dynamically via prim::device, not constant-folded
    FileCheck().check("prim::device").run(traced_tensor_size.graph)
# A traced function saved to disk and re-loaded must produce the same output.
def test_trace_save(self):
    def fn(x):
        return x + 2

    def check(func):
        with TemporaryFileName() as fname:
            func.save(fname)
            loaded = torch.jit.load(fname)
            input = torch.randn(2, 2)
            self.assertEqual(func(input), loaded(input))

    out = torch.jit.trace(fn, (torch.ones(2, 2),))
    check(out)

# Tracing an op with an optional dtype argument (arange's default).
# NOTE(review): "optioanl" is a typo for "optional" in the test name,
# kept as-is to preserve the public test identifier.
def test_trace_optioanl_dtype(self):
    class Test(torch.nn.Module):
        def forward(self):
            return torch.arange(5)

    traced = torch.jit.trace(Test(), ())
    torch.allclose(traced(), Test()())

# A saved/loaded traced module must support copy.copy and copy.deepcopy.
def test_trace_save_load_copy(self):
    class Test(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 3, 3)

        def forward(self, x):
            return self.conv(x)

    traced = torch.jit.trace(Test(), torch.rand(1, 3, 224, 224))
    buffer = io.BytesIO()
    torch.jit.save(traced, buffer)
    buffer.seek(0)
    loaded = torch.jit.load(buffer)
    # should work
    copy.copy(loaded)
    copy.deepcopy(loaded)
# @torch.jit.export methods (__getstate__/__setstate__) must be compiled
# onto the traced module and survive export/import.
def test_trace_export_fns(self):
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.a = 3

        @torch.jit.export
        def __getstate__(self):
            return (3, self.training)

        @torch.jit.export
        def __setstate__(self, state):
            self.a = state[0]
            self.training = state[1]

        def forward(self, x):
            return x + self.a

    f = Foo()

    traced = torch.jit.trace(f, (torch.rand(3, 4),))
    expected_names = ["__getstate__", "__setstate__"]

    def check(mod):
        self.assertTrue(
            all(name in mod._c._method_names() for name in expected_names)
        )

    check(traced)

    imported = self.getExportImportCopy(traced)
    check(imported)

# Same as above, but the exported methods live on a submodule that is traced
# recursively.
def test_trace_export_fns_recursive(self):
    class Foo(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.a = 3

        @torch.jit.export
        def __getstate__(self):
            return (3, self.training)

        @torch.jit.export
        def __setstate__(self, state):
            self.a = state[0]
            self.training = state[1]

        def forward(self, x):
            return x + self.a

    class Wrapper(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.foo = Foo()

        def forward(self, x):
            return self.foo(x)

    f = Wrapper()

    traced = torch.jit.trace(f, (torch.rand(3, 4),))
    expected_names = ["__getstate__", "__setstate__"]

    def check(mod):
        self.assertTrue(
            all(name in mod._c._method_names() for name in expected_names)
        )

    check(traced.foo)

    imported = self.getExportImportCopy(traced)
    check(imported.foo)

    # Note that Bar's forward can only be traced, but not scripted
    class Bar(nn.Module):
        @torch.jit.export
        def addTwo(self, x):
            return x + 2

        def forward(self, input):
            return (lambda a: a + 1)(input)  # noqa: PLC3002

    # When tracing Bar as a submodule, we only want to script the
    # exported methods, and we want to keep the forwards still
    # being traced.
    class WrapperExports(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.bar = Bar()

        @torch.jit.export
        def addOne(self, x):
            return x + 1

        def forward(self, x):
            return self.bar(x)

    f = WrapperExports()

    traced = torch.jit.trace(f, (torch.rand(3, 4),))
    expected_names = ["addOne"]
    check(traced)
# A custom autograd.Function used inside a nested traced module hierarchy
# must trace without error.
def test_trace_autograd_function(self):
    class TestFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            return torch.neg(input)

        @staticmethod
        def backward(ctx, grad_output):
            return torch.neg(grad_output)

    class TracedModule(torch.nn.Module):
        def forward(self, x):
            return torch.relu(TestFunc.apply(x))

    class Wrapper(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.tm = TracedModule()

        def forward(self, x):
            return self.tm(x)

    traced = torch.jit.trace(Wrapper(), (torch.rand(3, 4),))
def test_trace_multi_output_function(self):
# An autograd.Function with two outputs.
# It swaps inputs so we can check if shape
# handling is correct in TorchScript.
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return y, x
@staticmethod
def backward(ctx, du, dv):
return dv, du
class Bar(torch.nn.Module):
def forward(self, x, y):
x = x.relu()
y = y.relu()
z = Foo.apply(x, y)
return z
x = torch.rand(3, 2, dtype=torch.double)
y = torch.rand(1, 2, dtype=torch.double)
# Generate JIT IR.
traced = torch.jit.trace(Bar(), (x, y))
print(traced.graph)
# Expected output schema of the custom autograd.Function.
schema = (
"(Double(1, 2, strides=[2, 1], requires_grad=0, device=cpu), "
"Double(3, 2, strides=[2, 1], requires_grad=0, device=cpu)) "
"= ^Foo"
)
# See if expected schema exists.
FileCheck().check(schema).run(traced.graph)
# Also examine if the graph is runnable and produces
# the right result.
u, v = traced(x, y)
self.assertEqual(u, y)
self.assertEqual(v, x)
# interpolate with a scale_factor must not bake the example's spatial size
# into the trace as a constant.
def test_interpolate_trace(self):
    class test(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.conv = nn.Conv2d(1, 32, kernel_size=3, padding=1)

        def forward(self, x):
            y = self.conv(x)
            w = nn.functional.interpolate(
                y, mode="bilinear", align_corners=False, scale_factor=3
            )
            return w

    f = test()
    # no failure
    g = torch.jit.trace(f, (torch.zeros(1, 1, 28, 28),))
    x = torch.zeros(1, 1, 14, 14)
    # constants not baked in
    self.assertEqual(g(x), f(x))
@_tmp_donotuse_dont_inline_everything
def test_trace_optional(self):
@torch.jit.script
def test(x: Optional[Tensor]):
if x is None:
return torch.zeros(1)
else:
return x
def test_none():
return test(None)
def test_tensor():
return test(torch.zeros(2))
f_none = torch.jit.trace(test_none, ())
self.assertEqual(f_none(), torch.zeros(1))
f_tensor = torch.jit.trace(test_tensor, ())
self.assertEqual(f_tensor(), torch.zeros(2))
graph = f_tensor.graph
FileCheck().check('name="test"').check_next("prim::CallFunction").run(graph)
def test_trace_nested_datatypes(self):
@torch.jit.script
def foo(x):
return [[x + 1, x - 1], [x + 2, x - 2]]
def bar(x):
list_stuff = foo(x)
return list_stuff[0][0], list_stuff[1][1]
traced = torch.jit.trace(bar, torch.rand(3, 4))
x = torch.rand(5, 6)
self.assertEqual(bar(x), traced(x))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_traced_module(self):
@_trace(torch.rand(3, 4))
def traced_fn(x):
return torch.neg(x)
class TracedModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
def forward(self, x):
return traced_fn(torch.mm(x, self.param))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
# Note: neg op from the traced function should be properly inlined
FileCheck().check("aten::mm").check('name="traced_fn"').check_next(
"prim::CallFunction"
).run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_module_from_traced_module(self):
class TracedModule1(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(5, 7))
def forward(self, x):
return torch.mm(x, self.param)
class TracedModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
self.mod = torch.jit.trace(TracedModule1(), torch.rand(3, 5))
def forward(self, x):
return self.mod(torch.mm(x, self.param)) + 1.0
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
FileCheck().check("aten::mm").check("prim::CallMethod").check_same(
"forward"
).check("aten::add").run(str(tm.graph))
def test_index_put_trace_with_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(1, 1, 1, 4))
def test_index_put(target, indices, rhs):
target[indices] = rhs
return target
FileCheck().check("aten::view").check("index_put_").run(
str(test_index_put.graph)
)
def test_index_put_trace_without_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(4))
def test_index_put(target, indices, rhs):
target[indices] = rhs
return target
FileCheck().check_not("aten::view").check("index_put_").run(
str(test_index_put.graph)
)
@suppress_warnings
def test_trace_checker_dot_data(self):
with self.assertRaisesRegex(
torch.jit.TracingCheckError,
r"Tensor-valued Constant nodes differed in value across invocations",
):
@_trace(torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])
def foo(x):
y = x.data
return x + y
@suppress_warnings
def test_trace_checker_control_flow(self):
def foo(x):
for _ in range(x.size(0)):
x = torch.neg(x)
return x
with self.assertRaisesRegex(
torch.jit.TracingCheckError, r"Graphs differed across invocations!"
):
torch.jit.trace(foo, torch.randn(3, 4), check_inputs=[torch.randn(4, 4)])
@suppress_warnings
def test_trace_checker_memoization(self):
with self.assertRaisesRegex(
torch.jit.TracingCheckError, r"Graphs differed across invocations!"
):
def foo(x):
if not hasattr(foo, "cache"):
foo.cache = torch.neg(x)
return x + foo.cache
traced = torch.jit.trace(
foo, torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)]
)
def test_trace_checker_slice_lhs(self):
def foo(x):
for i in range(3):
x[i, :] = torch.zeros(4)
return x
self.checkTrace(foo, (torch.rand(3, 4),), inputs_require_grads=False)
def test_trace_checker_inplace_on_view(self):
def foo(x):
x.view(-1).add_(-x.view(-1))
return x
with self.assertWarnsRegex(
torch.jit.TracerWarning,
"Output nr 1. of the traced function does not match the "
"corresponding output of the Python function",
):
torch.jit.trace(
foo,
torch.rand(3, 4),
check_inputs=[torch.rand(5, 6)],
_force_outplace=True,
)
def test_lhs_index_fails(self):
def foo(x):
x[0, 1] = 4
return x
with self.assertWarnsRegex(
torch.jit.TracerWarning, "cause the trace to be incorrect"
):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
def test_lhs_index_trivial(self):
def foo(y, x):
y[...] = x
return y
self.checkTrace(
foo, (torch.rand(3, 4), torch.rand(4)), inputs_require_grads=False
)
def test_inplace_warn(self):
def foo(x):
x.view(-1).add_(-x.view(-1))
return x
with self.assertWarnsRegex(
torch.jit.TracerWarning, "cause the trace to be incorrect"
):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
@suppress_warnings
def test_trace_checker_dropout_train(self):
def foo(x):
return torch.dropout(x, p=0.5, train=True)
with self.assertWarnsRegex(
torch.jit.TracerWarning,
"Output nr 1. of the traced function does not match the "
"corresponding output of the Python function",
):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
with self.assertWarnsRegex(
torch.jit.TracerWarning, "Trace had nondeterministic nodes"
):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
def test_trace_checker_dropout_notrain(self):
input = torch.rand(3, 4)
@_trace(input)
def foo(x):
return torch.dropout(x, p=0.5, train=False)
self.assertEqual(foo(input), input)
def test_trace_contiguous(self):
def foo(x):
return x[:, :, ::2].contiguous().view(12)
x = torch.rand(2, 3, 4)
traced = torch.jit.trace(foo, (x,))
y = traced(x)
self.assertNotEqual(x.storage().data_ptr(), y.storage().data_ptr())
# This tests the logic in THPVariable_contiguous. There is short-circuiting
# code that prevents us from even getting to VariableType::contiguous, since
# it is an optimization that prevents us from acquiring the GIL for touching
# the device. We needed to add the tracing logic directly into the
# THPVariable_contiguous function only for the path where we are skipping
# dispatch into contiguous. We should see an aten::contiguous in this trace!
def test_trace_contiguous_short_circuit(self):
def foo(x):
return x.contiguous()
x = torch.rand(2, 3, 4)
traced = torch.jit.trace(foo, (x,))
FileCheck().check("aten::contiguous").run(str(traced.graph))
def test_trace_inverse(self):
def foo(x):
return ~x
foo_traced = torch.jit.trace(foo, torch.zeros(3, 4, dtype=torch.uint8))
eg = torch.zeros(3, dtype=torch.uint8)
self.assertEqual(foo_traced(eg), foo(eg))
def test_trace_modulelist(self):
class MySubmod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
class MyMod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.ml = torch.nn.ModuleList([MySubmod(), MySubmod()])
def forward(self, x):
for mod in self.ml:
x = mod(x)
return x
traced = torch.jit.trace(MyMod(), (torch.rand(3, 4),))
def test_trace_fork_join_and_module(self):
class MySubmod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x), torch.neg(x)
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.ml = torch.nn.ModuleList([MySubmod() for i in range(2)])
def forward(self, x):
futs = []
for i in range(2):
futs.append(torch.jit._fork(self.ml[i], x))
results = []
for i in range(2):
results.append(torch.jit._wait(futs[i])[0])
return torch.stack(results)
m = Mod()
traced = torch.jit.trace(m, torch.rand(3, 4))
def test_trace_invert_module_hierarchy(self):
class MySubmod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x), torch.neg(x)
class MyFunctionalMod(torch.nn.Module):
def forward(self, x, submod):
return submod(x)
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sm = MySubmod()
self.fm = MyFunctionalMod()
def forward(self, x):
return self.fm(x, self.sm)
torch.jit.trace(Mod(), (torch.rand(3, 4),))
@skipIfCrossRef
def test_trace_records_names(self):
def foo(bar, baz):
baz = bar + 3
quick_brown_fox = torch.neg(baz)
for _ in range(20):
yeet = quick_brown_fox - 3.14
return yeet
traced = torch.jit.trace(foo, (torch.rand(3, 3), torch.rand(3, 3)))
graph_str = str(traced.graph)
assert "bar" in graph_str
assert "baz" in graph_str
assert "quick_brown_fox" in graph_str
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tracing_hooks(self):
class Net(nn.Module):
def forward(self, x):
return x + x
def test_hook(is_post_hook, hook, fc):
n = Net()
if is_post_hook:
n.register_forward_hook(hook)
else:
n.register_forward_pre_hook(hook)
module = torch.jit.trace(n, (torch.tensor(1.0),))
eager_input = torch.tensor(1.0)
eager_out = n(eager_input)
fc.run(module.forward.graph)
input = torch.tensor(1.0)
output = module(input)
self.assertEqual(input, eager_input)
self.assertEqual(output, eager_out)
def hook_no_return(mod, input, output):
input[0].add_(1)
output.sub_(1)
fc = FileCheck().check("add(").check("add_(").check("sub_(")
test_hook(True, hook_no_return, fc)
def hook_return(mod, input, output):
input[0].add_(1)
return output - 3
fc = FileCheck().check("add(").check("add_(").check("sub(")
test_hook(True, hook_return, fc)
b = torch.tensor(3.0)
def captured_hook(mod, input, output):
return output - b
fc = FileCheck().check("add(").check("sub(")
test_hook(True, captured_hook, fc)
def pre_hook_no_ret(mod, input):
input[0].add_(3)
fc = FileCheck().check("add_(").check("add(")
test_hook(False, pre_hook_no_ret, fc)
def pre_hook_ret(mod, input):
return input[0] - 4
fc = FileCheck().check("sub(").check("add(")
test_hook(False, pre_hook_ret, fc)
def test_tracing_backward_hook_error(self):
class Net(nn.Module):
def forward(self, x):
return x + x
n = Net()
def backward_hook(module, grad_input, grad_output):
pass
n.register_backward_hook(backward_hook)
with self.assertRaisesRegex(Exception, "backward hooks assigned"):
torch.jit.trace(n, (torch.tensor(1.0),))
def test_tracing_multiple_methods(self):
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
def weighted_kernel_sum(self, weight):
return weight * self.conv.weight
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
inputs = {
"forward": example_forward_input,
"weighted_kernel_sum": example_weight,
}
n = Net()
module = torch.jit.trace_module(n, inputs)
check_inputs = []
for _ in range(2):
check_weight = torch.rand(1, 1, 3, 3)
check_forward_input = torch.rand(1, 1, 3, 3)
check_inputs.append(
{"forward": check_forward_input, "weighted_kernel_sum": check_weight}
)
module = torch.jit.trace_module(
n, inputs, check_trace=True, check_inputs=check_inputs
)
self.assertTrue(module._c._has_method("forward"))
self.assertTrue(module._c._has_method("weighted_kernel_sum"))
module = torch.jit.trace(n.forward, example_forward_input)
module = torch.jit.trace(
n.forward,
example_forward_input,
check_trace=True,
check_inputs=[example_forward_input],
)
with self.assertRaisesRegex(
AttributeError,
"trace doesn't support compiling individual module's functions",
):
module = torch.jit.trace(n.weighted_kernel_sum, inputs)
def test_tensor_with_grad_as_constant(self):
param = torch.randn(3).requires_grad_()
x = torch.randn(3)
def f(x):
return x + param
with self.assertRaisesRegex(
RuntimeError, "Cannot insert a Tensor that requires grad as a constant"
):
torch.jit.trace(f, x)
def test_non_tensor_tracing(self):
def f(x):
return x + param # noqa: F821
with self.assertRaisesRegex(
RuntimeError, r"Type 'Tuple\[int\]' cannot be traced"
):
torch.jit.trace(f, (1,))
def test_trace_skip_none_submodule(self):
class TestModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.submod = torch.nn.Linear(3, 4)
self.submod = None
def forward(self, inputs):
return inputs
m = TestModule()
tm = torch.jit.trace(m, torch.tensor(1.0))
self.assertFalse(hasattr(tm, "submod"))
def test_trace_with_conditional_property(self):
class Net(nn.Module):
def __init__(self, attr=None):
super().__init__()
if attr is not None:
self._attr = attr
self.attr_name = "_attr"
@property
def attr(self):
return getattr(self, self.attr_name)
def forward(self, x):
return x
x = torch.ones(1)
torch.jit.trace(Net(), x)
def test_trace_func_argument_names_captured(self):
def fn(first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1), torch.ones(1)))
FileCheck().check("first_arg").check_next("second_arg").run(
str(traced_fn.graph)
)
def test_trace_partial_func_argument_names_captured(self):
def fn(first_arg: torch.Tensor, second_arg=1) -> torch.Tensor:
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1),))
FileCheck().check("first_arg").check_not("second_arg").run(str(traced_fn.graph))
def test_trace_module_argument_names_captured(self):
class TestModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor):
return self.conv(first_arg) + second_arg
m = TestModule()
example_input = (torch.ones(1, 1, 3, 3), torch.ones(1, 1, 3, 3))
# Explicitly tracing module's forward method
traced_module_forward = torch.jit.trace(m.forward, example_input)
FileCheck().check("first_arg").check_next("second_arg").run(
str(traced_module_forward.graph)
)
# Tracing module's directly
traced_module = torch.jit.trace(m, example_input)
FileCheck().check("first_arg").check_next("second_arg").run(
str(traced_module.graph)
)
def test_trace_checking_with_deprecated_name(self):
class MyClass(torch.nn.Module):
def __init__(self) -> None:
super(MyClass, self).__init__()
def forward(self, x, y, **deprecated_arguments):
if len(deprecated_arguments) > 0:
raise RuntimeError(
f"Got unexpected arguments: {deprecated_arguments}"
)
return x + y
model = MyClass()
m2 = torch.jit.trace(model, (torch.ones(1), torch.ones(1)))
m3 = torch.jit.trace(
model,
example_kwarg_inputs={"x": torch.ones(1), "y": torch.ones(1)},
strict=False,
)
def test_trace_with_tuple_tensor(self):
class MyClass(torch.nn.Module):
def __init__(self) -> None:
super(MyClass, self).__init__()
def forward(self, x, y):
return x + y[0] + y[1]
model = MyClass()
traced_model = torch.jit.trace(
model, (torch.ones(1), (torch.ones(1), torch.ones(1)))
)
input_dict = {
"x": torch.tensor([2, 3]),
"y": (torch.tensor([5, 6]), torch.tensor([7, 8])),
}
self.assertEqual(model(**input_dict), traced_model(**input_dict))
traced_model = torch.jit.trace(
model,
example_kwarg_inputs={
"x": torch.ones(1),
"y": (torch.ones(1), torch.ones(1)),
},
)
self.assertEqual(model(**input_dict), traced_model(**input_dict))
def test_trace_no_duplicated_lifted_input_output(self):
class Normalize(nn.Module):
def __init__(self) -> None:
super().__init__()
self.norm = nn.GroupNorm(num_groups=32, num_channels=32)
def forward(self, x, y):
if y is None:
y = x
else:
y = self.norm(y)
y = y * 2
return y
class G(nn.Module):
def __init__(self) -> None:
super().__init__()
self.norm = Normalize()
def forward(self, x):
A = self.norm(x, None)
B = F.relu(A)
return A, B
class Net(nn.Module):
def __init__(self) -> None:
super().__init__()
self.g = G()
self.norm_1 = Normalize()
def forward(self, x):
hs = self.g(x)
A, B = hs
h = self.norm_1(B, A)
return h
net = Net()
net = net.eval()
x = torch.randn(1, 32, 16, 16)
traced = torch.jit.trace(net, x)
FileCheck().check_not("prim::TupleUnpack").run(str(traced.graph))
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
| TestTracer |
python | great-expectations__great_expectations | tests/metrics/test_metric.py | {
"start": 1635,
"end": 2001
} | class ____:
@pytest.mark.unit
def test_instantiation_success(self):
ColumnValuesAbove(
column=COLUMN,
min_value=42,
)
@pytest.mark.unit
def test_instantiation_missing_domain_parameters_raises(self):
with pytest.raises(ValidationError):
ColumnValuesAbove(min_value=42)
| TestMetricInstantiation |
python | coleifer__peewee | tests/transactions.py | {
"start": 875,
"end": 7365
} | class ____(BaseTransactionTestCase):
def test_simple(self):
self.assertFalse(db.in_transaction())
with db.atomic():
self.assertTrue(db.in_transaction())
self._save(1)
self.assertFalse(db.in_transaction())
self.assertRegister([1])
# Explicit rollback, implicit commit.
with db.atomic() as txn:
self._save(2)
txn.rollback()
self.assertTrue(db.in_transaction())
self._save(3)
self.assertFalse(db.in_transaction())
self.assertRegister([1, 3])
# Explicit rollbacks.
with db.atomic() as txn:
self._save(4)
txn.rollback()
self._save(5)
txn.rollback()
self.assertRegister([1, 3])
@requires_nested
def test_transactions(self):
self.assertFalse(db.in_transaction())
with db.atomic():
self.assertTrue(db.in_transaction())
self._save(1)
self.assertRegister([1])
with db.atomic() as txn:
self._save(2)
txn.rollback()
self._save(3)
with db.atomic() as sp1:
self._save(4)
with db.atomic() as sp2:
self._save(5)
sp2.rollback()
with db.atomic() as sp3:
self._save(6)
with db.atomic() as sp4:
self._save(7)
with db.atomic() as sp5:
self._save(8)
self.assertRegister([1, 3, 4, 6, 7, 8])
sp4.rollback()
self.assertRegister([1, 3, 4, 6])
self.assertRegister([1, 3, 4, 6])
def test_commit_rollback(self):
with db.atomic() as txn:
self._save(1)
txn.commit()
self._save(2)
txn.rollback()
self.assertRegister([1])
with db.atomic() as txn:
self._save(3)
txn.rollback()
self._save(4)
self.assertRegister([1, 4])
@requires_nested
def test_commit_rollback_nested(self):
with db.atomic() as txn:
self.test_commit_rollback()
txn.rollback()
self.assertRegister([])
with db.atomic():
self.test_commit_rollback()
self.assertRegister([1, 4])
def test_nesting_transaction_obj(self):
self.assertRegister([])
with db.transaction() as txn:
self._save(1)
with db.transaction() as txn2:
self._save(2)
txn2.rollback() # Actually issues a rollback.
self.assertRegister([])
self._save(3)
self.assertRegister([3])
with db.transaction() as txn:
self._save(4)
with db.transaction() as txn2:
with db.transaction() as txn3:
self._save(5)
txn3.commit() # Actually commits.
self._save(6)
txn2.rollback()
self.assertRegister([3, 4, 5])
with db.transaction() as txn:
self._save(6)
try:
with db.transaction() as txn2:
self._save(7)
raise ValueError()
except ValueError:
pass
self.assertRegister([3, 4, 5, 6, 7])
@requires_nested
def test_savepoint_commit(self):
with db.atomic() as txn:
self._save(1)
txn.rollback()
self._save(2)
txn.commit()
with db.atomic() as sp:
self._save(3)
sp.rollback()
self._save(4)
sp.commit()
self.assertRegister([2, 4])
def test_atomic_decorator(self):
@db.atomic()
def save(i):
self._save(i)
save(1)
self.assertRegister([1])
def text_atomic_exception(self):
def will_fail(self):
with db.atomic():
self._save(1)
self._save(None)
self.assertRaises(IntegrityError, will_fail)
self.assertRegister([])
def user_error(self):
with db.atomic():
self._save(2)
raise ValueError
self.assertRaises(ValueError, user_error)
self.assertRegister([])
def test_manual_commit(self):
with db.manual_commit():
db.begin()
self._save(1)
db.rollback()
db.begin()
self._save(2)
db.commit()
with db.manual_commit():
db.begin()
self._save(3)
db.rollback()
db.begin()
self._save(4)
db.commit()
self.assertRegister([2, 4])
def test_mixing_manual_atomic(self):
@db.manual_commit()
def will_fail():
pass
@db.atomic()
def also_fails():
pass
with db.atomic():
self.assertRaises(ValueError, will_fail)
with db.manual_commit():
self.assertRaises(ValueError, also_fails)
with db.manual_commit():
with self.assertRaises(ValueError):
with db.atomic(): pass
with db.atomic():
with self.assertRaises(ValueError):
with db.manual_commit(): pass
def test_closing_db_in_transaction(self):
with db.atomic():
self.assertRaises(OperationalError, db.close)
@requires_nested
def test_db_context_manager(self):
db.close()
self.assertTrue(db.is_closed())
with db:
self.assertFalse(db.is_closed())
self._save(1)
with db:
self._save(2)
try:
with db:
self._save(3)
raise ValueError('xxx')
except ValueError:
pass
self._save(4)
try:
with db:
self._save(5)
with db:
self._save(6)
raise ValueError('yyy')
except ValueError:
pass
self.assertFalse(db.is_closed())
self.assertTrue(db.is_closed())
self.assertRegister([1, 2, 4])
@requires_nested
| TestTransaction |
python | jazzband__prettytable | tests/test_prettytable.py | {
"start": 6469,
"end": 8242
} | class ____:
def test_delete_column(self, col_prettytable: PrettyTable) -> None:
col_prettytable.del_column("Area")
assert (
col_prettytable.get_string()
== """+-----------+------------+-----------------+
| City name | Population | Annual Rainfall |
+-----------+------------+-----------------+
| Adelaide | 1158259 | 600.5 |
| Brisbane | 1857594 | 1146.4 |
| Darwin | 120900 | 1714.7 |
| Hobart | 205556 | 619.5 |
| Sydney | 4336374 | 1214.8 |
| Melbourne | 3806092 | 646.9 |
| Perth | 1554769 | 869.4 |
+-----------+------------+-----------------+"""
)
def test_delete_illegal_column_raises_error(
self, col_prettytable: PrettyTable
) -> None:
with pytest.raises(ValueError):
col_prettytable.del_column("City not-a-name")
def test_delete_row(self, city_data: PrettyTable) -> None:
city_data.del_row(2)
assert (
city_data.get_string()
== """+-----------+------+------------+-----------------+
| City name | Area | Population | Annual Rainfall |
+-----------+------+------------+-----------------+
| Adelaide | 1295 | 1158259 | 600.5 |
| Brisbane | 5905 | 1857594 | 1146.4 |
| Hobart | 1357 | 205556 | 619.5 |
| Sydney | 2058 | 4336374 | 1214.8 |
| Melbourne | 1566 | 3806092 | 646.9 |
| Perth | 5386 | 1554769 | 869.4 |
+-----------+------+------------+-----------------+"""
)
def test_delete_row_unavailable(self, city_data: PrettyTable) -> None:
with pytest.raises(IndexError):
city_data.del_row(10)
| TestDelete |
python | Pylons__pyramid | tests/test_traversal.py | {
"start": 32986,
"end": 35919
} | class ____(unittest.TestCase):
def _makeOne(self, context, url):
return self._getTargetClass()(context, url)
def _getTargetClass(self):
from pyramid.traversal import ResourceURL
return ResourceURL
def test_instance_conforms_to_IResourceURL(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IResourceURL
context = DummyContext()
request = DummyRequest()
verifyObject(IResourceURL, self._makeOne(context, request))
def test_IResourceURL_attributes_with_vroot(self):
from pyramid.interfaces import VH_ROOT_KEY
root = DummyContext()
root.__parent__ = None
root.__name__ = None
one = DummyContext()
one.__parent__ = root
one.__name__ = 'one'
two = DummyContext()
two.__parent__ = one
two.__name__ = 'two'
environ = {VH_ROOT_KEY: '/one'}
request = DummyRequest(environ)
context_url = self._makeOne(two, request)
self.assertEqual(context_url.physical_path, '/one/two/')
self.assertEqual(context_url.virtual_path, '/two/')
self.assertEqual(
context_url.physical_path_tuple, ('', 'one', 'two', '')
)
self.assertEqual(context_url.virtual_path_tuple, ('', 'two', ''))
def test_IResourceURL_attributes_vroot_ends_with_slash(self):
from pyramid.interfaces import VH_ROOT_KEY
root = DummyContext()
root.__parent__ = None
root.__name__ = None
one = DummyContext()
one.__parent__ = root
one.__name__ = 'one'
two = DummyContext()
two.__parent__ = one
two.__name__ = 'two'
environ = {VH_ROOT_KEY: '/one/'}
request = DummyRequest(environ)
context_url = self._makeOne(two, request)
self.assertEqual(context_url.physical_path, '/one/two/')
self.assertEqual(context_url.virtual_path, '/two/')
self.assertEqual(
context_url.physical_path_tuple, ('', 'one', 'two', '')
)
self.assertEqual(context_url.virtual_path_tuple, ('', 'two', ''))
def test_IResourceURL_attributes_no_vroot(self):
root = DummyContext()
root.__parent__ = None
root.__name__ = None
one = DummyContext()
one.__parent__ = root
one.__name__ = 'one'
two = DummyContext()
two.__parent__ = one
two.__name__ = 'two'
environ = {}
request = DummyRequest(environ)
context_url = self._makeOne(two, request)
self.assertEqual(context_url.physical_path, '/one/two/')
self.assertEqual(context_url.virtual_path, '/one/two/')
self.assertEqual(
context_url.physical_path_tuple, ('', 'one', 'two', '')
)
self.assertEqual(
context_url.virtual_path_tuple, ('', 'one', 'two', '')
)
| ResourceURLTests |
python | pytorch__pytorch | torch/cuda/_utils.py | {
"start": 10466,
"end": 19139
} | class ____:
"""
Represents a compiled CUDA kernel that can be called with PyTorch tensors.
"""
def __init__(self, func: ctypes.c_void_p, module: ctypes.c_void_p) -> None:
self.func = func
self.module = module
self._max_shared_mem_bytes = 0
def __call__(
self,
grid: tuple[int, int, int] = (1, 1, 1),
block: tuple[int, int, int] = (1, 1, 1),
args: Optional[list] = None,
shared_mem: int = 0,
stream: Optional[Any] = None,
) -> None:
"""
Call the compiled CUDA kernel
Args:
grid (tuple): Grid dimensions (grid_x, grid_y, grid_z)
block (tuple): Block dimensions (block_x, block_y, block_z)
args (list): List of arguments to pass to the kernel.
PyTorch tensor arguments will be automatically converted to pointers.
shared_mem (int): Shared memory size in bytes
stream (torch.cuda.Stream): CUDA stream to use. If None, uses current stream.
"""
import torch
libcuda = torch.cuda._utils._get_gpu_runtime_library()
if not args:
args = []
# Process arguments and convert tensors to pointers
processed_args: list[ctypes.c_void_p] = []
c_args = []
for arg in args:
if isinstance(arg, torch.Tensor):
if not arg.is_cuda and not (arg.is_cpu and arg.is_pinned()):
raise ValueError(
"All tensor arguments must be CUDA tensors or pinned CPU tensors"
)
# Get pointer to tensor data
ptr = ctypes.c_void_p(arg.data_ptr())
processed_args.append(ptr)
c_args.append(ctypes.byref(ptr))
elif isinstance(arg, int):
# Convert integers to C int
c_int = ctypes.c_int(arg)
# Store the C int for reference keeping, not in processed_args
c_args.append(ctypes.byref(c_int))
elif isinstance(arg, float):
# Python floats are doubles - use double by default
c_double = ctypes.c_double(arg)
# Store the C double for reference keeping, not in processed_args
c_args.append(ctypes.byref(c_double))
else:
raise TypeError(f"Unsupported argument type: {type(arg)}")
# Convert to array of void pointers
c_args_array = (ctypes.c_void_p * len(c_args))()
for i, arg in enumerate(c_args):
c_args_array[i] = ctypes.cast(arg, ctypes.c_void_p)
# Get the stream
if stream is None:
# Defer import to avoid circular imports
import torch.cuda
stream = torch.cuda.current_stream()
# Check if kernel requires large shared memory but hasn't been configured
if shared_mem >= 48 * 1024 and (
self._max_shared_mem_bytes == 0 or shared_mem > self._max_shared_mem_bytes
):
configured_msg = (
"not configured"
if self._max_shared_mem_bytes == 0
else f"only {self._max_shared_mem_bytes} bytes configured"
)
raise RuntimeError(
f"Kernel requires {shared_mem} bytes of shared memory (>= 48KB), "
f"but {configured_msg}. "
"Call kernel.set_shared_memory_config(shared_mem) after compilation "
"and before launching the kernel."
)
_check_cuda(
libcuda.cuLaunchKernel(
self.func,
grid[0],
grid[1],
grid[2],
block[0],
block[1],
block[2],
shared_mem,
stream._as_parameter_,
c_args_array,
None,
)
)
def set_shared_memory_config(self, shared_mem_bytes: int) -> None:
if shared_mem_bytes < 48 * 1024:
# No configuration needed for <= 48KB, just update the value
self._max_shared_mem_bytes = shared_mem_bytes
return
libcuda = _get_gpu_runtime_library()
# Get device properties to validate against limits
device_props = torch.cuda.get_device_properties()
# HIP doesn't have shared_memory_per_block_optin in device properties, so we hard-code it here
if torch.version.hip:
# navi, CDNA1-CDNA3 allows a max of 64KB shared memory
# CDNA4 allows a max of 160KB shared memory
max_shared_mem = (
65536 if device_props.gcnArchName != "gfx950" else 160 * 1024
)
else:
max_shared_mem = getattr(
device_props, "shared_memory_per_block_optin", 49152
)
if shared_mem_bytes > max_shared_mem:
raise RuntimeError(
f"Requested shared memory ({shared_mem_bytes} bytes) exceeds "
f"device limit ({max_shared_mem} bytes). "
"Consider reducing block size or shared memory usage."
)
# Set the function attribute once
# https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__TYPES.html
cudaFuncAttributeMaxDynamicSharedMemorySize = 8
_check_cuda(
libcuda.cuFuncSetAttribute(
self.func,
cudaFuncAttributeMaxDynamicSharedMemorySize,
shared_mem_bytes,
)
)
self._max_shared_mem_bytes = shared_mem_bytes
def _cuda_load_module(
ptx: Union[str, bytes], kernel_names: Optional[list[str]] = None
) -> Union[_CudaModule, dict[str, "_CudaKernel"]]:
"""
Loads a CUDA module from PTX code and returns a module object that can access kernels.
Args:
ptx (bytes or str): The PTX code to load
kernel_names (list, optional): List of kernel names to extract from the module.
If None, will return a module object with __getattr__.
Returns:
object: If kernel_names is None, returns a module object with __getattr__ to access kernels.
If kernel_names is provided, returns a dict mapping kernel names to _CudaKernel objects.
"""
# Ensure CUDA is initialized
import torch.cuda
# Load CUDA driver library
libcuda = _get_gpu_runtime_library()
# Convert PTX to bytes if it's a string
if isinstance(ptx, str):
ptx = ptx.encode("utf-8")
# Load PTX module
module = ctypes.c_void_p()
# Get the current stream without directly importing torch.cuda at module level
stream = torch.cuda.current_stream()
with stream:
_check_cuda(libcuda.cuModuleLoadData(ctypes.byref(module), ptx))
if not kernel_names:
return _CudaModule(module)
# Return specific kernels
kernels = {}
for name in kernel_names:
func = ctypes.c_void_p()
_check_cuda(
libcuda.cuModuleGetFunction(
ctypes.byref(func), module, name.encode("utf-8")
)
)
kernels[name] = _CudaKernel(func, module)
return kernels
def _get_device_index(
device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
r"""Get the device index from :attr:`device`, which can be a torch.device object, a Python integer, or ``None``.
If :attr:`device` is a torch.device object, returns the device index if it
is a CUDA device. Note that for a CUDA device without a specified index,
i.e., ``torch.device('cuda')``, this will return the current default CUDA
device if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
CPU devices will be accepted and ``-1`` will be returned in this case.
If :attr:`device` is a Python integer, it is returned as is.
If :attr:`device` is ``None``, this will return the current default CUDA
device if :attr:`optional` is ``True``.
"""
if isinstance(device, int):
return device
if isinstance(device, str):
device = torch.device(device)
if isinstance(device, torch.device):
if allow_cpu:
if device.type not in ["cuda", "cpu"]:
raise ValueError(f"Expected a cuda or cpu device, but got: {device}")
elif device.type != "cuda":
raise ValueError(f"Expected a cuda device, but got: {device}")
if not torch.jit.is_scripting():
if isinstance(device, torch.cuda.device):
return device.idx
return _torch_get_device_index(device, optional, allow_cpu)
| _CudaKernel |
python | doocs__leetcode | lcof/面试题49. 丑数/Solution2.py | {
"start": 0,
"end": 437
} | class ____:
def nthUglyNumber(self, n: int) -> int:
dp = [1] * n
p2 = p3 = p5 = 0
for i in range(1, n):
next2, next3, next5 = dp[p2] * 2, dp[p3] * 3, dp[p5] * 5
dp[i] = min(next2, next3, next5)
if dp[i] == next2:
p2 += 1
if dp[i] == next3:
p3 += 1
if dp[i] == next5:
p5 += 1
return dp[-1]
| Solution |
python | python__mypy | mypy/suggestions.py | {
"start": 2387,
"end": 2473
} | class ____(TypedDict):
return_type: str
arg_types: list[str]
| PyAnnotateSignature |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 877927,
"end": 878325
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("PullRequestReview", graphql_name="node")
"""The item at the end of the edge."""
| PullRequestReviewEdge |
python | doocs__leetcode | solution/1400-1499/1499.Max Value of Equation/Solution2.py | {
"start": 0,
"end": 432
} | class ____:
def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:
ans = -inf
q = deque()
for x, y in points:
while q and x - q[0][0] > k:
q.popleft()
if q:
ans = max(ans, x + y + q[0][1] - q[0][0])
while q and y - x >= q[-1][1] - q[-1][0]:
q.pop()
q.append((x, y))
return ans
| Solution |
python | getsentry__sentry | src/sentry/ratelimits/redis.py | {
"start": 790,
"end": 4450
} | class ____(RateLimiter):
def __init__(self, **options: Any) -> None:
cluster_key = settings.SENTRY_RATE_LIMIT_REDIS_CLUSTER
self.client = redis.redis_clusters.get(cluster_key)
def _construct_redis_key(
self,
key: str,
project: Project | None = None,
window: int | None = None,
request_time: float | None = None,
) -> str:
"""
Construct a rate limit key using the args given. Key will have a format of:
"rl:<key_hex>:[project?<project_id>:]<time_bucket>"
where the time bucket is calculated by integer dividing the current time by the window
"""
if window is None or window == 0:
window = self.window
if request_time is None:
request_time = time()
key_hex = md5_text(key).hexdigest()
bucket = _time_bucket(request_time, window)
redis_key = f"rl:{key_hex}"
if project is not None:
redis_key += f":{project.id}"
redis_key += f":{bucket}"
return redis_key
def validate(self) -> None:
try:
self.client.ping()
self.client.connection_pool.disconnect()
except Exception as e:
raise InvalidConfiguration(str(e))
def current_value(
self, key: str, project: Project | None = None, window: int | None = None
) -> int:
"""
Get the current value stored in redis for the rate limit with key "key" and said window
"""
redis_key = self._construct_redis_key(key, project=project, window=window)
try:
current_count = self.client.get(redis_key)
except RedisError:
# Don't report any existing hits when there is a redis error.
# Log what happened and move on
logger.exception("Failed to retrieve current value from redis")
return 0
if current_count is None:
# Key hasn't been created yet, therefore no hits done so far
return 0
return int(current_count)
def is_limited_with_value(
self, key: str, limit: int, project: Project | None = None, window: int | None = None
) -> tuple[bool, int, int]:
"""
Does a rate limit check as well as returning the new rate limit value and when the next
rate limit window will start.
Note that the counter is incremented when the check is done.
"""
request_time = time()
if window is None or window == 0:
window = self.window
redis_key = self._construct_redis_key(key, project=project, window=window)
expiration = window - int(request_time % window)
# Reset Time = next time bucket's start time
reset_time = _bucket_start_time(_time_bucket(request_time, window) + 1, window)
try:
pipe = self.client.pipeline()
pipe.incr(redis_key)
pipe.expire(redis_key, expiration)
pipeline_result = pipe.execute()
result = pipeline_result[0]
except (RedisError, IndexError):
# We don't want rate limited endpoints to fail when ratelimits
# can't be updated. We do want to know when that happens.
logger.exception("Failed to retrieve current rate limit value from redis")
return False, 0, reset_time
return result > limit, result, reset_time
def reset(self, key: str, project: Project | None = None, window: int | None = None) -> None:
redis_key = self._construct_redis_key(key, project=project, window=window)
self.client.delete(redis_key)
| RedisRateLimiter |
python | kamyu104__LeetCode-Solutions | Python/find-maximum-number-of-non-intersecting-substrings.py | {
"start": 51,
"end": 448
} | class ____(object):
def maxSubstrings(self, word):
"""
:type word: str
:rtype: int
"""
L = 4
result = 0
lookup = {}
for i, c in enumerate(word):
if c not in lookup:
lookup[c] = i
elif i-lookup[c]+1 >= L:
result += 1
lookup.clear()
return result
| Solution |
python | readthedocs__readthedocs.org | readthedocs/api/v3/views.py | {
"start": 10456,
"end": 12191
} | class ____(
APIv3Settings,
NestedViewSetMixin,
ProjectQuerySetMixin,
FlexFieldsMixin,
CreateModelMixin,
DestroyModelMixin,
ReadOnlyModelViewSet,
):
# The main query is done via the ``NestedViewSetMixin`` using the
# ``parents_query_lookups`` defined when registering the urls.
model = ProjectRelationship
lookup_field = "alias"
lookup_url_kwarg = "alias_slug"
permission_classes = [ReadOnlyPermission | (IsAuthenticated & IsProjectAdmin)]
def get_serializer_class(self):
"""
Return correct serializer depending on the action.
For GET it returns a serializer with many fields and on POST,
it return a serializer to validate just a few fields.
"""
if self.action == "create":
return SubprojectCreateSerializer
if self.action == "destroy":
return SubprojectDestroySerializer
return SubprojectSerializer
def get_serializer_context(self):
context = super().get_serializer_context()
context["parent"] = self._get_parent_project()
return context
def create(self, request, *args, **kwargs):
"""Define a Project as subproject of another Project."""
parent = self._get_parent_project()
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(parent=parent)
headers = self.get_success_headers(serializer.data)
# Use serializer that fully render a the subproject
serializer = SubprojectSerializer(instance=serializer.instance)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
| SubprojectRelationshipViewSet |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_legacy_class_based/_documenters.py | {
"start": 76373,
"end": 77996
} | class ____(DataDocumenterMixinBase):
"""Mixin for DataDocumenter to provide the feature for supporting uninitialized
(type annotation only) global variables.
"""
def import_object(self, raiseerror: bool = False) -> bool:
try:
return super().import_object(raiseerror=True) # type: ignore[misc]
except ImportError as exc:
# annotation only instance variable (PEP-526)
try:
with mock(self.config.autodoc_mock_imports):
parent = import_module(self.modname)
annotations = get_type_hints(
parent,
None,
self.config.autodoc_type_aliases,
include_extras=True,
)
if self.objpath[-1] in annotations:
self.object = UNINITIALIZED_ATTR
self.parent = parent
return True
except ImportError:
pass
if raiseerror:
raise
logger.warning(exc.args[0], type='autodoc', subtype='import_object')
self.env.note_reread()
return False
def should_suppress_value_header(self) -> bool:
return (
self.object is UNINITIALIZED_ATTR or super().should_suppress_value_header()
)
def get_doc(self) -> list[list[str]] | None:
if self.object is UNINITIALIZED_ATTR:
return []
else:
return super().get_doc() # type: ignore[misc]
| UninitializedGlobalVariableMixin |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/redshift/resources.py | {
"start": 798,
"end": 9056
} | class ____(BaseRedshiftClient):
def __init__(self, conn_args: dict[str, Any], autocommit: Optional[bool], log: Logger):
# Extract parameters from resource config
self.conn_args = conn_args
self.autocommit = autocommit
self.log = log
def execute_query(self, query, fetch_results=False, cursor_factory=None, error_callback=None):
"""Synchronously execute a single query against Redshift. Will return a list of rows, where
each row is a tuple of values, e.g. SELECT 1 will return [(1,)].
Args:
query (str): The query to execute.
fetch_results (Optional[bool]): Whether to return the results of executing the query.
Defaults to False, in which case the query will be executed without retrieving the
results.
cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative
cursor_factory; defaults to None. Will be used when constructing the cursor.
error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A
callback function, invoked when an exception is encountered during query execution;
this is intended to support executing additional queries to provide diagnostic
information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no
function is provided, exceptions during query execution will be raised directly.
Returns:
Optional[List[Tuple[Any, ...]]]: Results of the query, as a list of tuples, when
fetch_results is set. Otherwise return None.
"""
check.str_param(query, "query")
check.bool_param(fetch_results, "fetch_results")
check.opt_class_param(
cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor
)
check.opt_callable_param(error_callback, "error_callback")
with self._get_conn() as conn:
with self._get_cursor(conn, cursor_factory=cursor_factory) as cursor:
try:
self.log.info(f"Executing query '{query}'")
cursor.execute(query)
if fetch_results and cursor.rowcount > 0:
return cursor.fetchall()
else:
self.log.info("Empty result from query")
except Exception as e:
# If autocommit is disabled or not set (it is disabled by default), Redshift
# will be in the middle of a transaction at exception time, and because of
# the failure the current transaction will not accept any further queries.
#
# This conn.commit() call closes the open transaction before handing off
# control to the error callback, so that the user can issue additional
# queries. Notably, for e.g. pg_last_copy_id() to work, it requires you to
# use the same conn/cursor, so you have to do this conn.commit() to ensure
# things are in a usable state in the error callback.
if not self.autocommit:
conn.commit()
if error_callback is not None:
error_callback(e, cursor, self.log)
else:
raise
def execute_queries( # pyright: ignore[reportIncompatibleMethodOverride]
self, queries, fetch_results=False, cursor_factory=None, error_callback=None
):
"""Synchronously execute a list of queries against Redshift. Will return a list of list of
rows, where each row is a tuple of values, e.g. ['SELECT 1', 'SELECT 1'] will return
[[(1,)], [(1,)]].
Args:
queries (List[str]): The queries to execute.
fetch_results (Optional[bool]): Whether to return the results of executing the query.
Defaults to False, in which case the query will be executed without retrieving the
results.
cursor_factory (Optional[:py:class:`psycopg2.extensions.cursor`]): An alternative
cursor_factory; defaults to None. Will be used when constructing the cursor.
error_callback (Optional[Callable[[Exception, Cursor, DagsterLogManager], None]]): A
callback function, invoked when an exception is encountered during query execution;
this is intended to support executing additional queries to provide diagnostic
information, e.g. by querying ``stl_load_errors`` using ``pg_last_copy_id()``. If no
function is provided, exceptions during query execution will be raised directly.
Returns:
Optional[List[List[Tuple[Any, ...]]]]: Results of the query, as a list of list of
tuples, when fetch_results is set. Otherwise return None.
"""
check.list_param(queries, "queries", of_type=str)
check.bool_param(fetch_results, "fetch_results")
check.opt_class_param(
cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor
)
check.opt_callable_param(error_callback, "error_callback")
results = []
with self._get_conn() as conn:
with self._get_cursor(conn, cursor_factory=cursor_factory) as cursor:
for query in queries:
try:
self.log.info(f"Executing query '{query}'")
cursor.execute(query)
if fetch_results and cursor.rowcount > 0:
results.append(cursor.fetchall())
else:
results.append([])
self.log.info("Empty result from query")
except Exception as e:
# If autocommit is disabled or not set (it is disabled by default), Redshift
# will be in the middle of a transaction at exception time, and because of
# the failure the current transaction will not accept any further queries.
#
# This conn.commit() call closes the open transaction before handing off
# control to the error callback, so that the user can issue additional
# queries. Notably, for e.g. pg_last_copy_id() to work, it requires you to
# use the same conn/cursor, so you have to do this conn.commit() to ensure
# things are in a usable state in the error callback.
if not self.autocommit:
conn.commit()
if error_callback is not None:
error_callback(e, cursor, self.log)
else:
raise
if fetch_results:
return results
@contextmanager
def _get_conn(self):
conn = None
try:
conn = psycopg2.connect(**self.conn_args)
yield conn
finally:
if conn:
conn.close()
@contextmanager
def _get_cursor(self, conn, cursor_factory=None):
check.opt_class_param(
cursor_factory, "cursor_factory", superclass=psycopg2.extensions.cursor
)
# Could be none, in which case we should respect the connection default. Otherwise
# explicitly set to true/false.
if self.autocommit is not None:
conn.autocommit = self.autocommit
with conn:
with conn.cursor(cursor_factory=cursor_factory) as cursor:
yield cursor
# If autocommit is set, we'll commit after each and every query execution. Otherwise, we
# want to do a final commit after we're wrapped up executing the full set of one or more
# queries.
if not self.autocommit:
conn.commit()
@deprecated(breaking_version="2.0", additional_warn_text="Use RedshiftClientResource instead.")
| RedshiftClient |
python | pikepdf__pikepdf | tests/test_pdf.py | {
"start": 4440,
"end": 5812
} | class ____:
def test_stream(self, resources):
with (resources / 'pal-1bit-trivial.pdf').open('rb') as stream:
with Pdf.open(stream) as pdf:
assert pdf.Root.Pages.Count == 1
def test_no_text_stream(self, resources):
with pytest.raises(TypeError):
with (resources / 'pal-1bit-trivial.pdf').open('r') as stream:
Pdf.open(stream)
def test_save_stream(self, trivial, outdir):
pdf = trivial
pdf.save(outdir / 'nostream.pdf', deterministic_id=True)
bio = BytesIO()
pdf.save(bio, deterministic_id=True)
bio.seek(0)
with (outdir / 'nostream.pdf').open('rb') as saved_file:
saved_file_contents = saved_file.read()
assert saved_file_contents == bio.read()
def test_read_not_readable_file(self, outdir):
with (Path(outdir) / 'writeme.pdf').open('wb') as writable:
with pytest.raises(ValueError, match=r'not readable'):
Pdf.open(writable)
def test_open_not_seekable_stream(self, resources):
class UnseekableBytesIO(BytesIO):
def seekable(self):
return False
testio = UnseekableBytesIO((resources / 'pal-1bit-trivial.pdf').read_bytes())
with pytest.raises(ValueError, match=r'not seekable'):
Pdf.open(testio)
| TestStreams |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 169144,
"end": 174471
} | class ____(rv_continuous):
r"""A Normal Inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `norminvgauss` is:
.. math::
f(x, a, b) = \frac{a \, K_1(a \sqrt{1 + x^2})}{\pi \sqrt{1 + x^2}} \,
\exp(\sqrt{a^2 - b^2} + b x)
where :math:`x` is a real number, the parameter :math:`a` is the tail
heaviness and :math:`b` is the asymmetry parameter satisfying
:math:`a > 0` and :math:`|b| <= a`.
:math:`K_1` is the modified Bessel function of second kind
(`scipy.special.k1`).
%(after_notes)s
A normal inverse Gaussian random variable `Y` with parameters `a` and `b`
can be expressed as a normal mean-variance mixture:
``Y = b * V + sqrt(V) * X`` where `X` is ``norm(0,1)`` and `V` is
``invgauss(mu=1/sqrt(a**2 - b**2))``. This representation is used
to generate random variates.
Another common parametrization of the distribution (see Equation 2.1 in
[2]_) is given by the following expression of the pdf:
.. math::
g(x, \alpha, \beta, \delta, \mu) =
\frac{\alpha\delta K_1\left(\alpha\sqrt{\delta^2 + (x - \mu)^2}\right)}
{\pi \sqrt{\delta^2 + (x - \mu)^2}} \,
e^{\delta \sqrt{\alpha^2 - \beta^2} + \beta (x - \mu)}
In SciPy, this corresponds to
:math:`a=\alpha \delta, b=\beta \delta, \text{loc}=\mu, \text{scale}=\delta`.
References
----------
.. [1] O. Barndorff-Nielsen, "Hyperbolic Distributions and Distributions on
Hyperbolae", Scandinavian Journal of Statistics, Vol. 5(3),
pp. 151-157, 1978.
.. [2] O. Barndorff-Nielsen, "Normal Inverse Gaussian Distributions and
Stochastic Volatility Modelling", Scandinavian Journal of
Statistics, Vol. 24, pp. 1-13, 1997.
%(example)s
"""
_support_mask = rv_continuous._open_support_mask
def _argcheck(self, a, b):
return (a > 0) & (np.absolute(b) < a)
def _shape_info(self):
ia = _ShapeInfo("a", False, (0, np.inf), (False, False))
ib = _ShapeInfo("b", False, (-np.inf, np.inf), (False, False))
return [ia, ib]
def _fitstart(self, data):
# Arbitrary, but the default a = b = 1 is not valid; the distribution
# requires |b| < a.
return super()._fitstart(data, args=(1, 0.5))
def _pdf(self, x, a, b):
gamma = np.sqrt(a**2 - b**2)
fac1 = a / np.pi
sq = np.hypot(1, x) # reduce overflows
return fac1 * sc.k1e(a * sq) * np.exp(b*x - a*sq + gamma) / sq
def _sf(self, x, a, b):
if np.isscalar(x):
# If x is a scalar, then so are a and b.
return integrate.quad(self._pdf, x, np.inf, args=(a, b))[0]
else:
a = np.atleast_1d(a)
b = np.atleast_1d(b)
result = []
for (x0, a0, b0) in zip(x, a, b):
result.append(integrate.quad(self._pdf, x0, np.inf,
args=(a0, b0))[0])
return np.array(result)
def _isf(self, q, a, b):
def _isf_scalar(q, a, b):
def eq(x, a, b, q):
# Solve eq(x, a, b, q) = 0 to obtain isf(x, a, b) = q.
return self._sf(x, a, b) - q
# Find a bracketing interval for the root.
# Start at the mean, and grow the length of the interval
# by 2 each iteration until there is a sign change in eq.
xm = self.mean(a, b)
em = eq(xm, a, b, q)
if em == 0:
# Unlikely, but might as well check.
return xm
if em > 0:
delta = 1
left = xm
right = xm + delta
while eq(right, a, b, q) > 0:
delta = 2*delta
right = xm + delta
else:
# em < 0
delta = 1
right = xm
left = xm - delta
while eq(left, a, b, q) < 0:
delta = 2*delta
left = xm - delta
result = optimize.brentq(eq, left, right, args=(a, b, q),
xtol=self.xtol)
return result
if np.isscalar(q):
return _isf_scalar(q, a, b)
else:
result = []
for (q0, a0, b0) in zip(q, a, b):
result.append(_isf_scalar(q0, a0, b0))
return np.array(result)
def _rvs(self, a, b, size=None, random_state=None):
# note: X = b * V + sqrt(V) * X is norminvgaus(a,b) if X is standard
# normal and V is invgauss(mu=1/sqrt(a**2 - b**2))
gamma = np.sqrt(a**2 - b**2)
ig = invgauss.rvs(mu=1/gamma, size=size, random_state=random_state)
return b * ig + np.sqrt(ig) * norm.rvs(size=size,
random_state=random_state)
def _stats(self, a, b):
gamma = np.sqrt(a**2 - b**2)
mean = b / gamma
variance = a**2 / gamma**3
skewness = 3.0 * b / (a * np.sqrt(gamma))
kurtosis = 3.0 * (1 + 4 * b**2 / a**2) / gamma
return mean, variance, skewness, kurtosis
norminvgauss = norminvgauss_gen(name="norminvgauss")
| norminvgauss_gen |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 14165,
"end": 15724
} | class ____(DataFrameConstraint):
"""A dataframe constraint that validates the expected count of rows.
Args:
num_allowed_rows (int): The number of allowed rows in your dataframe.
error_tolerance (Optional[int]): The acceptable threshold if you are not completely certain. Defaults to 0.
"""
def __init__(self, num_allowed_rows, error_tolerance=0):
self.num_allowed_rows = check.int_param(num_allowed_rows, "num_allowed_rows")
self.error_tolerance = abs(check.int_param(error_tolerance, "error_tolerance"))
if self.error_tolerance > self.num_allowed_rows:
raise ValueError("Tolerance can't be greater than the number of rows you expect.")
description = f"Dataframe must have {self.num_allowed_rows} +- {self.error_tolerance} rows."
super().__init__(error_description=description, markdown_description=description)
def validate(self, dataframe):
check.inst_param(dataframe, "dataframe", DataFrame)
if not (
self.num_allowed_rows - self.error_tolerance
<= len(dataframe)
<= self.num_allowed_rows + self.error_tolerance
):
raise DataFrameConstraintViolationException(
constraint_name=self.name,
constraint_description=(
f"Expected {self.num_allowed_rows} +- {self.error_tolerance} rows. Got {len(dataframe)}"
),
)
def apply_ignore_missing_data_to_mask(mask, column):
return mask & ~column.isnull()
| RowCountConstraint |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 1719,
"end": 1924
} | class ____(graphene.ObjectType):
jsonString = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "JsonMetadataEntry"
| GrapheneJsonMetadataEntry |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/streams.py | {
"start": 10483,
"end": 13132
} | class ____(GoogleAdsStream):
"""
Customer Client stream: https://developers.google.com/google-ads/api/fields/v20/customer_client
"""
primary_key = ["customer_client.id"]
def __init__(self, customer_status_filter: List[str], **kwargs):
self.customer_status_filter = customer_status_filter
super().__init__(**kwargs)
def get_query(self, stream_slice: Mapping[str, Any] = None) -> str:
fields = GoogleAds.get_fields_from_schema(self.get_json_schema())
table_name = get_resource_name(self.name)
active_customers_condition = []
if self.customer_status_filter:
customer_status_filter = ", ".join([f"'{status}'" for status in self.customer_status_filter])
active_customers_condition = [f"customer_client.status in ({customer_status_filter})"]
query = GoogleAds.convert_schema_into_query(fields=fields, table_name=table_name, conditions=active_customers_condition)
return query
def read_records(self, sync_mode, stream_slice: Optional[Mapping[str, Any]] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
"""
This method is overridden to avoid using login_customer_id from dummy_customers.
login_customer_id is used in the stream_slices to pass it to child customers,
but we don't need it here as this class iterate over customers accessible from user creds.
"""
if stream_slice is None:
return []
customer_id = stream_slice["customer_id"]
try:
response_records = self.google_ads_client.send_request(self.get_query(stream_slice), customer_id=customer_id)
yield from self.parse_records_with_backoff(response_records, stream_slice)
except GoogleAdsException as exception:
traced_exception(exception, customer_id, self.CATCH_CUSTOMER_NOT_ENABLED_ERROR)
def parse_response(self, response: SearchPager, stream_slice: Optional[Mapping[str, Any]] = None) -> Iterable[Mapping]:
"""
login_cusotmer_id is populated to child customers if they are under managers account
"""
records = [record for record in super().parse_response(response)]
# read_records get all customers connected to customer_id from stream_slice
# if the result is more than one customer, it's a manager, otherwise it is client account for which we don't need login_customer_id
root_is_manager = len(records) > 1
for record in records:
record["login_customer_id"] = stream_slice["login_customer_id"] if root_is_manager else "default"
yield record
| CustomerClient |
python | ray-project__ray | doc/source/serve/doc_code/app_builder.py | {
"start": 1383,
"end": 1663
} | class ____(BaseModel):
model1_uri: str
model2_uri: str
def composed_app_builder(args: ComposedArgs) -> Application:
return IngressDeployment.bind(
Model1.bind(args.model1_uri),
Model2.bind(args.model2_uri),
)
# __end_composed_builder__
| ComposedArgs |
python | dagster-io__dagster | python_modules/dagster-pipes/dagster_pipes/__init__.py | {
"start": 44695,
"end": 45859
} | class ____(PipesBlobStoreMessageWriterChannel):
"""Message writer channel for writing messages by periodically writing message chunks to a GCS bucket.
Args:
client (google.cloud.storage.Client): A google.cloud.storage.Client object.
bucket (str): The name of the GCS bucket to write to.
key_prefix (Optional[str]): An optional prefix to use for the keys of written blobs.
interval (float): interval in seconds between upload chunk uploads
"""
def __init__(
self, client: "GCSClient", bucket: str, key_prefix: Optional[str], *, interval: float = 10
):
super().__init__(interval=interval)
self._client = client
self._bucket = bucket
self._key_prefix = key_prefix
self._gcp_bucket = self._client.get_bucket(self._bucket)
def upload_messages_chunk(self, payload: IO, index: int) -> None:
key = f"{self._key_prefix}/{index}.json" if self._key_prefix else f"{index}.json"
self._gcp_bucket.blob(key).upload_from_string(payload.read())
# ########################
# ##### IO - AzureBlobStorage
# ########################
| PipesGCSMessageWriterChannel |
python | optuna__optuna | optuna/exceptions.py | {
"start": 1943,
"end": 2135
} | class ____(OptunaError):
"""Exception for a duplicated study name.
This error is raised when a specified study name already exists in the storage.
"""
pass
| DuplicatedStudyError |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/properties/snippets.py | {
"start": 3072,
"end": 3166
} | class ____(messages.Message):
notes = messages.MessageField(Note, 1, repeated=True)
| Notebook |
python | google__jax | jax/_src/pallas/mosaic/sc_primitives.py | {
"start": 28851,
"end": 36578
} | class ____(enum.Enum):
#: [a0, a1], [b0, b1] -> [[a0, a1], [b0, b1]]
COMPRESSED = "compressed"
#: [a0, a1], [b0, b1] -> [a0, b0, a1, b1]
INTERLEAVED = "interleaved"
def _format_to_ir_attribute(format: PackFormat) -> ir.Attribute:
return ir.Attribute.parse(f"#tpu.pack_format<{format.value}>")
pack_p = jax_core.Primitive("pack")
@pack_p.def_abstract_eval
def _pack_abstract_eval(a, b, *, format, preferred_element_type):
if a.shape != b.shape:
raise ValueError(
f"Packed arrays must have the same shape, got {a.shape} and {b.shape}"
)
if a.ndim != 1:
raise ValueError(f"Packed arrays must be 1-D, got {a.ndim}")
if a.dtype != b.dtype:
raise TypeError(
f"Packed arrays must have the same dtype, got {a.dtype} and {b.dtype}"
)
if preferred_element_type is None:
match a.dtype:
case jnp.float32:
packed_dtype = jnp.bfloat16
case jnp.int32:
packed_dtype = jnp.int16
case _:
# TODO(slebedev): Support more types.
raise NotImplementedError(
f"Only packing of float32 and int32 is supported, got {a.dtype}"
)
else:
packed_bw = dtypes.itemsize_bits(a.dtype) // 2
if dtypes.itemsize_bits(preferred_element_type) != packed_bw:
raise ValueError(
f"preferred_element_type= must have bitwidth {packed_bw}, got"
f" {dtypes.itemsize_bits(preferred_element_type)}"
)
packed_dtype = preferred_element_type
match format:
case PackFormat.INTERLEAVED:
packed_shape = (2 * a.size,)
case PackFormat.COMPRESSED:
packed_shape = (a.size, 2)
return jax_core.ShapedArray(packed_shape, packed_dtype)
@sc_lowering.register_lowering_rule(pack_p)
def _pack_lowering_rule(
ctx: sc_lowering.LoweringRuleContext,
a,
b,
*,
format,
preferred_element_type,
):
del preferred_element_type # Unused.
[out_aval] = ctx.avals_out
return tpu.pack_subelements(
aval_to_ir_type(out_aval),
[a, b],
[0, 1],
_format_to_ir_attribute(format),
)
def pack(
a: jax.Array,
b: jax.Array,
/,
*,
format: PackFormat,
preferred_element_type: jax.typing.DTypeLike | None = None,
) -> jax.Array:
"""Packs two arrays according to the given format.
.. warning:: This API is temporary and will be removed once the SparseCore
compiler is able to do packing/unpacking automatically.
Args:
a: The first array to pack.
b: The second array to pack.
format: The packing format to use.
preferred_element_type: Optional. The preferred element type of the packed
array. If specified, must have half the bitwidth of the input array types.
Returns:
The packed array.
"""
if preferred_element_type is not None:
preferred_element_type = jnp.dtype(preferred_element_type)
return pack_p.bind(
a, b, format=format, preferred_element_type=preferred_element_type
)
unpack_p = jax_core.Primitive("unpack")
unpack_p.multiple_results = True
@unpack_p.def_abstract_eval
def _unpack_abstract_eval(ab, *, format, preferred_element_type):
match format:
case PackFormat.INTERLEAVED:
if ab.ndim != 1 or ab.size % 2 != 0:
raise ValueError(
"Interleaved unpack requires a 1-D array with an even size, got"
f" {ab.shape}"
)
case PackFormat.COMPRESSED:
if ab.ndim != 2 or ab.shape[1] != 2:
raise ValueError(
"Compressed unpack requires an array with shape (N, 2), got"
f" {ab.shape}"
)
if preferred_element_type is None:
match ab.dtype:
case jnp.bfloat16:
unpacked_dtype = jnp.float32
case jnp.int16:
unpacked_dtype = jnp.int32
case _:
# TODO(slebedev): Support more types.
raise NotImplementedError(
f"Only unpacking of bloat16 and int16 is supported, got {ab.dtype}"
)
else:
unpacked_bw = dtypes.itemsize_bits(ab.dtype) * 2
if dtypes.itemsize_bits(preferred_element_type) != unpacked_bw:
raise ValueError(
f"preferred_element_type= must have bitwidth {unpacked_bw}, got"
f" {dtypes.itemsize_bits(preferred_element_type)}"
)
unpacked_dtype = preferred_element_type
return (jax_core.ShapedArray((ab.size // 2,), unpacked_dtype),) * 2
@sc_lowering.register_lowering_rule(unpack_p)
def _unpack_lowering_rule(
ctx: sc_lowering.LoweringRuleContext, ab, *, format, preferred_element_type
):
del preferred_element_type # Unused.
out_aval, _ = ctx.avals_out
out_type = aval_to_ir_type(out_aval)
return (
tpu.unpack_subelements(out_type, ab, 0, _format_to_ir_attribute(format)),
tpu.unpack_subelements(out_type, ab, 1, _format_to_ir_attribute(format)),
)
def unpack(
ab: jax.Array,
/,
*,
format: PackFormat,
preferred_element_type: jax.typing.DTypeLike | None = None,
) -> tuple[jax.Array, jax.Array]:
"""Unpacks two arrays according to the given format.
.. warning:: This API is temporary and will be removed once the SparseCore
compiler is able to do packing/unpacking automatically.
Args:
ab: The array to unpack.
format: The packing format to use.
preferred_element_type: Optional. The preferred element type of the unpacked
arrays. If specified, must have double the bitwidth of the input array
type.
Returns:
The unpacked arrays.
"""
if preferred_element_type is not None:
preferred_element_type = jnp.dtype(preferred_element_type)
return unpack_p.bind(
ab,
format=format,
preferred_element_type=preferred_element_type,
)
def _mask_all_reduce_abstract_eval(x, *, reduce):
if x.dtype != jnp.bool:
raise TypeError(f"Mask all-reduce only supports bool arrays, got {x.dtype}")
match x.shape:
case (minor_dim,):
return jax_core.ShapedArray((minor_dim // reduce,), jnp.int32)
case _:
raise ValueError("Mask all-reduce only supports 1D arrays")
def _mask_all_reduce_lowering_rule(
ctx: sc_lowering.LoweringRuleContext, x, *, reduce, kind: str
):
[out_aval] = ctx.avals_out
return tpu.all_reduce(
ir.VectorType.get(
out_aval.shape,
ir.IntegerType.get_signless(32),
),
x,
0,
ir.Attribute.parse(f"#tpu.reduction_kind<{kind}>"),
)
all_reduce_population_count_p = jax_core.Primitive(
"all_reduce_population_count"
)
all_reduce_population_count_p.def_abstract_eval(_mask_all_reduce_abstract_eval)
sc_lowering.register_lowering_rule(all_reduce_population_count_p)(
functools.partial(_mask_all_reduce_lowering_rule, kind="sum")
)
def all_reduce_population_count(x: jax.Array, *, reduce: int = 1) -> jax.Array:
"""Computes the number of nonzero elements in the array.
Args:
x: A 1D array of bools.
reduce: The factor to reduce the output shape by.
Returns:
An array with each element containing the number of true elements in ``x``.
"""
return all_reduce_population_count_p.bind(x, reduce=reduce)
all_reduce_ffs_p = jax_core.Primitive("all_reduce_ffs")
all_reduce_ffs_p.def_abstract_eval(_mask_all_reduce_abstract_eval)
sc_lowering.register_lowering_rule(all_reduce_ffs_p)(
functools.partial(_mask_all_reduce_lowering_rule, kind="find_first_set")
)
def all_reduce_ffs(x: jax.Array, *, reduce: int = 1) -> jax.Array:
"""Computes the index of the first true element in the array.
Args:
x: A 1D array of bools.
reduce: The factor to reduce the output shape by.
Returns:
An array with each element containing the index of the first true element in
``x`` or ``x.size`` if there are no true elements.
"""
return all_reduce_ffs_p.bind(x, reduce=reduce)
| PackFormat |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 619076,
"end": 619742
} | class ____(ExprNode):
# Extracts the docstring of the body element
subexprs = ['body']
type = py_object_type
is_temp = True
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
self.body = body
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
self.result(), self.body.result(),
code.intern_identifier(StringEncoding.EncodedString("__doc__")),
code.error_goto_if_null(self.result(), self.pos)))
self.generate_gotref(code)
| DocstringRefNode |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/router/base.py | {
"start": 544,
"end": 1657
} | class ____(Chain, ABC):
"""Chain that outputs the name of a destination chain and the inputs to it."""
@property
@override
def output_keys(self) -> list[str]:
return ["destination", "next_inputs"]
def route(self, inputs: dict[str, Any], callbacks: Callbacks = None) -> Route:
"""Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = self(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
async def aroute(
self,
inputs: dict[str, Any],
callbacks: Callbacks = None,
) -> Route:
"""Route inputs to a destination chain.
Args:
inputs: inputs to the chain
callbacks: callbacks to use for the chain
Returns:
a Route object
"""
result = await self.acall(inputs, callbacks=callbacks)
return Route(result["destination"], result["next_inputs"])
| RouterChain |
python | aio-libs__aiohttp | aiohttp/_websocket/models.py | {
"start": 869,
"end": 1038
} | class ____(NamedTuple):
data: bytes
size: int
extra: str | None = None
type: Literal[WSMsgType.CONTINUATION] = WSMsgType.CONTINUATION
| WSMessageContinuation |
python | aimacode__aima-python | text.py | {
"start": 14286,
"end": 15207
} | class ____(search.Problem):
def __init__(self, initial=None, goal=None, decoder=None):
super().__init__(initial or hashabledict(), goal)
self.decoder = decoder
def actions(self, state):
search_list = [c for c in self.decoder.chardomain if c not in state]
target_list = [c for c in alphabet if c not in state.values()]
# Find the best character to replace
plain_char = max(search_list, key=lambda c: self.decoder.P1[c])
for cipher_char in target_list:
yield (plain_char, cipher_char)
def result(self, state, action):
new_state = hashabledict(state) # copy to prevent hash issues
new_state[action[0]] = action[1]
return new_state
def goal_test(self, state):
"""We're done when all letters in search domain are assigned."""
return len(state) >= len(self.decoder.chardomain)
| PermutationDecoderProblem |
python | dask__dask | dask/dataframe/dask_expr/_rolling.py | {
"start": 4888,
"end": 4942
} | class ____(RollingReduction):
how = "sum"
| RollingSum |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/distributions/uniform_test.py | {
"start": 1541,
"end": 10453
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testUniformRange(self):
a = 3.0
b = 10.0
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertAllClose(a, self.evaluate(uniform.low))
self.assertAllClose(b, self.evaluate(uniform.high))
self.assertAllClose(b - a, self.evaluate(uniform.range()))
@test_util.run_in_graph_and_eager_modes
def testUniformPDF(self):
a = constant_op.constant([-3.0] * 5 + [15.0])
b = constant_op.constant([11.0] * 5 + [20.0])
uniform = uniform_lib.Uniform(low=a, high=b)
a_v = -3.0
b_v = 11.0
x = np.array([-10.5, 4.0, 0.0, 10.99, 11.3, 17.0], dtype=np.float32)
def _expected_pdf():
pdf = np.zeros_like(x) + 1.0 / (b_v - a_v)
pdf[x > b_v] = 0.0
pdf[x < a_v] = 0.0
pdf[5] = 1.0 / (20.0 - 15.0)
return pdf
expected_pdf = _expected_pdf()
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
log_pdf = uniform.log_prob(x)
self.assertAllClose(np.log(expected_pdf), self.evaluate(log_pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformShape(self):
a = constant_op.constant([-3.0] * 5)
b = constant_op.constant(11.0)
uniform = uniform_lib.Uniform(low=a, high=b)
self.assertEqual(self.evaluate(uniform.batch_shape_tensor()), (5,))
self.assertEqual(uniform.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(self.evaluate(uniform.event_shape_tensor()), [])
self.assertEqual(uniform.event_shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testUniformPDFWithScalarEndpoint(self):
a = constant_op.constant([0.0, 5.0])
b = constant_op.constant(10.0)
uniform = uniform_lib.Uniform(low=a, high=b)
x = np.array([0.0, 8.0], dtype=np.float32)
expected_pdf = np.array([1.0 / (10.0 - 0.0), 1.0 / (10.0 - 5.0)])
pdf = uniform.prob(x)
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformCDF(self):
batch_size = 6
a = constant_op.constant([1.0] * batch_size)
b = constant_op.constant([11.0] * batch_size)
a_v = 1.0
b_v = 11.0
x = np.array([-2.5, 2.5, 4.0, 0.0, 10.99, 12.0], dtype=np.float32)
uniform = uniform_lib.Uniform(low=a, high=b)
def _expected_cdf():
cdf = (x - a_v) / (b_v - a_v)
cdf[x >= b_v] = 1
cdf[x < a_v] = 0
return cdf
cdf = uniform.cdf(x)
self.assertAllClose(_expected_cdf(), self.evaluate(cdf))
log_cdf = uniform.log_cdf(x)
self.assertAllClose(np.log(_expected_cdf()), self.evaluate(log_cdf))
@test_util.run_in_graph_and_eager_modes
def testUniformEntropy(self):
a_v = np.array([1.0, 1.0, 1.0])
b_v = np.array([[1.5, 2.0, 3.0]])
uniform = uniform_lib.Uniform(low=a_v, high=b_v)
expected_entropy = np.log(b_v - a_v)
self.assertAllClose(expected_entropy, self.evaluate(uniform.entropy()))
@test_util.run_in_graph_and_eager_modes
def testUniformAssertMaxGtMin(self):
a_v = np.array([1.0, 1.0, 1.0], dtype=np.float32)
b_v = np.array([1.0, 2.0, 3.0], dtype=np.float32)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"x < y"):
uniform = uniform_lib.Uniform(low=a_v, high=b_v, validate_args=True)
self.evaluate(uniform.low)
@test_util.run_in_graph_and_eager_modes
def testUniformSample(self):
a = constant_op.constant([3.0, 4.0])
b = constant_op.constant(13.0)
a1_v = 3.0
a2_v = 4.0
b_v = 13.0
n = constant_op.constant(100000)
uniform = uniform_lib.Uniform(low=a, high=b)
samples = uniform.sample(n, seed=137)
sample_values = self.evaluate(samples)
self.assertEqual(sample_values.shape, (100000, 2))
self.assertAllClose(
sample_values[::, 0].mean(), (b_v + a1_v) / 2, atol=1e-1, rtol=0.)
self.assertAllClose(
sample_values[::, 1].mean(), (b_v + a2_v) / 2, atol=1e-1, rtol=0.)
self.assertFalse(
np.any(sample_values[::, 0] < a1_v) or np.any(sample_values >= b_v))
self.assertFalse(
np.any(sample_values[::, 1] < a2_v) or np.any(sample_values >= b_v))
@test_util.run_in_graph_and_eager_modes
def _testUniformSampleMultiDimensional(self):
# DISABLED: Please enable this test once b/issues/30149644 is resolved.
batch_size = 2
a_v = [3.0, 22.0]
b_v = [13.0, 35.0]
a = constant_op.constant([a_v] * batch_size)
b = constant_op.constant([b_v] * batch_size)
uniform = uniform_lib.Uniform(low=a, high=b)
n_v = 100000
n = constant_op.constant(n_v)
samples = uniform.sample(n)
self.assertEqual(samples.get_shape(), (n_v, batch_size, 2))
sample_values = self.evaluate(samples)
self.assertFalse(
np.any(sample_values[:, 0, 0] < a_v[0]) or
np.any(sample_values[:, 0, 0] >= b_v[0]))
self.assertFalse(
np.any(sample_values[:, 0, 1] < a_v[1]) or
np.any(sample_values[:, 0, 1] >= b_v[1]))
self.assertAllClose(
sample_values[:, 0, 0].mean(), (a_v[0] + b_v[0]) / 2, atol=1e-2)
self.assertAllClose(
sample_values[:, 0, 1].mean(), (a_v[1] + b_v[1]) / 2, atol=1e-2)
@test_util.run_in_graph_and_eager_modes
def testUniformMean(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.mean()), s_uniform.mean())
@test_util.run_in_graph_and_eager_modes
def testUniformVariance(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.variance()), s_uniform.var())
@test_util.run_in_graph_and_eager_modes
def testUniformStd(self):
a = 10.0
b = 100.0
uniform = uniform_lib.Uniform(low=a, high=b)
if not stats:
return
s_uniform = stats.uniform(loc=a, scale=b - a)
self.assertAllClose(self.evaluate(uniform.stddev()), s_uniform.std())
@test_util.run_in_graph_and_eager_modes
def testUniformNans(self):
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(low=a, high=b)
no_nans = constant_op.constant(1.0)
nans = constant_op.constant(0.0) / constant_op.constant(0.0)
self.assertTrue(self.evaluate(math_ops.is_nan(nans)))
with_nans = array_ops_stack.stack([no_nans, nans])
pdf = uniform.prob(with_nans)
is_nan = self.evaluate(math_ops.is_nan(pdf))
self.assertFalse(is_nan[0])
self.assertTrue(is_nan[1])
@test_util.run_in_graph_and_eager_modes
def testUniformSamplePdf(self):
a = 10.0
b = [11.0, 100.0]
uniform = uniform_lib.Uniform(a, b)
self.assertTrue(
self.evaluate(
math_ops.reduce_all(uniform.prob(uniform.sample(10)) > 0)))
@test_util.run_in_graph_and_eager_modes
def testUniformBroadcasting(self):
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob([[10.5, 11.5], [9.0, 19.0], [10.5, 21.0]])
expected_pdf = np.array([[1.0, 0.1], [0.0, 0.1], [1.0, 0.0]])
self.assertAllClose(expected_pdf, self.evaluate(pdf))
@test_util.run_in_graph_and_eager_modes
def testUniformSampleWithShape(self):
a = 10.0
b = [11.0, 20.0]
uniform = uniform_lib.Uniform(a, b)
pdf = uniform.prob(uniform.sample((2, 3)))
# pylint: disable=bad-continuation
expected_pdf = [
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
[[1.0, 0.1], [1.0, 0.1], [1.0, 0.1]],
]
# pylint: enable=bad-continuation
self.assertAllClose(expected_pdf, self.evaluate(pdf))
pdf = uniform.prob(uniform.sample())
expected_pdf = [1.0, 0.1]
self.assertAllClose(expected_pdf, self.evaluate(pdf))
def testFullyReparameterized(self):
a = constant_op.constant(0.1)
b = constant_op.constant(0.8)
with backprop.GradientTape() as tape:
tape.watch(a)
tape.watch(b)
uniform = uniform_lib.Uniform(a, b)
samples = uniform.sample(100)
grad_a, grad_b = tape.gradient(samples, [a, b])
self.assertIsNotNone(grad_a)
self.assertIsNotNone(grad_b)
# Eager doesn't pass due to a type mismatch in one of the ops.
def testUniformFloat64(self):
uniform = uniform_lib.Uniform(
low=np.float64(0.), high=np.float64(1.))
self.assertAllClose(
[1., 1.],
self.evaluate(uniform.prob(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(
[0.5, 0.6],
self.evaluate(uniform.cdf(np.array([0.5, 0.6], dtype=np.float64))))
self.assertAllClose(0.5, self.evaluate(uniform.mean()))
self.assertAllClose(1 / 12., self.evaluate(uniform.variance()))
self.assertAllClose(0., self.evaluate(uniform.entropy()))
if __name__ == "__main__":
test.main()
| UniformTest |
python | getsentry__sentry | tests/relay_integration/lang/java/test_plugin.py | {
"start": 32401,
"end": 84922
} | class ____(RelayStoreHelper, TransactionTestCase):
@pytest.fixture(autouse=True)
def initialize(self, set_sentry_option, live_server):
with set_sentry_option("system.url-prefix", live_server.url):
# Run test case
yield
def upload_proguard_mapping(self, uuid, mapping_file_content):
url = reverse(
"sentry-api-0-dsym-files",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
self.login_as(user=self.user)
out = BytesIO()
f = zipfile.ZipFile(out, "w")
f.writestr("proguard/%s.txt" % uuid, mapping_file_content)
f.writestr("ignored-file.txt", b"This is just some stuff")
f.close()
response = self.client.post(
url,
{
"file": SimpleUploadedFile(
"symbols.zip", out.getvalue(), content_type="application/zip"
)
},
format="multipart",
)
assert response.status_code == 201, response.content
assert len(response.json()) == 1
@requires_symbolicator
@pytest.mark.symbolicator
def test_basic_resolving(self) -> None:
self.upload_proguard_mapping(PROGUARD_UUID, PROGUARD_SOURCE)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 67,
},
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 69,
},
]
},
"module": "org.a.b",
"type": "g$a",
"value": "Attempt to invoke virtual method 'org.a.b.g$a.a' on a null object reference",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
exc = event.interfaces["exception"].values[0]
bt = exc.stacktrace
frames = bt.frames
assert exc.type == "Util$ClassContextSecurityManager"
assert (
exc.value
== "Attempt to invoke virtual method 'org.slf4j.helpers.Util$ClassContextSecurityManager.getExtraClassContext' on a null object reference"
)
assert exc.module == "org.slf4j.helpers"
assert frames[0].function == "getClassContext"
assert frames[0].module == "org.slf4j.helpers.Util$ClassContextSecurityManager"
assert frames[1].function == "getExtraClassContext"
assert frames[1].module == "org.slf4j.helpers.Util$ClassContextSecurityManager"
assert event.culprit == (
"org.slf4j.helpers.Util$ClassContextSecurityManager " "in getExtraClassContext"
)
@requires_symbolicator
@pytest.mark.symbolicator
def test_value_only_class_names_are_deobfuscated(self) -> None:
self.upload_proguard_mapping(PROGUARD_UUID, PROGUARD_SOURCE)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_UUID}]},
"exception": {
"values": [
{
# No module/type, only value with obfuscated class reference
"value": "Encountered class org.a.b.g$a during processing",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
exc = event.interfaces["exception"].values[0]
# Ensure the value got deobfuscated via classes mapping
assert "org.slf4j.helpers.Util$ClassContextSecurityManager" in exc.value
assert "org.a.b.g$a" not in exc.value
@requires_symbolicator
@pytest.mark.symbolicator
def test_value_only_multiple_exceptions_are_all_deobfuscated(self) -> None:
self.upload_proguard_mapping(PROGUARD_UUID, PROGUARD_SOURCE)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_UUID}]},
"exception": {
"values": [
{"value": "First mentions org.a.b.g$a"},
{"value": "Second mentions org.a.b.g$b"},
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
excs = event.interfaces["exception"].values
assert any(
"org.slf4j.helpers.Util$ClassContextSecurityManager" in e.value
and "org.a.b.g$a" not in e.value
for e in excs
)
# Util$ClassContext maps to g$b as well in the provided mapping
assert any(
"org.slf4j.helpers.Util$ClassContext" in e.value and "org.a.b.g$b" not in e.value
for e in excs
)
@requires_symbolicator
@pytest.mark.symbolicator
def test_resolving_does_not_fail_when_no_value(self) -> None:
self.upload_proguard_mapping(PROGUARD_UUID, PROGUARD_SOURCE)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 67,
},
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 69,
},
]
},
"module": "org.a.b",
"type": "g$a",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
metrics = event.data["_metrics"]
assert not metrics.get("flag.processing.error")
@requires_symbolicator
@pytest.mark.symbolicator
def test_resolving_does_not_fail_when_no_module_or_function(self) -> None:
self.upload_proguard_mapping(PROGUARD_UUID, PROGUARD_SOURCE)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 67,
},
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 69,
},
{
"function": "__start_thread",
"package": "/apex/com.android.art/lib64/libart.so",
"lineno": 196,
"in_app": False,
},
{
"package": "/apex/com.android.art/lib64/libart.so",
"lineno": 214,
"in_app": False,
},
]
},
"module": "org.a.b",
"type": "g$a",
"value": "Attempt to invoke virtual method 'org.a.b.g$a.a' on a null object reference",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
metrics = event.data["_metrics"]
assert not metrics.get("flag.processing.error")
@requires_symbolicator
@pytest.mark.symbolicator
def test_sets_inapp_after_resolving(self) -> None:
self.upload_proguard_mapping(PROGUARD_UUID, PROGUARD_SOURCE)
version = "org.slf4j@1.2.3"
env_name = "some_env"
event = self.store_event(
data={"release": version, "environment": env_name}, project_id=self.project.id
)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"release": "org.slf4j@1.2.3",
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 67,
},
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 69,
"in_app": False,
},
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 68,
"in_app": True,
},
{
"function": "init",
"abs_path": None,
"module": "com.android.Zygote",
"filename": None,
"lineno": 62,
},
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$b",
"filename": None,
"lineno": 70,
},
]
},
"module": "org.a.b",
"type": "g$a",
"value": "Attempt to invoke virtual method 'org.a.b.g$a.a' on a null object reference",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
exc = event.interfaces["exception"].values[0]
bt = exc.stacktrace
frames = bt.frames
assert exc.module == "org.slf4j.helpers"
assert frames[0].in_app is True
assert frames[1].in_app is False
assert frames[2].in_app is True
assert frames[3].in_app is False
assert frames[4].in_app is True
@requires_symbolicator
@pytest.mark.symbolicator
def test_resolving_inline(self) -> None:
self.upload_proguard_mapping(PROGUARD_INLINE_UUID, PROGUARD_INLINE_SOURCE)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_INLINE_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "onClick",
"abs_path": None,
"module": "e.a.c.a",
"filename": None,
"lineno": 2,
},
{
"function": "t",
"abs_path": None,
"module": "io.sentry.sample.MainActivity",
"filename": "MainActivity.java",
"lineno": 1,
},
]
},
"module": "org.a.b",
"type": "g$a",
"value": "Oh no",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
exc = event.interfaces["exception"].values[0]
bt = exc.stacktrace
frames = bt.frames
assert len(frames) == 4
assert frames[0].function == "onClick"
assert frames[0].module == "io.sentry.sample.-$$Lambda$r3Avcbztes2hicEObh02jjhQqd4"
assert frames[1].filename == "MainActivity.java"
assert frames[1].module == "io.sentry.sample.MainActivity"
assert frames[1].function == "onClickHandler"
assert frames[1].lineno == 40
assert frames[2].function == "foo"
assert frames[2].lineno == 44
assert frames[3].function == "bar"
assert frames[3].lineno == 54
assert frames[3].filename == "MainActivity.java"
assert frames[3].module == "io.sentry.sample.MainActivity"
@requires_symbolicator
@pytest.mark.symbolicator
def test_resolving_inline_with_native_frames(self) -> None:
self.upload_proguard_mapping(PROGUARD_INLINE_UUID, PROGUARD_INLINE_SOURCE)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_INLINE_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "onClick",
"abs_path": None,
"module": "e.a.c.a",
"filename": None,
"lineno": 2,
},
{
"function": "t",
"abs_path": None,
"module": "io.sentry.sample.MainActivity",
"filename": "MainActivity.java",
"lineno": 1,
},
{
"abs_path": "Thread.java",
"filename": "Thread.java",
"function": "sleep",
"lineno": 450,
"lock": {
"address": "0x0ddc1f22",
"class_name": "Object",
"package_name": "java.lang",
"type:": 1,
},
"module": "java.lang.Thread",
},
{
"function": "__start_thread",
"package": "/apex/com.android.art/lib64/libart.so",
"lineno": 196,
"in_app": False,
},
]
},
"module": "org.a.b",
"type": "g$a",
"value": "Oh no",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
exc = event.interfaces["exception"].values[0]
bt = exc.stacktrace
frames = bt.frames
assert len(frames) == 6
assert frames[0].function == "onClick"
assert frames[0].module == "io.sentry.sample.-$$Lambda$r3Avcbztes2hicEObh02jjhQqd4"
assert frames[1].filename == "MainActivity.java"
assert frames[1].module == "io.sentry.sample.MainActivity"
assert frames[1].function == "onClickHandler"
assert frames[1].lineno == 40
assert frames[2].function == "foo"
assert frames[2].lineno == 44
assert frames[3].function == "bar"
assert frames[3].lineno == 54
assert frames[3].filename == "MainActivity.java"
assert frames[3].module == "io.sentry.sample.MainActivity"
assert frames[4].function == "sleep"
assert frames[4].lineno == 450
assert frames[4].filename == "Thread.java"
assert frames[4].module == "java.lang.Thread"
assert frames[5].function == "__start_thread"
assert frames[5].package == "/apex/com.android.art/lib64/libart.so"
@requires_symbolicator
@pytest.mark.symbolicator
def test_error_on_resolving(self) -> None:
url = reverse(
"sentry-api-0-dsym-files",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
self.login_as(user=self.user)
out = BytesIO()
f = zipfile.ZipFile(out, "w")
f.writestr("proguard/%s.txt" % PROGUARD_BUG_UUID, PROGUARD_BUG_SOURCE)
f.close()
response = self.client.post(
url,
{
"file": SimpleUploadedFile(
"symbols.zip", out.getvalue(), content_type="application/zip"
)
},
format="multipart",
)
assert response.status_code == 201, response.content
assert len(response.json()) == 1
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "proguard", "uuid": PROGUARD_BUG_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 67,
},
{
"function": "a",
"abs_path": None,
"module": "org.a.b.g$a",
"filename": None,
"lineno": 69,
},
]
},
"type": "RuntimeException",
"value": "Oh no",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
assert len(event.data["errors"]) == 1
error = event.data["errors"][0]
assert error["mapping_uuid"] == "071207ac-b491-4a74-957c-2c94fd9594f2"
assert error["type"] == "proguard_missing_lineno"
def upload_jvm_bundle(self, debug_id, source_files):
files = {}
for source_file in source_files:
files[f"files/_/_/{source_file}"] = {"url": f"~/{source_file}"}
manifest = {
"org": self.project.organization.slug,
"project": self.project.slug,
"debug_id": debug_id,
"files": files,
}
file_like = BytesIO(b"SYSB")
with zipfile.ZipFile(file_like, "a") as zip:
for path, contents in source_files.items():
zip.writestr(f"files/_/_/{path}", contents)
zip.writestr("manifest.json", json.dumps(manifest))
file_like.seek(0)
file = File.objects.create(
name="bundle.zip",
type="sourcebundle",
headers={"Content-Type": "application/x-sentry-bundle+zip"},
)
file.putfile(file_like)
ProjectDebugFile.objects.create(
project_id=self.project.id,
debug_id=debug_id,
file=file,
)
@requires_symbolicator
@pytest.mark.symbolicator
def test_basic_source_lookup(self) -> None:
debug_id = str(uuid4())
self.upload_jvm_bundle(debug_id, {"io/sentry/samples/MainActivity.jvm": JVM_SOURCE})
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "jvm", "debug_id": debug_id}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "otherMethod",
"abs_path": "OtherActivity.java",
"module": "OtherActivity",
"filename": "OtherActivity.java",
"lineno": 100,
},
{
"function": "differentMethod",
"abs_path": "DifferentActivity",
"module": "DifferentActivity",
"filename": "DifferentActivity",
"lineno": 200,
},
{
"function": "onCreate",
"abs_path": None,
"module": "io.sentry.samples.MainActivity",
"filename": None,
"lineno": 11,
},
{
"function": "whoops",
"abs_path": "MainActivity.kt",
"module": "io.sentry.samples.MainActivity$InnerClass",
"filename": "MainActivity.kt",
"lineno": 20,
},
{
"function": "whoops2",
"abs_path": None,
"module": "io.sentry.samples.MainActivity$AnotherInnerClass",
"filename": None,
"lineno": 26,
},
{
"function": "whoops3",
"abs_path": "MainActivity.kt",
"module": "io.sentry.samples.MainActivity$AdditionalInnerClass",
"filename": "MainActivity.kt",
"lineno": 32,
},
{
"function": "whoops4",
"abs_path": "SourceFile",
"module": "io.sentry.samples.MainActivity$OneMoreInnerClass",
"filename": "SourceFile",
"lineno": 38,
},
]
},
"module": "io.sentry.samples",
"type": "RuntimeException",
"value": "whoops",
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
exc = event.interfaces["exception"].values[0]
bt = exc.stacktrace
frames = bt.frames
assert exc.type == "RuntimeException"
assert exc.value == "whoops"
assert exc.module == "io.sentry.samples"
assert frames[0].function == "otherMethod"
assert frames[0].module == "OtherActivity"
assert frames[0].lineno == 100
assert frames[0].context_line is None
assert frames[0].pre_context is None
assert frames[0].post_context is None
assert frames[1].function == "differentMethod"
assert frames[1].module == "DifferentActivity"
assert frames[1].lineno == 200
assert frames[1].context_line is None
assert frames[1].pre_context is None
assert frames[1].post_context is None
assert frames[2].function == "onCreate"
assert frames[2].module == "io.sentry.samples.MainActivity"
assert frames[2].lineno == 11
assert frames[2].context_line == " InnerClass().whoops()"
assert frames[2].pre_context == [
"",
"class MainActivity : ComponentActivity() {",
" override fun onCreate(savedInstanceState: Bundle?) {",
" super.onCreate(savedInstanceState)",
" setContentView(R.layout.activity_main)",
]
assert frames[2].post_context == [
"",
" val list = findViewById<RecyclerView>(R.id.list)",
" list.layoutManager = LinearLayoutManager(this)",
" list.adapter = TrackAdapter()",
" }",
]
assert frames[3].function == "whoops"
assert frames[3].module == "io.sentry.samples.MainActivity$InnerClass"
assert frames[3].lineno == 20
assert frames[3].context_line == " AnotherInnerClass().whoops2()"
assert frames[3].pre_context == [
" list.adapter = TrackAdapter()",
" }",
"",
" class InnerClass {",
" fun whoops() {",
]
assert frames[3].post_context == [
" }",
" }",
"",
" class AnotherInnerClass {",
" fun whoops2() {",
]
assert frames[4].function == "whoops2"
assert frames[4].module == "io.sentry.samples.MainActivity$AnotherInnerClass"
assert frames[4].lineno == 26
assert frames[4].context_line == " AdditionalInnerClass().whoops3()"
assert frames[4].pre_context == [
" }",
" }",
"",
" class AnotherInnerClass {",
" fun whoops2() {",
]
assert frames[4].post_context == [
" }",
" }",
"",
" class AdditionalInnerClass {",
" fun whoops3() {",
]
assert frames[5].function == "whoops3"
assert frames[5].module == "io.sentry.samples.MainActivity$AdditionalInnerClass"
assert frames[5].lineno == 32
assert frames[5].context_line == " OneMoreInnerClass().whoops4()"
assert frames[5].pre_context == [
" }",
" }",
"",
" class AdditionalInnerClass {",
" fun whoops3() {",
]
assert frames[5].post_context == [
" }",
" }",
"",
" class OneMoreInnerClass {",
" fun whoops4() {",
]
assert frames[6].function == "whoops4"
assert frames[6].module == "io.sentry.samples.MainActivity$OneMoreInnerClass"
assert frames[6].lineno == 38
assert frames[6].context_line == ' throw RuntimeException("whoops")'
assert frames[6].pre_context == [
" }",
" }",
"",
" class OneMoreInnerClass {",
" fun whoops4() {",
]
assert frames[6].post_context == [" }", " }", "}", ""]
@pytest.mark.skip(reason="flaky: #93951")
@requires_symbolicator
@pytest.mark.symbolicator
def test_source_lookup_with_proguard(self) -> None:
self.upload_proguard_mapping(PROGUARD_SOURCE_LOOKUP_UUID, PROGUARD_SOURCE_LOOKUP_SOURCE)
debug_id1 = str(uuid4())
self.upload_jvm_bundle(
debug_id1,
{
"io/sentry/samples/instrumentation/ui/EditActivity.jvm": EDIT_ACTIVITY_SOURCE,
},
)
debug_id2 = str(uuid4())
self.upload_jvm_bundle(
debug_id2,
{
"io/sentry/samples/instrumentation/ui/SomeService.jvm": SOME_SERVICE_SOURCE,
},
)
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {
"images": [
{
"type": "jvm",
"debug_id": debug_id1,
},
{
"type": "jvm",
"debug_id": debug_id2,
},
{
"type": "jvm",
"debug_id": str(uuid4()),
}, # does not exist
{"type": "proguard", "uuid": PROGUARD_SOURCE_LOOKUP_UUID},
{"type": "proguard", "uuid": str(uuid4())}, # does not exist
]
},
"exception": {
"values": [
{
"type": "RuntimeException",
"value": "thrown on purpose to test ProGuard Android source context",
"module": "java.lang",
"thread_id": 1,
"stacktrace": {
"frames": [
{
"filename": "ZygoteInit.java",
"function": "main",
"module": "com.android.internal.os.ZygoteInit",
"lineno": 698,
"native": False,
},
{
"filename": "ZygoteInit.java",
"function": "run",
"module": "com.android.internal.os.ZygoteInit$MethodAndArgsCaller",
"lineno": 903,
"native": False,
},
{
"filename": "Method.java",
"function": "invoke",
"module": "java.lang.reflect.Method",
"lineno": 372,
"native": False,
},
{
"filename": "Method.java",
"function": "invoke",
"module": "java.lang.reflect.Method",
"native": True,
},
{
"filename": "ActivityThread.java",
"function": "main",
"module": "android.app.ActivityThread",
"lineno": 5254,
"native": False,
},
{
"filename": "Looper.java",
"function": "loop",
"module": "android.os.Looper",
"lineno": 135,
"native": False,
},
{
"filename": "Handler.java",
"function": "dispatchMessage",
"module": "android.os.Handler",
"lineno": 95,
"native": False,
},
{
"filename": "Handler.java",
"function": "handleCallback",
"module": "android.os.Handler",
"lineno": 739,
"native": False,
},
{
"filename": "View.java",
"function": "run",
"module": "android.view.View$PerformClick",
"lineno": 19866,
"native": False,
},
{
"filename": "View.java",
"function": "performClick",
"module": "android.view.View",
"lineno": 4780,
"native": False,
},
{
"filename": "ActionMenuItemView.java",
"function": "onClick",
"module": "androidx.appcompat.view.menu.ActionMenuItemView",
"lineno": 7,
"native": False,
},
{
"filename": "ActionMenuView.java",
"function": "invokeItem",
"module": "androidx.appcompat.widget.ActionMenuView",
"lineno": 4,
"native": False,
},
{
"filename": "MenuBuilder.java",
"function": "performItemAction",
"module": "androidx.appcompat.view.menu.MenuBuilder",
"lineno": 1,
"native": False,
},
{
"filename": "MenuBuilder.java",
"function": "performItemAction",
"module": "androidx.appcompat.view.menu.MenuBuilder",
"lineno": 4,
"native": False,
},
{
"filename": "MenuItemImpl.java",
"function": "invoke",
"module": "androidx.appcompat.view.menu.MenuItemImpl",
"lineno": 15,
"native": False,
},
{
"filename": "MenuBuilder.java",
"function": "dispatchMenuItemSelected",
"module": "androidx.appcompat.view.menu.MenuBuilder",
"lineno": 5,
"native": False,
},
{
"filename": "ActionMenuView.java",
"function": "onMenuItemSelected",
"module": "androidx.appcompat.widget.ActionMenuView$MenuBuilderCallback",
"lineno": 7,
"native": False,
},
{
"filename": "Toolbar.java",
"function": "onMenuItemClick",
"module": "androidx.appcompat.widget.Toolbar$1",
"lineno": 7,
"native": False,
},
{
"filename": "R8$$SyntheticClass",
"function": "onMenuItemClick",
"module": "io.sentry.samples.instrumentation.ui.g",
"lineno": 40,
"in_app": True,
"native": False,
},
]
},
}
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
exc = event.interfaces["exception"].values[0]
bt = exc.stacktrace
frames = bt.frames
assert exc.type == "RuntimeException"
assert exc.value == "thrown on purpose to test ProGuard Android source context"
assert exc.module == "java.lang"
assert frames[18].function == "onMenuItemClick"
assert (
frames[18].module
== "io.sentry.samples.instrumentation.ui.EditActivity$$InternalSyntheticLambda$1$ebaa538726b99bb77e0f5e7c86443911af17d6e5be2b8771952ae0caa4ff2ac7$0"
)
assert frames[18].lineno == 0
assert frames[18].context_line is None
assert frames[18].pre_context is None
assert frames[18].post_context is None
assert frames[19].function == "onCreate$lambda-1"
assert frames[19].module == "io.sentry.samples.instrumentation.ui.EditActivity"
assert frames[19].lineno == 37
assert frames[19].context_line == " SomeService().helloThere()"
assert frames[19].pre_context == [
" }",
"",
" findViewById<Toolbar>(R.id.toolbar).setOnMenuItemClickListener {",
" if (it.itemId == R.id.action_save) {",
" try {",
]
assert frames[19].post_context == [
" } catch (e: Exception) {",
" Sentry.captureException(e)",
" }",
"",
" val transaction = Sentry.startTransaction(",
]
assert frames[20].function == "helloThere"
assert frames[20].module == "io.sentry.samples.instrumentation.ui.SomeService"
assert frames[20].lineno == 5
assert frames[20].context_line == " InnerClassOfSomeService().helloInner()"
assert frames[20].pre_context == [
"package io.sentry.samples.instrumentation.ui",
"",
"class SomeService {",
" fun helloThere() {",
]
assert frames[20].post_context == [
" }",
"",
" class InnerClassOfSomeService {",
" fun helloInner() {",
" AnotherClassInSameFile().helloOther()",
]
assert frames[21].function == "helloInner"
assert (
frames[21].module
== "io.sentry.samples.instrumentation.ui.SomeService$InnerClassOfSomeService"
)
assert frames[21].lineno == 10
assert frames[21].context_line == " AnotherClassInSameFile().helloOther()"
assert frames[21].pre_context == [
" InnerClassOfSomeService().helloInner()",
" }",
"",
" class InnerClassOfSomeService {",
" fun helloInner() {",
]
assert frames[21].post_context == [
" }",
" }",
"}",
"",
"class AnotherClassInSameFile {",
]
assert frames[22].function == "helloOther"
assert frames[22].module == "io.sentry.samples.instrumentation.ui.AnotherClassInSameFile"
assert frames[22].lineno == 17
assert frames[22].context_line is None
assert frames[22].pre_context is None
assert frames[22].post_context is None
assert frames[23].function == "otherFun"
assert frames[23].module == "io.sentry.samples.instrumentation.ui.AnotherClassInSameFile"
assert frames[23].lineno == 21
assert frames[23].context_line is None
assert frames[23].pre_context is None
assert frames[23].post_context is None
assert frames[24].function == "helloOtherInner"
assert (
frames[24].module
== "io.sentry.samples.instrumentation.ui.AnotherClassInSameFile$AnotherInnerClass"
)
assert frames[24].lineno == 26
assert frames[24].context_line is None
assert frames[24].pre_context is None
assert frames[24].post_context is None
@pytest.mark.skip(reason="flaky: #93949")
@requires_symbolicator
@pytest.mark.symbolicator
def test_invalid_exception(self) -> None:
event_data = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {},
"exception": {
"values": [
{"type": "PlatformException"},
{"type": "SecurityException", "module": "java.lang"},
{"type": "RemoteException", "module": "android.os"},
]
},
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
expected = [
{"type": e.get("type", None), "module": e.get("module", None)}
for e in event_data["exception"]["values"]
]
received = [
{"type": e.type, "module": e.module} for e in event.interfaces["exception"].values
]
assert received == expected
def test_is_jvm_event(self) -> None:
from sentry.lang.java.utils import is_jvm_event
event = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {"images": [{"type": "jvm", "debug_id": PROGUARD_INLINE_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "whoops4",
"abs_path": "SourceFile",
"module": "io.sentry.samples.MainActivity$OneMoreInnerClass",
"filename": "SourceFile",
"lineno": 38,
},
]
},
"module": "io.sentry.samples",
"type": "RuntimeException",
"value": "whoops",
}
]
},
"timestamp": before_now(seconds=1),
}
stacktraces = find_stacktraces_in_data(event)
assert is_jvm_event(event, stacktraces)
event = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"debug_meta": {"images": [{"type": "jvm", "debug_id": PROGUARD_INLINE_UUID}]},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "whoops4",
"abs_path": "SourceFile",
"module": "io.sentry.samples.MainActivity$OneMoreInnerClass",
"filename": "SourceFile",
"lineno": 38,
},
]
},
"module": "io.sentry.samples",
"type": "RuntimeException",
"value": "whoops",
}
]
},
"timestamp": before_now(seconds=1),
}
# has no platform
stacktraces = find_stacktraces_in_data(event)
assert is_jvm_event(event, stacktraces)
event = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"platform": "java",
"debug_meta": {},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "whoops4",
"abs_path": "SourceFile",
"module": "io.sentry.samples.MainActivity$OneMoreInnerClass",
"filename": "SourceFile",
"lineno": 38,
},
]
},
"module": "io.sentry.samples",
"type": "RuntimeException",
"value": "whoops",
}
]
},
"timestamp": before_now(seconds=1),
}
# has no modules
stacktraces = find_stacktraces_in_data(event)
assert is_jvm_event(event, stacktraces)
event = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"debug_meta": {},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"platform": "java",
"function": "whoops4",
"abs_path": "SourceFile",
"module": "io.sentry.samples.MainActivity$OneMoreInnerClass",
"filename": "SourceFile",
"lineno": 38,
},
]
},
"module": "io.sentry.samples",
"type": "RuntimeException",
"value": "whoops",
}
]
},
"timestamp": before_now(seconds=1),
}
# has a Java frame
stacktraces = find_stacktraces_in_data(event)
assert is_jvm_event(event, stacktraces)
event = {
"user": {"ip_address": "31.172.207.97"},
"extra": {},
"project": self.project.id,
"debug_meta": {},
"exception": {
"values": [
{
"stacktrace": {
"frames": [
{
"function": "whoops4",
"abs_path": "SourceFile",
"module": "io.sentry.samples.MainActivity$OneMoreInnerClass",
"filename": "SourceFile",
"lineno": 38,
},
]
},
"module": "io.sentry.samples",
"type": "RuntimeException",
"value": "whoops",
}
]
},
"timestamp": before_now(seconds=1),
}
# has no platform, frame, or modules
stacktraces = find_stacktraces_in_data(event)
assert not is_jvm_event(event, stacktraces)
| BasicResolvingIntegrationTest |
python | urllib3__urllib3 | test/test_exceptions.py | {
"start": 2420,
"end": 2649
} | class ____:
def test_header_parsing_errors(self) -> None:
hpe = HeaderParsingError([MessageDefect("defects")], "unparsed_data")
assert "defects" in str(hpe)
assert "unparsed_data" in str(hpe)
| TestFormat |
python | bokeh__bokeh | src/bokeh/models/glyphs.py | {
"start": 30439,
"end": 32149
} | class ____(Glyph, LineGlyph, FillGlyph, HatchGlyph):
    ''' Render several MultiPolygon.
    Modeled on geoJSON - the data for the ``MultiPolygons`` glyph is
    different in that the vector of values is not a vector of scalars.
    Rather, it is a "list of lists of lists of lists".
    During box selection only multi-polygons entirely contained in the
    selection box will be included.
    '''
    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
    # Example rendered in the reference documentation gallery.
    __example__ = "examples/reference/models/MultiPolygons.py"
    # Positional-argument order accepted by the corresponding glyph method.
    _args = ('xs', 'ys')
    xs = NumberSpec(default=field("xs"), help="""
    The x-coordinates for all the patches, given as a nested list.
    .. note::
        Each item in ``MultiPolygons`` represents one MultiPolygon and each
        MultiPolygon is comprised of ``n`` Polygons. Each Polygon is made of
        one exterior ring optionally followed by ``m`` interior rings (holes).
    """)
    ys = NumberSpec(default=field("ys"), help="""
    The y-coordinates for all the patches, given as a "list of lists".
    .. note::
        Each item in ``MultiPolygons`` represents one MultiPolygon and each
        MultiPolygon is comprised of ``n`` Polygons. Each Polygon is made of
        one exterior ring optionally followed by ``m`` interior rings (holes).
    """)
    # Visual property mixins: stroke, interior fill, and hatch pattern.
    line_props = Include(LineProps, help="""
    The {prop} values for the multipolygons.
    """)
    fill_props = Include(FillProps, help="""
    The {prop} values for the multipolygons.
    """)
    hatch_props = Include(HatchProps, help="""
    The {prop} values for the multipolygons.
    """)
| MultiPolygons |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_str_returned.py | {
"start": 192,
"end": 303
} | class ____:
"""__str__ returns <type 'str'>"""
def __str__(self):
return "some str"
| FirstGoodStr |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 61015,
"end": 61617
} | class ____(PrefectFilterBaseModel):
"""Filter by `BlockDocument.is_anonymous`."""
eq_: Optional[bool] = Field(
default=None,
description=(
"Filter block documents for only those that are or are not anonymous."
),
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.eq_ is not None:
filters.append(db.BlockDocument.is_anonymous.is_(self.eq_))
return filters
| BlockDocumentFilterIsAnonymous |
python | doocs__leetcode | solution/2300-2399/2302.Count Subarrays With Score Less Than K/Solution.py | {
"start": 0,
"end": 435
} | class ____:
def countSubarrays(self, nums: List[int], k: int) -> int:
s = list(accumulate(nums, initial=0))
ans = 0
for i in range(1, len(s)):
l, r = 0, i
while l < r:
mid = (l + r + 1) >> 1
if (s[i] - s[i - mid]) * mid < k:
l = mid
else:
r = mid - 1
ans += l
return ans
| Solution |
python | scipy__scipy | scipy/stats/_axis_nan_policy.py | {
"start": 1614,
"end": 32644
} | class ____(RuntimeWarning):
pass
def _broadcast_arrays(arrays, axis=None, xp=None):
    """Broadcast arrays against one another, ignoring the specified axes.

    Returns an empty tuple for empty input; otherwise a list of arrays
    broadcast to the shapes computed by `_broadcast_shapes`.
    """
    arrays = tuple(arrays)
    if not arrays:
        return arrays
    if xp is None:
        xp = array_namespace(*arrays)
    converted = [xp.asarray(arr) for arr in arrays]
    target_shapes = _broadcast_shapes([arr.shape for arr in converted], axis)
    if axis is None:
        # One common shape applies to every array.
        target_shapes = [target_shapes] * len(converted)
    return [xp.broadcast_to(arr, shape)
            for arr, shape in zip(converted, target_shapes)]
def _broadcast_shapes(shapes, axis=None):
    """
    Broadcast shapes, ignoring incompatibility of specified axes

    Returns the common broadcast shape as a tuple when `axis` is None;
    otherwise a list with one shape per input, in which the ignored axes
    keep their original (per-input) lengths.
    """
    if not shapes:
        return shapes
    # input validation
    if axis is not None:
        axis = np.atleast_1d(axis)
        message = '`axis` must be an integer, a tuple of integers, or `None`.'
        try:
            with np.errstate(invalid='ignore'):
                axis_int = axis.astype(int)
        except ValueError as e:
            raise AxisError(message) from e
        # reject non-integral values such as 1.5 (astype would truncate them)
        if not np.array_equal(axis_int, axis):
            raise AxisError(message)
        axis = axis_int
    # First, ensure all shapes have same number of dimensions by prepending 1s.
    n_dims = max([len(shape) for shape in shapes])
    new_shapes = np.ones((len(shapes), n_dims), dtype=int)
    for row, shape in zip(new_shapes, shapes):
        row[len(row)-len(shape):] = shape  # can't use negative indices (-0:)
    # Remove the shape elements of the axes to be ignored, but remember them.
    if axis is not None:
        axis[axis < 0] = n_dims + axis[axis < 0]
        axis = np.sort(axis)
        if axis[-1] >= n_dims or axis[0] < 0:
            message = (f"`axis` is out of bounds "
                       f"for array of dimension {n_dims}")
            raise AxisError(message)
        if len(np.unique(axis)) != len(axis):
            raise AxisError("`axis` must contain only distinct elements")
        removed_shapes = new_shapes[:, axis]
        new_shapes = np.delete(new_shapes, axis, axis=1)
    # If arrays are broadcastable, shape elements that are 1 may be replaced
    # with a corresponding non-1 shape element. Assuming arrays are
    # broadcastable, that final shape element can be found with:
    new_shape = np.max(new_shapes, axis=0)
    # except in case of an empty array:
    new_shape *= new_shapes.all(axis=0)
    # Among all arrays, there can only be one unique non-1 shape element.
    # Therefore, if any non-1 shape element does not match what we found
    # above, the arrays must not be broadcastable after all.
    if np.any(~((new_shapes == 1) | (new_shapes == new_shape))):
        raise ValueError("Array shapes are incompatible for broadcasting.")
    if axis is not None:
        # Add back the shape elements that were ignored
        # (offset each insertion point by the number of earlier insertions)
        new_axis = axis - np.arange(len(axis))
        new_shapes = [tuple(np.insert(new_shape, new_axis, removed_shape))
                      for removed_shape in removed_shapes]
        return new_shapes
    else:
        return tuple(new_shape)
def _broadcast_array_shapes_remove_axis(arrays, axis=None):
    """
    Broadcast shapes of arrays, dropping specified axes

    Given a sequence of arrays `arrays` and an integer or tuple `axis`,
    find the shape of the broadcast result after consuming/dropping `axis` --
    i.e. the output shape of a typical hypothesis test on `arrays`
    vectorized along `axis`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.stats._axis_nan_policy import _broadcast_array_shapes_remove_axis
    >>> a = np.zeros((5, 2, 1))
    >>> b = np.zeros((9, 3))
    >>> _broadcast_array_shapes_remove_axis((a, b), 1)
    (5, 3)
    """
    # Here `axis=None` means "drop no axes" -- arrays are NOT raveled first.
    return _broadcast_shapes_remove_axis([arr.shape for arr in arrays], axis)
def _broadcast_shapes_remove_axis(shapes, axis=None):
    """
    Broadcast shapes, dropping specified axes

    Same as `_broadcast_array_shapes_remove_axis`, but operating directly on
    a sequence of array shapes `shapes` instead of the arrays themselves.
    """
    broadcast = _broadcast_shapes(shapes, axis)
    result_shape = broadcast[0]
    if axis is not None:
        result_shape = np.delete(result_shape, axis)
    return tuple(result_shape)
def _broadcast_concatenate(arrays, axis, paired=False, xp=None):
    """Broadcast `arrays` against one another, then concatenate along `axis`.

    When `paired`, no axis is exempted from broadcasting, so the samples are
    broadcast over every axis before concatenation.
    """
    if xp is None:
        xp = array_namespace(*arrays)
    ignored_axis = None if paired else axis
    broadcast = _broadcast_arrays(arrays, ignored_axis, xp=xp)
    return xp.concat(broadcast, axis=axis)
def _remove_nans(samples, paired, xp=None):
"Remove nans from paired or unpaired 1D samples"
# potential optimization: don't copy arrays that don't contain nans
xp = array_namespace(*samples)
if not paired:
return [sample[~xp.isnan(sample)] for sample in samples]
# for paired samples, we need to remove the whole pair when any part
# has a nan
nans = xp.isnan(samples[0])
for sample in samples[1:]:
nans = nans | xp.isnan(sample)
not_nans = ~nans
return [sample[not_nans] for sample in samples]
def _remove_sentinel(samples, paired, sentinel):
"Remove sentinel values from paired or unpaired 1D samples"
# could consolidate with `_remove_nans`, but it's not quite as simple as
# passing `sentinel=np.nan` because `(np.nan == np.nan) is False`
# potential optimization: don't copy arrays that don't contain sentinel
if not paired:
return [sample[sample != sentinel] for sample in samples]
# for paired samples, we need to remove the whole pair when any part
# has a nan
sentinels = (samples[0] == sentinel)
for sample in samples[1:]:
sentinels = sentinels | (sample == sentinel)
not_sentinels = ~sentinels
return [sample[not_sentinels] for sample in samples]
def _masked_arrays_2_sentinel_arrays(samples):
    """Convert masked arrays to regular arrays with a sentinel value.

    Returns ``(out_samples, sentinel)``: masked entries are replaced with
    `sentinel`, a value not otherwise present in the data; `sentinel` is
    None when no input has a mask (inputs returned unmodified).
    """
    # masked arrays in `samples` are converted to regular arrays, and values
    # corresponding with masked elements are replaced with a sentinel value
    # return without modifying arrays if none have a mask
    has_mask = False
    for sample in samples:
        mask = getattr(sample, 'mask', False)
        has_mask = has_mask or np.any(mask)
    if not has_mask:
        return samples, None  # None means there is no sentinel value
    # Choose a sentinel value. We can't use `np.nan`, because sentinel (masked)
    # values are always omitted, but there are different nan policies.
    dtype = np.result_type(*samples)
    dtype = dtype if np.issubdtype(dtype, np.number) else np.float64
    for i in range(len(samples)):
        # Things get more complicated if the arrays are of different types.
        # We could have different sentinel values for each array, but
        # the purpose of this code is convenience, not efficiency.
        samples[i] = samples[i].astype(dtype, copy=False)
    inexact = np.issubdtype(dtype, np.inexact)
    info = np.finfo if inexact else np.iinfo
    max_possible, min_possible = info(dtype).max, info(dtype).min
    nextafter = np.nextafter if inexact else (lambda x, _: x - 1)
    # Search downward from the dtype's max for a value absent from all samples.
    sentinel = max_possible
    # For simplicity, min_possible/np.infs are not candidate sentinel values
    while sentinel > min_possible:
        for sample in samples:
            if np.any(sample == sentinel):  # choose a new sentinel value
                sentinel = nextafter(sentinel, -np.inf)
                break
        else:  # when sentinel value is OK, break the while loop
            break
    else:
        message = ("This function replaces masked elements with sentinel "
                   "values, but the data contains all distinct values of this "
                   "data type. Consider promoting the dtype to `np.float64`.")
        raise ValueError(message)
    # replace masked elements with sentinel value
    out_samples = []
    for sample in samples:
        mask = getattr(sample, 'mask', None)
        if mask is not None:  # turn all masked arrays into sentinel arrays
            mask = np.broadcast_to(mask, sample.shape)
            sample = sample.data.copy() if np.any(mask) else sample.data
            sample = np.asarray(sample)  # `sample.data` could be a memoryview?
            sample[mask] = sentinel
        out_samples.append(sample)
    return out_samples, sentinel
def _check_empty_inputs(samples, axis, xp=None):
    """Produce the output of a vectorized hypotest when a sample is empty.

    Returns None when every sample is non-empty (the statistic must actually
    be computed); otherwise an appropriately-shaped array of NaNs (possibly
    itself empty).
    """
    if xp is None:
        xp = array_namespace(*samples)
    if all(xp_size(sample) != 0 for sample in samples):
        # No empty samples -- the caller performs the real computation.
        return None
    out_shape = _broadcast_array_shapes_remove_axis(samples, axis)
    NaN = _get_nan(*samples)
    return xp.full(out_shape, xp.nan, dtype=NaN.dtype)
def _add_reduced_axes(res, reduced_axes, keepdims, xp=np):
    """Reinsert reduced axes into each array of `res` when ``keepdims=True``.

    Plain ints in `res` are passed through untouched. `xp` is accepted
    because callers pass it, but `xpx.expand_dims` dispatches on the array
    type itself.
    """
    if not keepdims:
        return res
    return [output if isinstance(output, int)
            else xpx.expand_dims(output, axis=reduced_axes)
            for output in res]
# Standard docstring / signature entries for `axis`, `nan_policy`, `keepdims`
_name = 'axis'
_desc = (
"""If an int, the axis of the input along which to compute the statistic.
The statistic of each axis-slice (e.g. row) of the input will appear in a
corresponding element of the output.
If ``None``, the input will be raveled before computing the statistic."""
.split('\n'))
def _get_axis_params(default_axis=0, _name=_name, _desc=_desc):  # bind NOW
    """Build the numpydoc entry and `inspect.Parameter` for `axis`.

    `_name`/`_desc` are captured as defaults so the module-level variables
    can be reused for other parameters afterwards.
    """
    type_str = f"int or None, default: {default_axis}"
    doc_entry = Parameter(_name, type_str, _desc)
    sig_entry = inspect.Parameter(
        _name, inspect.Parameter.KEYWORD_ONLY, default=default_axis)
    return doc_entry, sig_entry
_name = 'nan_policy'
_type = "{'propagate', 'omit', 'raise'}"
_desc = (
"""Defines how to handle input NaNs.
- ``propagate``: if a NaN is present in the axis slice (e.g. row) along
which the statistic is computed, the corresponding entry of the output
will be NaN.
- ``omit``: NaNs will be omitted when performing the calculation.
If insufficient data remains in the axis slice along which the
statistic is computed, the corresponding entry of the output will be
NaN.
- ``raise``: if a NaN is present, a ``ValueError`` will be raised."""
.split('\n'))
_nan_policy_parameter_doc = Parameter(_name, _type, _desc)
_nan_policy_parameter = inspect.Parameter(_name,
inspect.Parameter.KEYWORD_ONLY,
default='propagate')
_name = 'keepdims'
_type = "bool, default: False"
_desc = (
"""If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array."""
.split('\n'))
_keepdims_parameter_doc = Parameter(_name, _type, _desc)
_keepdims_parameter = inspect.Parameter(_name,
inspect.Parameter.KEYWORD_ONLY,
default=False)
_standard_note_addition = (
"""\nBeginning in SciPy 1.9, ``np.matrix`` inputs (not recommended for new
code) are converted to ``np.ndarray`` before the calculation is performed. In
this case, the output will be a scalar or ``np.ndarray`` of appropriate shape
rather than a 2D ``np.matrix``. Similarly, while masked elements of masked
arrays are ignored, the output will be a scalar or ``np.ndarray`` rather than a
masked array with ``mask=False``.""").split('\n')
def _axis_nan_policy_factory(tuple_to_result, default_axis=0,
                             n_samples=1, paired=False,
                             result_to_tuple=None, too_small=0,
                             n_outputs=2, kwd_samples=(), override=None):
    """Factory for a wrapper that adds axis/nan_policy params to a function.

    Parameters
    ----------
    tuple_to_result : callable
        Callable that returns an object of the type returned by the function
        being wrapped (e.g. the namedtuple or dataclass returned by a
        statistical test) provided the separate components (e.g. statistic,
        pvalue).
    default_axis : int, default: 0
        The default value of the axis argument. Standard is 0 except when
        backwards compatibility demands otherwise (e.g. `None`).
    n_samples : int or callable, default: 1
        The number of data samples accepted by the function
        (e.g. `mannwhitneyu`), a callable that accepts a dictionary of
        parameters passed into the function and returns the number of data
        samples (e.g. `wilcoxon`), or `None` to indicate an arbitrary number
        of samples (e.g. `kruskal`).
    paired : {False, True}
        Whether the function being wrapped treats the samples as paired (i.e.
        corresponding elements of each sample should be considered as different
        components of the same sample.)
    result_to_tuple : callable, optional
        Function that unpacks the results of the function being wrapped into
        a tuple. This is essentially the inverse of `tuple_to_result`. Default
        is `None`, which is appropriate for statistical tests that return a
        statistic, pvalue tuple (rather than, e.g., a non-iterable dataclass).
    too_small : int or callable, default: 0
        The largest unacceptably small sample for the function being wrapped.
        For example, some functions require samples of size two or more or they
        raise an error. This argument prevents the error from being raised when
        input is not 1D and instead places a NaN in the corresponding element
        of the result. If callable, it must accept a list of samples, axis,
        and a dictionary of keyword arguments passed to the wrapper function as
        arguments and return a bool indicating whether the samples passed are
        too small.
    n_outputs : int or callable, default: 2
        The number of outputs produced by the function given 1d sample(s). For
        example, hypothesis tests that return a namedtuple or result object
        with attributes ``statistic`` and ``pvalue`` use the default
        ``n_outputs=2``; summary statistics with scalar output use
        ``n_outputs=1``. Alternatively, may be a callable that accepts a
        dictionary of arguments passed into the wrapped function and returns
        the number of outputs corresponding with those arguments.
    kwd_samples : sequence, default: ()
        The names of keyword parameters that should be treated as samples. For
        example, `gmean` accepts as its first argument a sample `a` but
        also `weights` as a fourth, optional keyword argument. In this case, we
        use `n_samples=1` and kwd_samples=['weights'].
    override : dict, default: {'vectorization': False, 'nan_propagation': True}
        Pass a dictionary with ``'vectorization': True`` to ensure that the
        decorator overrides the function's behavior for multidimensional input.
        Use ``'nan_propagation': False`` to ensure that the decorator does not
        override the function's behavior for ``nan_policy='propagate'``.
    """
    # Specify which existing behaviors the decorator must override
    temp = override or {}
    override = {'vectorization': False,
                'nan_propagation': True}
    override.update(temp)
    if result_to_tuple is None:
        def result_to_tuple(res, _):
            return res
    if not callable(too_small):
        def is_too_small(samples, *ts_args, axis=-1, **ts_kwargs):
            for sample in samples:
                if sample.shape[axis] <= too_small:
                    return True
            return False
    else:
        is_too_small = too_small
    # The actual decorator applied to the statistical function.
    def axis_nan_policy_decorator(hypotest_fun_in):
        @wraps(hypotest_fun_in)
        def axis_nan_policy_wrapper(*args, _no_deco=False, **kwds):
            if _no_deco:  # for testing, decorator does nothing
                return hypotest_fun_in(*args, **kwds)
            # For now, skip the decorator entirely if using array API. In the future,
            # we'll probably want to use it for `keepdims`, `axis` tuples, etc.
            if len(args) == 0:  # extract sample from `kwds` if there are no `args`
                used_kwd_samples = list(set(kwds).intersection(set(kwd_samples)))
                temp = used_kwd_samples[:1]
            else:
                temp = args[0]
            if is_lazy_array(temp):
                msg = ("Use of `nan_policy` and `keepdims` "
                       "is incompatible with lazy arrays.")
                if 'nan_policy' in kwds or 'keepdims' in kwds:
                    raise NotImplementedError(msg)
                return hypotest_fun_in(*args, **kwds)
            # We need to be flexible about whether position or keyword
            # arguments are used, but we need to make sure users don't pass
            # both for the same parameter. To complicate matters, some
            # functions accept samples with *args, and some functions already
            # accept `axis` and `nan_policy` as positional arguments.
            # The strategy is to make sure that there is no duplication
            # between `args` and `kwds`, combine the two into `kwds`, then
            # the samples, `nan_policy`, and `axis` from `kwds`, as they are
            # dealt with separately.
            # Check for intersection between positional and keyword args
            params = list(inspect.signature(hypotest_fun_in).parameters)
            if n_samples is None:
                # Give unique names to each positional sample argument
                # Note that *args can't be provided as a keyword argument
                params = [f"arg{i}" for i in range(len(args))] + params[1:]
            # raise if there are too many positional args
            maxarg = (np.inf if inspect.getfullargspec(hypotest_fun_in).varargs
                      else len(inspect.getfullargspec(hypotest_fun_in).args))
            if len(args) > maxarg:  # let the function raise the right error
                hypotest_fun_in(*args, **kwds)
            # raise if multiple values passed for same parameter
            d_args = dict(zip(params, args))
            intersection = set(d_args) & set(kwds)
            if intersection:  # let the function raise the right error
                hypotest_fun_in(*args, **kwds)
            # Consolidate other positional and keyword args into `kwds`
            kwds.update(d_args)
            # rename avoids UnboundLocalError
            if callable(n_samples):
                # Future refactoring idea: no need for callable n_samples.
                # Just replace `n_samples` and `kwd_samples` with a single
                # list of the names of all samples, and treat all of them
                # as `kwd_samples` are treated below.
                n_samp = n_samples(kwds)
            else:
                n_samp = n_samples or len(args)
            # get the number of outputs
            n_out = n_outputs  # rename to avoid UnboundLocalError
            if callable(n_out):
                n_out = n_out(kwds)
            # If necessary, rearrange function signature: accept other samples
            # as positional args right after the first n_samp args
            kwd_samp = [name for name in kwd_samples
                        if kwds.get(name, None) is not None]
            n_kwd_samp = len(kwd_samp)
            if not kwd_samp:
                hypotest_fun_out = hypotest_fun_in
            else:
                def hypotest_fun_out(*samples, **kwds):
                    new_kwds = dict(zip(kwd_samp, samples[n_samp:]))
                    kwds.update(new_kwds)
                    return hypotest_fun_in(*samples[:n_samp], **kwds)
            # Extract the things we need here
            try:  # if something is missing
                samples = [kwds.pop(param) for param in (params[:n_samp] + kwd_samp)]
                xp = array_namespace(*samples)
                samples = xp_promote(*samples, xp=xp)
                samples = (samples,) if not isinstance(samples, tuple) else samples
                samples = [xpx.atleast_nd(sample, ndim=1) for sample in samples]
            except KeyError:  # let the function raise the right error
                # might need to revisit this if required arg is not a "sample"
                hypotest_fun_in(*args, **kwds)
            vectorized = True if 'axis' in params else False
            vectorized = vectorized and not override['vectorization']
            axis = kwds.pop('axis', default_axis)
            nan_policy = kwds.pop('nan_policy', 'propagate')
            keepdims = kwds.pop("keepdims", False)
            del args  # avoid the possibility of passing both `args` and `kwds`
            # convert masked arrays to regular arrays with sentinel values
            sentinel = None
            if is_numpy(xp):
                samples, sentinel = _masked_arrays_2_sentinel_arrays(samples)
            # standardize to always work along last axis
            reduced_axes = axis
            if axis is None:
                if samples:
                    # when axis=None, take the maximum of all dimensions since
                    # all the dimensions are reduced.
                    n_dims = max([xp.asarray(sample).ndim for sample in samples])
                    reduced_axes = tuple(range(n_dims))
                samples = [xp_ravel(sample) for sample in samples]
            else:
                # don't ignore any axes when broadcasting if paired
                samples = _broadcast_arrays(samples, axis=axis if not paired else None)
                axis = (axis,) if np.isscalar(axis) else axis
                n_axes = len(axis)
                # move all axes in `axis` to the end to be raveled
                samples = [xp.moveaxis(sample, axis, tuple(range(-len(axis), 0)))
                           for sample in samples]
                shapes = [sample.shape for sample in samples]
                # New shape is unchanged for all axes _not_ in `axis`
                # At the end, we append the product of the shapes of the axes
                # in `axis`. Appending -1 doesn't work for zero-size arrays!
                new_shapes = [shape[:-n_axes] + (math.prod(shape[-n_axes:]),)
                              for shape in shapes]
                samples = [xp.reshape(sample, new_shape)
                           for sample, new_shape in zip(samples, new_shapes)]
                axis = -1  # work over the last axis
            NaN = _get_nan(*samples) if samples else xp.nan
            # if axis is not needed, just handle nan_policy and return
            ndims = np.array([sample.ndim for sample in samples])  # NumPy OK for ndims
            if np.all(ndims <= 1):
                # Addresses nan_policy == "raise"
                if nan_policy != 'propagate' or override['nan_propagation']:
                    contains_nan = [_contains_nan(sample, nan_policy)
                                    for sample in samples]
                else:
                    # Behave as though there are no NaNs (even if there are)
                    contains_nan = [False] * len(samples)
                # Addresses nan_policy == "propagate"
                if any(contains_nan) and (nan_policy == 'propagate'
                                          and override['nan_propagation']):
                    res = xp.full(n_out, xp.nan, dtype=NaN.dtype)
                    res = _add_reduced_axes(res, reduced_axes, keepdims)
                    return tuple_to_result(*res)
                # Addresses nan_policy == "omit"
                too_small_msg = too_small_1d_not_omit
                if any(contains_nan) and nan_policy == 'omit':
                    # consider passing in contains_nan
                    samples = _remove_nans(samples, paired)
                    too_small_msg = too_small_1d_omit
                if sentinel:
                    samples = _remove_sentinel(samples, paired, sentinel)
                if is_too_small(samples, kwds):
                    warnings.warn(too_small_msg, SmallSampleWarning, stacklevel=2)
                    res = xp.full(n_out, xp.nan, dtype=NaN.dtype)
                    res = _add_reduced_axes(res, reduced_axes, keepdims)
                    return tuple_to_result(*res)
                res = hypotest_fun_out(*samples, **kwds)
                res = result_to_tuple(res, n_out)
                res = _add_reduced_axes(res, reduced_axes, keepdims)
                return tuple_to_result(*res)
            # check for empty input
            empty_output = _check_empty_inputs(samples, axis, xp=xp)
            # only return empty output if zero sized input is too small.
            if (
                empty_output is not None
                and (is_too_small(samples, kwds) or xp_size(empty_output) == 0)
            ):
                if is_too_small(samples, kwds) and xp_size(empty_output) != 0:
                    warnings.warn(too_small_nd_not_omit, SmallSampleWarning,
                                  stacklevel=2)
                res = [xp_copy(empty_output) for i in range(n_out)]
                res = _add_reduced_axes(res, reduced_axes, keepdims)
                return tuple_to_result(*res)
            if not is_numpy(xp) and 'nan_policy' in kwds:
                msg = ("Use of `nan_policy` is incompatible with multidimensional "
                       "non-NumPy arrays.")
                raise NotImplementedError(msg)
            if not is_numpy(xp):
                res = hypotest_fun_out(*samples, axis=axis, **kwds)
                res = result_to_tuple(res, n_out)
                res = _add_reduced_axes(res, reduced_axes, keepdims, xp=xp)
                return tuple_to_result(*res)
            # otherwise, concatenate all samples along axis, remembering where
            # each separate sample begins
            lengths = np.array([sample.shape[axis] for sample in samples])
            split_indices = np.cumsum(lengths)
            x = _broadcast_concatenate(samples, axis, paired=paired)
            # Addresses nan_policy == "raise"
            if nan_policy != 'propagate' or override['nan_propagation']:
                contains_nan = _contains_nan(x, nan_policy)
            else:
                contains_nan = False  # behave like there are no NaNs
            if vectorized and not contains_nan and not sentinel:
                res = hypotest_fun_out(*samples, axis=axis, **kwds)
                res = result_to_tuple(res, n_out)
                res = _add_reduced_axes(res, reduced_axes, keepdims)
                return tuple_to_result(*res)
            # Non-vectorized / NaN-containing path: apply the 1-D function
            # to every axis-slice of the concatenated array.
            # Addresses nan_policy == "omit"
            if contains_nan and nan_policy == 'omit':
                def hypotest_fun(x):
                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
                    samples = _remove_nans(samples, paired)
                    if sentinel:
                        samples = _remove_sentinel(samples, paired, sentinel)
                    if is_too_small(samples, kwds):
                        warnings.warn(too_small_nd_omit, SmallSampleWarning,
                                      stacklevel=4)
                        return np.full(n_out, NaN)
                    return result_to_tuple(hypotest_fun_out(*samples, **kwds), n_out)
            # Addresses nan_policy == "propagate"
            elif (contains_nan and nan_policy == 'propagate'
                  and override['nan_propagation']):
                def hypotest_fun(x):
                    if np.isnan(x).any():
                        return np.full(n_out, NaN)
                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
                    if sentinel:
                        samples = _remove_sentinel(samples, paired, sentinel)
                    if is_too_small(samples, kwds):
                        return np.full(n_out, NaN)
                    return result_to_tuple(hypotest_fun_out(*samples, **kwds), n_out)
            else:
                def hypotest_fun(x):
                    samples = np.split(x, split_indices)[:n_samp+n_kwd_samp]
                    if sentinel:
                        samples = _remove_sentinel(samples, paired, sentinel)
                    if is_too_small(samples, kwds):
                        return np.full(n_out, NaN)
                    return result_to_tuple(hypotest_fun_out(*samples, **kwds), n_out)
            x = np.moveaxis(x, axis, 0)
            res = np.apply_along_axis(hypotest_fun, axis=0, arr=x)
            res = _add_reduced_axes(res, reduced_axes, keepdims)
            return tuple_to_result(*res)
        # Inject the standard axis/nan_policy/keepdims entries into the
        # wrapped function's docstring...
        _axis_parameter_doc, _axis_parameter = _get_axis_params(default_axis)
        doc = FunctionDoc(axis_nan_policy_wrapper)
        parameter_names = [param.name for param in doc['Parameters']]
        if 'axis' in parameter_names:
            doc['Parameters'][parameter_names.index('axis')] = (
                _axis_parameter_doc)
        else:
            doc['Parameters'].append(_axis_parameter_doc)
        if 'nan_policy' in parameter_names:
            doc['Parameters'][parameter_names.index('nan_policy')] = (
                _nan_policy_parameter_doc)
        else:
            doc['Parameters'].append(_nan_policy_parameter_doc)
        if 'keepdims' in parameter_names:
            doc['Parameters'][parameter_names.index('keepdims')] = (
                _keepdims_parameter_doc)
        else:
            doc['Parameters'].append(_keepdims_parameter_doc)
        doc['Notes'] += _standard_note_addition
        doc = str(doc).split("\n", 1)[1].lstrip(" \n")  # remove signature
        axis_nan_policy_wrapper.__doc__ = str(doc)
        # ...and advertise the new keyword-only parameters in its signature.
        sig = inspect.signature(axis_nan_policy_wrapper)
        parameters = sig.parameters
        parameter_list = list(parameters.values())
        if 'axis' not in parameters:
            parameter_list.append(_axis_parameter)
        if 'nan_policy' not in parameters:
            parameter_list.append(_nan_policy_parameter)
        if 'keepdims' not in parameters:
            parameter_list.append(_keepdims_parameter)
        sig = sig.replace(parameters=parameter_list)
        axis_nan_policy_wrapper.__signature__ = sig
        return axis_nan_policy_wrapper
    return axis_nan_policy_decorator
| SmallSampleWarning |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/sig.py | {
"start": 482,
"end": 656
} | class ____(_SerializableFunctionSignatureException):
"""Indicate that a function signature can't be set up for serialization."""
| SerializableFunctionSignatureSetupException |
python | getsentry__sentry | src/sentry/api/serializers/models/group_stream.py | {
"start": 9670,
"end": 11634
} | class ____(TypedDict):
id: str
# from base response
shareId: NotRequired[str]
shortId: NotRequired[str]
title: NotRequired[str]
culprit: NotRequired[str | None]
permalink: NotRequired[str]
logger: NotRequired[str | None]
level: NotRequired[str]
status: NotRequired[str]
statusDetails: NotRequired[GroupStatusDetailsResponseOptional]
substatus: NotRequired[str | None]
isPublic: NotRequired[bool]
platform: NotRequired[str | None]
priority: NotRequired[str | None]
priorityLockedAt: NotRequired[datetime | None]
seerFixabilityScore: NotRequired[float | None]
seerAutofixLastTriggered: NotRequired[datetime | None]
project: NotRequired[GroupProjectResponse]
type: NotRequired[str]
issueType: NotRequired[str]
issueCategory: NotRequired[str]
metadata: NotRequired[dict[str, Any]]
numComments: NotRequired[int]
assignedTo: NotRequired[ActorSerializerResponse]
isBookmarked: NotRequired[bool]
isSubscribed: NotRequired[bool]
subscriptionDetails: NotRequired[SubscriptionDetails | None]
hasSeen: NotRequired[bool]
annotations: NotRequired[list[GroupAnnotation]]
# from base response optional
isUnhandled: NotRequired[bool]
count: NotRequired[str]
userCount: NotRequired[int]
firstSeen: NotRequired[datetime | None]
lastSeen: NotRequired[datetime | None]
# from the serializer itself
stats: NotRequired[dict[str, Any]]
lifetime: NotRequired[dict[str, Any]]
filtered: NotRequired[_Filtered | None]
sessionCount: NotRequired[int]
inbox: NotRequired[InboxDetails]
owners: NotRequired[OwnersSerialized]
pluginActions: NotRequired[list[tuple[str, str]]]
pluginIssues: NotRequired[list[dict[str, Any]]]
integrationIssues: NotRequired[list[dict[str, Any]]]
sentryAppIssues: NotRequired[list[dict[str, Any]]]
latestEventHasAttachments: NotRequired[bool]
| StreamGroupSerializerSnubaResponse |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_florida_zip.py | {
"start": 742,
"end": 1743
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_florida_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_florida_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidFloridaZip |
python | pytorch__pytorch | tools/test/test_create_alerts.py | {
"start": 1471,
"end": 2772
} | class ____(TestCase):
# Should fail when jobs are ? ? Fail Fail
def test_alert(self) -> None:
modified_data: list[Any] = [{}]
modified_data.append({})
modified_data.extend(MOCK_TEST_DATA)
status = JobStatus(JOB_NAME, modified_data)
self.assertTrue(status.should_alert())
# test filter job names
def test_job_filter(self) -> None:
job_names = [
"pytorch_linux_xenial_py3_6_gcc5_4_test",
"pytorch_linux_xenial_py3_6_gcc5_4_test2",
]
self.assertListEqual(
filter_job_names(job_names, ""),
job_names,
"empty regex should match all jobs",
)
self.assertListEqual(filter_job_names(job_names, ".*"), job_names)
self.assertListEqual(filter_job_names(job_names, ".*xenial.*"), job_names)
self.assertListEqual(
filter_job_names(job_names, ".*xenial.*test2"),
["pytorch_linux_xenial_py3_6_gcc5_4_test2"],
)
self.assertListEqual(filter_job_names(job_names, ".*xenial.*test3"), [])
self.assertRaises(
Exception,
lambda: filter_job_names(job_names, "["),
msg="malformed regex should throw exception",
)
if __name__ == "__main__":
main()
| TestGitHubPR |
python | walkccc__LeetCode | solutions/1712. Ways to Split Array Into Three Subarrays/1712.py | {
"start": 0,
"end": 1135
} | class ____:
def waysToSplit(self, nums: list[int]) -> int:
MOD = 1_000_000_007
n = len(nums)
ans = 0
prefix = list(itertools.accumulate(nums))
def firstGreaterEqual(i: int) -> int:
"""Finds the first index j s.t.
Mid = prefix[j] - prefix[i] >= left = prefix[i]
"""
l = i + 1
r = n - 1
while l < r:
m = (l + r) // 2
if prefix[m] - prefix[i] >= prefix[i]:
r = m
else:
l = m + 1
return l
def firstGreater(i: int) -> int:
"""Finds the first index k s.t.
mid = prefix[k] - prefix[i] > right = prefix[-1] - prefix[k]
"""
l = i + 1
r = n - 1
while l < r:
m = (l + r) // 2
if prefix[m] - prefix[i] > prefix[-1] - prefix[m]:
r = m
else:
l = m + 1
return l
for i in range(n - 2):
j = firstGreaterEqual(i)
if j == n - 1:
break
mid = prefix[j] - prefix[i]
right = prefix[-1] - prefix[j]
if mid > right:
continue
k = firstGreater(i)
ans = (ans + k - j) % MOD
return ans
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/multi/test_get_level_values.py | {
"start": 174,
"end": 4308
} | class ____:
def test_get_level_values_box_datetime64(self):
dates = date_range("1/1/2000", periods=4)
levels = [dates, [0, 1]]
codes = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]]
index = MultiIndex(levels=levels, codes=codes)
assert isinstance(index.get_level_values(0)[0], Timestamp)
def test_get_level_values(idx):
result = idx.get_level_values(0)
expected = Index(["foo", "foo", "bar", "baz", "qux", "qux"], name="first")
tm.assert_index_equal(result, expected)
assert result.name == "first"
result = idx.get_level_values("first")
expected = idx.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(["A", "B"]), CategoricalIndex([1, 2, 3])],
codes=[np.array([0, 0, 0, 1, 1, 1]), np.array([0, 1, 2, 0, 1, 2])],
)
exp = CategoricalIndex(["A", "A", "A", "B", "B", "B"])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_level_values_all_na():
# GH#17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]]
index = MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = Index(["a", np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_int_with_na():
# GH#17924
arrays = [["a", "b", "b"], [1, np.nan, 2]]
index = MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [["a", "b", "b"], [np.nan, np.nan, 2]]
index = MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na():
arrays = [[np.nan, np.nan, np.nan], ["a", np.nan, 1]]
index = MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = Index(["a", np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [["a", "b", "b"], pd.DatetimeIndex([0, 1, pd.NaT])]
index = MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_when_periods():
# GH33131. See also discussion in GH32669.
# This test can probably be removed when PeriodIndex._engine is removed.
from pandas import (
Period,
PeriodIndex,
)
idx = MultiIndex.from_arrays(
[PeriodIndex([Period("2019Q1"), Period("2019Q2")], name="b")]
)
idx2 = MultiIndex.from_arrays(
[idx._get_level_values(level) for level in range(idx.nlevels)]
)
assert all(x.is_monotonic_increasing for x in idx2.levels)
def test_values_loses_freq_of_underlying_index():
# GH#49054
idx = pd.DatetimeIndex(date_range("20200101", periods=3, freq="BME"))
expected = idx.copy(deep=True)
idx2 = Index([1, 2, 3])
midx = MultiIndex(levels=[idx, idx2], codes=[[0, 1, 2], [0, 1, 2]])
midx.values
assert idx.freq is not None
tm.assert_index_equal(idx, expected)
def test_get_level_values_gets_frequency_correctly():
# GH#57949 GH#58327
datetime_index = date_range(start=pd.to_datetime("1/1/2018"), periods=4, freq="YS")
other_index = ["A"]
multi_index = MultiIndex.from_product([datetime_index, other_index])
assert multi_index.get_level_values(0).freq == datetime_index.freq
| TestGetLevelValues |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 15501,
"end": 15558
} | class ____(scale_color_cmap_d):
pass
| scale_colour_ordinal |
python | astropy__astropy | astropy/modeling/powerlaws.py | {
"start": 2136,
"end": 4582
} | class ____(Fittable1DModel):
"""
One dimensional power law model with a break.
Parameters
----------
amplitude : float
Model amplitude at the break point.
x_break : float
Break point.
alpha_1 : float
Power law index for x < x_break.
alpha_2 : float
Power law index for x > x_break.
See Also
--------
PowerLaw1D, ExponentialCutoffPowerLaw1D, LogParabola1D
Notes
-----
Model formula (with :math:`A` for ``amplitude`` and :math:`\\alpha_1`
for ``alpha_1`` and :math:`\\alpha_2` for ``alpha_2``):
.. math::
f(x) = \\left \\{
\\begin{array}{ll}
A (x / x_{break}) ^ {-\\alpha_1} & : x < x_{break} \\\\
A (x / x_{break}) ^ {-\\alpha_2} & : x > x_{break} \\\\
\\end{array}
\\right.
"""
amplitude = Parameter(default=1, description="Peak value at break point")
x_break = Parameter(default=1, description="Break point")
alpha_1 = Parameter(default=1, description="Power law index before break point")
alpha_2 = Parameter(default=1, description="Power law index after break point")
@staticmethod
def evaluate(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law model function."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
return amplitude * xx ** (-alpha)
@staticmethod
def fit_deriv(x, amplitude, x_break, alpha_1, alpha_2):
"""One dimensional broken power law derivative with respect to parameters."""
alpha = np.where(x < x_break, alpha_1, alpha_2)
xx = x / x_break
d_amplitude = xx ** (-alpha)
d_x_break = amplitude * alpha * d_amplitude / x_break
d_alpha = -amplitude * d_amplitude * np.log(xx)
d_alpha_1 = np.where(x < x_break, d_alpha, 0)
d_alpha_2 = np.where(x >= x_break, d_alpha, 0)
return [d_amplitude, d_x_break, d_alpha_1, d_alpha_2]
@property
def input_units(self):
if self.x_break.input_unit is None:
return None
return {self.inputs[0]: self.x_break.input_unit}
def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
return {
"x_break": inputs_unit[self.inputs[0]],
"amplitude": outputs_unit[self.outputs[0]],
}
| BrokenPowerLaw1D |
python | getsentry__sentry | src/sentry/notifications/migrations/0002_notificationmessage_jsonfield.py | {
"start": 188,
"end": 1536
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("notifications", "0001_move_notifications_models"),
]
operations = [
migrations.AlterField(
model_name="notificationmessage",
name="error_details",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(null=True),
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 59322,
"end": 60908
} | class ____(Layer):
"""Wraps an instance property access (e.g. `x.foo`) in a Keras Layer.
This layer takes an attribute name `attr_name` in the constructor and,
when called on input tensor `obj` returns `obj.attr_name`.
KerasTensors specialized for specific extension types use it to
represent instance property accesses on the represented object in the
case where the property needs to be dynamically accessed as opposed to
being statically computed from the typespec, e.g.
x = keras.Input(..., ragged=True)
out = x.flat_values
"""
@trackable.no_automatic_dependency_tracking
def __init__(self, attr_name, **kwargs):
self.attr_name = attr_name
if 'name' not in kwargs:
kwargs['name'] = K.unique_object_name(
'input.' + self.attr_name, zero_based=True, avoid_observed_names=True)
kwargs['autocast'] = False
# Do not individually trace op layers in the SavedModel.
self._must_restore_from_config = True
super(InstanceProperty, self).__init__(**kwargs)
# Preserve all argument data structures when saving/loading a config
# (e.g., don't unnest lists that contain one element)
self._preserve_input_structure_in_config = True
def call(self, obj):
return getattr(obj, self.attr_name)
def get_config(self):
config = {
'attr_name': self.attr_name
}
base_config = super(InstanceProperty, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
| InstanceProperty |
python | getsentry__sentry | src/sentry/apidocs/parameters.py | {
"start": 13852,
"end": 15074
} | class ____:
DETECTOR_ID = OpenApiParameter(
name="detector_id",
location="path",
required=True,
type=int,
description="The ID of the detector you'd like to query.",
)
QUERY = OpenApiParameter(
name="query",
location="query",
required=False,
type=str,
description="An optional search query for filtering detectors.",
)
SORT = OpenApiParameter(
name="sortBy",
location="query",
required=False,
type=str,
description="""The property to sort results by. If not specified, the results are sorted by id.
Available fields are:
- `name`
- `id`
- `type`
- `connectedWorkflows`
Prefix with `-` to sort in descending order.
""",
)
ID = OpenApiParameter(
name="id",
location="query",
required=False,
type=int,
description="The ID of the detector you'd like to query.",
many=True,
)
TYPE = OpenApiParameter(
name="type",
location="query",
required=False,
type=str,
many=True,
description="Filter by detector type(s). Can be specified multiple times.",
)
| DetectorParams |
python | TheAlgorithms__Python | data_structures/binary_tree/segment_tree_other.py | {
"start": 244,
"end": 607
} | class ____:
def __init__(self, start, end, val, left=None, right=None):
self.start = start
self.end = end
self.val = val
self.mid = (start + end) // 2
self.left = left
self.right = right
def __repr__(self):
return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
| SegmentTreeNode |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_volume_attributes_class_list.py | {
"start": 383,
"end": 7305
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1VolumeAttributesClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1VolumeAttributesClassList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1VolumeAttributesClassList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1VolumeAttributesClassList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1VolumeAttributesClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1VolumeAttributesClassList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1VolumeAttributesClassList. # noqa: E501
items is the list of VolumeAttributesClass objects. # noqa: E501
:return: The items of this V1beta1VolumeAttributesClassList. # noqa: E501
:rtype: list[V1beta1VolumeAttributesClass]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1VolumeAttributesClassList.
items is the list of VolumeAttributesClass objects. # noqa: E501
:param items: The items of this V1beta1VolumeAttributesClassList. # noqa: E501
:type: list[V1beta1VolumeAttributesClass]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1VolumeAttributesClassList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1VolumeAttributesClassList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1VolumeAttributesClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1VolumeAttributesClassList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1VolumeAttributesClassList. # noqa: E501
:return: The metadata of this V1beta1VolumeAttributesClassList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1VolumeAttributesClassList.
:param metadata: The metadata of this V1beta1VolumeAttributesClassList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1VolumeAttributesClassList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1VolumeAttributesClassList):
return True
return self.to_dict() != other.to_dict()
| V1beta1VolumeAttributesClassList |
python | getsentry__sentry | src/sentry/api/serializers/models/environment.py | {
"start": 359,
"end": 584
} | class ____(Serializer):
def serialize(self, obj: Environment, attrs, user, **kwargs) -> EnvironmentSerializerResponse:
return {"id": str(obj.id), "name": obj.name}
@register(EnvironmentProject)
| EnvironmentSerializer |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/canfail/package.py | {
"start": 227,
"end": 939
} | class ____(Package):
"""Package which fails install unless a special attribute is set"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def set_install_succeed(self):
os.environ["CANFAIL_SUCCEED"] = "1"
def set_install_fail(self):
os.environ.pop("CANFAIL_SUCCEED", None)
@property
def succeed(self):
result = True if "CANFAIL_SUCCEED" in os.environ else False
return result
def install(self, spec, prefix):
if not self.succeed:
raise InstallError("'succeed' was false")
touch(join_path(prefix, "an_installation_file"))
| Canfail |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_south_dakota_zip.py | {
"start": 767,
"end": 1782
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_south_dakota_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_south_dakota_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidSouthDakotaZip |
python | scrapy__scrapy | scrapy/logformatter.py | {
"start": 1035,
"end": 1145
} | class ____(TypedDict):
level: int
msg: str
args: dict[str, Any] | tuple[Any, ...]
| LogFormatterResult |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/Terminal.py | {
"start": 269,
"end": 10616
} | class ____(object):
def __init__(self, node, name, io, optional=False, multi=False, pos=None, renamable=False, removable=False, multiable=False, bypass=None):
"""
Construct a new terminal.
============== =================================================================================
**Arguments:**
node the node to which this terminal belongs
name string, the name of the terminal
io 'in' or 'out'
optional bool, whether the node may process without connection to this terminal
multi bool, for inputs: whether this terminal may make multiple connections
for outputs: whether this terminal creates a different value for each connection
pos [x, y], the position of the terminal within its node's boundaries
renamable (bool) Whether the terminal can be renamed by the user
removable (bool) Whether the terminal can be removed by the user
multiable (bool) Whether the user may toggle the *multi* option for this terminal
bypass (str) Name of the terminal from which this terminal's value is derived
when the Node is in bypass mode.
============== =================================================================================
"""
self._io = io
self._optional = optional
self._multi = multi
self._node = weakref.ref(node)
self._name = name
self._renamable = renamable
self._removable = removable
self._multiable = multiable
self._connections = {}
self._graphicsItem = TerminalGraphicsItem(self, parent=self._node().graphicsItem())
self._bypass = bypass
if multi:
self._value = {} ## dictionary of terminal:value pairs.
else:
self._value = None
self.valueOk = None
self.recolor()
def value(self, term=None):
"""Return the value this terminal provides for the connected terminal"""
if term is None:
return self._value
if self.isMultiValue():
return self._value.get(term, None)
else:
return self._value
def bypassValue(self):
return self._bypass
def setValue(self, val, process=True):
"""If this is a single-value terminal, val should be a single value.
If this is a multi-value terminal, val should be a dict of terminal:value pairs"""
if not self.isMultiValue():
if fn.eq(val, self._value):
return
self._value = val
else:
if not isinstance(self._value, dict):
self._value = {}
if val is not None:
self._value.update(val)
self.setValueAcceptable(None) ## by default, input values are 'unchecked' until Node.update().
if self.isInput() and process:
self.node().update()
self.recolor()
def setOpts(self, **opts):
self._renamable = opts.get('renamable', self._renamable)
self._removable = opts.get('removable', self._removable)
self._multiable = opts.get('multiable', self._multiable)
if 'multi' in opts:
self.setMultiValue(opts['multi'])
def connected(self, term):
"""Called whenever this terminal has been connected to another. (note--this function is called on both terminals)"""
if self.isInput() and term.isOutput():
self.inputChanged(term)
if self.isOutput() and self.isMultiValue():
self.node().update()
self.node().connected(self, term)
def disconnected(self, term):
"""Called whenever this terminal has been disconnected from another. (note--this function is called on both terminals)"""
if self.isMultiValue() and term in self._value:
del self._value[term]
self.node().update()
else:
if self.isInput():
self.setValue(None)
self.node().disconnected(self, term)
def inputChanged(self, term, process=True):
"""Called whenever there is a change to the input value to this terminal.
It may often be useful to override this function."""
if self.isMultiValue():
self.setValue({term: term.value(self)}, process=process)
else:
self.setValue(term.value(self), process=process)
def valueIsAcceptable(self):
"""Returns True->acceptable None->unknown False->Unacceptable"""
return self.valueOk
def setValueAcceptable(self, v=True):
self.valueOk = v
self.recolor()
def connections(self):
return self._connections
def node(self):
return self._node()
def isInput(self):
return self._io == 'in'
def isMultiValue(self):
return self._multi
def setMultiValue(self, multi):
"""Set whether this is a multi-value terminal."""
self._multi = multi
if not multi and len(self.inputTerminals()) > 1:
self.disconnectAll()
for term in self.inputTerminals():
self.inputChanged(term)
def isOutput(self):
return self._io == 'out'
def isRenamable(self):
return self._renamable
def isRemovable(self):
return self._removable
def isMultiable(self):
return self._multiable
def name(self):
return self._name
def graphicsItem(self):
return self._graphicsItem
def isConnected(self):
return len(self.connections()) > 0
def connectedTo(self, term):
return term in self.connections()
def hasInput(self):
for t in self.connections():
if t.isOutput():
return True
return False
def inputTerminals(self):
"""Return the terminal(s) that give input to this one."""
return [t for t in self.connections() if t.isOutput()]
def dependentNodes(self):
"""Return the list of nodes which receive input from this terminal."""
return set([t.node() for t in self.connections() if t.isInput()])
def connectTo(self, term, connectionItem=None):
try:
if self.connectedTo(term):
raise Exception('Already connected')
if term is self:
raise Exception('Not connecting terminal to self')
if term.node() is self.node():
raise Exception("Can't connect to terminal on same node.")
for t in [self, term]:
if t.isInput() and not t._multi and len(t.connections()) > 0:
raise Exception("Cannot connect %s <-> %s: Terminal %s is already connected to %s (and does not allow multiple connections)" % (self, term, t, list(t.connections().keys())))
except:
if connectionItem is not None:
connectionItem.close()
raise
if connectionItem is None:
connectionItem = ConnectionItem(self.graphicsItem(), term.graphicsItem())
self.graphicsItem().getViewBox().addItem(connectionItem)
self._connections[term] = connectionItem
term._connections[self] = connectionItem
self.recolor()
self.connected(term)
term.connected(self)
return connectionItem
def disconnectFrom(self, term):
if not self.connectedTo(term):
return
item = self._connections[term]
item.close()
del self._connections[term]
del term._connections[self]
self.recolor()
term.recolor()
self.disconnected(term)
term.disconnected(self)
def disconnectAll(self):
for t in list(self._connections.keys()):
self.disconnectFrom(t)
def recolor(self, color=None, recurse=True):
if color is None:
if not self.isConnected(): ## disconnected terminals are black
color = QtGui.QColor(0,0,0)
elif self.isInput() and not self.hasInput(): ## input terminal with no connected output terminals
color = QtGui.QColor(200,200,0)
elif self._value is None or fn.eq(self._value, {}): ## terminal is connected but has no data (possibly due to processing error)
color = QtGui.QColor(255,255,255)
elif self.valueIsAcceptable() is None: ## terminal has data, but it is unknown if the data is ok
color = QtGui.QColor(200, 200, 0)
elif self.valueIsAcceptable() is True: ## terminal has good input, all ok
color = QtGui.QColor(0, 200, 0)
else: ## terminal has bad input
color = QtGui.QColor(200, 0, 0)
self.graphicsItem().setBrush(QtGui.QBrush(color))
if recurse:
for t in self.connections():
t.recolor(color, recurse=False)
def rename(self, name):
oldName = self._name
self._name = name
self.node().terminalRenamed(self, oldName)
self.graphicsItem().termRenamed(name)
def __repr__(self):
return "<Terminal %s.%s>" % (str(self.node().name()), str(self.name()))
def __hash__(self):
return id(self)
def close(self):
self.disconnectAll()
item = self.graphicsItem()
if item.scene() is not None:
item.scene().removeItem(item)
def saveState(self):
return {'io': self._io, 'multi': self._multi, 'optional': self._optional, 'renamable': self._renamable, 'removable': self._removable, 'multiable': self._multiable}
def __lt__(self, other):
"""When the terminal is multi value, the data passed to the DatTreeWidget for each input or output, is {Terminal: value}.
To make this sortable, we provide the < operator.
"""
return self._name < other._name
| Terminal |
python | getsentry__sentry | src/sentry/unmerge.py | {
"start": 2867,
"end": 4487
} | class ____(UnmergeReplacement):
"""
The "classical unmerge": Moving events out of the group based on primary_hash.
"""
fingerprints: Collection[str]
def get_unmerge_key(
self, event: GroupEvent, locked_primary_hashes: Collection[str]
) -> str | None:
primary_hash = event.get_primary_hash()
if primary_hash in self.fingerprints and primary_hash in locked_primary_hashes:
return _DEFAULT_UNMERGE_KEY
return None
@property
def primary_hashes_to_lock(self) -> Collection[str]:
return self.fingerprints
def start_snuba_replacement(
self, project: Project, source_id: int, destination_id: int
) -> EventstreamState:
return eventstream.backend.start_unmerge(
project.id, self.fingerprints, source_id, destination_id
)
def stop_snuba_replacement(self, eventstream_state: EventstreamState) -> None:
eventstream.backend.end_unmerge(eventstream_state)
def run_postgres_replacement(
self, project: Project, destination_id: int, locked_primary_hashes: Collection[str]
) -> None:
# Move the group hashes to the destination.
GroupHash.objects.filter(project_id=project.id, hash__in=locked_primary_hashes).update(
group=destination_id
)
def get_activity_args(self) -> Mapping[str, Any]:
return {"fingerprints": self.fingerprints}
_REPLACEMENT_TYPE_LABELS: BidirectionalMapping = BidirectionalMapping(
{
PrimaryHashUnmergeReplacement: "primary_hash",
}
)
@dataclass(frozen=True)
| PrimaryHashUnmergeReplacement |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 3634,
"end": 3865
} | class ____(FlexConfig):
"""
ROCm subclass for FlexAttn, with AMD backend specific tuneable kernargs
"""
matrix_instr_nonkdim: int = 0
waves_per_eu: int = 0
kpack: int = 2
@dataclasses.dataclass
| ROCmFlexConfig |
python | joke2k__faker | faker/providers/phone_number/es_AR/__init__.py | {
"start": 49,
"end": 2070
} | class ____(PhoneNumberProvider):
"""
According to official specs:
https://es.wikipedia.org/wiki/N%C3%BAmeros_telef%C3%B3nicos_en_Argentina
https://www.argentina.gob.ar/pais/codigo-telefonia
"""
formats = (
"+54 15 2%## ####", # National telephone to mobile phone
"+54 9 3%## ####", # International phone}
)
landline_codes = (
"351", # Córdoba (capital city of Córdoba province)
"379", # Corrientes (capital city of Corrientes province)
"221", # La Plata (capital city of Buenos Aires province)
"380", # La Rioja (capital city of La Rioja province)
"261", # Mendoza (capital city of Mendoza province)
"299", # Neuquén (capital city of Neuquén province)
"343", # Paraná (capital city of Entre Ríos province)
"376", # Posadas (capital city of Misiones province)
"280", # Rawson (capital city of Chubut province)
"362", # Resistencia (capital city of Chaco province)
"2966", # Río Gallegos (capital city of Santa Cruz province)
"387", # Salta (capital city of Salta province)
"383", # San Fernando del Valle de Catamarca (capital city of Catamarca province)
"264", # San Juan (capital city of San Juan province)
"266", # San Luis (capital city of San Luis province)
"381", # San Miguel de Tucumán (capital city of Tucumán province)
"388", # San Salvador de Jujuy (capital city of Jujuy province)
"342", # Santa Fe (capital city of Santa Fe province)
"2954", # Santa Rosa (capital city of La Pampa province)
"385", # Santiago del Estero (capital city of Santiago del Estero province)
"391", # Ushuaia (capital city of Tierra del Fuego province)
"2920", # Viedma (capital city of Rio Negro province)
)
special_codes = (
"600", # Nationalwide
"800", # Nationalwide, toll-free
)
cellphone_blocks = ("2", "3", "4", "5", "6", "7", "8", "9")
| Provider |
python | ansible__ansible | lib/ansible/_internal/ansible_collections/ansible/_protomatter/plugins/test/tagged.py | {
"start": 206,
"end": 319
} | class ____:
@staticmethod
def tests() -> dict[str, t.Callable]:
return dict(tagged=tagged)
| TestModule |
python | PyCQA__pylint | doc/data/messages/t/too-many-function-args/good.py | {
"start": 0,
"end": 134
} | class ____:
def __init__(self, color, name):
self.color = color
self.name = name
apple = Fruit("red", "apple")
| Fruit |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_size_op_test.py | {
"start": 1023,
"end": 1776
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters([
{'size': 1, 'test_input': 1},
{'size': 0, 'test_input': []},
{'size': 0, 'test_input': [], 'ragged_rank': 1},
{'size': 3, 'test_input': [1, 1, 1]},
{'size': 3, 'test_input': [[1, 1], [1]]},
{'size': 5, 'test_input': [[[1, 1, 1], [1]], [[1]]]},
{'size': 6, 'test_input': [[[1, 1], [1, 1]], [[1, 1]]], 'ragged_rank': 1},
])
def testRaggedSize(self, test_input, size, ragged_rank=None):
input_rt = ragged_factory_ops.constant(test_input, ragged_rank=ragged_rank)
self.assertAllEqual(ragged_array_ops.size(input_rt), size)
if __name__ == '__main__':
googletest.main()
| RaggedSizeOpTest |
python | ansible__ansible | lib/ansible/modules/service_facts.py | {
"start": 17869,
"end": 21408
} | class ____(BaseService):
_pid_regex = r'.+ is running as pid (\d+)\.'
def get_info(self, service):
service_info = {'status': 'unknown'}
rc, stdout, stderr = self.module.run_command("%s %s describe" % (self.service, service))
if rc == 0:
service_info['description'] = stdout
rc, stdout, stderr = self.module.run_command("%s %s status" % (self.service, service))
if rc == 0:
service_info['status'] = 'running'
p = re.compile(r'^\s?%s is running as pid (\d+).' % service)
matches = p.match(stdout[0])
if matches:
# does not always get pid output
service_info['pid'] = matches[0]
else:
service_info['pid'] = 'N/A'
elif rc == 1:
if stdout and 'is not running' in stdout.splitlines()[0]:
service_info['status'] = 'stopped'
elif stderr and 'unknown directive' in stderr.splitlines()[0]:
service_info['status'] = 'unknown'
self.module.warn('Status query not supported for %s' % service)
else:
service_info['status'] = 'unknown'
out = stderr if stderr else stdout
self.module.warn('Could not retrieve status for %s: %s' % (service, out))
else:
out = stderr if stderr else stdout
self.module.warn("Failed to get info for %s, no system message (rc=%s): %s" % (service, rc, out))
return service_info
def get_enabled(self):
services = []
rc, stdout, stderr = self.module.run_command("%s -e" % (self.service))
if rc == 0:
for line in stdout.splitlines():
if line.startswith('/'):
services.append(os.path.basename(line))
elif stderr:
self.module.warn("Failed to get services: %s" % stderr)
elif stdout:
self.module.warn("Failed to get services: %s" % stdout)
else:
self.module.warn("Failed to get services, no system message: rc=%s" % rc)
return services
def gather_services(self):
services = {}
if sys.platform.startswith('freebsd'):
self.service = self.module.get_bin_path("service")
if self.service:
for svc in self.get_enabled():
services[svc] = self.get_info(svc)
return services
def main():
module = AnsibleModule(argument_spec=dict(), supports_check_mode=True)
locale = get_best_parsable_locale(module)
module.run_command_environ_update = dict(LANG=locale, LC_ALL=locale)
if sys.platform.startswith('freebsd'):
# frebsd is not compatible but will match other classes
service_modules = (FreeBSDScanService,)
else:
service_modules = (ServiceScanService, SystemctlScanService, AIXScanService, OpenBSDScanService)
all_services = {}
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc:
all_services.update(svc)
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. This can be due to privileges or some other configuration issue.")
else:
results = dict(ansible_facts=dict(services=all_services))
module.exit_json(**results)
if __name__ == '__main__':
main()
| FreeBSDScanService |
python | pytorch__pytorch | torch/jit/quantized.py | {
"start": 1754,
"end": 2025
} | class ____(torch.jit.ScriptModule):
def __init__(self, other, dtype=torch.int8):
raise RuntimeError(
"torch.jit.QuantizedRNNBase is no longer supported. "
"Please use the torch.ao.nn.quantized.dynamic instead."
)
| QuantizedRNNBase |
python | keras-team__keras | keras/src/layers/normalization/layer_normalization.py | {
"start": 285,
"end": 8622
} | class ____(Layer):
"""Layer normalization layer (Ba et al., 2016).
Normalize the activations of the previous layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within each
example close to 0 and the activation standard deviation close to 1.
If `scale` or `center` are enabled, the layer will scale the normalized
outputs by broadcasting them with a trainable variable `gamma`, and center
the outputs by broadcasting with a trainable variable `beta`. `gamma` will
default to a ones tensor and `beta` will default to a zeros tensor, so that
centering and scaling are no-ops before training has begun.
So, with scaling and centering enabled the normalization equations
are as follows:
Let the intermediate activations for a mini-batch to be the `inputs`.
For each sample `x_i` in `inputs` with `k` features, we compute the mean and
variance of the sample:
```python
mean_i = sum(x_i[j] for j in range(k)) / k
var_i = sum((x_i[j] - mean_i) ** 2 for j in range(k)) / k
```
and then compute a normalized `x_i_normalized`, including a small factor
`epsilon` for numerical stability.
```python
x_i_normalized = (x_i - mean_i) / sqrt(var_i + epsilon)
```
And finally `x_i_normalized ` is linearly transformed by `gamma` and `beta`,
which are learned parameters:
```python
output_i = x_i_normalized * gamma + beta
```
`gamma` and `beta` will span the axes of `inputs` specified in `axis`, and
this part of the inputs' shape must be fully defined.
For example:
>>> layer = keras.layers.LayerNormalization(axis=[1, 2, 3])
>>> layer.build([5, 20, 30, 40])
>>> print(layer.beta.shape)
(20, 30, 40)
>>> print(layer.gamma.shape)
(20, 30, 40)
Note that other implementations of layer normalization may choose to define
`gamma` and `beta` over a separate set of axes from the axes being
normalized across. For example, Group Normalization
([Wu et al. 2018](https://arxiv.org/abs/1803.08494)) with group size of 1
corresponds to a Layer Normalization that normalizes across height, width,
and channel and has `gamma` and `beta` span only the channel dimension.
So, this Layer Normalization implementation will not match a Group
Normalization layer with group size set to 1.
Args:
axis: Integer or List/Tuple. The axis or axes to normalize across.
Typically, this is the features axis/axes. The left-out axes are
typically the batch axis/axes. `-1` is the last dimension in the
input. Defaults to `-1`.
epsilon: Small float added to variance to avoid dividing by zero.
Defaults to 1e-3.
center: If True, add offset of `beta` to normalized tensor. If False,
`beta` is ignored. Defaults to `True`.
scale: If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling will be done by the next layer.
Defaults to `True`.
beta_initializer: Initializer for the beta weight. Defaults to zeros.
gamma_initializer: Initializer for the gamma weight. Defaults to ones.
beta_regularizer: Optional regularizer for the beta weight.
None by default.
gamma_regularizer: Optional regularizer for the gamma weight.
None by default.
beta_constraint: Optional constraint for the beta weight.
None by default.
gamma_constraint: Optional constraint for the gamma weight.
None by default.
**kwargs: Base layer keyword arguments (e.g. `name` and `dtype`).
Reference:
- [Lei Ba et al., 2016](https://arxiv.org/abs/1607.06450).
"""
def __init__(
self,
axis=-1,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer="zeros",
gamma_initializer="ones",
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs,
):
rms_scaling = kwargs.pop("rms_scaling", False)
if rms_scaling:
warnings.warn(
"You passed `rms_scaling=True`, which is deprecated. This "
"argument incorrectly scales the input by the variance, not "
"the root mean square. To correctly use RMS Normalization, "
"please use `keras.layers.RMSNormalization` instead."
)
super().__init__(**kwargs)
if isinstance(axis, (list, tuple)):
self.axis = list(axis)
elif isinstance(axis, int):
self.axis = axis
else:
raise TypeError(
"Expected an int or a list/tuple of ints for the "
"argument 'axis', but received: %r" % axis
)
self.epsilon = epsilon
self.center = center
self.scale = scale
self.rms_scaling = rms_scaling
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
self.supports_masking = True
self.autocast = False
def build(self, input_shape):
if isinstance(self.axis, list):
shape = tuple([input_shape[dim] for dim in self.axis])
else:
shape = (input_shape[self.axis],)
self.axis = [self.axis]
if self.scale or self.rms_scaling:
self.gamma = self.add_weight(
name="gamma",
shape=shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
autocast=False,
)
else:
self.gamma = None
if self.center and not self.rms_scaling:
self.beta = self.add_weight(
name="beta",
shape=shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
autocast=False,
)
else:
self.beta = None
def call(self, inputs):
outputs = ops.layer_normalization(
inputs,
self.gamma,
self.beta,
self.axis,
self.epsilon,
rms_scaling=self.rms_scaling,
)
return ops.cast(outputs, self.compute_dtype)
def compute_output_shape(self, input_shape):
if isinstance(self.axis, int):
axes = [self.axis]
else:
axes = self.axis
for axis in axes:
if axis >= len(input_shape) or axis < -len(input_shape):
raise ValueError(
f"Axis {axis} is out of bounds for "
f"input shape {input_shape}. "
f"Received: axis={self.axis}"
)
return input_shape
def get_config(self):
config = {
"axis": self.axis,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"rms_scaling": self.rms_scaling,
"beta_initializer": initializers.serialize(self.beta_initializer),
"gamma_initializer": initializers.serialize(self.gamma_initializer),
"beta_regularizer": regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": regularizers.serialize(self.gamma_regularizer),
"beta_constraint": constraints.serialize(self.beta_constraint),
"gamma_constraint": constraints.serialize(self.gamma_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
| LayerNormalization |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.