language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | matplotlib__matplotlib | lib/matplotlib/ticker.py | {
"start": 70838,
"end": 72220
} | class ____:
"""
Helper for `.MaxNLocator`, `.MultipleLocator`, etc.
Take floating-point precision limitations into account when calculating
tick locations as integer multiples of a step.
"""
def __init__(self, step, offset):
"""
Parameters
----------
step : float > 0
Interval between ticks.
offset : float
Offset subtracted from the data limits prior to calculating tick
locations.
"""
if step <= 0:
raise ValueError("'step' must be positive")
self.step = step
self._offset = abs(offset)
def closeto(self, ms, edge):
# Allow more slop when the offset is large compared to the step.
if self._offset > 0:
digits = np.log10(self._offset / self.step)
tol = max(1e-10, 10 ** (digits - 12))
tol = min(0.4999, tol)
else:
tol = 1e-10
return abs(ms - edge) < tol
def le(self, x):
"""Return the largest n: n*step <= x."""
d, m = divmod(x, self.step)
if self.closeto(m / self.step, 1):
return d + 1
return d
def ge(self, x):
"""Return the smallest n: n*step >= x."""
d, m = divmod(x, self.step)
if self.closeto(m / self.step, 0):
return d
return d + 1
| _Edge_integer |
python | walkccc__LeetCode | solutions/1154. Day of the Year/1154.py | {
"start": 0,
"end": 378
} | class ____:
def dayOfYear(self, date: str) -> int:
def isLeapYear(year: int) -> bool:
return (year % 4 == 0 and year % 100 != 0) or year % 400 == 0
year = int(date[:4])
month = int(date[5:7])
day = int(date[8:])
days = [31, 29 if isLeapYear(
year) else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
return sum(days[:month - 1]) + day
| Solution |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 2560,
"end": 2703
} | class ____:
name: str
slug: Optional[str]
description: Optional[str]
p = AddProject(name='x', slug='y', description='z')
| AddProject |
python | pyca__cryptography | tests/hazmat/primitives/test_xofhash.py | {
"start": 1155,
"end": 2610
} | class ____:
def test_hash_reject_unicode(self, backend):
m = hashes.XOFHash(hashes.SHAKE128(sys.maxsize))
with pytest.raises(TypeError):
m.update("\u00fc") # type: ignore[arg-type]
def test_incorrect_hash_algorithm_type(self, backend):
with pytest.raises(TypeError):
# Instance required
hashes.XOFHash(hashes.SHAKE128) # type: ignore[arg-type]
with pytest.raises(TypeError):
hashes.XOFHash(hashes.SHA256()) # type: ignore[arg-type]
def test_raises_update_after_squeeze(self, backend):
h = hashes.XOFHash(hashes.SHAKE128(digest_size=256))
h.update(b"foo")
h.squeeze(5)
with pytest.raises(AlreadyFinalized):
h.update(b"bar")
def test_copy(self, backend):
h = hashes.XOFHash(hashes.SHAKE128(digest_size=256))
h.update(b"foo")
h.update(b"bar")
h2 = h.copy()
assert h2.squeeze(10) == h.squeeze(10)
def test_exhaust_bytes(self, backend):
h = hashes.XOFHash(hashes.SHAKE128(digest_size=256))
h.update(b"foo")
with pytest.raises(ValueError):
h.squeeze(257)
h.squeeze(200)
h.squeeze(56)
with pytest.raises(ValueError):
h.squeeze(1)
@pytest.mark.supported(
only_if=lambda backend: rust_openssl.CRYPTOGRAPHY_OPENSSL_330_OR_GREATER,
skip_message="Requires backend with XOF support",
)
| TestXOFHash |
python | walkccc__LeetCode | solutions/1921. Eliminate Maximum Number of Monsters/1921.py | {
"start": 0,
"end": 255
} | class ____:
def eliminateMaximum(self, dist: list[int], speed: list[int]) -> int:
for i, arrivalTime in enumerate(
sorted([(d - 1) // s for d, s in zip(dist, speed)])):
if i > arrivalTime:
return i
return len(dist)
| Solution |
python | kamyu104__LeetCode-Solutions | Python/number-of-laser-beams-in-a-bank.py | {
"start": 33,
"end": 371
} | class ____(object):
def numberOfBeams(self, bank):
"""
:type bank: List[str]
:rtype: int
"""
result = prev = 0
for x in bank:
cnt = x.count('1')
if not cnt:
continue
result += prev*cnt
prev = cnt
return result
| Solution |
python | geekcomputers__Python | Python Programs/Python Program to Reverse a linked list.py | {
"start": 107,
"end": 249
} | class ____:
# Constructor to initialize the node object
def __init__(self, data):
self.data = data
self.next = None
| Node |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 8170,
"end": 10787
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"users",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30), nullable=False),
test_needs_acid=True,
test_needs_fk=True,
)
Table(
"addresses",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("email_address", String(50), nullable=False),
test_needs_acid=True,
test_needs_fk=True,
)
Table(
"orders",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("user_id", None, ForeignKey("users.id")),
Column("address_id", None, ForeignKey("addresses.id")),
Column("description", String(30)),
Column("isopen", Integer),
test_needs_acid=True,
test_needs_fk=True,
)
Table(
"dingalings",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("address_id", None, ForeignKey("addresses.id")),
Column("data", String(30)),
test_needs_acid=True,
test_needs_fk=True,
)
def _option_test_fixture(self):
users, addresses, dingalings = (
self.tables.users,
self.tables.addresses,
self.tables.dingalings,
)
# these must be module level for pickling
from .test_pickled import Address
from .test_pickled import Dingaling
from .test_pickled import User
self.mapper_registry.map_imperatively(
User,
users,
properties={"addresses": relationship(Address, backref="user")},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={"dingaling": relationship(Dingaling)},
)
self.mapper_registry.map_imperatively(Dingaling, dingalings)
sess = fixture_session()
u1 = User(name="ed")
u1.addresses.append(Address(email_address="ed@bar.com"))
sess.add(u1)
sess.flush()
sess.expunge_all()
return sess, User, Address, Dingaling
| PickleTest |
python | doocs__leetcode | solution/2400-2499/2497.Maximum Star Sum of a Graph/Solution.py | {
"start": 0,
"end": 415
} | class ____:
def maxStarSum(self, vals: List[int], edges: List[List[int]], k: int) -> int:
g = defaultdict(list)
for a, b in edges:
if vals[b] > 0:
g[a].append(vals[b])
if vals[a] > 0:
g[b].append(vals[a])
for bs in g.values():
bs.sort(reverse=True)
return max(v + sum(g[i][:k]) for i, v in enumerate(vals))
| Solution |
python | mitsuhiko__rye | rye-devtools/src/rye_devtools/find_downloads.py | {
"start": 8855,
"end": 14000
} | class ____(Finder):
implementation = PythonImplementation.PYPY
RELEASE_URL = "https://raw.githubusercontent.com/pypy/pypy/main/pypy/tool/release/versions.json"
CHECKSUM_URL = (
"https://raw.githubusercontent.com/pypy/pypy.org/main/pages/checksums.rst"
)
CHECKSUM_RE = re.compile(
r"^\s*(?P<checksum>\w{64})\s+(?P<filename>pypy.+)$", re.MULTILINE
)
ARCH_MAPPING = {
"x64": "x86_64",
"i686": "x86",
"aarch64": "aarch64",
"arm64": "aarch64",
}
PLATFORM_MAPPING = {
"darwin": "macos",
"win64": "windows",
"linux": "linux",
}
def __init__(self, client: httpx.AsyncClient):
self.client = client
async def find(self) -> list[PythonDownload]:
downloads = await self.fetch_downloads()
await self.fetch_checksums(downloads)
return downloads
async def fetch_downloads(self) -> list[PythonDownload]:
log("Fetching pypy downloads...")
resp = await fetch(self.client, self.RELEASE_URL)
versions = resp.json()
results = {}
for version in versions:
if not version["stable"]:
continue
python_version = Version.from_str(version["python_version"])
if python_version < (3, 7, 0):
continue
for file in version["files"]:
arch = self.ARCH_MAPPING.get(file["arch"])
platform = self.PLATFORM_MAPPING.get(file["platform"])
if arch is None or platform is None:
continue
environment = "gnu" if platform == "linux" else None
download = PythonDownload(
version=python_version,
triple=PlatformTriple(
arch=arch,
platform=platform,
environment=environment,
flavor=None,
),
implementation=PythonImplementation.PYPY,
filename=file["filename"],
url=file["download_url"],
)
# Only keep the latest pypy version of each arch/platform
if (python_version, arch, platform) not in results:
results[(python_version, arch, platform)] = download
return list(results.values())
async def fetch_checksums(self, downloads: list[PythonDownload]) -> None:
log("Fetching pypy checksums...")
resp = await fetch(self.client, self.CHECKSUM_URL)
text = resp.text
checksums = {}
for match in self.CHECKSUM_RE.finditer(text):
checksums[match.group("filename")] = match.group("checksum")
for download in downloads:
download.sha256 = checksums.get(download.filename)
def render(downloads: list[PythonDownload]):
"""Render downloads.inc."""
def sort_key(download: PythonDownload) -> tuple[int, Version, PlatformTriple]:
# Sort by implementation, version (latest first), and then by triple.
impl_order = [PythonImplementation.PYPY, PythonImplementation.CPYTHON]
return (
impl_order.index(download.implementation),
-download.version,
download.triple,
)
downloads.sort(key=sort_key)
print("// Generated by rye-devtools. DO NOT EDIT.")
print(
"// To regenerate, run `rye run find-downloads > rye/src/sources/generated/python_downloads.inc` from the root of the repository."
)
print("use std::borrow::Cow;")
print("pub const PYTHON_VERSIONS: &[(PythonVersion, &str, Option<&str>)] = &[")
for download in downloads:
triple = download.triple
version = download.version
sha256 = f'Some("{download.sha256}")' if download.sha256 else "None"
print(
f' (PythonVersion {{ name: Cow::Borrowed("{download.implementation}"), arch: Cow::Borrowed("{triple.arch}"), os: Cow::Borrowed("{triple.platform}"), major: {version.major}, minor: {version.minor}, patch: {version.patch}, suffix: None }}, "{download.url}", {sha256}),'
)
print("];")
async def async_main():
token = os.environ.get("GITHUB_TOKEN")
if not token:
try:
token = open("token.txt").read().strip()
except Exception:
pass
if not token:
log("Please set GITHUB_TOKEN environment variable or create a token.txt file.")
sys.exit(1)
headers = {
"X-GitHub-Api-Version": "2022-11-28",
"Authorization": "Bearer " + token,
}
client = httpx.AsyncClient(follow_redirects=True, headers=headers)
finders = [
CPythonFinder(client),
PyPyFinder(client),
]
downloads = []
log("Fetching all Python downloads and generating code.")
async with client:
for finder in finders:
log(f"Finding {finder.implementation} downloads...")
downloads.extend(await finder.find())
render(downloads)
def main():
asyncio.run(async_main())
if __name__ == "__main__":
main()
| PyPyFinder |
python | dagster-io__dagster | python_modules/libraries/dagster-powerbi/dagster_powerbi/resource.py | {
"start": 2097,
"end": 3475
} | class ____(ConfigurableResource):
"""Authenticates with PowerBI using a service principal."""
client_id: str = Field(..., description="The application client ID for the service principal.")
client_secret: str = Field(
..., description="A client secret created for the service principal."
)
tenant_id: str = Field(
..., description="The Entra tenant ID where service principal was created."
)
_api_token: Optional[str] = PrivateAttr(default=None)
def get_api_token(self) -> str:
headers = {"Content-Type": "application/x-www-form-urlencoded"}
login_url = MICROSOFT_LOGIN_URL.format(tenant_id=self.tenant_id)
response = requests.post(
url=login_url,
headers=headers,
data=(
"grant_type=client_credentials"
"&resource=https://analysis.windows.net/powerbi/api"
f"&client_id={self.client_id}"
f"&client_secret={self.client_secret}"
),
allow_redirects=True,
)
response.raise_for_status()
out = response.json()
self._api_token = out["access_token"]
return out["access_token"]
@property
def api_token(self) -> str:
if not self._api_token:
return self.get_api_token()
return self._api_token
| PowerBIServicePrincipal |
python | getsentry__sentry | tests/sentry/integrations/slack/threads/activity_notifications/test_external_issue_created_activity.py | {
"start": 1752,
"end": 3063
} | class ____(BaseTestCase):
def test_returns_fallback_when_provider_key_is_not_in_map(self) -> None:
self.activity.data = {}
create_issue_activity = _ExternalIssueCreatedActivity(self.activity)
ret = create_issue_activity.get_provider()
assert ret == create_issue_activity.DEFAULT_PROVIDER_FALLBACK_TEXT
def test_returns_fallback_when_provider_key_is_empty(self) -> None:
self.activity.data = {"provider": None}
create_issue_activity = _ExternalIssueCreatedActivity(self.activity)
ret = create_issue_activity.get_provider()
assert ret == create_issue_activity.DEFAULT_PROVIDER_FALLBACK_TEXT
def test_returns_correct_value(self) -> None:
provider_value = "whatever"
self.activity.data = {"provider": provider_value}
create_issue_activity = _ExternalIssueCreatedActivity(self.activity)
ret = create_issue_activity.get_provider()
assert ret == provider_value
def test_returns_lowercase_value(self) -> None:
provider_value = "WHATEVER"
self.activity.data = {"provider": provider_value}
create_issue_activity = _ExternalIssueCreatedActivity(self.activity)
ret = create_issue_activity.get_provider()
assert ret == provider_value.lower()
| TestGetProvider |
python | numpy__numpy | numpy/random/tests/test_generator_mt19937_regressions.py | {
"start": 134,
"end": 8638
} | class ____:
def _create_generator(self):
return Generator(MT19937(121263137472525314065))
def test_vonmises_range(self):
# Make sure generated random variables are in [-pi, pi].
# Regression test for ticket #986.
mt19937 = self._create_generator()
for mu in np.linspace(-7., 7., 5):
r = mt19937.vonmises(mu, 1, 50)
assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
def test_hypergeometric_range(self):
# Test for ticket #921
mt19937 = self._create_generator()
assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
# Test for ticket #5623
args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
assert_(mt19937.hypergeometric(*args) > 0)
def test_logseries_convergence(self):
# Test for ticket #923
mt19937 = self._create_generator()
N = 1000
rvsn = mt19937.logseries(0.8, size=N)
# these two frequency counts should be close to theoretical
# numbers with this large sample
# theoretical large N result is 0.49706795
freq = np.sum(rvsn == 1) / N
msg = f'Frequency was {freq:f}, should be > 0.45'
assert_(freq > 0.45, msg)
# theoretical large N result is 0.19882718
freq = np.sum(rvsn == 2) / N
msg = f'Frequency was {freq:f}, should be < 0.23'
assert_(freq < 0.23, msg)
def test_shuffle_mixed_dimension(self):
# Test for trac ticket #2074
for t in [[1, 2, 3, None],
[(1, 1), (2, 2), (3, 3), None],
[1, (2, 2), (3, 3), None],
[(1, 1), 2, 3, None]]:
mt19937 = Generator(MT19937(12345))
shuffled = np.array(t, dtype=object)
mt19937.shuffle(shuffled)
expected = np.array([t[2], t[0], t[3], t[1]], dtype=object)
assert_array_equal(np.array(shuffled, dtype=object), expected)
def test_call_within_randomstate(self):
# Check that custom BitGenerator does not call into global state
res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
for i in range(3):
mt19937 = Generator(MT19937(i))
m = Generator(MT19937(4321))
# If m.state is not honored, the result will change
assert_array_equal(m.choice(10, size=10, p=np.ones(10) / 10.), res)
def test_multivariate_normal_size_types(self):
# Test for multivariate_normal issue with 'size' argument.
# Check that the multivariate_normal size argument can be a
# numpy integer.
mt19937 = self._create_generator()
mt19937.multivariate_normal([0], [[0]], size=1)
mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
def test_beta_small_parameters(self):
# Test that beta with small a and b parameters does not produce
# NaNs due to roundoff errors causing 0 / 0, gh-5851
mt19937 = self._create_generator()
x = mt19937.beta(0.0001, 0.0001, size=100)
assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
def test_beta_very_small_parameters(self):
# gh-24203: beta would hang with very small parameters.
mt19937 = self._create_generator()
mt19937.beta(1e-49, 1e-40)
def test_beta_ridiculously_small_parameters(self):
# gh-24266: beta would generate nan when the parameters
# were subnormal or a small multiple of the smallest normal.
mt19937 = self._create_generator()
tiny = np.finfo(1.0).tiny
x = mt19937.beta(tiny / 32, tiny / 40, size=50)
assert not np.any(np.isnan(x))
def test_beta_expected_zero_frequency(self):
# gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta
# would generate too many zeros.
mt19937 = self._create_generator()
a = 0.0025
b = 0.0025
n = 1000000
x = mt19937.beta(a, b, size=n)
nzeros = np.count_nonzero(x == 0)
# beta CDF at x = np.finfo(np.double).smallest_subnormal/2
# is p = 0.0776169083131899, e.g,
#
# import numpy as np
# from mpmath import mp
# mp.dps = 160
# x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2
# # CDF of the beta distribution at x:
# p = mp.betainc(a, b, x1=0, x2=x, regularized=True)
# n = 1000000
# exprected_freq = float(n*p)
#
expected_freq = 77616.90831318991
assert 0.95 * expected_freq < nzeros < 1.05 * expected_freq
def test_choice_sum_of_probs_tolerance(self):
# The sum of probs should be 1.0 with some tolerance.
# For low precision dtypes the tolerance was too tight.
# See numpy github issue 6123.
mt19937 = self._create_generator()
a = [1, 2, 3]
counts = [4, 4, 2]
for dt in np.float16, np.float32, np.float64:
probs = np.array(counts, dtype=dt) / sum(counts)
c = mt19937.choice(a, p=probs)
assert_(c in a)
with pytest.raises(ValueError):
mt19937.choice(a, p=probs * 0.9)
def test_shuffle_of_array_of_different_length_strings(self):
# Test that permuting an array of different length strings
# will not cause a segfault on garbage collection
# Tests gh-7710
mt19937 = self._create_generator()
a = np.array(['a', 'a' * 1000])
for _ in range(100):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_shuffle_of_array_of_objects(self):
# Test that permuting an array of objects will not cause
# a segfault on garbage collection.
# See gh-7719
mt19937 = self._create_generator()
a = np.array([np.arange(1), np.arange(4)], dtype=object)
for _ in range(1000):
mt19937.shuffle(a)
# Force Garbage Collection - should not segfault.
import gc
gc.collect()
def test_permutation_subclass(self):
class N(np.ndarray):
pass
mt19937 = Generator(MT19937(1))
orig = np.arange(3).view(N)
perm = mt19937.permutation(orig)
assert_array_equal(perm, np.array([2, 0, 1]))
assert_array_equal(orig, np.arange(3).view(N))
class M:
a = np.arange(5)
def __array__(self, dtype=None, copy=None):
return self.a
mt19937 = Generator(MT19937(1))
m = M()
perm = mt19937.permutation(m)
assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
assert_array_equal(m.__array__(), np.arange(5))
def test_gamma_0(self):
mt19937 = self._create_generator()
assert mt19937.standard_gamma(0.0) == 0.0
assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
actual = mt19937.standard_gamma([0.0], dtype='float')
expected = np.array([0.], dtype=np.float32)
assert_array_equal(actual, expected)
def test_geometric_tiny_prob(self):
# Regression test for gh-17007.
# When p = 1e-30, the probability that a sample will exceed 2**63-1
# is 0.9999999999907766, so we expect the result to be all 2**63-1.
mt19937 = self._create_generator()
assert_array_equal(mt19937.geometric(p=1e-30, size=3),
np.iinfo(np.int64).max)
def test_zipf_large_parameter(self):
# Regression test for part of gh-9829: a call such as rng.zipf(10000)
# would hang.
mt19937 = self._create_generator()
n = 8
sample = mt19937.zipf(10000, size=n)
assert_array_equal(sample, np.ones(n, dtype=np.int64))
def test_zipf_a_near_1(self):
# Regression test for gh-9829: a call such as rng.zipf(1.0000000000001)
# would hang.
mt19937 = self._create_generator()
n = 100000
sample = mt19937.zipf(1.0000000000001, size=n)
# Not much of a test, but let's do something more than verify that
# it doesn't hang. Certainly for a monotonically decreasing
# discrete distribution truncated to signed 64 bit integers, more
# than half should be less than 2**62.
assert np.count_nonzero(sample < 2**62) > n / 2
| TestRegression |
python | huggingface__transformers | src/transformers/models/internvl/modeling_internvl.py | {
"start": 30245,
"end": 31831
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring(
custom_intro="""
The INTERNVL model which consists of a vision backbone and a language model.
"""
)
| InternVLCausalLMOutputWithPast |
python | python-markdown__markdown | tests/test_syntax/blocks/test_html_blocks.py | {
"start": 821,
"end": 42180
} | class ____(TestCase):
def test_raw_paragraph(self):
self.assertMarkdownRenders(
'<p>A raw paragraph.</p>',
'<p>A raw paragraph.</p>'
)
def test_raw_skip_inline_markdown(self):
self.assertMarkdownRenders(
'<p>A *raw* paragraph.</p>',
'<p>A *raw* paragraph.</p>'
)
def test_raw_indent_one_space(self):
self.assertMarkdownRenders(
' <p>A *raw* paragraph.</p>',
'<p>A *raw* paragraph.</p>'
)
def test_raw_indent_two_spaces(self):
self.assertMarkdownRenders(
' <p>A *raw* paragraph.</p>',
'<p>A *raw* paragraph.</p>'
)
def test_raw_indent_three_spaces(self):
self.assertMarkdownRenders(
' <p>A *raw* paragraph.</p>',
'<p>A *raw* paragraph.</p>'
)
def test_raw_indent_four_spaces(self):
self.assertMarkdownRenders(
' <p>code block</p>',
self.dedent(
"""
<pre><code><p>code block</p>
</code></pre>
"""
)
)
def test_raw_span(self):
self.assertMarkdownRenders(
'<span>*inline*</span>',
'<p><span><em>inline</em></span></p>'
)
def test_code_span(self):
self.assertMarkdownRenders(
'`<p>code span</p>`',
'<p><code><p>code span</p></code></p>'
)
def test_code_span_open_gt(self):
self.assertMarkdownRenders(
'*bar* `<` *foo*',
'<p><em>bar</em> <code><</code> <em>foo</em></p>'
)
def test_raw_empty(self):
self.assertMarkdownRenders(
'<p></p>',
'<p></p>'
)
def test_raw_empty_space(self):
self.assertMarkdownRenders(
'<p> </p>',
'<p> </p>'
)
def test_raw_empty_newline(self):
self.assertMarkdownRenders(
'<p>\n</p>',
'<p>\n</p>'
)
def test_raw_empty_blank_line(self):
self.assertMarkdownRenders(
'<p>\n\n</p>',
'<p>\n\n</p>'
)
def test_raw_uppercase(self):
self.assertMarkdownRenders(
'<DIV>*foo*</DIV>',
'<DIV>*foo*</DIV>'
)
def test_raw_uppercase_multiline(self):
self.assertMarkdownRenders(
self.dedent(
"""
<DIV>
*foo*
</DIV>
"""
),
self.dedent(
"""
<DIV>
*foo*
</DIV>
"""
)
)
def test_multiple_raw_single_line(self):
self.assertMarkdownRenders(
'<p>*foo*</p><div>*bar*</div>',
self.dedent(
"""
<p>*foo*</p>
<div>*bar*</div>
"""
)
)
def test_multiple_raw_single_line_with_pi(self):
self.assertMarkdownRenders(
"<p>*foo*</p><?php echo '>'; ?>",
self.dedent(
"""
<p>*foo*</p>
<?php echo '>'; ?>
"""
)
)
def test_multiline_raw(self):
self.assertMarkdownRenders(
self.dedent(
"""
<p>
A raw paragraph
with multiple lines.
</p>
"""
),
self.dedent(
"""
<p>
A raw paragraph
with multiple lines.
</p>
"""
)
)
def test_blank_lines_in_raw(self):
self.assertMarkdownRenders(
self.dedent(
"""
<p>
A raw paragraph...
with many blank lines.
</p>
"""
),
self.dedent(
"""
<p>
A raw paragraph...
with many blank lines.
</p>
"""
)
)
def test_raw_surrounded_by_Markdown(self):
self.assertMarkdownRenders(
self.dedent(
"""
Some *Markdown* text.
<p>*Raw* HTML.</p>
More *Markdown* text.
"""
),
self.dedent(
"""
<p>Some <em>Markdown</em> text.</p>
<p>*Raw* HTML.</p>
<p>More <em>Markdown</em> text.</p>
"""
)
)
def test_raw_surrounded_by_text_without_blank_lines(self):
self.assertMarkdownRenders(
self.dedent(
"""
Some *Markdown* text.
<p>*Raw* HTML.</p>
More *Markdown* text.
"""
),
self.dedent(
"""
<p>Some <em>Markdown</em> text.</p>
<p>*Raw* HTML.</p>
<p>More <em>Markdown</em> text.</p>
"""
)
)
def test_multiline_markdown_with_code_span(self):
self.assertMarkdownRenders(
self.dedent(
"""
A paragraph with a block-level
`<p>code span</p>`, which is
at the start of a line.
"""
),
self.dedent(
"""
<p>A paragraph with a block-level
<code><p>code span</p></code>, which is
at the start of a line.</p>
"""
)
)
def test_raw_block_preceded_by_markdown_code_span_with_unclosed_block_tag(self):
self.assertMarkdownRenders(
self.dedent(
"""
A paragraph with a block-level code span: `<div>`.
<p>*not markdown*</p>
This is *markdown*
"""
),
self.dedent(
"""
<p>A paragraph with a block-level code span: <code><div></code>.</p>
<p>*not markdown*</p>
<p>This is <em>markdown</em></p>
"""
)
)
def test_raw_one_line_followed_by_text(self):
self.assertMarkdownRenders(
'<p>*foo*</p>*bar*',
self.dedent(
"""
<p>*foo*</p>
<p><em>bar</em></p>
"""
)
)
def test_raw_one_line_followed_by_span(self):
self.assertMarkdownRenders(
"<p>*foo*</p><span>*bar*</span>",
self.dedent(
"""
<p>*foo*</p>
<p><span><em>bar</em></span></p>
"""
)
)
def test_raw_with_markdown_blocks(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
Not a Markdown paragraph.
* Not a list item.
* Another non-list item.
Another non-Markdown paragraph.
</div>
"""
),
self.dedent(
"""
<div>
Not a Markdown paragraph.
* Not a list item.
* Another non-list item.
Another non-Markdown paragraph.
</div>
"""
)
)
def test_adjacent_raw_blocks(self):
self.assertMarkdownRenders(
self.dedent(
"""
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
"""
),
self.dedent(
"""
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
"""
)
)
def test_adjacent_raw_blocks_with_blank_lines(self):
self.assertMarkdownRenders(
self.dedent(
"""
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
"""
),
self.dedent(
"""
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
"""
)
)
def test_nested_raw_one_line(self):
self.assertMarkdownRenders(
'<div><p>*foo*</p></div>',
'<div><p>*foo*</p></div>'
)
def test_nested_raw_block(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
</div>
"""
),
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
</div>
"""
)
)
def test_nested_indented_raw_block(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
</div>
"""
),
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
</div>
"""
)
)
def test_nested_raw_blocks(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
</div>
"""
),
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
</div>
"""
)
)
def test_nested_raw_blocks_with_blank_lines(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
</div>
"""
),
self.dedent(
"""
<div>
<p>A raw paragraph.</p>
<p>A second raw paragraph.</p>
</div>
"""
)
)
def test_nested_inline_one_line(self):
self.assertMarkdownRenders(
'<p><em>foo</em><br></p>',
'<p><em>foo</em><br></p>'
)
def test_raw_nested_inline(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
<p>
<span>*text*</span>
</p>
</div>
"""
),
self.dedent(
"""
<div>
<p>
<span>*text*</span>
</p>
</div>
"""
)
)
def test_raw_nested_inline_with_blank_lines(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
<p>
<span>*text*</span>
</p>
</div>
"""
),
self.dedent(
"""
<div>
<p>
<span>*text*</span>
</p>
</div>
"""
)
)
def test_raw_html5(self):
self.assertMarkdownRenders(
self.dedent(
"""
<section>
<header>
<hgroup>
<h1>Hello :-)</h1>
</hgroup>
</header>
<figure>
<img src="image.png" alt="" />
<figcaption>Caption</figcaption>
</figure>
<footer>
<p>Some footer</p>
</footer>
</section>
"""
),
self.dedent(
"""
<section>
<header>
<hgroup>
<h1>Hello :-)</h1>
</hgroup>
</header>
<figure>
<img src="image.png" alt="" />
<figcaption>Caption</figcaption>
</figure>
<footer>
<p>Some footer</p>
</footer>
</section>
"""
)
)
def test_raw_pre_tag(self):
self.assertMarkdownRenders(
self.dedent(
"""
Preserve whitespace in raw html
<pre>
class Foo():
bar = 'bar'
@property
def baz(self):
return self.bar
</pre>
"""
),
self.dedent(
"""
<p>Preserve whitespace in raw html</p>
<pre>
class Foo():
bar = 'bar'
@property
def baz(self):
return self.bar
</pre>
"""
)
)
def test_raw_pre_tag_nested_escaped_html(self):
self.assertMarkdownRenders(
self.dedent(
"""
<pre>
<p>foo</p>
</pre>
"""
),
self.dedent(
"""
<pre>
<p>foo</p>
</pre>
"""
)
)
def test_raw_p_no_end_tag(self):
self.assertMarkdownRenders(
'<p>*text*',
'<p>*text*'
)
def test_raw_multiple_p_no_end_tag(self):
self.assertMarkdownRenders(
self.dedent(
"""
<p>*text*'
<p>more *text*
"""
),
self.dedent(
"""
<p>*text*'
<p>more *text*
"""
)
)
def test_raw_p_no_end_tag_followed_by_blank_line(self):
self.assertMarkdownRenders(
self.dedent(
"""
<p>*raw text*'
Still part of *raw* text.
"""
),
self.dedent(
"""
<p>*raw text*'
Still part of *raw* text.
"""
)
)
def test_raw_nested_p_no_end_tag(self):
self.assertMarkdownRenders(
'<div><p>*text*</div>',
'<div><p>*text*</div>'
)
def test_raw_open_bracket_only(self):
self.assertMarkdownRenders(
'<',
'<p><</p>'
)
def test_raw_open_bracket_followed_by_space(self):
self.assertMarkdownRenders(
'< foo',
'<p>< foo</p>'
)
def test_raw_missing_close_bracket(self):
self.assertMarkdownRenders(
'<foo',
'<p><foo</p>'
)
def test_raw_unclosed_tag_in_code_span(self):
self.assertMarkdownRenders(
self.dedent(
"""
`<div`.
<div>
hello
</div>
"""
),
self.dedent(
"""
<p><code><div</code>.</p>
<div>
hello
</div>
"""
)
)
def test_raw_unclosed_tag_in_code_span_space(self):
self.assertMarkdownRenders(
self.dedent(
"""
` <div `.
<div>
hello
</div>
"""
),
self.dedent(
"""
<p><code><div</code>.</p>
<div>
hello
</div>
"""
)
)
def test_raw_attributes(self):
self.assertMarkdownRenders(
'<p id="foo", class="bar baz", style="margin: 15px; line-height: 1.5; text-align: center;">text</p>',
'<p id="foo", class="bar baz", style="margin: 15px; line-height: 1.5; text-align: center;">text</p>'
)
def test_raw_attributes_nested(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div id="foo, class="bar", style="background: #ffe7e8; border: 2px solid #e66465;">
<p id="baz", style="margin: 15px; line-height: 1.5; text-align: center;">
<img scr="../foo.jpg" title="with 'quoted' text." valueless_attr weirdness="<i>foo</i>" />
</p>
</div>
"""
),
self.dedent(
"""
<div id="foo, class="bar", style="background: #ffe7e8; border: 2px solid #e66465;">
<p id="baz", style="margin: 15px; line-height: 1.5; text-align: center;">
<img scr="../foo.jpg" title="with 'quoted' text." valueless_attr weirdness="<i>foo</i>" />
</p>
</div>
"""
)
)
def test_raw_comment_one_line(self):
self.assertMarkdownRenders(
'<!-- *foo* -->',
'<!-- *foo* -->'
)
def test_raw_comment_one_line_with_tag(self):
self.assertMarkdownRenders(
'<!-- <tag> -->',
'<!-- <tag> -->'
)
def test_comment_in_code_span(self):
self.assertMarkdownRenders(
'`<!-- *foo* -->`',
'<p><code><!-- *foo* --></code></p>'
)
def test_raw_comment_one_line_followed_by_text(self):
self.assertMarkdownRenders(
'<!-- *foo* -->*bar*',
self.dedent(
"""
<!-- *foo* -->
<p><em>bar</em></p>
"""
)
)
def test_raw_comment_one_line_followed_by_html(self):
self.assertMarkdownRenders(
'<!-- *foo* --><p>*bar*</p>',
self.dedent(
"""
<!-- *foo* -->
<p>*bar*</p>
"""
)
)
# Note: Trailing (insignificant) whitespace is not preserved, which does not match the
# reference implementation. However, it is not a change in behavior for Python-Markdown.
def test_raw_comment_trailing_whitespace(self):
self.assertMarkdownRenders(
'<!-- *foo* --> ',
'<!-- *foo* -->'
)
def test_bogus_comment(self):
self.assertMarkdownRenders(
'<!invalid>',
'<p><!invalid></p>'
)
def test_bogus_comment_endtag(self):
self.assertMarkdownRenders(
'</#invalid>',
'<p></#invalid></p>'
)
def test_raw_multiline_comment(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
*foo*
-->
"""
),
self.dedent(
"""
<!--
*foo*
-->
"""
)
)
def test_raw_multiline_comment_with_tag(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
<tag>
-->
"""
),
self.dedent(
"""
<!--
<tag>
-->
"""
)
)
def test_raw_multiline_comment_first_line(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!-- *foo*
-->
"""
),
self.dedent(
"""
<!-- *foo*
-->
"""
)
)
def test_raw_multiline_comment_last_line(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
*foo* -->
"""
),
self.dedent(
"""
<!--
*foo* -->
"""
)
)
def test_raw_comment_with_blank_lines(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
*foo*
-->
"""
),
self.dedent(
"""
<!--
*foo*
-->
"""
)
)
def test_raw_comment_with_blank_lines_with_tag(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
<tag>
-->
"""
),
self.dedent(
"""
<!--
<tag>
-->
"""
)
)
def test_raw_comment_with_blank_lines_first_line(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!-- *foo*
-->
"""
),
self.dedent(
"""
<!-- *foo*
-->
"""
)
)
def test_raw_comment_with_blank_lines_last_line(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
*foo* -->
"""
),
self.dedent(
"""
<!--
*foo* -->
"""
)
)
def test_raw_comment_indented(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
*foo*
-->
"""
),
self.dedent(
"""
<!--
*foo*
-->
"""
)
)
def test_raw_comment_indented_with_tag(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!--
<tag>
-->
"""
),
self.dedent(
"""
<!--
<tag>
-->
"""
)
)
def test_raw_comment_nested(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
<!-- *foo* -->
</div>
"""
),
self.dedent(
"""
<div>
<!-- *foo* -->
</div>
"""
)
)
def test_comment_in_code_block(self):
self.assertMarkdownRenders(
' <!-- *foo* -->',
self.dedent(
"""
<pre><code><!-- *foo* -->
</code></pre>
"""
)
)
# Note: This is a change in behavior. Previously, Python-Markdown interpreted this in the same manner
# as browsers and all text after the opening comment tag was considered to be in a comment. However,
# that did not match the reference implementation. The new behavior does.
def test_unclosed_comment(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!-- unclosed comment
*not* a comment
"""
),
self.dedent(
"""
<p><!-- unclosed comment</p>
<p><em>not</em> a comment</p>
"""
)
)
def test_invalid_comment_end(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!-- This comment is malformed and never closes -- >
Some content after the bad comment.
"""
),
self.dedent(
"""
<p><!-- This comment is malformed and never closes -- >
Some content after the bad comment.</p>
"""
)
)
def test_raw_processing_instruction_one_line(self):
self.assertMarkdownRenders(
"<?php echo '>'; ?>",
"<?php echo '>'; ?>"
)
# This is a change in behavior and does not match the reference implementation.
# We have no way to determine if text is on the same line, so we get this. TODO: reevaluate!
def test_raw_processing_instruction_one_line_followed_by_text(self):
self.assertMarkdownRenders(
"<?php echo '>'; ?>*bar*",
self.dedent(
"""
<?php echo '>'; ?>
<p><em>bar</em></p>
"""
)
)
def test_raw_multiline_processing_instruction(self):
self.assertMarkdownRenders(
self.dedent(
"""
<?php
echo '>';
?>
"""
),
self.dedent(
"""
<?php
echo '>';
?>
"""
)
)
def test_raw_processing_instruction_with_blank_lines(self):
self.assertMarkdownRenders(
self.dedent(
"""
<?php
echo '>';
?>
"""
),
self.dedent(
"""
<?php
echo '>';
?>
"""
)
)
def test_raw_processing_instruction_indented(self):
self.assertMarkdownRenders(
self.dedent(
"""
<?php
echo '>';
?>
"""
),
self.dedent(
"""
<?php
echo '>';
?>
"""
)
)
def test_raw_processing_instruction_code_span(self):
self.assertMarkdownRenders(
self.dedent(
"""
`<?php`
<div>
foo
</div>
"""
),
self.dedent(
"""
<p><code><?php</code></p>
<div>
foo
</div>
"""
)
)
def test_raw_declaration_one_line(self):
self.assertMarkdownRenders(
'<!DOCTYPE html>',
'<!DOCTYPE html>'
)
# This is a change in behavior and does not match the reference implementation.
# We have no way to determine if text is on the same line, so we get this. TODO: reevaluate!
def test_raw_declaration_one_line_followed_by_text(self):
self.assertMarkdownRenders(
'<!DOCTYPE html>*bar*',
self.dedent(
"""
<!DOCTYPE html>
<p><em>bar</em></p>
"""
)
)
def test_raw_multiline_declaration(self):
self.assertMarkdownRenders(
self.dedent(
"""
<!DOCTYPE html PUBLIC
"-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
"""
),
self.dedent(
"""
<!DOCTYPE html PUBLIC
"-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
"""
)
)
def test_raw_declaration_code_span(self):
self.assertMarkdownRenders(
self.dedent(
"""
`<!`
<div>
foo
</div>
"""
),
self.dedent(
"""
<p><code><!</code></p>
<div>
foo
</div>
"""
)
)
def test_raw_cdata_one_line(self):
self.assertMarkdownRenders(
'<![CDATA[ document.write(">"); ]]>',
'<![CDATA[ document.write(">"); ]]>'
)
# Note: this is a change. Neither previous output nor this match reference implementation.
def test_raw_cdata_one_line_followed_by_text(self):
self.assertMarkdownRenders(
'<![CDATA[ document.write(">"); ]]>*bar*',
self.dedent(
"""
<![CDATA[ document.write(">"); ]]>
<p><em>bar</em></p>
"""
)
)
def test_raw_multiline_cdata(self):
self.assertMarkdownRenders(
self.dedent(
"""
<![CDATA[
document.write(">");
]]>
"""
),
self.dedent(
"""
<![CDATA[
document.write(">");
]]>
"""
)
)
def test_raw_cdata_with_blank_lines(self):
self.assertMarkdownRenders(
self.dedent(
"""
<![CDATA[
document.write(">");
]]>
"""
),
self.dedent(
"""
<![CDATA[
document.write(">");
]]>
"""
)
)
def test_raw_cdata_indented(self):
self.assertMarkdownRenders(
self.dedent(
"""
<![CDATA[
document.write(">");
]]>
"""
),
self.dedent(
"""
<![CDATA[
document.write(">");
]]>
"""
)
)
def test_not_actually_cdata(self):
# Ensure bug reported in #1534 is avoided.
self.assertMarkdownRenders(
'< stuff>
<some>> <<unbalanced>> <<brackets>
"""
),
self.dedent(
"""
<p><some <a href="http://example.com">weird</a> stuff></p>
<p><some>> <<unbalanced>> <<brackets></p>
"""
)
)
def test_script_tags(self):
self.assertMarkdownRenders(
self.dedent(
"""
<script>
*random stuff* <div> &
</script>
<style>
**more stuff**
</style>
"""
),
self.dedent(
"""
<script>
*random stuff* <div> &
</script>
<style>
**more stuff**
</style>
"""
)
)
def test_unclosed_script_tag(self):
# Ensure we have a working fix for https://bugs.python.org/issue41989
self.assertMarkdownRenders(
self.dedent(
"""
<script>
*random stuff* <div> &
Still part of the *script* tag
"""
),
self.dedent(
"""
<script>
*random stuff* <div> &
Still part of the *script* tag
"""
)
)
def test_inline_script_tags(self):
# Ensure inline script tags doesn't cause the parser to eat content (see #1036).
self.assertMarkdownRenders(
self.dedent(
"""
Text `<script>` more *text*.
<div>
*foo*
</div>
<div>
bar
</div>
A new paragraph with a closing `</script>` tag.
"""
),
self.dedent(
"""
<p>Text <code><script></code> more <em>text</em>.</p>
<div>
*foo*
</div>
<div>
bar
</div>
<p>A new paragraph with a closing <code></script></code> tag.</p>
"""
)
)
def test_hr_only_start(self):
self.assertMarkdownRenders(
self.dedent(
"""
*emphasis1*
<hr>
*emphasis2*
"""
),
self.dedent(
"""
<p><em>emphasis1</em></p>
<hr>
<p><em>emphasis2</em></p>
"""
)
)
def test_hr_self_close(self):
self.assertMarkdownRenders(
self.dedent(
"""
*emphasis1*
<hr/>
*emphasis2*
"""
),
self.dedent(
"""
<p><em>emphasis1</em></p>
<hr/>
<p><em>emphasis2</em></p>
"""
)
)
def test_hr_start_and_end(self):
# Browsers ignore ending hr tags, so we don't try to do anything to handle them special.
self.assertMarkdownRenders(
self.dedent(
"""
*emphasis1*
<hr></hr>
*emphasis2*
"""
),
self.dedent(
"""
<p><em>emphasis1</em></p>
<hr>
<p></hr>
<em>emphasis2</em></p>
"""
)
)
def test_hr_only_end(self):
# Browsers ignore ending hr tags, so we don't try to do anything to handle them special.
self.assertMarkdownRenders(
self.dedent(
"""
*emphasis1*
</hr>
*emphasis2*
"""
),
self.dedent(
"""
<p><em>emphasis1</em>
</hr>
<em>emphasis2</em></p>
"""
)
)
def test_hr_with_content(self):
# Browsers ignore ending hr tags, so we don't try to do anything to handle them special.
# Content is not allowed and will be treated as normal content between two hr tags.
self.assertMarkdownRenders(
self.dedent(
"""
*emphasis1*
<hr>
**content**
</hr>
*emphasis2*
"""
),
self.dedent(
"""
<p><em>emphasis1</em></p>
<hr>
<p><strong>content</strong>
</hr>
<em>emphasis2</em></p>
"""
)
)
def test_placeholder_in_source(self):
# This should never occur, but third party extensions could create weird edge cases.
md = markdown.Markdown()
# Ensure there is an `htmlstash` so relevant code (nested in `if replacements`) is run.
md.htmlStash.store('foo')
# Run with a placeholder which is not in the stash
placeholder = md.htmlStash.get_placeholder(md.htmlStash.html_counter + 1)
result = md.postprocessors['raw_html'].run(placeholder)
self.assertEqual(placeholder, result)
def test_noname_tag(self):
self.assertMarkdownRenders(
self.dedent(
"""
<div>
</>
</div>
"""
),
self.dedent(
"""
<div>
</>
</div>
"""
)
)
| TestHTMLBlocks |
python | langchain-ai__langchain | libs/text-splitters/langchain_text_splitters/python.py | {
"start": 225,
"end": 573
} | class ____(RecursiveCharacterTextSplitter):
"""Attempts to split the text along Python syntax."""
def __init__(self, **kwargs: Any) -> None:
"""Initialize a PythonCodeTextSplitter."""
separators = self.get_separators_for_language(Language.PYTHON)
super().__init__(separators=separators, **kwargs)
| PythonCodeTextSplitter |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 6714,
"end": 6844
} | class ____(BaseModel):
"""Top-level container for pull requests response data."""
repository: PRsRepository
| PRsResponseData |
python | tensorflow__tensorflow | tensorflow/python/framework/test_util.py | {
"start": 149702,
"end": 152428
} | class ____:
"""A utility class to track increments to test counters."""
def __init__(self, name, label):
self.name = name
self.label = label
self.Reset()
def Reset(self) -> None:
self.last_value = _test_metrics_util.test_counter_value(
self.name, self.label)
def Get(self) -> int:
value = _test_metrics_util.test_counter_value(self.name, self.label)
return value - self.last_value
@tf_export("test.experimental.sync_devices")
def sync_devices() -> None:
"""Synchronizes all devices.
By default, GPUs run asynchronously. This means that when you run an op on the
GPU, like `tf.linalg.matmul`, the op may still be running on the GPU when the
function returns. Non-GPU devices can also be made to run asynchronously by
calling `tf.config.experimental.set_synchronous_execution(False)`. Calling
`sync_devices()` blocks until pending ops have finished executing. This is
primarily useful for measuring performance during a benchmark.
For example, here is how you can measure how long `tf.linalg.matmul` runs:
>>> import time
>>> x = tf.random.normal((4096, 4096))
>>> tf.linalg.matmul(x, x) # Warmup.
>>> tf.test.experimental.sync_devices() # Block until warmup has completed.
>>>
>>> start = time.time()
>>> y = tf.linalg.matmul(x, x)
>>> tf.test.experimental.sync_devices() # Block until matmul has completed.
>>> end = time.time()
>>> print(f'Time taken: {end - start}')
If the call to `sync_devices()` was omitted, the time printed could be too
small. This is because the op could still be running asynchronously when
the line `end = time.time()` is executed.
Raises:
RuntimeError: If run outside Eager mode. This must be called in Eager mode,
outside any `tf.function`s.
"""
if not context.executing_eagerly():
raise RuntimeError(
"sync_devices() must only be called in Eager mode, outside tf.functions"
)
# There are two sources of asynchrony in TensorFlow:
#
# 1. On GPUs, kernels are run on a CUDA stream, which is inherently
# asynchronous.
# 2. Calling `tf.config.experimental.set_synchronous_execution(False)` makes
# all ops asynchronous, in which case TensorFlow maintains internal queues
# of pending ops.
#
# Calling SyncDevice addresses source (1). Calling async_await addresses
# source (2). It is important that SyncDevice() is called before async_wait(),
# otherwise the SyncDevice op itself may still be pending on an internal
# TensorFlow queue when the sync_devices() Python function returns.
devices = config.list_logical_devices()
for dev in devices:
with ops.device(dev.name):
gen_sync_ops.SyncDevice()
context.async_wait()
| TestDelta |
python | crytic__slither | slither/core/declarations/structure_top_level.py | {
"start": 280,
"end": 510
} | class ____(Structure, TopLevel):
def __init__(self, compilation_unit: "SlitherCompilationUnit", scope: "FileScope") -> None:
super().__init__(compilation_unit)
self.file_scope: "FileScope" = scope
| StructureTopLevel |
python | giampaolo__psutil | tests/test_windows.py | {
"start": 1276,
"end": 2472
} | class ____(PsutilTestCase):
pass
def powershell(cmd):
"""Currently not used, but available just in case. Usage:
>>> powershell(
"Get-CIMInstance Win32_PageFileUsage | Select AllocatedBaseSize")
"""
if not shutil.which("powershell.exe"):
return pytest.skip("powershell.exe not available")
cmdline = (
"powershell.exe -ExecutionPolicy Bypass -NoLogo -NonInteractive "
f"-NoProfile -WindowStyle Hidden -Command \"{cmd}\"" # noqa: Q003
)
return sh(cmdline)
def wmic(path, what, converter=int):
"""Currently not used, but available just in case. Usage:
>>> wmic("Win32_OperatingSystem", "FreePhysicalMemory")
2134124534
"""
out = sh(f"wmic path {path} get {what}").strip()
data = "".join(out.splitlines()[1:]).strip() # get rid of the header
if converter is not None:
if "," in what:
return tuple(converter(x) for x in data.split())
else:
return converter(data)
else:
return data
# ===================================================================
# System APIs
# ===================================================================
| WindowsTestCase |
python | huggingface__transformers | src/transformers/models/falcon_mamba/modular_falcon_mamba.py | {
"start": 25842,
"end": 25909
} | class ____(MambaPreTrainedModel):
pass
| FalconMambaPreTrainedModel |
python | networkx__networkx | networkx/algorithms/tests/test_matching.py | {
"start": 14833,
"end": 15594
} | class ____:
"""Unit tests for the
:func:`~networkx.algorithms.matching.is_maximal_matching` function.
"""
def test_dict(self):
G = nx.path_graph(4)
assert nx.is_maximal_matching(G, {0: 1, 1: 0, 2: 3, 3: 2})
def test_valid(self):
G = nx.path_graph(4)
assert nx.is_maximal_matching(G, {(0, 1), (2, 3)})
def test_not_matching(self):
G = nx.path_graph(4)
assert not nx.is_maximal_matching(G, {(0, 1), (1, 2), (2, 3)})
assert not nx.is_maximal_matching(G, {(0, 3)})
G.add_edge(0, 0)
assert not nx.is_maximal_matching(G, {(0, 0)})
def test_not_maximal(self):
G = nx.path_graph(4)
assert not nx.is_maximal_matching(G, {(0, 1)})
| TestIsMaximalMatching |
python | numba__numba | numba/cuda/cudadrv/driver.py | {
"start": 58512,
"end": 62795
} | class ____(object):
"""
CUDA IPC handle. Serialization of the CUDA IPC handle object is implemented
here.
:param base: A reference to the original allocation to keep it alive
:type base: MemoryPointer
:param handle: The CUDA IPC handle, as a ctypes array of bytes.
:param size: Size of the original allocation
:type size: int
:param source_info: The identity of the device on which the IPC handle was
opened.
:type source_info: dict
:param offset: The offset into the underlying allocation of the memory
referred to by this IPC handle.
:type offset: int
"""
def __init__(self, base, handle, size, source_info=None, offset=0):
self.base = base
self.handle = handle
self.size = size
self.source_info = source_info
self._impl = None
self.offset = offset
def _sentry_source_info(self):
if self.source_info is None:
raise RuntimeError("IPC handle doesn't have source info")
def can_access_peer(self, context):
"""Returns a bool indicating whether the active context can peer
access the IPC handle
"""
self._sentry_source_info()
if self.source_info == context.device.get_device_identity():
return True
source_device = Device.from_identity(self.source_info)
return context.can_access_peer(source_device.id)
def open_staged(self, context):
"""Open the IPC by allowing staging on the host memory first.
"""
self._sentry_source_info()
if self._impl is not None:
raise ValueError('IpcHandle is already opened')
self._impl = _StagedIpcImpl(self, self.source_info)
return self._impl.open(context)
def open_direct(self, context):
"""
Import the IPC memory and returns a raw CUDA memory pointer object
"""
if self._impl is not None:
raise ValueError('IpcHandle is already opened')
self._impl = _CudaIpcImpl(self)
return self._impl.open(context)
def open(self, context):
"""Open the IPC handle and import the memory for usage in the given
context. Returns a raw CUDA memory pointer object.
This is enhanced over CUDA IPC that it will work regardless of whether
the source device is peer-accessible by the destination device.
If the devices are peer-accessible, it uses .open_direct().
If the devices are not peer-accessible, it uses .open_staged().
"""
if self.source_info is None or self.can_access_peer(context):
fn = self.open_direct
else:
fn = self.open_staged
return fn(context)
def open_array(self, context, shape, dtype, strides=None):
"""
Similar to `.open()` but returns an device array.
"""
from . import devicearray
# by default, set strides to itemsize
if strides is None:
strides = dtype.itemsize
dptr = self.open(context)
# read the device pointer as an array
return devicearray.DeviceNDArray(shape=shape, strides=strides,
dtype=dtype, gpu_data=dptr)
def close(self):
if self._impl is None:
raise ValueError('IpcHandle not opened')
self._impl.close()
self._impl = None
def __reduce__(self):
# Preprocess the IPC handle, which is defined as a byte array.
if USE_NV_BINDING:
preprocessed_handle = self.handle.reserved
else:
preprocessed_handle = tuple(self.handle)
args = (
self.__class__,
preprocessed_handle,
self.size,
self.source_info,
self.offset,
)
return (serialize._rebuild_reduction, args)
@classmethod
def _rebuild(cls, handle_ary, size, source_info, offset):
if USE_NV_BINDING:
handle = binding.CUipcMemHandle()
handle.reserved = handle_ary
else:
handle = drvapi.cu_ipc_mem_handle(*handle_ary)
return cls(base=None, handle=handle, size=size,
source_info=source_info, offset=offset)
| IpcHandle |
python | huggingface__transformers | src/transformers/models/afmoe/modular_afmoe.py | {
"start": 5601,
"end": 7000
} | class ____(nn.Module):
"""
Mixture of Experts (MoE) module for AFMoE.
This module implements a sparse MoE layer with both shared experts (always active) and
routed experts (activated based on token-choice routing).
"""
def __init__(self, config):
super().__init__()
self.config = config
self.router = AfmoeTokenChoiceRouter(config)
self.shared_experts = AfmoeMLP(config, config.moe_intermediate_size * config.num_shared_experts)
self.experts = AfmoeExperts(config)
self.expert_bias = nn.Parameter(torch.zeros(config.num_experts, dtype=torch.float32), requires_grad=False)
def forward(self, hidden_states):
batch_size, seq_len, hidden_dim = hidden_states.shape
hidden_states_flat = hidden_states.view(-1, hidden_dim)
# Get routing decisions
top_scores, selected_experts = self.router(hidden_states, self.expert_bias)
top_scores = top_scores.view(batch_size, seq_len, self.config.num_experts_per_tok)
selected_experts = selected_experts.view(batch_size, seq_len, self.config.num_experts_per_tok)
# Process through shared experts
shared_output = self.shared_experts(hidden_states_flat).view(batch_size, seq_len, hidden_dim)
routed_output = self.experts(hidden_states, selected_experts, top_scores)
return shared_output + routed_output
| AfmoeMoE |
python | numpy__numpy | numpy/_core/tests/test_unicode.py | {
"start": 12071,
"end": 12221
} | class ____(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
| TestByteorder_1_UCS2 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass9.py | {
"start": 528,
"end": 568
} | class ____:
pass
dataclass_only(A())
| A |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 35054,
"end": 35278
} | class ____(VOTableSpecWarning):
"""
The integer value is out of range for the size of the field.
"""
message_template = "Value '{}' is out of range for a {} integer field"
default_args = ("x", "n-bit")
| W51 |
python | pypa__twine | twine/auth.py | {
"start": 1529,
"end": 1833
} | class ____(t.TypedDict, total=False):
message: t.Optional[str]
errors: t.Optional[list[TrustedPublishingTokenRetrievalError]]
token: t.Optional[str]
success: t.Optional[bool]
# Depends on https://github.com/pypi/warehouse/issues/18235
expires: t.Optional[int]
| TrustedPublishingToken |
python | sanic-org__sanic | sanic/application/state.py | {
"start": 452,
"end": 689
} | class ____:
"""Information about a server instance."""
settings: dict[str, Any]
stage: ServerStage = field(default=ServerStage.STOPPED)
server: Optional[AsyncioServer] = field(default=None)
@dataclass
| ApplicationServerInfo |
python | sympy__sympy | sympy/physics/quantum/tests/test_innerproduct.py | {
"start": 936,
"end": 1032
} | class ____(Bra, FooState):
@classmethod
def dual_class(self):
return FooKet
| FooBra |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_bedrock.py | {
"start": 1498,
"end": 1798
} | class ____:
EXPECTED_WAITER_NAME: str | None = None
def test_setup(self):
# Ensure that all subclasses have an expected waiter name set.
if self.__class__.__name__ != "TestBaseBedrockTrigger":
assert isinstance(self.EXPECTED_WAITER_NAME, str)
| TestBaseBedrockTrigger |
python | TheAlgorithms__Python | sorts/external_sort.py | {
"start": 2867,
"end": 4238
} | class ____:
def __init__(self, block_size):
self.block_size = block_size
def sort(self, filename, sort_key=None):
num_blocks = self.get_number_blocks(filename, self.block_size)
splitter = FileSplitter(filename)
splitter.split(self.block_size, sort_key)
merger = FileMerger(NWayMerge())
buffer_size = self.block_size / (num_blocks + 1)
merger.merge(splitter.get_block_filenames(), filename + ".out", buffer_size)
splitter.cleanup()
def get_number_blocks(self, filename, block_size):
return (os.stat(filename).st_size / block_size) + 1
def parse_memory(string):
if string[-1].lower() == "k":
return int(string[:-1]) * 1024
elif string[-1].lower() == "m":
return int(string[:-1]) * 1024 * 1024
elif string[-1].lower() == "g":
return int(string[:-1]) * 1024 * 1024 * 1024
else:
return int(string)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-m", "--mem", help="amount of memory to use for sorting", default="100M"
)
parser.add_argument(
"filename", metavar="<filename>", nargs=1, help="name of file to sort"
)
args = parser.parse_args()
sorter = ExternalSort(parse_memory(args.mem))
sorter.sort(args.filename[0])
if __name__ == "__main__":
main()
| ExternalSort |
python | huggingface__transformers | tests/models/x_clip/test_modeling_x_clip.py | {
"start": 21912,
"end": 24901
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
model_name = "microsoft/xclip-base-patch32"
model = XCLIPModel.from_pretrained(model_name).to(torch_device)
processor = XCLIPProcessor.from_pretrained(model_name)
video = prepare_video()
inputs = processor(
text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
).to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_video.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[14.0181, 20.2771, 14.4776]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_video, expected_logits, rtol=1e-3, atol=1e-3)
@slow
def test_inference_interpolate_pos_encoding(self):
# XCLIP models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32").to(torch_device)
processor = XCLIPProcessor.from_pretrained(
"microsoft/xclip-base-patch32", size=180, crop_size={"height": 180, "width": 180}
)
video = prepare_video()
inputs = processor(text="what's in the video", videos=video, return_tensors="pt").to(torch_device)
# interpolate_pos_encodiung false should return value error
with self.assertRaises(ValueError, msg="doesn't match model"):
with torch.no_grad():
model(**inputs, interpolate_pos_encoding=False)
# forward pass
with torch.no_grad():
outputs = model(**inputs, interpolate_pos_encoding=True)
# verify the logits
expected_shape = torch.Size((8, 26, 768))
self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
expectations = Expectations(
{
(None, None): [[0.0126, 0.2109, 0.0609], [0.0448, 0.5862, -0.1688], [-0.0881, 0.8525, -0.3044]],
("cuda", 8): [[0.0126, 0.2109, 0.0609], [0.0448, 0.5862, -0.1688], [-0.0881, 0.8525, -0.3044]],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(
outputs.vision_model_output.last_hidden_state[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4
)
| XCLIPModelIntegrationTest |
python | facelessuser__soupsieve | tests/test_level4/test_where.py | {
"start": 50,
"end": 755
} | class ____(util.TestCase):
"""Test where selectors."""
MARKUP = """
<div>
<p>Some text <span id="1"> in a paragraph</span>.
<a id="2" href="http://google.com">Link</a>
</p>
</div>
"""
def test_where(self):
"""Test multiple selectors with "where"."""
self.assert_selector(
self.MARKUP,
":where(span, a)",
["1", "2"],
flags=util.HTML
)
def test_nested_where(self):
"""Test multiple nested selectors with "where"."""
self.assert_selector(
self.MARKUP,
":where(span, a:where(#\\32))",
["1", "2"],
flags=util.HTML
)
| TestWhere |
python | pypa__pipenv | pipenv/vendor/plette/models/sections.py | {
"start": 248,
"end": 316
} | class ____(DataModelMapping):
item_class = Script
| ScriptCollection |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 18133,
"end": 19487
} | class ____(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: DPTConfig):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = DPTViTAttention(config)
self.intermediate = DPTViTIntermediate(config)
self.output = DPTViTOutput(config)
self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states_norm = self.layernorm_before(hidden_states)
attention_output = self.attention(hidden_states_norm)
# first residual connection
hidden_states = attention_output + hidden_states
# in ViT, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.intermediate(layer_output)
# second residual connection is done here
layer_output = self.output(layer_output, hidden_states)
return layer_output
# Copied from transformers.models.dinov2.modeling_dinov2.Dinov2Encoder with Dinov2Config->DPTConfig, Dinov2->DPTViT
| DPTViTLayer |
python | pandas-dev__pandas | asv_bench/benchmarks/algos/isin.py | {
"start": 2378,
"end": 3130
} | class ____:
params = [
[np.float64, np.int64, np.uint64, np.object_],
range(10, 21),
["inside", "outside"],
]
param_names = ["dtype", "exponent", "title"]
def setup(self, dtype, exponent, title):
M = 3 * 2 ** (exponent - 2)
# 0.77-the maximal share of occupied buckets
self.series = Series(np.random.randint(0, M, M)).astype(dtype)
values = np.random.randint(0, M, M).astype(dtype)
if title == "inside":
self.values = values
elif title == "outside":
self.values = values + M
else:
raise ValueError(title)
def time_isin(self, dtype, exponent, title):
self.series.isin(self.values)
| IsinAlmostFullWithRandomInt |
python | sympy__sympy | sympy/polys/numberfields/modules.py | {
"start": 42205,
"end": 53531
} | class ____(IntegerPowerable):
r"""
Represents an element of a :py:class:`~.Module`.
NOTE: Should not be constructed directly. Use the
:py:meth:`~.Module.__call__` method or the :py:func:`make_mod_elt()`
factory function instead.
"""
def __init__(self, module, col, denom=1):
"""
Parameters
==========
module : :py:class:`~.Module`
The module to which this element belongs.
col : :py:class:`~.DomainMatrix` over :ref:`ZZ`
Column vector giving the numerators of the coefficients of this
element.
denom : int, optional (default=1)
Denominator for the coefficients of this element.
"""
self.module = module
self.col = col
self.denom = denom
self._QQ_col = None
def __repr__(self):
r = str([int(c) for c in self.col.flat()])
if self.denom > 1:
r += f'/{self.denom}'
return r
def reduced(self):
"""
Produce a reduced version of this ModuleElement, i.e. one in which the
gcd of the denominator together with all numerator coefficients is 1.
"""
if self.denom == 1:
return self
g = igcd(self.denom, *self.coeffs)
if g == 1:
return self
return type(self)(self.module,
(self.col / g).convert_to(ZZ),
denom=self.denom // g)
def reduced_mod_p(self, p):
"""
Produce a version of this :py:class:`~.ModuleElement` in which all
numerator coefficients have been reduced mod *p*.
"""
return make_mod_elt(self.module,
self.col.convert_to(FF(p)).convert_to(ZZ),
denom=self.denom)
@classmethod
def from_int_list(cls, module, coeffs, denom=1):
"""
Make a :py:class:`~.ModuleElement` from a list of ints (instead of a
column vector).
"""
col = to_col(coeffs)
return cls(module, col, denom=denom)
@property
def n(self):
"""The length of this element's column."""
return self.module.n
def __len__(self):
return self.n
def column(self, domain=None):
"""
Get a copy of this element's column, optionally converting to a domain.
"""
if domain is None:
return self.col.copy()
else:
return self.col.convert_to(domain)
@property
def coeffs(self):
return self.col.flat()
@property
def QQ_col(self):
"""
:py:class:`~.DomainMatrix` over :ref:`QQ`, equal to
``self.col / self.denom``, and guaranteed to be dense.
See Also
========
.Submodule.QQ_matrix
"""
if self._QQ_col is None:
self._QQ_col = (self.col / self.denom).to_dense()
return self._QQ_col
def to_parent(self):
"""
Transform into a :py:class:`~.ModuleElement` belonging to the parent of
this element's module.
"""
if not isinstance(self.module, Submodule):
raise ValueError('Not an element of a Submodule.')
return make_mod_elt(
self.module.parent, self.module.matrix * self.col,
denom=self.module.denom * self.denom)
def to_ancestor(self, anc):
"""
Transform into a :py:class:`~.ModuleElement` belonging to a given
ancestor of this element's module.
Parameters
==========
anc : :py:class:`~.Module`
"""
if anc == self.module:
return self
else:
return self.to_parent().to_ancestor(anc)
def over_power_basis(self):
"""
Transform into a :py:class:`~.PowerBasisElement` over our
:py:class:`~.PowerBasis` ancestor.
"""
e = self
while not isinstance(e.module, PowerBasis):
e = e.to_parent()
return e
def is_compat(self, other):
"""
Test whether other is another :py:class:`~.ModuleElement` with same
module.
"""
return isinstance(other, ModuleElement) and other.module == self.module
def unify(self, other):
"""
Try to make a compatible pair of :py:class:`~.ModuleElement`, one
equivalent to this one, and one equivalent to the other.
Explanation
===========
We search for the nearest common ancestor module for the pair of
elements, and represent each one there.
Returns
=======
Pair ``(e1, e2)``
Each ``ei`` is a :py:class:`~.ModuleElement`, they belong to the
same :py:class:`~.Module`, ``e1`` is equivalent to ``self``, and
``e2`` is equivalent to ``other``.
Raises
======
UnificationFailed
If ``self`` and ``other`` have no common ancestor module.
"""
if self.module == other.module:
return self, other
nca = self.module.nearest_common_ancestor(other.module)
if nca is not None:
return self.to_ancestor(nca), other.to_ancestor(nca)
raise UnificationFailed(f"Cannot unify {self} with {other}")
def __eq__(self, other):
if self.is_compat(other):
return self.QQ_col == other.QQ_col
return NotImplemented
def equiv(self, other):
"""
A :py:class:`~.ModuleElement` may test as equivalent to a rational
number or another :py:class:`~.ModuleElement`, if they represent the
same algebraic number.
Explanation
===========
This method is intended to check equivalence only in those cases in
which it is easy to test; namely, when *other* is either a
:py:class:`~.ModuleElement` that can be unified with this one (i.e. one
which shares a common :py:class:`~.PowerBasis` ancestor), or else a
rational number (which is easy because every :py:class:`~.PowerBasis`
represents every rational number).
Parameters
==========
other : int, :ref:`ZZ`, :ref:`QQ`, :py:class:`~.ModuleElement`
Returns
=======
bool
Raises
======
UnificationFailed
If ``self`` and ``other`` do not share a common
:py:class:`~.PowerBasis` ancestor.
"""
if self == other:
return True
elif isinstance(other, ModuleElement):
a, b = self.unify(other)
return a == b
elif is_rat(other):
if isinstance(self, PowerBasisElement):
return self == self.module(0) * other
else:
return self.over_power_basis().equiv(other)
return False
def __add__(self, other):
"""
A :py:class:`~.ModuleElement` can be added to a rational number, or to
another :py:class:`~.ModuleElement`.
Explanation
===========
When the other summand is a rational number, it will be converted into
a :py:class:`~.ModuleElement` (belonging to the first ancestor of this
module that starts with unity).
In all cases, the sum belongs to the nearest common ancestor (NCA) of
the modules of the two summands. If the NCA does not exist, we return
``NotImplemented``.
"""
if self.is_compat(other):
d, e = self.denom, other.denom
m = ilcm(d, e)
u, v = m // d, m // e
col = to_col([u * a + v * b for a, b in zip(self.coeffs, other.coeffs)])
return type(self)(self.module, col, denom=m).reduced()
elif isinstance(other, ModuleElement):
try:
a, b = self.unify(other)
except UnificationFailed:
return NotImplemented
return a + b
elif is_rat(other):
return self + self.module.element_from_rational(other)
return NotImplemented
__radd__ = __add__
def __neg__(self):
return self * -1
def __sub__(self, other):
return self + (-other)
def __rsub__(self, other):
return -self + other
def __mul__(self, other):
"""
A :py:class:`~.ModuleElement` can be multiplied by a rational number,
or by another :py:class:`~.ModuleElement`.
Explanation
===========
When the multiplier is a rational number, the product is computed by
operating directly on the coefficients of this
:py:class:`~.ModuleElement`.
When the multiplier is another :py:class:`~.ModuleElement`, the product
will belong to the nearest common ancestor (NCA) of the modules of the
two operands, and that NCA must have a multiplication table. If the NCA
does not exist, we return ``NotImplemented``. If the NCA does not have
a mult. table, ``ClosureFailure`` will be raised.
"""
if self.is_compat(other):
M = self.module.mult_tab()
A, B = self.col.flat(), other.col.flat()
n = self.n
C = [0] * n
for u in range(n):
for v in range(u, n):
c = A[u] * B[v]
if v > u:
c += A[v] * B[u]
if c != 0:
R = M[u][v]
for k in range(n):
C[k] += c * R[k]
d = self.denom * other.denom
return self.from_int_list(self.module, C, denom=d)
elif isinstance(other, ModuleElement):
try:
a, b = self.unify(other)
except UnificationFailed:
return NotImplemented
return a * b
elif is_rat(other):
a, b = get_num_denom(other)
if a == b == 1:
return self
else:
return make_mod_elt(self.module,
self.col * a, denom=self.denom * b).reduced()
return NotImplemented
__rmul__ = __mul__
def _zeroth_power(self):
return self.module.one()
def _first_power(self):
return self
def __floordiv__(self, a):
if is_rat(a):
a = QQ(a)
return self * (1/a)
elif isinstance(a, ModuleElement):
return self * (1//a)
return NotImplemented
def __rfloordiv__(self, a):
return a // self.over_power_basis()
def __mod__(self, m):
r"""
Reduce this :py:class:`~.ModuleElement` mod a :py:class:`~.Submodule`.
Parameters
==========
m : int, :ref:`ZZ`, :ref:`QQ`, :py:class:`~.Submodule`
If a :py:class:`~.Submodule`, reduce ``self`` relative to this.
If an integer or rational, reduce relative to the
:py:class:`~.Submodule` that is our own module times this constant.
See Also
========
.Submodule.reduce_element
"""
if is_rat(m):
m = m * self.module.whole_submodule()
if isinstance(m, Submodule) and m.parent == self.module:
return m.reduce_element(self)
return NotImplemented
| ModuleElement |
python | spack__spack | lib/spack/spack/builder.py | {
"start": 27350,
"end": 29287
} | class ____(BuilderWithDefaults):
"""The associated builder for the :class:`Package` base class. This class is typically only
used in ``package.py`` files when a package has multiple build systems. Packagers need to
implement the :meth:`install` phase to define how the package is installed.
This is the only builder that is defined in the Spack core, all other builders are defined
in the builtin package repository :mod:`spack_repo.builtin.build_systems`.
Example::
from spack.package import *
class MyPackage(Package):
\"\"\"A package that does not use a specific build system.\"\"\"
homepage = "https://example.com/mypackage"
url = "https://example.com/mypackage-1.0.tar.gz"
version("1.0", sha256="...")
class GenericBuilder(GenericBuilder):
def install(self, pkg: Package, spec: Spec, prefix: Prefix) -> None:
pass
"""
#: A generic package has only the ``install`` phase
phases = ("install",)
#: Names associated with package methods in the old build-system format
package_methods: Tuple[str, ...] = ()
#: Names associated with package attributes in the old build-system format
package_attributes: Tuple[str, ...] = ("archive_files", "install_time_test_callbacks")
#: Callback names for post-install phase tests
install_time_test_callbacks = []
# On macOS, force rpaths for shared library IDs and remove duplicate rpaths
spack.phase_callbacks.run_after("install", when="platform=darwin")(apply_macos_rpath_fixups)
# unconditionally perform any post-install phase tests
spack.phase_callbacks.run_after("install")(execute_install_time_tests)
def install(self, pkg: Package, spec: spack.spec.Spec, prefix: Prefix) -> None:
"""Install phase for the generic builder, to be implemented by packagers."""
raise NotImplementedError
| GenericBuilder |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 17143,
"end": 17765
} | class ____(_RerankerProvider):
reranker: Union[Rerankers, _EnumLikeStr] = Field(
default=Rerankers.NVIDIA, frozen=True, exclude=True
)
model: Optional[str] = Field(default=None)
baseURL: Optional[AnyHttpUrl]
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
return ret_dict
RerankerContextualAIModel = Literal[
"ctxl-rerank-v2-instruct-multilingual",
"ctxl-rerank-v2-instruct-multilingual-mini",
"ctxl-rerank-v1-instruct",
]
| _RerankerNvidiaConfig |
python | kamyu104__LeetCode-Solutions | Python/maximum-total-beauty-of-the-gardens.py | {
"start": 1197,
"end": 2333
} | class ____(object):
def maximumBeauty(self, flowers, newFlowers, target, full, partial):
"""
:type flowers: List[int]
:type newFlowers: int
:type target: int
:type full: int
:type partial: int
:rtype: int
"""
flowers.sort()
n = bisect.bisect_left(flowers, target)
prefix = [0]*(n+1)
for i in xrange(n):
prefix[i+1] = prefix[i]+flowers[i]
result = suffix = 0
left = n
for right in reversed(xrange(n+1)):
if right != n:
suffix += flowers[right]
total = newFlowers-((n-right)*target-suffix)
if total < 0:
continue
left = min(left, right)
while not (left == 0 or (prefix[left]-prefix[left-1])*left-prefix[left] <= total):
left -= 1
mn = min((total+prefix[left])//left if left else 0, target-1)
result = max(result, mn*partial+(len(flowers)-right)*full)
return result
# Time: O(nlogn)
# Space: O(n)
import bisect
# sort, prefix sum, greedy, binary search
| Solution2 |
python | lazyprogrammer__machine_learning_examples | hmm_class/hmmc_scaled_concat.py | {
"start": 753,
"end": 9035
} | class ____:
def __init__(self, M, K):
self.M = M # number of hidden states
self.K = K # number of Gaussians
def fit(self, X, max_iter=25, eps=1e-1):
# train the HMM model using the Baum-Welch algorithm
# a specific instance of the expectation-maximization algorithm
# concatenate sequences in X and determine start/end positions
sequenceLengths = []
for x in X:
sequenceLengths.append(len(x))
Xc = np.concatenate(X)
T = len(Xc)
startPositions = np.zeros(len(Xc), dtype=np.bool)
endPositions = np.zeros(len(Xc), dtype=np.bool)
startPositionValues = []
last = 0
for length in sequenceLengths:
startPositionValues.append(last)
startPositions[last] = 1
if last > 0:
endPositions[last - 1] = 1
last += length
D = X[0].shape[1] # assume each x is organized (T, D)
# randomly initialize all parameters
self.pi = np.ones(self.M) / self.M # initial state distribution
self.A = random_normalized(self.M, self.M) # state transition matrix
self.R = np.ones((self.M, self.K)) / self.K # mixture proportions
self.mu = np.zeros((self.M, self.K, D))
for i in range(self.M):
for k in range(self.K):
random_idx = np.random.choice(T)
self.mu[i,k] = Xc[random_idx]
self.sigma = np.zeros((self.M, self.K, D, D))
for j in range(self.M):
for k in range(self.K):
self.sigma[j,k] = np.eye(D)
# main EM loop
costs = []
for it in range(max_iter):
if it % 1 == 0:
print("it:", it)
scale = np.zeros(T)
# calculate B so we can lookup when updating alpha and beta
B = np.zeros((self.M, T))
component = np.zeros((self.M, self.K, T)) # we'll need these later
for j in range(self.M):
for k in range(self.K):
p = self.R[j,k] * mvn.pdf(Xc, self.mu[j,k], self.sigma[j,k])
component[j,k,:] = p
B[j,:] += p
alpha = np.zeros((T, self.M))
alpha[0] = self.pi*B[:,0]
scale[0] = alpha[0].sum()
alpha[0] /= scale[0]
for t in range(1, T):
if startPositions[t] == 0:
alpha_t_prime = alpha[t-1].dot(self.A) * B[:,t]
else:
alpha_t_prime = self.pi * B[:,t]
scale[t] = alpha_t_prime.sum()
alpha[t] = alpha_t_prime / scale[t]
logP = np.log(scale).sum()
beta = np.zeros((T, self.M))
beta[-1] = 1
for t in range(T - 2, -1, -1):
if startPositions[t + 1] == 1:
beta[t] = 1
else:
beta[t] = self.A.dot(B[:,t+1] * beta[t+1]) / scale[t+1]
# update for Gaussians
gamma = np.zeros((T, self.M, self.K))
for t in range(T):
alphabeta = alpha[t,:].dot(beta[t,:])
for j in range(self.M):
factor = alpha[t,j] * beta[t,j] / alphabeta
for k in range(self.K):
gamma[t,j,k] = factor * component[j,k,t] / B[j,t]
costs.append(logP)
# now re-estimate pi, A, R, mu, sigma
self.pi = np.sum((alpha[t] * beta[t]) for t in startPositionValues) / len(startPositionValues)
a_den = np.zeros((self.M, 1)) # prob don't need this
a_num = np.zeros((self.M, self.M))
r_num = np.zeros((self.M, self.K))
r_den = np.zeros(self.M)
mu_num = np.zeros((self.M, self.K, D))
sigma_num = np.zeros((self.M, self.K, D, D))
nonEndPositions = (1 - endPositions).astype(np.bool)
a_den += (alpha[nonEndPositions] * beta[nonEndPositions]).sum(axis=0, keepdims=True).T
# numerator for A
for i in range(self.M):
for j in range(self.M):
for t in range(T-1):
if endPositions[t] != 1:
a_num[i,j] += alpha[t,i] * beta[t+1,j] * self.A[i,j] * B[j,t+1] / scale[t+1]
self.A = a_num / a_den
# update mixture components
r_num_n = np.zeros((self.M, self.K))
r_den_n = np.zeros(self.M)
for j in range(self.M):
for k in range(self.K):
for t in range(T):
r_num_n[j,k] += gamma[t,j,k]
r_den_n[j] += gamma[t,j,k]
r_num = r_num_n
r_den = r_den_n
mu_num_n = np.zeros((self.M, self.K, D))
sigma_num_n = np.zeros((self.M, self.K, D, D))
for j in range(self.M):
for k in range(self.K):
for t in range(T):
# update means
mu_num_n[j,k] += gamma[t,j,k] * Xc[t]
# update covariances
sigma_num_n[j,k] += gamma[t,j,k] * np.outer(Xc[t] - self.mu[j,k], Xc[t] - self.mu[j,k])
mu_num = mu_num_n
sigma_num = sigma_num_n
# update R, mu, sigma
for j in range(self.M):
for k in range(self.K):
self.R[j,k] = r_num[j,k] / r_den[j]
self.mu[j,k] = mu_num[j,k] / r_num[j,k]
self.sigma[j,k] = sigma_num[j,k] / r_num[j,k] + np.eye(D)*eps
assert(np.all(self.R <= 1))
assert(np.all(self.A <= 1))
print("A:", self.A)
print("mu:", self.mu)
print("sigma:", self.sigma)
print("R:", self.R)
print("pi:", self.pi)
plt.plot(costs)
plt.show()
def log_likelihood(self, x):
# returns log P(x | model)
# using the forward part of the forward-backward algorithm
T = len(x)
scale = np.zeros(T)
B = np.zeros((self.M, T))
for j in range(self.M):
for k in range(self.K):
p = self.R[j,k] * mvn.pdf(x, self.mu[j,k], self.sigma[j,k])
B[j,:] += p
alpha = np.zeros((T, self.M))
alpha[0] = self.pi*B[:,0]
scale[0] = alpha[0].sum()
alpha[0] /= scale[0]
for t in range(1, T):
alpha_t_prime = alpha[t-1].dot(self.A) * B[:,t]
scale[t] = alpha_t_prime.sum()
alpha[t] = alpha_t_prime / scale[t]
return np.log(scale).sum()
def log_likelihood_multi(self, X):
return np.array([self.log_likelihood(x) for x in X])
def set(self, pi, A, R, mu, sigma):
self.pi = pi
self.A = A
self.R = R
self.mu = mu
self.sigma = sigma
M, K = R.shape
self.M = M
self.K = K
def real_signal():
spf = wave.open('helloworld.wav', 'r')
#Extract Raw Audio from Wav File
# If you right-click on the file and go to "Get Info", you can see:
# sampling rate = 16000 Hz
# bits per sample = 16
# The first is quantization in time
# The second is quantization in amplitude
# We also do this for images!
# 2^16 = 65536 is how many different sound levels we have
signal = spf.readframes(-1)
signal = np.fromstring(signal, 'Int16')
T = len(signal)
signal = (signal - signal.mean()) / signal.std()
hmm = HMM(5, 3)
hmm.fit(signal.reshape(1, T, 1))
print("LL for fitted params:", hmm.log_likelihood(signal.reshape(T, 1)))
def fake_signal(init=big_init):
signals = get_signals(init=init)
# for signal in signals:
# for d in xrange(signal.shape[1]):
# plt.plot(signal[:,d])
# plt.show()
hmm = HMM(5, 3)
hmm.fit(signals)
L = hmm.log_likelihood_multi(signals).sum()
print("LL for fitted params:", L)
# test in actual params
_, _, _, pi, A, R, mu, sigma = init()
hmm.set(pi, A, R, mu, sigma)
L = hmm.log_likelihood_multi(signals).sum()
print("LL for actual params:", L)
if __name__ == '__main__':
# real_signal()
fake_signal()
| HMM |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_composer.py | {
"start": 18252,
"end": 23678
} | class ____(GoogleCloudBaseOperator):
r"""
Update an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param environment: A patch environment. Fields specified by the ``updateMask`` will be copied from the
patch environment into the environment under update.
:param update_mask: Required. A comma-separated list of paths, relative to ``Environment``, of fields to
update. If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id:
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: Run operator in the deferrable mode
:param pooling_period_seconds: Optional: Control the rate of the poll for the result of deferrable run.
By default, the trigger will poll every 30 seconds.
"""
template_fields = (
"project_id",
"region",
"environment_id",
"impersonation_chain",
)
operator_extra_links = (CloudComposerEnvironmentLink(),)
def __init__(
self,
*,
project_id: str,
region: str,
environment_id: str,
environment: dict | Environment,
update_mask: dict | FieldMask,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
pooling_period_seconds: int = 30,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.environment_id = environment_id
self.environment = environment
self.update_mask = update_mask
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.pooling_period_seconds = pooling_period_seconds
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"project_id": self.project_id,
"region": self.region,
"environment_id": self.environment_id,
}
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.update_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
environment=self.environment,
update_mask=self.update_mask,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudComposerEnvironmentLink.persist(context=context)
if not self.deferrable:
environment = hook.wait_for_operation(timeout=self.timeout, operation=result)
return Environment.to_dict(environment)
self.defer(
trigger=CloudComposerExecutionTrigger(
project_id=self.project_id,
region=self.region,
operation_name=result.operation.name,
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
pooling_period_seconds=self.pooling_period_seconds,
),
method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME,
)
def execute_complete(self, context: Context, event: dict):
if event["operation_done"]:
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
env = hook.get_environment(
project_id=self.project_id,
region=self.region,
environment_id=self.environment_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return Environment.to_dict(env)
raise AirflowException(f"Unexpected error in the operation: {event['operation_name']}")
| CloudComposerUpdateEnvironmentOperator |
python | pytorch__pytorch | torch/ao/quantization/fx/custom_config.py | {
"start": 19979,
"end": 21815
} | class ____:
"""
Custom configuration for :func:`~torch.ao.quantization.quantize_fx.fuse_fx`.
Example usage::
fuse_custom_config = FuseCustomConfig().set_preserved_attributes(
["attr1", "attr2"]
)
"""
def __init__(self) -> None:
self.preserved_attributes: list[str] = []
def __repr__(self):
dict_nonempty = {k: v for k, v in self.__dict__.items() if len(v) > 0}
return f"FuseCustomConfig({dict_nonempty})"
def set_preserved_attributes(self, attributes: list[str]) -> FuseCustomConfig:
"""
Set the names of the attributes that will persist in the graph module even if they are not used in
the model's ``forward`` method.
"""
self.preserved_attributes = attributes
return self
# TODO: remove this
@classmethod
def from_dict(cls, fuse_custom_config_dict: dict[str, Any]) -> FuseCustomConfig:
"""
Create a ``ConvertCustomConfig`` from a dictionary with the following items:
"preserved_attributes": a list of attributes that persist even if they are not used in ``forward``
This function is primarily for backward compatibility and may be removed in the future.
"""
conf = cls()
conf.set_preserved_attributes(
fuse_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, [])
)
return conf
def to_dict(self) -> dict[str, Any]:
"""
Convert this ``FuseCustomConfig`` to a dictionary with the items described in
:func:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig.from_dict`.
"""
d: dict[str, Any] = {}
if len(self.preserved_attributes) > 0:
d[PRESERVED_ATTRIBUTES_DICT_KEY] = self.preserved_attributes
return d
| FuseCustomConfig |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_imsi.py | {
"start": 1842,
"end": 4508
} | class ____(ColumnMapExpectation):
"""Expect column values to be valid IMSI (International Mobile Subscriber Identity)."""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"all_valid": [
"429011234567890",
"310150123456789",
"460001234567890",
"313460000000001",
"310410123456789",
],
"some_other": [
"429011234567890",
"310150123456789",
"460001234567890",
"313460000000001",
"439011234567890",
],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "all_valid"},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "some_other", "mostly": 1},
"out": {
"success": False,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.to_be_valid_imsi"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental",
"tags": [
"hackathon-22",
"experimental",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@szecsip", # Don't forget to add your github handle here!
],
"requirements": ["python-stdnum"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidImsi().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidImsi |
python | huggingface__transformers | src/transformers/models/depth_pro/image_processing_depth_pro_fast.py | {
"start": 1434,
"end": 6697
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 1536, "width": 1536}
do_resize = True
do_rescale = True
do_normalize = True
# DepthPro resizes image after rescaling and normalizing,
# which makes it different from BaseImageProcessorFast._preprocess
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
# Group images by size for batched scaling
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
if do_resize:
stacked_images = self.resize(
image=stacked_images,
size=size,
interpolation=interpolation,
antialias=False,
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
# Copied from transformers.models.depth_pro.image_processing_depth_pro.DepthProImageProcessor.post_process_depth_estimation
def post_process_depth_estimation(
self,
outputs: "DepthProDepthEstimatorOutput",
target_sizes: Optional[Union[TensorType, list[tuple[int, int]], None]] = None,
) -> list[dict[str, TensorType]]:
"""
Post-processes the raw depth predictions from the model to generate
final depth predictions which is caliberated using the field of view if provided
and resized to specified target sizes if provided.
Args:
outputs ([`DepthProDepthEstimatorOutput`]):
Raw outputs of the model.
target_sizes (`Optional[Union[TensorType, list[tuple[int, int]], None]]`, *optional*, defaults to `None`):
Target sizes to resize the depth predictions. Can be a tensor of shape `(batch_size, 2)`
or a list of tuples `(height, width)` for each image in the batch. If `None`, no resizing
is performed.
Returns:
`list[dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
predictions, and field of view (degrees) and focal length (pixels) if `field_of_view` is given in `outputs`.
Raises:
`ValueError`:
If the lengths of `predicted_depths`, `fovs`, or `target_sizes` are mismatched.
"""
requires_backends(self, "torch")
predicted_depth = outputs.predicted_depth
fov = outputs.field_of_view
batch_size = len(predicted_depth)
if target_sizes is not None and batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many fov values as the batch dimension of the predicted depth"
)
results = []
fov = [None] * batch_size if fov is None else fov
target_sizes = [None] * batch_size if target_sizes is None else target_sizes
for depth, fov_value, target_size in zip(predicted_depth, fov, target_sizes):
focal_length = None
if target_size is not None:
# scale image w.r.t fov
if fov_value is not None:
width = target_size[1]
focal_length = 0.5 * width / torch.tan(0.5 * torch.deg2rad(fov_value))
depth = depth * width / focal_length
# interpolate
depth = torch.nn.functional.interpolate(
# input should be (B, C, H, W)
input=depth.unsqueeze(0).unsqueeze(1),
size=target_size,
mode=pil_torch_interpolation_mapping[self.resample].value,
).squeeze()
# inverse the depth
depth = 1.0 / torch.clamp(depth, min=1e-4, max=1e4)
results.append(
{
"predicted_depth": depth,
"field_of_view": fov_value,
"focal_length": focal_length,
}
)
return results
__all__ = ["DepthProImageProcessorFast"]
| DepthProImageProcessorFast |
python | pandas-dev__pandas | pandas/_testing/__init__.py | {
"start": 9534,
"end": 16809
} | class ____(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs)
# error: Cannot override writeable attribute with read-only property
@property
def _constructor_sliced(self): # type: ignore[override]
return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs)
def convert_rows_list_to_csv_str(rows_list: list[str]) -> str:
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
def external_error_raised(expected_exception: type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from com._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of an NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : str
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
def at(x):
return x.at
def iat(x):
return x.iat
# -----------------------------------------------------------------------------
_UNITS = ["s", "ms", "us", "ns"]
def get_finest_unit(left: str, right: str) -> str:
"""
Find the higher of two datetime64 units.
"""
if _UNITS.index(left) >= _UNITS.index(right):
return left
return right
def shares_memory(left, right) -> bool:
"""
Pandas-compat for np.shares_memory.
"""
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
return np.shares_memory(left, right)
elif isinstance(left, np.ndarray):
# Call with reversed args to get to unpacking logic below.
return shares_memory(right, left)
if isinstance(left, RangeIndex):
return False
if isinstance(left, MultiIndex):
return shares_memory(left._codes, right)
if isinstance(left, (Index, Series)):
if isinstance(right, (Index, Series)):
return shares_memory(left._values, right._values)
return shares_memory(left._values, right)
if isinstance(left, NDArrayBackedExtensionArray):
return shares_memory(left._ndarray, right)
if isinstance(left, pd.core.arrays.SparseArray):
return shares_memory(left.sp_values, right)
if isinstance(left, pd.core.arrays.IntervalArray):
return shares_memory(left._left, right) or shares_memory(left._right, right)
if isinstance(left, ArrowExtensionArray):
if isinstance(right, ArrowExtensionArray):
# https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669
left_pa_data = left._pa_array
right_pa_data = right._pa_array
left_buf1 = left_pa_data.chunk(0).buffers()[1]
right_buf1 = right_pa_data.chunk(0).buffers()[1]
return left_buf1.address == right_buf1.address
else:
# if we have one one ArrowExtensionArray and one other array, assume
# they can only share memory if they share the same numpy buffer
return np.shares_memory(left, right)
if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray):
# By convention, we'll say these share memory if they share *either*
# the _data or the _mask
return np.shares_memory(left._data, right._data) or np.shares_memory(
left._mask, right._mask
)
if isinstance(left, DataFrame) and len(left._mgr.blocks) == 1:
arr = left._mgr.blocks[0].values
return shares_memory(arr, right)
raise NotImplementedError(type(left), type(right))
__all__ = [
"ALL_INT_EA_DTYPES",
"ALL_INT_NUMPY_DTYPES",
"ALL_NUMPY_DTYPES",
"ALL_REAL_NUMPY_DTYPES",
"BOOL_DTYPES",
"BYTES_DTYPES",
"COMPLEX_DTYPES",
"DATETIME64_DTYPES",
"ENDIAN",
"FLOAT_EA_DTYPES",
"FLOAT_NUMPY_DTYPES",
"NARROW_NP_DTYPES",
"NP_NAT_OBJECTS",
"NULL_OBJECTS",
"OBJECT_DTYPES",
"SIGNED_INT_EA_DTYPES",
"SIGNED_INT_NUMPY_DTYPES",
"STRING_DTYPES",
"TIMEDELTA64_DTYPES",
"UNSIGNED_INT_EA_DTYPES",
"UNSIGNED_INT_NUMPY_DTYPES",
"SubclassedDataFrame",
"SubclassedSeries",
"assert_almost_equal",
"assert_attr_equal",
"assert_categorical_equal",
"assert_class_equal",
"assert_contains_all",
"assert_copy",
"assert_datetime_array_equal",
"assert_dict_equal",
"assert_equal",
"assert_extension_array_equal",
"assert_frame_equal",
"assert_index_equal",
"assert_indexing_slices_equivalent",
"assert_interval_array_equal",
"assert_is_sorted",
"assert_metadata_equivalent",
"assert_numpy_array_equal",
"assert_period_array_equal",
"assert_produces_warning",
"assert_series_equal",
"assert_sp_array_equal",
"assert_timedelta_array_equal",
"at",
"box_expected",
"can_set_locale",
"convert_rows_list_to_csv_str",
"decompress_file",
"ensure_clean",
"external_error_raised",
"get_cython_table_params",
"get_dtype",
"get_finest_unit",
"get_locales",
"get_obj",
"get_op_from_name",
"getitem",
"iat",
"iloc",
"loc",
"maybe_produces_warning",
"raise_assert_detail",
"raises_chained_assignment_error",
"round_trip_pathlib",
"round_trip_pickle",
"set_locale",
"set_timezone",
"setitem",
"shares_memory",
"to_array",
"with_csv_dialect",
"write_to_compressed",
]
| SubclassedDataFrame |
python | readthedocs__readthedocs.org | readthedocs/integrations/migrations/0011_add_created_and_updated_fields.py | {
"start": 183,
"end": 1223
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("integrations", "0010_remove_old_jsonfields"),
]
operations = [
migrations.AlterModelOptions(
name="integration",
options={"get_latest_by": "modified"},
),
migrations.AddField(
model_name="integration",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True,
default=django.utils.timezone.now,
verbose_name="created",
null=True,
blank=True,
),
preserve_default=False,
),
migrations.AddField(
model_name="integration",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(
auto_now=True,
verbose_name="modified",
null=True,
blank=True,
),
),
]
| Migration |
python | getsentry__sentry | src/sentry/flags/models.py | {
"start": 339,
"end": 793
} | class ____(Enum):
CREATED = 0
DELETED = 1
UPDATED = 2
@classmethod
def to_string(cls, integer):
if integer == 0:
return "created"
if integer == 1:
return "deleted"
if integer == 2:
return "updated"
raise ValueError
ACTION_MAP = {
"created": ActionEnum.CREATED.value,
"deleted": ActionEnum.DELETED.value,
"updated": ActionEnum.UPDATED.value,
}
| ActionEnum |
python | doocs__leetcode | solution/2300-2399/2385.Amount of Time for Binary Tree to Be Infected/Solution.py | {
"start": 192,
"end": 865
} | class ____:
def amountOfTime(self, root: Optional[TreeNode], start: int) -> int:
def dfs(node: Optional[TreeNode], fa: Optional[TreeNode]):
if node is None:
return
if fa:
g[node.val].append(fa.val)
g[fa.val].append(node.val)
dfs(node.left, node)
dfs(node.right, node)
def dfs2(node: int, fa: int) -> int:
ans = 0
for nxt in g[node]:
if nxt != fa:
ans = max(ans, 1 + dfs2(nxt, node))
return ans
g = defaultdict(list)
dfs(root, None)
return dfs2(start, -1)
| Solution |
python | google__flatbuffers | tests/MyGame/Example/Vec3.py | {
"start": 176,
"end": 1793
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 32
# Vec3
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Vec3
def X(self): return self._tab.Get(flatbuffers.number_types.Float32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# Vec3
def Y(self): return self._tab.Get(flatbuffers.number_types.Float32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
# Vec3
def Z(self): return self._tab.Get(flatbuffers.number_types.Float32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
# Vec3
def Test1(self): return self._tab.Get(flatbuffers.number_types.Float64Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16))
# Vec3
def Test2(self): return self._tab.Get(flatbuffers.number_types.Uint8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(24))
# Vec3
def Test3(self, obj):
obj.Init(self._tab.Bytes, self._tab.Pos + 26)
return obj
def CreateVec3(builder, x, y, z, test1, test2, test3_a, test3_b):
builder.Prep(8, 32)
builder.Pad(2)
builder.Prep(2, 4)
builder.Pad(1)
builder.PrependInt8(test3_b)
builder.PrependInt16(test3_a)
builder.Pad(1)
builder.PrependUint8(test2)
builder.PrependFloat64(test1)
builder.Pad(4)
builder.PrependFloat32(z)
builder.PrependFloat32(y)
builder.PrependFloat32(x)
return builder.Offset()
import MyGame.Example.Test
try:
from typing import Optional
except:
pass
| Vec3 |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/base.py | {
"start": 7984,
"end": 29197
} | class ____(BaseReader):
"""
ServiceNow Knowledge Base reader using PySNC with username/password or password grant flow.
This reader requires custom parsers for processing different file types. At minimum,
an HTML parser must be provided for processing article bodies. Additional parsers
can be provided for other file types as needed.
The reader uses LlamaIndex's standard instrumentation event system to provide detailed
tracking of the loading process. Events are fired at various stages during knowledge base
article retrieval and attachment processing, allowing for monitoring and debugging.
Required file types:
- FileType.HTML: For HTML content (required for article body processing)
Recommended file types to provide parsers for:
- FileType.PDF: For PDF documents
- FileType.DOCUMENT: For Word documents (.docx)
- FileType.TEXT: For plain text files
- FileType.SPREADSHEET: For Excel files (.xlsx)
- FileType.PRESENTATION: For PowerPoint files (.pptx)
Args:
instance: ServiceNow instance name (without .service-now.com)
custom_parsers: Dictionary mapping FileType enum values to BaseReader instances.
This is REQUIRED and must include at least FileType.HTML.
Each parser must implement the load_data method.
username: ServiceNow username for authentication (required)
password: ServiceNow password for authentication (required)
client_id: OAuth client ID for ServiceNow (optional, but if provided, client_secret is required)
client_secret: OAuth client secret for ServiceNow (optional, but if provided, client_id is required)
process_attachment_callback: Optional callback to filter attachments (content_type: str, size_bytes: int, file_name: str) -> tuple[bool, str]
process_document_callback: Optional callback to filter documents (kb_number: str) -> bool
custom_folder: Folder for temporary files during parsing
fail_on_error: Whether to fail on parsing errors or continue
kb_table: ServiceNow table name for knowledge base articles
logger: Optional logger instance
Authentication:
- Basic auth: Provide username and password only
- OAuth flow: Provide username, password, client_id, and client_secret
Events:
The reader fires various events during processing using LlamaIndex's standard
instrumentation system. Available events include page fetch events, attachment
processing events, and error events. Use get_dispatcher() to subscribe to events.
Raises:
ValueError: If required parameters are missing or invalid, or if HTML parser is not provided
TypeError: If custom_parsers types are incorrect
"""
def __init__(
self,
instance: str,
custom_parsers: Dict[FileType, BaseReader],
username: Optional[str] = None,
password: Optional[str] = None,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
process_attachment_callback: Optional[
Callable[[str, int], tuple[bool, str]]
] = None,
process_document_callback: Optional[Callable[[str], bool]] = None,
custom_folder: Optional[str] = None,
fail_on_error: bool = True,
kb_table: str = "kb_knowledge",
logger=None,
):
# Validate required parameters
if not instance:
raise ValueError("instance parameter is required")
if custom_parsers is None:
raise ValueError("custom_parsers parameter is required and cannot be None")
if not custom_parsers:
raise ValueError("custom_parsers parameter is required and cannot be empty")
if not isinstance(custom_parsers, dict):
raise TypeError("custom_parsers must be a dictionary")
# Validate custom_parsers dictionary - ensure it has at least one parser
if len(custom_parsers) == 0:
raise ValueError("custom_parsers must contain at least one parser")
# Validate each custom parser
for file_type, parser in custom_parsers.items():
if not isinstance(file_type, FileType):
raise TypeError(
f"custom_parsers keys must be FileType enum values, got {type(file_type)}"
)
if not isinstance(parser, BaseReader):
raise TypeError(
f"custom_parsers values must be BaseReader instances, got {type(parser)} for {file_type}"
)
# Validate that parser has required load_data method
if not hasattr(parser, "load_data") or not callable(parser.load_data):
raise TypeError(
f"custom_parsers[{file_type}] must have a callable 'load_data' method"
)
# Validate authentication parameters
# Username and password are always required
if not username:
raise ValueError("username parameter is required")
if not password:
raise ValueError("password parameter is required")
# If client_id is provided, client_secret must also be provided (for OAuth flow)
if client_id is not None and client_secret is None:
raise ValueError("client_secret is required when client_id is provided")
if client_secret is not None and client_id is None:
raise ValueError("client_id is required when client_secret is provided")
self.instance = instance
self.username = username
self.password = password
self.client_id = client_id
self.client_secret = client_secret
self.custom_parsers = custom_parsers
self.custom_folder = custom_folder or os.path.join(
os.getcwd(), "custom_parsers"
)
# Validate recommended parsers and warn if missing
self.logger = logger or internal_logger
CustomParserManager.validate_recommended_parsers(custom_parsers, self.logger)
# Ensure custom_folder exists and is writable
try:
os.makedirs(self.custom_folder, exist_ok=True)
# Test write permissions
test_file = os.path.join(self.custom_folder, ".test_write")
with open(test_file, "w") as f:
f.write("test")
os.remove(test_file)
except (OSError, PermissionError) as e:
raise ValueError(
f"Custom folder '{self.custom_folder}' is not accessible or writable: {e}"
)
self.process_attachment_callback = process_attachment_callback
self.process_document_callback = process_document_callback
self.fail_on_error = fail_on_error
self.kb_table = kb_table
self.pysnc_client = None
self.initialize_client()
self.custom_parser_manager = CustomParserManager(
custom_parsers=custom_parsers,
custom_folder=self.custom_folder,
logger=self.logger,
)
def _format_attachment_header(self, attachment: dict) -> str:
"""Formats the attachment title as a markdown header."""
return f"# {attachment['file_name']}\n"
def initialize_client(self):
"""Initialize a new ServiceNowClient instance with fresh credentials."""
try:
self.logger.info("Initializing ServiceNow client")
instance = self.instance
user = self.username
password = self.password
# Use OAuth flow if client_id and client_secret are provided, otherwise use basic auth
if self.client_id and self.client_secret:
client_id = self.client_id
client_secret = self.client_secret
self.pysnc_client = ServiceNowClient(
instance,
ServiceNowPasswordGrantFlow(
user, password, client_id, client_secret
),
)
else:
# Basic authentication with username and password
self.pysnc_client = ServiceNowClient(instance, (user, password))
except Exception as e:
self.logger.error(f"Error initializing ServiceNow client: {e}")
raise ValueError(f"Error initializing ServiceNow client: {e}")
def load_data(
self,
article_sys_id: Optional[str] = None,
numbers: Optional[List[str]] = None,
status="Published",
) -> List[Document]:
"""
Load a KB article by sys_id or number using PySNC. Returns a list with one Document.
"""
gr = self.pysnc_client.GlideRecord(self.kb_table)
if article_sys_id:
gr.add_query("sys_id", article_sys_id)
elif numbers:
gr.add_query("number", "IN", ",".join(numbers))
else:
raise ValueError("Must provide article_sys_id or number")
# Handle latest field: include records where latest is true OR latest field is not present/empty
latest_condition = gr.add_query("latest", "true")
latest_condition.add_or_condition("latest", "ISEMPTY")
gr.add_query(
"workflow_state", status or DEFAULT_WORKFLOW_STATE
) # Include only published articles
gr.query()
if not gr.has_next():
self.logger.error(
f"No KB article found for sys_id {article_sys_id} or numbers {numbers}"
)
raise ValueError(
f"No KB article found for sys_id {article_sys_id} or numbers {numbers}"
)
docs = []
total_pages = gr.get_row_count()
self.logger.info(
f"Found {total_pages} KB articles matching criteria: sys_id={article_sys_id}, numbers={numbers}, status={status}"
)
dispatcher.event(SNOWKBTotalPagesEvent(total_pages=total_pages))
while gr.next():
try:
kb_number = gr.number.get_value()
dispatcher.event(SNOWKBPageFetchStartEvent(page_id=kb_number))
# Check if document should be processed using callback
if self.process_document_callback:
should_process = self.process_document_callback(kb_number)
if not should_process:
self.logger.info(
f"Skipping document {kb_number} based on process_document_callback"
)
continue
# Process article text and attachments
txt_lm = (
gr.article_body
if hasattr(gr, "article_body") and gr.article_body
else gr.text.get_value()
)
attachments = self.handle_attachments(
gr.sys_id.get_value(), kb_number=gr.number.get_value()
)
try:
article_markdown = (
self.custom_parser_manager.process_text_with_custom_parser(
FileType.HTML, txt_lm, "html"
)
)
except ValueError as e:
self.logger.error(
f"Error processing article HTML with custom parser: {e}"
)
if self.fail_on_error:
raise
article_markdown = txt_lm # Fallback to original text
complete_text = (
article_markdown
+ "\n\n"
+ "\n".join(
self._format_attachment_header(attach) + attach["markdown_text"]
for attach in attachments
if "markdown_text" in attach
)
)
display_number = (
gr.get_display_value("display_number")
if hasattr(gr, "display_number")
else None
)
sys_updated_on = (
gr.get_value("sys_updated_on")
if hasattr(gr, "sys_updated_on")
else None
)
kb_number = gr.get_value("number") if hasattr(gr, "number") else None
kb_status = (
gr.workflow_state.get_display_value()
if hasattr(gr, "workflow_state")
else "Unknown"
)
doc = Document(
text=complete_text,
extra_info={
"title": gr.short_description.get_display_value()
if hasattr(gr, "short_description")
else "No Title",
"page_id": kb_number,
"status": kb_status,
"version": display_number,
"sys_updated_on": sys_updated_on,
"kb_number": kb_number,
},
)
metadata = {
"version": display_number,
"sys_updated_on": sys_updated_on,
"kb_number": kb_number,
}
dispatcher.event(
SNOWKBPageFetchCompletedEvent(
page_id=kb_number,
document=doc,
metadata=metadata,
)
)
docs.append(doc)
except Exception as e:
self.logger.error(
f"Error processing KB article {gr.number.get_value()}: {e}"
)
dispatcher.event(
SNOWKBPageFailedEvent(
page_id=gr.number.get_value(),
error=str(e),
)
)
if self.fail_on_error:
raise
return docs
def _get_attachment_data(self, gr_attach: GlideRecord, page_id: str) -> dict:
"""Helper method to get attachment data for events."""
return {
"page_id": page_id,
"attachment_id": f"{gr_attach.get_value('sys_id')}",
"attachment_name": f"{gr_attach.get_value('file_name')}",
"attachment_type": f"{gr_attach.get_value('content_type')}",
"attachment_size": int(f"{gr_attach.get_value('size_bytes')}"),
"attachment_link": f"https://{self.instance}.service-now.com/sys_attachment.do?sys_id={gr_attach.get_value('sys_id')}",
}
def handle_attachment(self, gr_attach: GlideRecord, kb_number: str) -> dict:
"""
Process a single attachment GlideRecord and return its info dict.
"""
if not hasattr(gr_attach, "file_name") or not hasattr(
gr_attach, "content_type"
):
self.logger.error(
"Invalid GlideRecord for attachment, missing required fields."
)
return {}
attachment_id = f"{gr_attach.get_value('sys_id')}"
size_bytes = int(f"{gr_attach.get_value('size_bytes')}")
file_name = f"{gr_attach.get_value('file_name')}"
content_type = f"{gr_attach.get_value('content_type')}"
self.logger.info(f"Processing attachment {file_name}")
attachment_data = self._get_attachment_data(gr_attach, kb_number)
dispatcher.event(SNOWKBAttachmentProcessingStartEvent(**attachment_data))
if self.process_attachment_callback:
can_process, message = self.process_attachment_callback(
content_type, size_bytes, file_name
)
if not can_process:
attachment_data = self._get_attachment_data(gr_attach, kb_number)
dispatcher.event(
SNOWKBAttachmentSkippedEvent(**attachment_data, reason=message)
)
self.logger.info(f"Skipping attachment {file_name}: {message}")
return {}
try:
res: requests.Response = self._download_attachment_content(gr_attach.sys_id)
if not res or not getattr(res, "ok", False):
self.logger.error(
f"Failed to download attachment content for {file_name}"
)
return {}
else:
file_content = res.content
file_type = self.get_File_type(file_name)
# Check if parser is available for this file type
if file_type not in self.custom_parsers:
self.logger.warning(
f"No custom parser available for file type {file_type.value} (file: {file_name}). Skipping attachment."
)
attachment_data = self._get_attachment_data(gr_attach, kb_number)
dispatcher.event(
SNOWKBAttachmentSkippedEvent(
**attachment_data, reason=f"No parser for {file_type.value}"
)
)
return {} # Skip this attachment if no parser available
try:
markdown_text = self.custom_parser_manager.process_with_custom_parser(
file_type, file_content, file_name.split(".")[-1]
)
except ValueError as e:
self.logger.error(
f"Error processing attachment {file_name} with custom parser: {e}"
)
attachment_data = self._get_attachment_data(gr_attach, kb_number)
dispatcher.event(
SNOWKBAttachmentFailedEvent(**attachment_data, error=str(e))
)
if self.fail_on_error:
raise
return {} # Skip this attachment if custom parser fails
self.logger.debug(markdown_text)
attachment_data = self._get_attachment_data(gr_attach, kb_number)
dispatcher.event(SNOWKBAttachmentProcessedEvent(**attachment_data))
return {
"file_name": file_name,
"content_type": content_type,
"size_bytes": size_bytes,
"markdown_text": markdown_text,
"sys_id": gr_attach.sys_id,
}
except Exception as e:
self.logger.error(f"Error processing attachment {file_name}: {e}")
attachment_data = self._get_attachment_data(gr_attach, kb_number)
dispatcher.event(
SNOWKBAttachmentFailedEvent(**attachment_data, error=str(e))
)
return {}
def handle_attachments(self, sys_id: str, kb_number: str) -> list:
"""
Download all attachments for a given KB article sys_id. Returns a list of attachment info dicts.
"""
attachments = []
try:
gr_attach = self.pysnc_client.GlideRecord("sys_attachment")
gr_attach.add_query("table_sys_id", sys_id)
gr_attach.add_query("table_name", self.kb_table)
gr_attach.query()
while gr_attach.next():
attachment_info = self.handle_attachment(gr_attach, kb_number)
if "markdown_text" in attachment_info:
attachments.append(attachment_info)
except Exception as e:
self.logger.error(f"Error downloading attachments: {e}")
return attachments
def get_File_type(self, file_name: str) -> FileType:
"""
Determine the file type based on the file name extension.
"""
ext = os.path.splitext(file_name)[1].lower()
if ext in [".jpg", ".jpeg", ".png", ".gif"]:
return FileType.IMAGE
elif ext in [".pdf"]:
return FileType.PDF
elif ext in [".txt"]:
return FileType.TEXT
elif ext in [".csv"]:
return FileType.CSV
elif ext in [".html"]:
return FileType.HTML
elif ext in [".docx"]:
return FileType.DOCUMENT
elif ext in [".xlsx"]:
return FileType.SPREADSHEET
elif ext in [".pptx"]:
return FileType.PRESENTATION
elif ext in [".md"]:
return FileType.MARKDOWN
else:
return FileType.UNKNOWN
def _download_attachment_content(self, sys_id: str) -> Optional[bytes]:
"""
Download attachment content using PySNC's attachment.get_file method.
"""
try:
if hasattr(self.pysnc_client, "attachment_api") and hasattr(
self.pysnc_client.attachment_api, "get_file"
):
return self.pysnc_client.attachment_api.get_file(sys_id)
else:
self.logger.error(
"self.pysnc_client.attachment_api.get_file is not available. Please check your PySNC version."
)
return None
except Exception as e:
self.logger.error(f"Attachment download failed for {sys_id}: {e}")
return None
| SnowKBReader |
python | google__python-fire | fire/trace_test.py | {
"start": 4067,
"end": 5112
} | class ____(testutils.BaseTestCase):
def testFireTraceElementHasError(self):
el = trace.FireTraceElement()
self.assertFalse(el.HasError())
el = trace.FireTraceElement(error=ValueError('example error'))
self.assertTrue(el.HasError())
def testFireTraceElementAsStringNoMetadata(self):
el = trace.FireTraceElement(
component='Example',
action='Fake action',
)
self.assertEqual(str(el), 'Fake action')
def testFireTraceElementAsStringWithTarget(self):
el = trace.FireTraceElement(
component='Example',
action='Created toy',
target='Beaker',
)
self.assertEqual(str(el), 'Created toy "Beaker"')
def testFireTraceElementAsStringWithTargetAndLineNo(self):
el = trace.FireTraceElement(
component='Example',
action='Created toy',
target='Beaker',
filename='beaker.py',
lineno=10,
)
self.assertEqual(str(el), 'Created toy "Beaker" (beaker.py:10)')
if __name__ == '__main__':
testutils.main()
| FireTraceElementTest |
python | optuna__optuna | optuna/storages/_rdb/models.py | {
"start": 10926,
"end": 11996
} | class ____(BaseModel):
__tablename__ = "trial_system_attributes"
__table_args__: Any = (UniqueConstraint("trial_id", "key"),)
trial_system_attribute_id = _Column(Integer, primary_key=True)
trial_id = _Column(Integer, ForeignKey("trials.trial_id"))
key = _Column(String(MAX_INDEXED_STRING_LENGTH))
value_json = _Column(Text())
trial = orm.relationship(
TrialModel, backref=orm.backref("system_attributes", cascade="all, delete-orphan")
)
@classmethod
def find_by_trial_and_key(
cls, trial: TrialModel, key: str, session: orm.Session
) -> "TrialSystemAttributeModel" | None:
attribute = (
session.query(cls)
.filter(cls.trial_id == trial.trial_id)
.filter(cls.key == key)
.one_or_none()
)
return attribute
@classmethod
def where_trial_id(
cls, trial_id: int, session: orm.Session
) -> list["TrialSystemAttributeModel"]:
return session.query(cls).filter(cls.trial_id == trial_id).all()
| TrialSystemAttributeModel |
python | pytorch__pytorch | test/distributed/tensor/test_redistribute.py | {
"start": 31524,
"end": 44495
} | class ____(DTensorTestBase):
@property
def world_size(self) -> int:
return 8
def _extract_redistribute_trace_from_debug_mode(self, s: str) -> str:
import re
match = re.search(r"trace:\s*(.*)\)", s)
if match:
trace_str = match.group(1)
return trace_str
else:
return ""
@with_comms
def test_ordered_redistribute(self):
"""Test ordered redistribution with various sharding syntaxes"""
torch.manual_seed(21)
mesh = init_device_mesh(self.device_type, (2, 2, 2))
input_data = torch.randn((8, 8, 8), device=self.device_type)
sharding_src_dst_pairs_with_expected_trace = [
(
(
[Shard(0), Shard(0), Shard(0)],
(ShardOrderEntry(tensor_dim=0, mesh_dims=(0, 1, 2)),),
),
(
[Replicate(), Shard(0), Shard(0)],
(ShardOrderEntry(tensor_dim=0, mesh_dims=(1, 2)),),
),
),
(
(
[Shard(0), Shard(0), Shard(0)],
(ShardOrderEntry(tensor_dim=0, mesh_dims=(1, 0, 2)),),
),
(
[Replicate(), Shard(0), Shard(0)],
(ShardOrderEntry(tensor_dim=0, mesh_dims=(1, 2)),),
),
),
(
(
[Shard(0), Shard(0), Shard(0)],
(ShardOrderEntry(tensor_dim=0, mesh_dims=(1, 0, 2)),),
),
(
[Shard(0), Shard(0), Replicate()],
(ShardOrderEntry(tensor_dim=0, mesh_dims=(0, 1)),),
),
),
# If we use the graph search solution, the redistribution path will
# be S(0)[0, 1] -> S(0)[0]S(1)[1] -> S(1)[1] -> S(0)[2]S(1)[1],
# which takes only 1 comm count. However, this placement follows the
# default device order and the greedy solution will be triggered,
# which results in path: S(0)[0, 1] -> S(0)[0]S(1)[1] -> S(1)[1] ->
# S(0)[2]S(1)[1] with 2 comm count
(
(
[Shard(0), Shard(0), Replicate()],
(ShardOrderEntry(tensor_dim=0, mesh_dims=(0, 1)),),
),
(
[Replicate(), Shard(1), Shard(0)],
(
ShardOrderEntry(tensor_dim=0, mesh_dims=(2,)),
ShardOrderEntry(tensor_dim=1, mesh_dims=(1,)),
),
),
),
]
for idx, ((src_placement, src_order), (dst_placement, dst_order)) in enumerate(
sharding_src_dst_pairs_with_expected_trace
):
sharded_dt = _distribute_tensor(
input_data.clone(), mesh, src_placement, shard_order=src_order
)
with DebugMode(record_torchfunction=False) as debug_mode:
sharded_dt = redistribute(sharded_dt, mesh, dst_placement, dst_order)
trace_str = self._extract_redistribute_trace_from_debug_mode(
debug_mode.debug_string()
)
if idx == 0:
self.assertExpectedInline(
trace_str,
"""S(0)[0]S(0)[1]S(0)[2]->S(0)[0]S(0)[1]S(1)->S(0)S(1)[1]S(1)[0]->RS(1)[1]S(1)[0]->RS(0)S(1)->RS(0)[0]S(0)[1]""",
)
elif idx == 1:
self.assertExpectedInline(
trace_str,
"""S(0)[1]S(0)[0]S(0)[2]->S(0)[1]S(0)[0]S(1)->RS(0)S(1)->RS(0)[0]S(0)[1]""",
)
elif idx == 2:
self.assertExpectedInline(
trace_str,
"""S(0)[1]S(0)[0]S(0)[2]->S(0)[1]S(0)[0]R->S(1)S(0)R->S(1)S(2)R->S(0)S(2)R->S(0)[0]S(0)[1]R""",
)
elif idx == 3:
self.assertExpectedInline(
trace_str,
"""S(0)[0]S(0)[1]R->S(0)S(1)R->RS(1)R->RS(1)S(0)""",
)
expected_dt = _distribute_tensor(
input_data.clone(), mesh, dst_placement, shard_order=dst_order
)
self.assertEqual(sharded_dt.to_local(), expected_dt.to_local())
@with_comms
def test_generate_shard_orders(self):
"""Check if `generate_shard_orders` generates unique sharding combinations"""
import math
test_inputs = [
{"mesh": init_device_mesh(self.device_type, (2, 2, 2)), "tensor_rank": 2},
{"mesh": init_device_mesh(self.device_type, (2, 2, 2)), "tensor_rank": 3},
{"mesh": init_device_mesh(self.device_type, (2, 2, 2)), "tensor_rank": 4},
]
for test_input in test_inputs:
all_combinations = []
for shard_order in generate_shard_orders(
test_input["mesh"], test_input["tensor_rank"]
):
all_combinations.append(shard_order) # noqa: PERF402
for i in range(len(all_combinations)):
for j in range(i + 1, len(all_combinations)):
assert all_combinations[i] != all_combinations[j], (
f"Duplicate elements found in all_combinations {all_combinations[i]}, {all_combinations[j]}"
)
expected_total_combination = 0
N = test_input["mesh"].ndim
M = test_input["tensor_rank"]
for i in range(1, N + 1):
# assign total i split of device to tensor dims
if M < i:
continue
device_combination_count = math.comb(
N - 1, i - 1
) # choose i-1 non-empty segments from a list of size N
tensor_dim_order_permutation = math.comb(M, i) # choose i tensor dims
expected_total_combination += (
device_combination_count * tensor_dim_order_permutation
)
# multiply by total possible permutation of device order
expected_total_combination *= math.factorial(N)
self.assertEqual(len(all_combinations), expected_total_combination)
@with_comms
def test_ordered_distribute_all_combination(self):
"""Exhaustively test all possible sharding combinations and verify correctness"""
torch.manual_seed(21)
with maybe_disable_local_tensor_mode():
mesh = init_device_mesh(self.device_type, (2, 2, 2))
input_tensor_shape = [
# even sharding
(16, 8),
(8, 16, 32),
(8, 32, 16, 16),
# uneven sharding with padding
(17, 5),
(13, 2, 13),
(33, 16, 8, 1),
]
# 1. Verify correctness of distribute_tensor from Tensor to DTensor.
for tensor_shape in input_tensor_shape:
input_data = torch.randn(tensor_shape, device=self.device_type)
tensor_rank = input_data.ndim
with maybe_disable_local_tensor_mode():
shard_orders = generate_shard_orders(mesh, tensor_rank)
for shard_order in shard_orders:
sharded_dt = _distribute_tensor(
input_data.clone(), mesh, placements=None, shard_order=shard_order
)
self.assertEqual(make_full_tensor(sharded_dt), input_data)
# 2. Verify the correctness of redistribution from DTensor to DTensor.
# This test repeatedly redistributes a DTensor to various ordered
# placements and checks that the resulting tensor matches the original
# full tensor.
for tensor_shape in input_tensor_shape:
input_data = torch.randn(tensor_shape, device=self.device_type)
tensor_rank = input_data.ndim
prev_sharded_dt = None
with maybe_disable_local_tensor_mode():
shard_orders = generate_shard_orders(mesh, tensor_rank)
for shard_order in shard_orders:
if prev_sharded_dt is None:
prev_sharded_dt = _distribute_tensor(
input_data.clone(),
mesh,
placements=None,
shard_order=shard_order,
)
else:
sharded_dt = redistribute(
prev_sharded_dt, mesh, placements=None, shard_order=shard_order
)
self.assertEqual(make_full_tensor(sharded_dt), input_data)
prev_sharded_dt = sharded_dt
@with_comms
def test_ordered_redistribute_with_partial(self):
"""Test mixing Partial in the original placements and do redistribute."""
# This test takes 226s to complete on 8XA100...
torch.manual_seed(21)
with maybe_disable_local_tensor_mode():
mesh = init_device_mesh(self.device_type, (2, 2, 2))
input_tensor_shape = [
# even sharding
(16, 8),
(8, 16, 32),
# uneven sharding with padding
(17, 5),
(13, 2, 13),
(33, 16, 8, 1),
]
placement_choice = [
Shard(0),
Shard(1),
Shard(2),
Partial("sum"),
Partial("min"),
Replicate(),
]
# pick 3 for the 3D mesh
partial_placement_comb = list(itertools.combinations(placement_choice, 3))
def _is_valid_placement(placements, tensor_rank):
# Check if placements is valid for tensor with rank `tensor_rank`
for placement in placements:
if isinstance(placement, Shard):
if placement.dim >= tensor_rank:
return False
return True
for shape in input_tensor_shape:
for placements in partial_placement_comb:
if not _is_valid_placement(placements, len(shape)):
continue
local_tensor = torch.randn(shape, device=self.device_type)
full_tensor = DTensor.from_local(local_tensor, mesh, placements)
with maybe_disable_local_tensor_mode():
shard_orders = generate_shard_orders(mesh, len(shape))
for shard_order in shard_orders:
sharded_dt = redistribute(
full_tensor, mesh, placements=None, shard_order=shard_order
)
self.assertEqual(
make_full_tensor(sharded_dt), make_full_tensor(full_tensor)
)
@unittest.skip(
"Temporarily skipping until we support special placement types in "
"graph based redistribution"
)
@with_comms
def test_ordered_redistribute_for_special_placement(self):
"""Test ordered redistribution with special placement"""
torch.manual_seed(21)
mesh = init_device_mesh(self.device_type, (8,))
input_data = torch.randn((8, 8), device=self.device_type)
src_placement = [Shard(1)]
tgt_placement = [
(MaskPartial(offset_shape=torch.Size([10, 20]), offset_dim=0),)
]
sharded_dt = _distribute_tensor(
input_data.clone(),
mesh,
src_placement,
shard_order=(ShardOrderEntry(tensor_dim=1, mesh_dims=(0,)),),
)
sharded_dt = redistribute(sharded_dt, mesh, tgt_placement, shard_order=None)
@with_comms
def test_shard_order_same_data_as_strided_shard(self):
device_mesh = init_device_mesh(self.device_type, (4, 2))
x = torch.randn(8, 4, device=self.device_type)
# specify right-to-left order use _StridedShard
strided_placement = [_StridedShard(-2, split_factor=2), Shard(-2)]
x_strided_dt = distribute_tensor(x, device_mesh, strided_placement)
# specify right-to-left order use ordered shard
x_ordered_dt = _distribute_tensor(
x,
device_mesh,
placements=[Shard(0), Shard(0)],
shard_order=(ShardOrderEntry(tensor_dim=0, mesh_dims=(1, 0)),),
)
self.assertEqual(x_ordered_dt.to_local(), x_strided_dt.to_local())
RedistributeTestWithLocalTensor = create_local_tensor_test_class(
RedistributeTest,
)
MultiDimRedistributeTestWithLocalTensor = create_local_tensor_test_class(
MultiDimRedistributeTest,
skipped_tests=["test_multi_dim_mesh"],
)
DistributeWithDeviceOrderTestWithLocalTensor = create_local_tensor_test_class(
DistributeWithDeviceOrderTest,
)
if __name__ == "__main__":
run_tests()
| DistributeWithDeviceOrderTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 41364,
"end": 41649
} | class ____(sgqlc.types.Enum):
"""The possible states of a milestone.
Enumeration Choices:
* `CLOSED`: A milestone that has been closed.
* `OPEN`: A milestone that is still open.
"""
__schema__ = github_schema
__choices__ = ("CLOSED", "OPEN")
| MilestoneState |
python | getsentry__sentry | src/sentry/integrations/api/serializers/models/external_issue.py | {
"start": 573,
"end": 1738
} | class ____(Serializer):
def get_attrs(
self,
item_list: Sequence[ExternalIssue],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
):
result = {}
for item in item_list:
# Get the integration (e.g. Jira, GitHub, etc) associated with that issue
integration = integration_service.get_integration(integration_id=item.integration_id)
if integration is None:
continue
installation = integration.get_installation(organization_id=item.organization.id)
if hasattr(installation, "get_issue_display_name"):
result[item] = {
"id": str(item.id),
"key": item.key,
"title": item.title,
"description": item.description,
"displayName": installation.get_issue_display_name(item),
"integrationKey": integration.provider,
"integrationName": integration.name,
}
return result
def serialize(self, obj, attrs, user, **kwargs):
return attrs
| ExternalIssueSerializer |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 261541,
"end": 268575
} | class ____(rv_continuous):
r"""A pearson type III continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pearson3` is:
.. math::
f(x, \kappa) = \frac{|\beta|}{\Gamma(\alpha)}
(\beta (x - \zeta))^{\alpha - 1}
\exp(-\beta (x - \zeta))
where:
.. math::
\beta = \frac{2}{\kappa}
\alpha = \beta^2 = \frac{4}{\kappa^2}
\zeta = -\frac{\alpha}{\beta} = -\beta
:math:`\Gamma` is the gamma function (`scipy.special.gamma`).
Pass the skew :math:`\kappa` into `pearson3` as the shape parameter
``skew``.
%(after_notes)s
%(example)s
References
----------
R.W. Vogel and D.E. McMartin, "Probability Plot Goodness-of-Fit and
Skewness Estimation Procedures for the Pearson Type 3 Distribution", Water
Resources Research, Vol.27, 3149-3158 (1991).
L.R. Salvosa, "Tables of Pearson's Type III Function", Ann. Math. Statist.,
Vol.1, 191-198 (1930).
"Using Modern Computing Tools to Fit the Pearson Type III Distribution to
Aviation Loads Data", Office of Aviation Research (2003).
"""
def _preprocess(self, x, skew):
# The real 'loc' and 'scale' are handled in the calling pdf(...). The
# local variables 'loc' and 'scale' within pearson3._pdf are set to
# the defaults just to keep them as part of the equations for
# documentation.
loc = 0.0
scale = 1.0
# If skew is small, return _norm_pdf. The divide between pearson3
# and norm was found by brute force and is approximately a skew of
# 0.000016. No one, I hope, would actually use a skew value even
# close to this small.
norm2pearson_transition = 0.000016
ans, x, skew = np.broadcast_arrays(1.0, x, skew)
ans = ans.copy()
# mask is True where skew is small enough to use the normal approx.
mask = np.absolute(skew) < norm2pearson_transition
invmask = ~mask
beta = 2.0 / (skew[invmask] * scale)
alpha = (scale * beta)**2
zeta = loc - alpha / beta
transx = beta * (x[invmask] - zeta)
return ans, x, transx, mask, invmask, beta, alpha, zeta
def _argcheck(self, skew):
# The _argcheck function in rv_continuous only allows positive
# arguments. The skew argument for pearson3 can be zero (which I want
# to handle inside pearson3._pdf) or negative. So just return True
# for all skew args.
return np.isfinite(skew)
def _shape_info(self):
return [_ShapeInfo("skew", False, (-np.inf, np.inf), (False, False))]
def _stats(self, skew):
m = 0.0
v = 1.0
s = skew
k = 1.5*skew**2
return m, v, s, k
def _pdf(self, x, skew):
# pearson3.pdf(x, skew) = abs(beta) / gamma(alpha) *
# (beta * (x - zeta))**(alpha - 1) * exp(-beta*(x - zeta))
# Do the calculation in _logpdf since helps to limit
# overflow/underflow problems
ans = np.exp(self._logpdf(x, skew))
if ans.ndim == 0:
if np.isnan(ans):
return 0.0
return ans
ans[np.isnan(ans)] = 0.0
return ans
def _logpdf(self, x, skew):
# PEARSON3 logpdf GAMMA logpdf
# np.log(abs(beta))
# + (alpha - 1)*np.log(beta*(x - zeta)) + (a - 1)*np.log(x)
# - beta*(x - zeta) - x
# - sc.gammalnalpha) - sc.gammalna)
ans, x, transx, mask, invmask, beta, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = np.log(_norm_pdf(x[mask]))
# use logpdf instead of _logpdf to fix issue mentioned in gh-12640
# (_logpdf does not return correct result for alpha = 1)
ans[invmask] = np.log(abs(beta)) + gamma.logpdf(transx, alpha)
return ans
def _cdf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_cdf(x[mask])
skew = np.broadcast_to(skew, invmask.shape)
invmask1a = np.logical_and(invmask, skew > 0)
invmask1b = skew[invmask] > 0
# use cdf instead of _cdf to fix issue mentioned in gh-12640
# (_cdf produces NaNs for inputs outside support)
ans[invmask1a] = gamma.cdf(transx[invmask1b], alpha[invmask1b])
# The gamma._cdf approach wasn't working with negative skew.
# Note that multiplying the skew by -1 reflects about x=0.
# So instead of evaluating the CDF with negative skew at x,
# evaluate the SF with positive skew at -x.
invmask2a = np.logical_and(invmask, skew < 0)
invmask2b = skew[invmask] < 0
# gamma._sf produces NaNs when transx < 0, so use gamma.sf
ans[invmask2a] = gamma.sf(transx[invmask2b], alpha[invmask2b])
return ans
def _sf(self, x, skew):
ans, x, transx, mask, invmask, _, alpha, _ = (
self._preprocess(x, skew))
ans[mask] = _norm_sf(x[mask])
skew = np.broadcast_to(skew, invmask.shape)
invmask1a = np.logical_and(invmask, skew > 0)
invmask1b = skew[invmask] > 0
ans[invmask1a] = gamma.sf(transx[invmask1b], alpha[invmask1b])
invmask2a = np.logical_and(invmask, skew < 0)
invmask2b = skew[invmask] < 0
ans[invmask2a] = gamma.cdf(transx[invmask2b], alpha[invmask2b])
return ans
def _rvs(self, skew, size=None, random_state=None):
skew = np.broadcast_to(skew, size)
ans, _, _, mask, invmask, beta, alpha, zeta = (
self._preprocess([0], skew))
nsmall = mask.sum()
nbig = mask.size - nsmall
ans[mask] = random_state.standard_normal(nsmall)
ans[invmask] = random_state.standard_gamma(alpha, nbig)/beta + zeta
if size == ():
ans = ans[0]
return ans
def _ppf(self, q, skew):
ans, q, _, mask, invmask, beta, alpha, zeta = (
self._preprocess(q, skew))
ans[mask] = _norm_ppf(q[mask])
q = q[invmask]
q[beta < 0] = 1 - q[beta < 0] # for negative skew; see gh-17050
ans[invmask] = sc.gammaincinv(alpha, q)/beta + zeta
return ans
@_call_super_mom
@extend_notes_in_docstring(rv_continuous, notes="""\
Note that method of moments (`method='MM'`) is not
available for this distribution.\n\n""")
def fit(self, data, *args, **kwds):
if kwds.get("method", None) == 'MM':
raise NotImplementedError("Fit `method='MM'` is not available for "
"the Pearson3 distribution. Please try "
"the default `method='MLE'`.")
else:
return super(type(self), self).fit(data, *args, **kwds)
pearson3 = pearson3_gen(name="pearson3")
| pearson3_gen |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/token.py | {
"start": 196,
"end": 697
} | class ____(LoginRequiredMixin, ListView):
"""
Show a page where the current logged-in user can see his tokens so they can revoke them
"""
context_object_name = "authorized_tokens"
template_name = "oauth2_provider/authorized-tokens.html"
model = get_access_token_model()
def get_queryset(self):
"""
Show only user's tokens
"""
return super().get_queryset().select_related("application").filter(user=self.request.user)
| AuthorizedTokensListView |
python | dagster-io__dagster | python_modules/automation/automation/parse_dataproc_configs.py | {
"start": 4681,
"end": 9022
} | class ____:
def __init__(self, schemas):
self.schemas = schemas
# Stashing these in a global so that we can write out after we're done constructing configs
self.all_enums = {}
def extract_config(self, base_field, suffix):
with IndentingBufferPrinter() as printer:
printer.write_header()
printer.line("from dagster import Bool, Field, Int, Permissive, Shape, String")
printer.blank_line()
# Optionally write enum includes
if self.all_enums:
enums = ", ".join(self.all_enums.keys())
printer.line(f"from dagster_gcp.dataproc.types_{suffix} import {enums}")
printer.blank_line()
printer.line(f"def define_{suffix}_config():")
with printer.with_indent():
printer.append("return ")
base_field.write(printer)
return printer.read().strip().encode("utf-8")
def extract_enums(self):
if not self.all_enums:
return
with IndentingBufferPrinter() as printer:
printer.write_header()
printer.line("from dagster import Enum, EnumValue")
printer.blank_line()
for enum in self.all_enums:
self.all_enums[enum].write(printer)
printer.blank_line()
return printer.read().strip().encode("utf-8")
def parse_object(self, obj, name=None, depth=0, enum_descriptions=None):
# This is a reference to another object that we should substitute by recursing
if "$ref" in obj:
name = obj["$ref"]
return self.parse_object(self.schemas.get(name), name, depth + 1)
# Print type tree
prefix = "|" + ("-" * 4 * depth) + " " if depth > 0 else ""
print(prefix + (name or obj.get("type"))) # noqa: T201
# Switch on object type
obj_type = obj.get("type")
# Handle enums
if "enum" in obj:
# I think this is a bug in the API JSON spec where enum descriptions are a level higher
# than they should be for type "Component" and the name isn't there
if name is None:
name = "Component"
else:
name = name[0].upper() + name[1:]
enum = Enum(name, obj["enum"], enum_descriptions or obj.get("enumDescriptions"))
self.all_enums[name] = enum
fields = enum
# Handle dicts / objects
elif obj_type == "object":
# This is a generic k:v map
if "additionalProperties" in obj:
fields = "Permissive()"
else:
fields = {
k: self.parse_object(v, k, depth + 1) for k, v in obj["properties"].items()
}
# Handle arrays
elif obj_type == "array":
fields = List(
self.parse_object(
obj.get("items"), None, depth + 1, enum_descriptions=obj.get("enumDescriptions")
)
)
# Scalars
elif obj_type in SCALAR_TYPES:
fields = SCALAR_TYPES.get(obj_type)
# Should never get here
else:
raise Exception("unknown type: ", obj)
description = obj.get("description")
is_required = description is not None and description.startswith("Required.")
return Field(fields, is_required=is_required, description=description)
def extract_schema_for_object(self, object_name, name):
# Reset enums for this object
self.all_enums = {}
obj = self.parse_object(self.schemas.get(object_name), object_name)
return ParsedConfig(
name=name, configs=self.extract_config(obj, name), enums=self.extract_enums()
)
def main():
api_url = "https://www.googleapis.com/discovery/v1/apis/dataproc/v1/rest"
base_path = "../libraries/dagster-gcp/dagster_gcp/dataproc/"
json_schema = requests.get(api_url).json().get("schemas")
c = ConfigParser(json_schema)
parsed = c.extract_schema_for_object("Job", "dataproc_job")
parsed.write_configs(base_path)
parsed = c.extract_schema_for_object("ClusterConfig", "dataproc_cluster")
parsed.write_configs(base_path)
if __name__ == "__main__":
main()
| ConfigParser |
python | pytorch__pytorch | torch/utils/_debug_mode.py | {
"start": 14815,
"end": 15505
} | class ____(_DebugCall):
"""Designates entering an nn.Module's forward method"""
def __init__(self, module_name: str, call_depth: int, stack: bool = False) -> None:
super().__init__(call_depth, stack=stack)
self.module_name = module_name
def stringify_args(
self, attributes: list[str], tensor_memo: TensorIdTracker | None = None
) -> None:
pass # nothing to stringify
def render(self, attributes: list[str]) -> str:
return f"[nn.Mod] {self.module_name}"
def __iter__(self):
yield from [
f"[nn.Mod] {self.module_name}",
(),
{},
self.call_depth,
]
| _NNModuleCall |
python | falconry__falcon | falcon/testing/helpers.py | {
"start": 3435,
"end": 10142
} | class ____:
"""Emits events on-demand to an ASGI app.
This class can be used to drive a standard ASGI app callable in order to
perform functional tests on the app in question.
Note:
In order to ensure the app is able to handle subtle variations
in the ASGI events that are allowed by the specification, such
variations are applied to the emitted events at unspecified
intervals. This includes whether or not the `more_body` field
is explicitly set, or whether or not the request `body` chunk in
the event is occasionally empty,
Keyword Args:
body (str): The body content to use when emitting http.request
events. May be an empty string. If a byte string, it will
be used as-is; otherwise it will be encoded as UTF-8
(default ``b''``).
chunk_size (int): The maximum number of bytes to include in
a single http.request event (default 4096).
disconnect_at (float): The Unix timestamp after which to begin
emitting ``'http.disconnect'`` events (default now + 30s). The
value may be either an ``int`` or a ``float``, depending
on the precision required. Setting `disconnect_at` to
``0`` is treated as a special case, and will result in an
``'http.disconnect'`` event being immediately emitted (rather than
first emitting an ``'http.request'`` event).
"""
# TODO(kgriffs): If this pattern later becomes useful elsewhere,
# factor out into a standalone helper class.
_branch_decider: dict[str, bool] = defaultdict(bool)
def __init__(
self,
body: str | bytes | None = None,
chunk_size: int | None = None,
disconnect_at: int | float | None = None,
) -> None:
if body is None:
body = b''
elif not isinstance(body, bytes):
body = body.encode()
body = memoryview(body)
if disconnect_at is None:
disconnect_at = time.time() + 30
if chunk_size is None:
chunk_size = 4096
self._body: memoryview | None = body
self._chunk_size = chunk_size
self._emit_empty_chunks = True
self._disconnect_at = disconnect_at
self._disconnected = False
self._exhaust_body = True
self._emitted_empty_chunk_a = False
self._emitted_empty_chunk_b = False
@property
def disconnected(self) -> bool:
"""Returns ``True`` if the simulated client connection is in a
"disconnected" state.
""" # noqa: D205
return self._disconnected or (self._disconnect_at <= time.time())
def disconnect(self, exhaust_body: bool | None = None) -> None:
"""Set the client connection state to disconnected.
Call this method to simulate an immediate client disconnect and
begin emitting ``'http.disconnect'`` events.
Arguments:
exhaust_body (bool): Set to ``False`` in order to
begin emitting ``'http.disconnect'`` events without first
emitting at least one ``'http.request'`` event.
"""
if exhaust_body is not None:
self._exhaust_body = exhaust_body
self._disconnected = True
async def emit(self) -> AsgiEvent:
# NOTE(kgriffs): Special case: if we are immediately disconnected,
# the first event should be 'http.disconnect'
if self._disconnect_at == 0:
return {'type': EventType.HTTP_DISCONNECT}
#
# NOTE(kgriffs): Based on my reading of the ASGI spec, at least one
# 'http.request' event should be emitted before 'http.disconnect'
# for normal requests. However, the server may choose to
# immediately abandon a connection for some reason, in which case
# an 'http.request' event may never be sent.
#
# See also: https://asgi.readthedocs.io/en/latest/specs/main.html#events
#
if self._body is None or not self._exhaust_body:
# NOTE(kgriffs): When there are no more events, an ASGI
# server will hang until the client connection
# disconnects.
while not self.disconnected:
await asyncio.sleep(0.001)
return {'type': EventType.HTTP_DISCONNECT}
event: dict[str, Any] = {'type': EventType.HTTP_REQUEST}
if self._emit_empty_chunks:
# NOTE(kgriffs): Return a couple variations on empty chunks
# every time, to ensure test coverage.
if not self._emitted_empty_chunk_a:
self._emitted_empty_chunk_a = True
event['more_body'] = True
return event
if not self._emitted_empty_chunk_b:
self._emitted_empty_chunk_b = True
event['more_body'] = True
event['body'] = b''
return event
# NOTE(kgriffs): Part of the time just return an
# empty chunk to make sure the app handles that
# correctly.
if self._toggle_branch('return_empty_chunk'):
event['more_body'] = True
# NOTE(kgriffs): Since ASGI specifies that
# 'body' is optional, we toggle whether
# or not to explicitly set it to b'' to ensure
# the app handles both correctly.
if self._toggle_branch('explicit_empty_body_1'):
event['body'] = b''
return event
chunk = self._body[: self._chunk_size]
self._body = self._body[self._chunk_size :] or None
if chunk:
event['body'] = bytes(chunk)
elif self._toggle_branch('explicit_empty_body_2'):
# NOTE(kgriffs): Since ASGI specifies that
# 'body' is optional, we toggle whether
# or not to explicitly set it to b'' to ensure
# the app handles both correctly.
event['body'] = b''
if self._body:
event['more_body'] = True
elif self._toggle_branch('set_more_body_false'):
# NOTE(kgriffs): The ASGI spec allows leaving off
# the 'more_body' key when it would be set to
# False, so toggle one of the approaches
# to make sure the app handles both cases.
event['more_body'] = False
return event
__call__ = emit
def _toggle_branch(self, name: str) -> bool:
self._branch_decider[name] = not self._branch_decider[name]
return self._branch_decider[name]
| ASGIRequestEventEmitter |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_1.py | {
"start": 429,
"end": 469
} | class ____(E):
x: pyproj.Transformer
| F |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/flare/prompts.py | {
"start": 150,
"end": 1498
} | class ____(BaseOutputParser[tuple[str, bool]]):
"""Output parser that checks if the output is finished."""
finished_value: str = "FINISHED"
"""Value that indicates the output is finished."""
@override
def parse(self, text: str) -> tuple[str, bool]:
cleaned = text.strip()
finished = self.finished_value in cleaned
return cleaned.replace(self.finished_value, ""), finished
PROMPT_TEMPLATE = """\
Respond to the user message using any relevant context. \
If context is provided, you should ground your answer in that context. \
Once you're done responding return FINISHED.
>>> CONTEXT: {context}
>>> USER INPUT: {user_input}
>>> RESPONSE: {response}\
"""
PROMPT = PromptTemplate(
template=PROMPT_TEMPLATE,
input_variables=["user_input", "context", "response"],
)
QUESTION_GENERATOR_PROMPT_TEMPLATE = """\
Given a user input and an existing partial response as context, \
ask a question to which the answer is the given term/entity/phrase:
>>> USER INPUT: {user_input}
>>> EXISTING PARTIAL RESPONSE: {current_response}
The question to which the answer is the term/entity/phrase "{uncertain_span}" is:"""
QUESTION_GENERATOR_PROMPT = PromptTemplate(
template=QUESTION_GENERATOR_PROMPT_TEMPLATE,
input_variables=["user_input", "current_response", "uncertain_span"],
)
| FinishedOutputParser |
python | mlflow__mlflow | mlflow/webhooks/types.py | {
"start": 7848,
"end": 8599
} | class ____(TypedDict):
"""Payload sent when a tag is set on a prompt version.
Example payload:
.. code-block:: python
{
"name": "example_prompt",
"version": "1",
"key": "example_key",
"value": "example_value",
}
"""
name: str
"""The name of the prompt."""
version: str
"""The version of the prompt."""
key: str
"""The tag key being set."""
value: str
"""The tag value being set."""
@classmethod
def example(cls) -> "PromptVersionTagSetPayload":
return cls(
name="example_prompt",
version="1",
key="example_key",
value="example_value",
)
| PromptVersionTagSetPayload |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol3.py | {
"start": 4472,
"end": 4517
} | class ____(Proto13[T13], Protocol): ...
| Proto14 |
python | google__pytype | pytype/io_test.py | {
"start": 385,
"end": 6005
} | class ____(unittest.TestCase):
"""Test IO functions."""
def test_read_source_file_utf8(self):
with self._tmpfile("abc□def\n") as f:
self.assertEqual(io.read_source_file(f.name), "abc□def\n")
@contextlib.contextmanager
def _tmpfile(self, contents):
tempfile_options = {"mode": "w", "suffix": ".txt", "encoding": "utf-8"}
with compatible_tempfile.NamedTemporaryFile(**tempfile_options) as f:
f.write(contents)
f.flush()
yield f
def test_wrap_pytype_exceptions(self):
with self.assertRaises(ValueError):
with io.wrap_pytype_exceptions(ValueError, "foo.py"):
io.read_source_file("missing_file")
def test_wrap_pytype_exception_traceback(self):
class CustomError(Exception):
pass
def called_function():
raise OSError("error!")
def calling_function():
called_function()
err = None
trace = None
try:
with io.wrap_pytype_exceptions(CustomError, "foo.py"):
calling_function()
except CustomError as e:
err = e
_, _, tb = sys.exc_info()
trace = traceback.format_tb(tb)
self.assertIn("OSError: error!", err.args[0])
self.assertTrue(any("in calling_function" in x for x in trace))
def test_check_py(self):
errorlog = io.check_py("undefined_var").context.errorlog
(error,) = errorlog.unique_sorted_errors()
self.assertEqual(error.name, "name-error")
def test_check_py_with_options(self):
options = config.Options.create(disable="name-error")
errorlog = io.check_py("undefined_var", options).context.errorlog
self.assertFalse(errorlog.unique_sorted_errors())
def test_generate_pyi(self):
ret, pyi_string = io.generate_pyi("x = 42")
self.assertFalse(ret.context.errorlog.unique_sorted_errors())
self.assertEqual(pyi_string, "x: int\n")
self.assertIsInstance(ret.ast, pytd.TypeDeclUnit)
def test_generate_pyi_with_options(self):
with self._tmpfile("x: int") as pyi:
pyi_name, _ = path_utils.splitext(path_utils.basename(pyi.name))
with self._tmpfile(f"{pyi_name} {pyi.name}") as imports_map:
src = "import {mod}; y = {mod}.x".format(mod=pyi_name)
options = config.Options.create(imports_map=imports_map.name)
_, pyi_string = io.generate_pyi(src, options)
self.assertEqual(pyi_string, f"import {pyi_name}\n\ny: int\n")
def test_generate_pyi__overload_order(self):
_, pyi_string = io.generate_pyi(textwrap.dedent("""
from typing import Any, overload
@overload
def f(x: None) -> None: ...
@overload
def f(x: Any) -> int: ...
def f(x):
return __any_object__
""".lstrip("\n")))
self.assertMultiLineEqual(
pyi_string,
textwrap.dedent("""
from typing import overload
@overload
def f(x: None) -> None: ...
@overload
def f(x) -> int: ...
""".lstrip("\n")),
)
def test_check_or_generate_pyi__check(self):
with self._tmpfile("") as f:
options = config.Options.create(f.name, check=True)
ret = io.check_or_generate_pyi(options)
self.assertIsNone(ret.pyi)
self.assertIsNone(ret.ast)
def test_check_or_generate_pyi__generate(self):
with self._tmpfile("") as f:
options = config.Options.create(f.name, check=False)
ret = io.check_or_generate_pyi(options)
self.assertIsNotNone(ret.pyi)
self.assertIsNotNone(ret.ast)
def test_check_or_generate_pyi__open_function(self):
def mock_open(filename, *args, **kwargs):
if filename == "my_amazing_file.py":
return builtins_io.StringIO("x = 0.0")
else:
return open(filename, *args, **kwargs) # pylint: disable=consider-using-with
options = config.Options.create(
"my_amazing_file.py", check=False, open_function=mock_open
)
ret = io.check_or_generate_pyi(options)
self.assertEqual(ret.pyi, "x: float\n")
def test_write_pickle(self):
ast = pytd.TypeDeclUnit(None, (), (), (), (), ())
options = config.Options.create(
output="/dev/null" if sys.platform != "win32" else "NUL"
)
io.write_pickle(ast, options) # just make sure we don't crash
def test_unused_imports_info_files(self):
with test_utils.Tempdir() as d, file_utils.cd(d.path):
d.create_file("common/foo.pyi", "from common import bar\nx: bar.Bar")
d.create_file("common/bar.pyi", "class Bar: pass")
d.create_file("common/baz.pyi", "class Baz: pass")
d.create_file("aaa/other.pyi", "class Other: pass")
imports_info = d.create_file(
"imports_info",
file_utils.replace_separator(textwrap.dedent("""
common/foo common/foo.pyi
common/bar common/bar.pyi
common/baz common/baz.pyi
aaa/other aaa/other.pyi
""")),
)
module = d.create_file("m.py", "from common import foo; print(foo.x)")
unused_imports_info_files = path_utils.join(d.path, "unused_imports_info")
options = config.Options.create(
module,
imports_map=imports_info,
unused_imports_info_files=unused_imports_info_files,
)
ret = io.process_one_file(options)
self.assertEqual(0, ret)
self.assertTrue(
path_utils.exists(unused_imports_info_files),
f"{unused_imports_info_files!r} does not exist",
)
with options.open_function(unused_imports_info_files) as f:
content = f.read()
self.assertEqual(
content,
file_utils.replace_separator("aaa/other.pyi\ncommon/baz.pyi\n"),
)
if __name__ == "__main__":
unittest.main()
| IOTest |
python | dagster-io__dagster | examples/experimental/assets_yaml_dsl/assets_yaml_dsl/domain_specific_dsl/stocks_dsl.py | {
"start": 1254,
"end": 3590
} | class ____(NamedTuple):
stock_infos: list[StockInfo]
index_strategy: IndexStrategy
forecast: Forecast
def build_stock_assets_object(stocks_dsl_document: dict[str, dict]) -> StockAssets:
return StockAssets(
stock_infos=[
StockInfo(ticker=stock_block["ticker"])
for stock_block in stocks_dsl_document["stocks_to_index"]
],
index_strategy=IndexStrategy(type=stocks_dsl_document["index_strategy"]["type"]),
forecast=Forecast(int(stocks_dsl_document["forecast"]["days"])),
)
def get_stocks_dsl_example_defs() -> list[AssetsDefinition]:
stocks_dsl_document = load_yaml("stocks.yaml")
stock_assets = build_stock_assets_object(stocks_dsl_document)
return assets_defs_from_stock_assets(stock_assets)
def assets_defs_from_stock_assets(stock_assets: StockAssets) -> list[AssetsDefinition]:
group_name = "stocks"
def spec_for_stock_info(stock_info: StockInfo) -> AssetSpec:
ticker = stock_info.ticker
return AssetSpec(
key=AssetKey(ticker),
group_name=group_name,
description=f"Fetch {ticker} from internal service",
)
tickers = [stock_info.ticker for stock_info in stock_assets.stock_infos]
ticker_specs = [spec_for_stock_info(stock_info) for stock_info in stock_assets.stock_infos]
@multi_asset(specs=ticker_specs)
def fetch_the_tickers(
context: AssetExecutionContext, pipes_subprocess_client: PipesSubprocessClient
):
python_executable = shutil.which("python")
assert python_executable is not None
script_path = file_relative_path(__file__, "user_scripts/fetch_the_tickers.py")
return pipes_subprocess_client.run(
command=[python_executable, script_path], context=context, extras={"tickers": tickers}
).get_results()
@asset(deps=fetch_the_tickers.keys, group_name=group_name)
def index_strategy() -> None:
stored_ticker_data = {}
for ticker in tickers:
stored_ticker_data[ticker] = fetch_data_for_ticker(ticker)
# do someting with stored_ticker_data
@asset(deps=fetch_the_tickers.keys, group_name=group_name)
def forecast() -> None:
# do some forecast thing
pass
return [fetch_the_tickers, index_strategy, forecast]
| StockAssets |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 6145,
"end": 6850
} | class ____(DiagnosticPipError):
"""Raised when pyproject.toml has `build-system`, but no `build-system.requires`."""
reference = "missing-pyproject-build-system-requires"
def __init__(self, *, package: str) -> None:
super().__init__(
message=f"Can not process {escape(package)}",
context=Text(
"This package has an invalid pyproject.toml file.\n"
"The [build-system] table is missing the mandatory `requires` key."
),
note_stmt="This is an issue with the package mentioned above, not pip.",
hint_stmt=Text("See PEP 518 for the detailed specification."),
)
| MissingPyProjectBuildRequires |
python | apache__thrift | test/py/TestClient.py | {
"start": 13603,
"end": 13710
} | class ____(AbstractTest):
def get_protocol2(self, transport):
return None
| MultiplexedOptionalTest |
python | tensorflow__tensorflow | tensorflow/python/data/ops/options.py | {
"start": 12298,
"end": 18752
} | class ____(options_lib.OptionsBase):
"""Represents options for dataset optimizations.
You can set the optimization options of a dataset through the
`experimental_optimization` property of `tf.data.Options`; the property is
an instance of `tf.data.experimental.OptimizationOptions`.
```python
options = tf.data.Options()
options.experimental_optimization.noop_elimination = True
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset.with_options(options)
```
"""
apply_default_optimizations = options_lib.create_option(
name="apply_default_optimizations",
ty=bool,
docstring=
"Whether to apply default graph optimizations. If False, only graph "
"optimizations that have been explicitly enabled will be applied.")
filter_fusion = options_lib.create_option(
name="filter_fusion",
ty=bool,
docstring=
"Whether to fuse filter transformations. If None, defaults to False.")
filter_parallelization = options_lib.create_option(
name="filter_parallelization",
ty=bool,
docstring=
"Whether to parallelize stateless filter transformations. If None, "
"defaults to False.")
inject_prefetch = options_lib.create_option(
name="inject_prefetch",
ty=bool,
docstring=
"Whether to inject prefetch transformation as the last transformation "
"when the last transformation is a synchronous transformation. If None, "
"defaults to True.")
seq_interleave_prefetch = options_lib.create_option(
name="seq_interleave_prefetch",
ty=bool,
docstring=(
"Whether to replace parallel interleave using a sequential interleave"
" that prefetches elements from its input iterators. If None,"
" defaults to False."
),
)
map_and_batch_fusion = options_lib.create_option(
name="map_and_batch_fusion",
ty=bool,
docstring=
"Whether to fuse map and batch transformations. If None, defaults to "
"True.")
map_and_filter_fusion = options_lib.create_option(
name="map_and_filter_fusion",
ty=bool,
docstring=
"Whether to fuse map and filter transformations. If None, defaults to "
"False.")
map_fusion = options_lib.create_option(
name="map_fusion",
ty=bool,
docstring=(
"Whether to fuse map transformations with `num_parallel_calls` set to"
" `tf.data.AUTOTUNE`, no captured inputs and same `deterministic`"
" value. If None, defaults to False."
),
)
map_parallelization = options_lib.create_option(
name="map_parallelization",
ty=bool,
docstring=
"Whether to parallelize stateless map transformations. If None, defaults "
"to True.")
noop_elimination = options_lib.create_option(
name="noop_elimination",
ty=bool,
docstring=
"Whether to eliminate no-op transformations. If None, defaults to True.")
parallel_batch = options_lib.create_option(
name="parallel_batch",
ty=bool,
docstring="Whether to parallelize copying of batch elements. If None, "
"defaults to True.")
shuffle_and_repeat_fusion = options_lib.create_option(
name="shuffle_and_repeat_fusion",
ty=bool,
docstring="Whether to fuse shuffle and repeat transformations. If None, "
"defaults to True.")
def _to_proto(self):
pb = dataset_options_pb2.OptimizationOptions()
if self.apply_default_optimizations is not None:
pb.apply_default_optimizations = self.apply_default_optimizations
if self.filter_fusion is not None:
pb.filter_fusion = self.filter_fusion
if self.filter_parallelization is not None:
pb.filter_parallelization = self.filter_parallelization
if self.inject_prefetch is not None:
pb.inject_prefetch = self.inject_prefetch
if self.seq_interleave_prefetch is not None:
pb.seq_interleave_prefetch = self.seq_interleave_prefetch
if self.map_and_batch_fusion is not None:
pb.map_and_batch_fusion = self.map_and_batch_fusion
if self.map_and_filter_fusion is not None:
pb.map_and_filter_fusion = self.map_and_filter_fusion
if self.map_fusion is not None:
pb.map_fusion = self.map_fusion
if self.map_parallelization is not None:
pb.map_parallelization = self.map_parallelization
if self.noop_elimination is not None:
pb.noop_elimination = self.noop_elimination
if self.parallel_batch is not None:
pb.parallel_batch = self.parallel_batch
if self.shuffle_and_repeat_fusion is not None:
pb.shuffle_and_repeat_fusion = self.shuffle_and_repeat_fusion
return pb
def _from_proto(self, pb):
if pb.WhichOneof("optional_apply_default_optimizations") is not None:
self.apply_default_optimizations = pb.apply_default_optimizations
if pb.WhichOneof("optional_filter_fusion") is not None:
self.filter_fusion = pb.filter_fusion
if pb.WhichOneof("optional_filter_parallelization") is not None:
self.filter_parallelization = pb.filter_parallelization
if pb.WhichOneof("optional_inject_prefetch") is not None:
self.inject_prefetch = pb.inject_prefetch
if pb.WhichOneof("optional_seq_interleave_prefetch") is not None:
self.seq_interleave_prefetch = pb.seq_interleave_prefetch
if pb.WhichOneof("optional_map_and_batch_fusion") is not None:
self.map_and_batch_fusion = pb.map_and_batch_fusion
if pb.WhichOneof("optional_map_and_filter_fusion") is not None:
self.map_and_filter_fusion = pb.map_and_filter_fusion
if pb.WhichOneof("optional_map_fusion") is not None:
self.map_fusion = pb.map_fusion
if pb.WhichOneof("optional_map_parallelization") is not None:
self.map_parallelization = pb.map_parallelization
if pb.WhichOneof("optional_noop_elimination") is not None:
self.noop_elimination = pb.noop_elimination
if pb.WhichOneof("optional_parallel_batch") is not None:
self.parallel_batch = pb.parallel_batch
if pb.WhichOneof("optional_shuffle_and_repeat_fusion") is not None:
self.shuffle_and_repeat_fusion = pb.shuffle_and_repeat_fusion
def _set_mutable(self, mutable):
"""Change the mutability value to `mutable` on this options and children."""
# pylint: disable=protected-access
object.__setattr__(self, "_mutable", mutable)
@tf_export("data.experimental.ServiceOptions")
| OptimizationOptions |
python | falconry__falcon | falcon/_typing.py | {
"start": 9078,
"end": 10762
} | class ____(Protocol[_AReqT, _ARespT]):
"""WSGI/ASGI middleware with response handler."""
async def process_response_async(
self,
req: _AReqT,
resp: _ARespT,
resource: object,
req_succeeded: bool,
) -> None: ...
# NOTE(jkmnt): This typing is far from perfect due to the Python typing limitations,
# but better than nothing. Middleware conforming to any protocol of the union
# will pass the type check. Other protocols violations are not checked.
SyncMiddleware = Union[
WsgiMiddlewareWithProcessRequest[_ReqT, _RespT],
WsgiMiddlewareWithProcessResource[_ReqT, _RespT],
WsgiMiddlewareWithProcessResponse[_ReqT, _RespT],
]
"""Synchronous (WSGI) application middleware.
This type alias reflects the middleware interface for
components that can be used with a WSGI app.
"""
AsyncMiddleware = Union[
AsgiMiddlewareWithProcessRequest[_AReqT, _ARespT],
AsgiMiddlewareWithProcessResource[_AReqT, _ARespT],
AsgiMiddlewareWithProcessResponse[_AReqT, _ARespT],
# Lifespan middleware
AsgiMiddlewareWithProcessStartup,
AsgiMiddlewareWithProcessShutdown,
# WebSocket middleware
AsgiMiddlewareWithProcessRequestWs[_AReqT],
AsgiMiddlewareWithProcessResourceWs[_AReqT],
# Universal middleware with process_*_async methods
UniversalMiddlewareWithProcessRequest[_AReqT, _ARespT],
UniversalMiddlewareWithProcessResource[_AReqT, _ARespT],
UniversalMiddlewareWithProcessResponse[_AReqT, _ARespT],
]
"""Asynchronous (ASGI) application middleware.
This type alias reflects the middleware interface for components that can be
used with an ASGI app.
"""
| UniversalMiddlewareWithProcessResponse |
python | huggingface__transformers | src/transformers/models/whisper/modeling_whisper.py | {
"start": 62321,
"end": 67612
} | class ____(WhisperPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.encoder = WhisperEncoder(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_encoder(self):
"""
Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
not be updated during training. Only the projection layers and classification head will be updated.
"""
self.encoder._freeze_parameters()
def get_input_embeddings(self) -> nn.Module:
return self.encoder.get_input_embeddings()
def set_input_embeddings(self, value: nn.Module):
self.encoder.set_input_embeddings(value)
@auto_docstring
def forward(
self,
input_features: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, WhisperForAudioClassification
>>> from datasets import load_dataset
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")
>>> model = WhisperForAudioClassification.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")
>>> ds = load_dataset("google/fleurs", "all", split="validation", streaming=True)
>>> sample = next(iter(ds))
>>> inputs = feature_extractor(
... sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"], return_tensors="pt"
... )
>>> input_features = inputs.input_features
>>> with torch.no_grad():
... logits = model(input_features).logits
>>> predicted_class_ids = torch.argmax(logits).item()
>>> predicted_label = model.config.id2label[predicted_class_ids]
>>> predicted_label
'Afrikaans'
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if self.config.use_weighted_layer_sum:
output_hidden_states = True
elif output_hidden_states is None:
output_hidden_states = self.config.output_hidden_states
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_features,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = encoder_outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
else:
hidden_states = encoder_outputs[0]
hidden_states = self.projector(hidden_states)
pooled_output = hidden_states.mean(dim=1)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# move labels to correct device to enable PP
labels = labels.to(logits.device)
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + encoder_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
__all__ = [
"WhisperForCausalLM",
"WhisperForConditionalGeneration",
"WhisperModel",
"WhisperPreTrainedModel",
"WhisperForAudioClassification",
]
| WhisperForAudioClassification |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/via_type_of.py | {
"start": 320,
"end": 1036
} | class ____:
def __init__(self, x: int, y: str, z: str) -> None:
self.x: int = x
self.y: str = y
self.z: Annotated[str, "test1"] = z
def test1_alarm1():
# always-via-type:int
c = Test1_C(**_test_source())
_test_sink(c.x)
def test1_alarm2():
# always-via-type:str
c = Test1_C(**_test_source())
_test_sink(c.y)
def test1_alarm3():
# always-via-type:typing.Annotated[str]
c = Test1_C(**_test_source())
_test_sink(c.z)
def test1_alarm4(foo):
# via-type:int, via-type:str, via-type:typing.Annotated[str]
c = Test1_C(**_test_source())
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
@dataclass
| Test1_C |
python | ApeWorX__ape | src/ape_ethereum/transactions.py | {
"start": 6673,
"end": 7138
} | class ____(DynamicFeeTransaction):
"""
`EIP-4844 <https://eips.ethereum.org/EIPS/eip-4844>`__ transactions.
"""
max_fee_per_blob_gas: HexInt = Field(default=0, alias="maxFeePerBlobGas")
blob_versioned_hashes: list[HexBytes] = Field(default_factory=list, alias="blobVersionedHashes")
receiver: AddressType = Field(default=ZERO_ADDRESS, alias="to")
"""
Overridden because EIP-4844 states it cannot be nil.
"""
| SharedBlobTransaction |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/middle_adds_virtual/package.py | {
"start": 216,
"end": 429
} | class ____(Package):
url = "http://www.example.com/"
url = "http://www.example.com/2.0.tar.gz"
version("1.0", md5="abcdef1234567890abcdef1234567890")
depends_on("leaf-adds-virtual")
| MiddleAddsVirtual |
python | openai__openai-python | src/openai/types/audio/transcription.py | {
"start": 372,
"end": 630
} | class ____(BaseModel):
token: Optional[str] = None
"""The token in the transcription."""
bytes: Optional[List[float]] = None
"""The bytes of the token."""
logprob: Optional[float] = None
"""The log probability of the token."""
| Logprob |
python | doocs__leetcode | lcof/面试题20. 表示数值的字符串/Solution.py | {
"start": 0,
"end": 811
} | class ____:
def isNumber(self, s: str) -> bool:
i, j = 0, len(s) - 1
while i < j and s[i] == " ":
i += 1
while i <= j and s[j] == " ":
j -= 1
if i > j:
return False
digit = dot = e = False
while i <= j:
if s[i] in "+-":
if i and s[i - 1] not in " eE":
return False
elif s[i].isdigit():
digit = True
elif s[i] == ".":
if dot or e:
return False
dot = True
elif s[i] in "eE":
if not digit or e:
return False
e, digit = True, False
else:
return False
i += 1
return digit
| Solution |
python | jina-ai__jina | jina/orchestrate/pods/container.py | {
"start": 10737,
"end": 16460
} | class ____(BasePod):
"""
:class:`ContainerPod` starts a runtime of :class:`BaseRuntime` inside a container. It leverages :class:`multiprocessing.Process` to manage the logs and the lifecycle of docker container object in a robust way.
"""
def __init__(self, args: 'argparse.Namespace'):
super().__init__(args)
if (
self.args.docker_kwargs
and 'extra_hosts' in self.args.docker_kwargs
and __docker_host__ in self.args.docker_kwargs['extra_hosts']
):
self.args.docker_kwargs.pop('extra_hosts')
self._net_mode = None
self.worker = None
self.container_name = slugify(f'{self.name}/{random_name()}')
self.net_mode, self.runtime_ctrl_address = self._get_control_address()
def _get_control_address(self):
import docker
client = docker.from_env()
try:
network = get_docker_network(client)
if (
self.args.docker_kwargs
and 'extra_hosts' in self.args.docker_kwargs
and __docker_host__ in self.args.docker_kwargs['extra_hosts']
):
ctrl_host = __docker_host__
elif network:
# If the caller is already in a docker network, replace ctrl-host with network gateway
try:
ctrl_host = client.networks.get(network).attrs['IPAM']['Config'][0][
'Gateway'
]
except:
ctrl_host = __docker_host__
else:
ctrl_host = self.args.host
ctrl_address = f'{ctrl_host}:{self.args.port[0]}'
net_mode, runtime_ctrl_address = self._get_network_for_dind_linux(
client, ctrl_address
)
finally:
client.close()
return net_mode, runtime_ctrl_address
def _get_network_for_dind_linux(self, client: 'DockerClient', ctrl_address: str):
import sys
from platform import uname
# Related to potential docker-in-docker communication. If `Runtime` lives already inside a container.
# it will need to communicate using the `bridge` network.
# In WSL, we need to set ports explicitly
net_mode, runtime_ctrl_address = (
getattr(self.args, 'force_network_mode', DockerNetworkMode.AUTO),
ctrl_address,
)
if sys.platform in ('linux', 'linux2') and 'microsoft' not in uname().release:
if net_mode == DockerNetworkMode.AUTO:
net_mode = DockerNetworkMode.HOST
if net_mode != DockerNetworkMode.NONE:
try:
bridge_network = client.networks.get('bridge')
if bridge_network:
runtime_ctrl_address = f'{bridge_network.attrs["IPAM"]["Config"][0]["Gateway"]}:{self.args.port[0]}'
except Exception as ex:
self.logger.warning(
f'Unable to set control address from "bridge" network: {ex!r}'
f' Control address set to {runtime_ctrl_address}'
)
if net_mode in {DockerNetworkMode.AUTO, DockerNetworkMode.NONE}:
net_mode = None
else:
net_mode = net_mode.to_string().lower()
return net_mode, runtime_ctrl_address
@property
def _container(self):
import docker
client = docker.from_env()
container = None
try:
container = client.containers.get(self.container_name)
finally:
client.close()
return container
def start(self):
"""Start the ContainerPod.
.. #noqa: DAR201
"""
self.worker = multiprocessing.Process(
target=run,
kwargs={
'args': self.args,
'name': self.name,
'container_name': self.container_name,
'net_mode': self.net_mode,
'runtime_ctrl_address': self.runtime_ctrl_address,
'envs': self._envs,
'is_started': self.is_started,
'is_signal_handlers_installed': self.is_signal_handlers_installed,
'is_shutdown': self.is_shutdown,
'is_ready': self.is_ready,
},
daemon=True,
)
self.worker.start()
if not self.args.noblock_on_start:
self.wait_start_success()
return self
def _terminate(self):
"""Terminate the Pod.
This method kills the container inside the Pod
"""
# terminate the docker
try:
self._container.stop()
finally:
self.is_shutdown.wait(self.args.timeout_ctrl)
self.logger.debug(f'terminating the runtime process')
self.worker.terminate()
self.logger.debug(f'runtime process properly terminated')
def join(self, *args, **kwargs):
"""Joins the Pod.
:param args: extra positional arguments to pass to join
:param kwargs: extra keyword arguments to pass to join
"""
import docker
client = docker.from_env()
try:
container_id = self._container.id
containers = client.containers.list()
while container_id in containers:
time.sleep(0.1)
containers = client.containers.list()
except docker.errors.NotFound:
pass
self.logger.debug(f'joining the process')
self.worker.join(timeout=10, *args, **kwargs)
self.logger.debug(f'successfully joined the process')
| ContainerPod |
python | kamyu104__LeetCode-Solutions | Python/number-of-student-replacements.py | {
"start": 42,
"end": 365
} | class ____(object):
def totalReplacements(self, ranks):
"""
:type ranks: List[int]
:rtype: int
"""
result = -1
mn = float("inf")
for x in ranks:
if x >= mn:
continue
mn = x
result += 1
return result
| Solution |
python | huggingface__transformers | tests/models/pix2struct/test_modeling_pix2struct.py | {
"start": 14809,
"end": 27474
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Pix2StructForConditionalGeneration,) if is_torch_available() else ()
pipeline_model_mapping = (
{"image-to-text": Pix2StructForConditionalGeneration, "image-text-to-text": Pix2StructForConditionalGeneration}
if is_torch_available()
else {}
)
test_resize_embeddings = True
test_attention_outputs = False
def setUp(self):
self.model_tester = Pix2StructModelTester(self)
def test_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
output = model(**input_dict)
self.assertEqual(
output[1].shape,
(
self.model_tester.vision_model_tester.batch_size,
self.model_tester.text_model_tester.seq_length,
self.model_tester.text_model_tester.vocab_size,
),
)
def test_generative_model(self):
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_generative_model_classes:
model = model_class(config).eval().to(torch_device)
output = model.generate(**input_dict, use_cache=False, min_new_tokens=10, max_new_tokens=10)
output_use_cache = model.generate(**input_dict, use_cache=True, min_new_tokens=10, max_new_tokens=10)
torch.testing.assert_close(output, output_use_cache)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="Pix2StructModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"flattened_patches",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
"encoder_outputs",
"past_key_values",
"labels",
"decoder_inputs_embeds",
"use_cache",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
for model_class in self.all_model_classes[:-1]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
# hardcode labels to be the same as input_ids
inputs["labels"] = inputs["input_ids"]
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
self.skipTest(reason="model_tester.is_training is set to False")
for model_class in self.all_model_classes[:-1]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
# hardcode labels to be the same as input_ids
inputs["labels"] = inputs["input_ids"]
loss = model(**inputs).loss
loss.backward()
# overwrite because `vocab_size` is not an attribute of `Pix2StructConfig` but rather `Pix2StructTextConfig`
def test_resize_tokens_embeddings(self):
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
self.skipTest(reason="test_resize_embeddings is set to False")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config)
model.to(torch_device)
if self.model_tester.is_training is False:
model.eval()
model_vocab_size = config.text_config.vocab_size
# Retrieve the embeddings and clone theme
model_embed = model.resize_token_embeddings(model_vocab_size)
cloned_embeddings = model_embed.weight.clone()
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model_embed = model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Decoder input ids should be clamped to the maximum size of the vocabulary
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that adding and removing tokens has not modified the first part of the embedding matrix.
models_equal = True
for p1, p2 in zip(cloned_embeddings, model_embed.weight):
if p1.data.ne(p2.data).sum() > 0:
models_equal = False
self.assertTrue(models_equal)
# overwrite because `vocab_size` is not an attribute of `Pix2StructConfig` but rather `Pix2StructTextConfig`
def test_resize_embeddings_untied(self):
original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.test_resize_embeddings:
self.skipTest(reason="test_resize_embeddings is set to False")
original_config.tie_word_embeddings = False
# if model cannot untied embeddings -> leave test
if original_config.tie_word_embeddings:
self.skipTest(reason="Model cannot untie embeddings")
for model_class in self.all_model_classes:
config = copy.deepcopy(original_config)
model = model_class(config).to(torch_device)
model.eval()
# if no output embeddings -> leave test
if model.get_output_embeddings() is None:
continue
# Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
model_vocab_size = config.text_config.vocab_size
model.resize_token_embeddings(model_vocab_size + 10)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10)
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
# Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
model.resize_token_embeddings(model_vocab_size - 15)
self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15)
# Check that it actually resizes the embeddings matrix
output_embeds = model.get_output_embeddings()
self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
# Check bias if present
if output_embeds.bias is not None:
self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
# Decoder input ids should be clamped to the maximum size of the vocabulary
if "decoder_input_ids" in inputs_dict:
inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
# Check that the model can still do a forward pass successfully (every parameter should be resized)
model(**self._prepare_for_class(inputs_dict, model_class))
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save Pix2StructConfig and check if we can load Pix2StructVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = Pix2StructVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save Pix2StructConfig and check if we can load Pix2StructTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = Pix2StructTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length):
# overwrite because # pix2struct seq length depends on image inputs
prompt_length = self.model_tester.max_patches
encoder_expected_shape = (batch_size, config.num_attention_heads, prompt_length, prompt_length)
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[layer_attentions.shape for layer_attentions in attentions],
[encoder_expected_shape] * len(attentions),
)
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length):
# overwrite because # pix2struct seq length depends on image inputs
prompt_length = self.model_tester.max_patches
encoder_expected_shape = (batch_size, prompt_length, config.hidden_size)
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states],
[encoder_expected_shape] * len(hidden_states),
)
@unittest.skip("Pix2Struct has no base model, it was implemented before standardization")
def test_model_base_model_prefix(self):
pass
# We will verify our results on an image of a stop sign
def prepare_img():
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_torch
@slow
| Pix2StructModelTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-monday/unit_tests/integrations/monday_responses/records/boards_record_builder.py | {
"start": 187,
"end": 464
} | class ____(MondayRecordBuilder):
@classmethod
def boards_record(cls) -> "BoardsRecordBuilder":
record_template = cls.extract_record("boards", __file__, NestedPath(["data", "boards", 0]))
return cls(record_template, FieldPath("id"), None)
| BoardsRecordBuilder |
python | python__mypy | test-data/unit/plugins/fnplugin.py | {
"start": 143,
"end": 548
} | class ____(Plugin):
def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] | None:
if fullname == "__main__.f":
return my_hook
assert fullname is not None
return None
def my_hook(ctx: FunctionContext) -> Type:
return ctx.api.named_generic_type("builtins.int", [])
def plugin(version: str) -> type[MyPlugin]:
return MyPlugin
| MyPlugin |
python | huggingface__transformers | src/transformers/models/exaone4/modeling_exaone4.py | {
"start": 16135,
"end": 19829
} | class ____(Exaone4PreTrainedModel):
def __init__(self, config: Exaone4Config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Exaone4DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = Exaone4RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Exaone4RotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
}
if "sliding_attention" in self.config.layer_types:
causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for i, decoder_layer in enumerate(self.layers):
layer_type = self.config.layer_types[i]
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[layer_type],
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
@auto_docstring
| Exaone4Model |
python | getsentry__sentry | src/sentry/preprod/models.py | {
"start": 612,
"end": 12617
} | class ____(DefaultFieldsModel):
"""
A pre-production artifact provided by the user, presumably from their CI/CD pipeline or a manual build.
With this, we can analyze their artifact and provide them with insights to fix _before_
it's released to production.
Examples:
- iOS app builds
- Android app builds
"""
class ArtifactState(IntEnum):
UPLOADING = 0
"""The user has initiated the upload, but it is not yet complete."""
UPLOADED = 1
"""The upload is complete, but the artifact is not yet processed."""
PROCESSED = 3
"""The artifact has been processed and is ready to be used."""
FAILED = 4
"""The artifact failed to upload or process. Read the error_code and error_message for more details."""
@classmethod
def as_choices(cls) -> tuple[tuple[int, str], ...]:
return (
(cls.UPLOADING, "uploading"),
(cls.UPLOADED, "uploaded"),
(cls.PROCESSED, "processed"),
(cls.FAILED, "failed"),
)
class ArtifactType(IntEnum):
XCARCHIVE = 0
"""Apple Xcode archive."""
AAB = 1
"""Android App Bundle."""
APK = 2
"""Android APK."""
@classmethod
def as_choices(cls) -> tuple[tuple[int, str], ...]:
return (
(cls.XCARCHIVE, "xcarchive"),
(cls.AAB, "aab"),
(cls.APK, "apk"),
)
class ErrorCode(IntEnum):
UNKNOWN = 0
"""The error code is unknown. Try to use a descriptive error code if possible."""
UPLOAD_TIMEOUT = 1
"""The upload timed out."""
ARTIFACT_PROCESSING_TIMEOUT = 2
"""The artifact processing timed out."""
ARTIFACT_PROCESSING_ERROR = 3
"""The artifact processing failed."""
@classmethod
def as_choices(cls) -> tuple[tuple[int, str], ...]:
return (
(cls.UNKNOWN, "unknown"),
(cls.UPLOAD_TIMEOUT, "upload_timeout"),
(cls.ARTIFACT_PROCESSING_TIMEOUT, "artifact_processing_timeout"),
(cls.ARTIFACT_PROCESSING_ERROR, "artifact_processing_error"),
)
__relocation_scope__ = RelocationScope.Excluded
project = FlexibleForeignKey("sentry.Project")
# Nullable in case the file upload fails
file_id = BoundedBigIntegerField(db_index=True, null=True)
# The date the artifact was built. E.g. an artifact could be built on 05/21/2025,
# but the user uploaded it on 05/22/2025.
date_built = models.DateTimeField(null=True)
build_configuration = FlexibleForeignKey(
"preprod.PreprodBuildConfiguration", null=True, on_delete=models.SET_NULL
)
state = BoundedPositiveIntegerField(
default=ArtifactState.UPLOADING, choices=ArtifactState.as_choices()
)
# Nullable because we only know the type after the artifact has been processed
artifact_type = BoundedPositiveIntegerField(choices=ArtifactType.as_choices(), null=True)
error_code = BoundedPositiveIntegerField(choices=ErrorCode.as_choices(), null=True)
error_message = models.TextField(null=True)
# E.g. 1.2.300
build_version = models.CharField(max_length=255, null=True)
# E.g. 9999
build_number = BoundedBigIntegerField(null=True)
# Miscellaneous fields that we don't need columns for, e.g. enqueue/dequeue times, user-agent, etc.
extras = models.JSONField(null=True)
commit_comparison = FlexibleForeignKey(
"sentry.CommitComparison", null=True, on_delete=models.SET_NULL
)
# DEPRECATED, soon to be removed
commit = FlexibleForeignKey(
"sentry.Commit", null=True, on_delete=models.SET_NULL, db_constraint=False
)
# Installable file like IPA or APK
installable_app_file_id = BoundedBigIntegerField(db_index=True, null=True)
# The name of the app, e.g. "My App"
app_name = models.CharField(max_length=255, null=True)
# The identifier of the app, e.g. "com.myapp.MyApp"
app_id = models.CharField(max_length=255, null=True)
# An identifier for the main binary
main_binary_identifier = models.CharField(max_length=255, db_index=True, null=True)
# The objectstore id of the app icon
app_icon_id = models.CharField(max_length=255, null=True)
def get_sibling_artifacts_for_commit(self) -> list[PreprodArtifact]:
"""
Get sibling artifacts for the same commit, deduplicated by (app_id, artifact_type).
When multiple artifacts exist for the same (app_id, artifact_type) combination
(e.g., due to reprocessing or CI retries), this method returns only one artifact
per combination to prevent duplicate rows in status checks:
- For the calling artifact's (app_id, artifact_type): Returns the calling artifact itself
- For other combinations: Returns the earliest (oldest) artifact for that combination
Note: Deduplication by both app_id and artifact_type is necessary because
iOS and Android apps can share the same app_id (e.g., "com.example.app").
Results are filtered by the current artifact's organization for security.
Returns:
List of PreprodArtifact objects, deduplicated by (app_id, artifact_type),
ordered by app_id
"""
if not self.commit_comparison:
return []
all_artifacts = PreprodArtifact.objects.filter(
commit_comparison=self.commit_comparison,
project__organization_id=self.project.organization_id,
).order_by("app_id", "artifact_type", "date_added")
artifacts_by_key = defaultdict(list)
for artifact in all_artifacts:
key = (artifact.app_id, artifact.artifact_type)
artifacts_by_key[key].append(artifact)
selected_artifacts = []
for (app_id, artifact_type), artifacts in artifacts_by_key.items():
if self.app_id == app_id and self.artifact_type == artifact_type:
selected_artifacts.append(self)
else:
selected_artifacts.append(artifacts[0])
selected_artifacts.sort(key=lambda a: a.app_id or "")
return selected_artifacts
def get_base_artifact_for_commit(
self, artifact_type: ArtifactType | None = None
) -> models.QuerySet[PreprodArtifact]:
"""
Get the base artifact for the same commit comparison (monorepo scenario).
Multiple artifacts can share the same commit comparison, but only one should
match the same (app_id, artifact_type, build_configuration) combination.
"""
if not self.commit_comparison:
return PreprodArtifact.objects.none()
base_commit_comparisons_qs = CommitComparison.objects.filter(
head_sha=self.commit_comparison.base_sha,
organization_id=self.project.organization_id,
).order_by("date_added")
base_commit_comparisons = list(base_commit_comparisons_qs)
if len(base_commit_comparisons) == 0:
return PreprodArtifact.objects.none()
elif len(base_commit_comparisons) == 1:
base_commit_comparison = base_commit_comparisons[0]
else:
logger.warning(
"preprod.models.get_base_artifact_for_commit.multiple_base_commit_comparisons",
extra={
"head_sha": self.commit_comparison.head_sha,
"organization_id": self.project.organization_id,
"base_commit_comparison_ids": [c.id for c in base_commit_comparisons],
},
)
sentry_sdk.capture_message(
"Multiple base commitcomparisons found",
level="error",
extras={
"sha": self.commit_comparison.head_sha,
},
)
# Take first (oldest) commit comparison
base_commit_comparison = base_commit_comparisons[0]
return PreprodArtifact.objects.filter(
commit_comparison=base_commit_comparison,
project__organization_id=self.project.organization_id,
app_id=self.app_id,
artifact_type=artifact_type if artifact_type is not None else self.artifact_type,
build_configuration=self.build_configuration,
)
def get_head_artifacts_for_commit(
self, artifact_type: ArtifactType | None = None
) -> models.QuerySet[PreprodArtifact]:
"""
Get all head artifacts for the same commit comparison (monorepo scenario).
There can be multiple head artifacts for a commit comparison, as multiple
CommitComparisons can have the same base SHA.
"""
if not self.commit_comparison:
return PreprodArtifact.objects.none()
head_commit_comparisons = CommitComparison.objects.filter(
base_sha=self.commit_comparison.head_sha,
organization_id=self.project.organization_id,
)
return PreprodArtifact.objects.filter(
commit_comparison__in=head_commit_comparisons,
project__organization_id=self.project.organization_id,
app_id=self.app_id,
artifact_type=artifact_type if artifact_type is not None else self.artifact_type,
)
def get_size_metrics(
self,
metrics_artifact_type: PreprodArtifactSizeMetrics.MetricsArtifactType | None = None,
identifier: str | None = None,
) -> models.QuerySet[PreprodArtifactSizeMetrics]:
"""Get size metrics for this artifact with optional filtering."""
queryset = self.preprodartifactsizemetrics_set.all()
if metrics_artifact_type is not None:
queryset = queryset.filter(metrics_artifact_type=metrics_artifact_type)
if identifier is not None:
queryset = queryset.filter(identifier=identifier)
return queryset
@classmethod
def get_size_metrics_for_artifacts(
cls,
artifacts: models.QuerySet[PreprodArtifact] | list[PreprodArtifact],
metrics_artifact_type: PreprodArtifactSizeMetrics.MetricsArtifactType | None = None,
identifier: str | None = None,
) -> dict[int, models.QuerySet[PreprodArtifactSizeMetrics]]:
"""
Get size metrics for multiple artifacts using a single query.
Returns:
Dict mapping artifact_id -> QuerySet of size metrics
"""
from sentry.preprod.models import PreprodArtifactSizeMetrics
if isinstance(artifacts, list):
artifact_ids = [a.id for a in artifacts]
else:
artifact_ids = list(artifacts.values_list("id", flat=True))
if not artifact_ids:
return {}
queryset = PreprodArtifactSizeMetrics.objects.filter(preprod_artifact_id__in=artifact_ids)
if metrics_artifact_type is not None:
queryset = queryset.filter(metrics_artifact_type=metrics_artifact_type)
if identifier is not None:
queryset = queryset.filter(identifier=identifier)
# Group results by artifact_id
results: dict[int, models.QuerySet[PreprodArtifactSizeMetrics]] = {}
for artifact_id in artifact_ids:
results[artifact_id] = queryset.filter(preprod_artifact_id=artifact_id)
return results
def is_android(self) -> bool:
return (
self.artifact_type == self.ArtifactType.AAB
or self.artifact_type == self.ArtifactType.APK
)
def is_ios(self) -> bool:
return self.artifact_type == self.ArtifactType.XCARCHIVE
def get_platform_label(self) -> str | None:
if self.is_android():
return "Android"
elif self.is_ios():
return "iOS"
return None
class Meta:
app_label = "preprod"
db_table = "sentry_preprodartifact"
@region_silo_model
| PreprodArtifact |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/pipeline.py | {
"start": 6790,
"end": 7445
} | class ____(graphene.ObjectType):
"""The primary dimension of a multipartitioned asset is the time-partitioned dimension.
If both dimensions of the asset are static or time-partitioned, the primary dimension is
the first defined dimension.
"""
primaryDimStartKey = graphene.NonNull(graphene.String)
primaryDimEndKey = graphene.NonNull(graphene.String)
primaryDimStartTime = graphene.Field(graphene.Float)
primaryDimEndTime = graphene.Field(graphene.Float)
secondaryDim = graphene.NonNull(GraphenePartitionStatus1D)
class Meta:
name = "MaterializedPartitionRangeStatuses2D"
| GrapheneMultiPartitionRangeStatuses |
python | doocs__leetcode | solution/1400-1499/1476.Subrectangle Queries/Solution.py | {
"start": 0,
"end": 719
} | class ____:
def __init__(self, rectangle: List[List[int]]):
self.g = rectangle
self.ops = []
def updateSubrectangle(
self, row1: int, col1: int, row2: int, col2: int, newValue: int
) -> None:
self.ops.append((row1, col1, row2, col2, newValue))
def getValue(self, row: int, col: int) -> int:
for r1, c1, r2, c2, v in self.ops[::-1]:
if r1 <= row <= r2 and c1 <= col <= c2:
return v
return self.g[row][col]
# Your SubrectangleQueries object will be instantiated and called as such:
# obj = SubrectangleQueries(rectangle)
# obj.updateSubrectangle(row1,col1,row2,col2,newValue)
# param_2 = obj.getValue(row,col)
| SubrectangleQueries |
python | allegroai__clearml | examples/hyperdatasets/dataview_pytorch_dataloader.py | {
"start": 237,
"end": 4386
} | class ____(torch.utils.data.IterableDataset):
"""PyTorch IterableDataset wrapper around a DataView."""
def __init__(self, query_kwargs: Dict[str, Any], projection: Iterable[str] = None):
super().__init__()
self._query_kwargs = dict(query_kwargs)
self._projection = list(projection) if projection else None
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
# ClearML DataView streaming the requested dataset frames
dv = DataView(auto_connect_with_task=False)
dv.add_query(**self._query_kwargs)
iterator = dv.get_iterator(
projection=self._projection,
num_workers=worker_info.num_workers if worker_info else None,
worker_index=worker_info.id if worker_info else None,
)
for entry in iterator:
if hasattr(entry, "to_api_object"):
yield entry.to_api_object()
elif isinstance(entry, dict):
yield entry
else:
yield entry
def parse_args():
parser = argparse.ArgumentParser(description="Stream HyperDataset frames with PyTorch DataLoader")
name_group = parser.add_argument_group("Name-based selection")
name_group.add_argument("--project", help="ClearML project name containing the dataset", default=None)
name_group.add_argument("--dataset-name", help="Dataset collection name", default=None)
name_group.add_argument("--version-name", help="Dataset version name", default=None)
id_group = parser.add_argument_group("ID-based selection")
id_group.add_argument("--dataset-id", help="Dataset collection id", default=None)
id_group.add_argument("--version-id", help="Dataset version id", default=None)
parser.add_argument("--projection", nargs="*", help="Optional projection fields", default=None)
parser.add_argument("--batch-size", type=int, default=8, help="DataLoader batch size")
parser.add_argument("--num-workers", type=int, default=0, help="Number of DataLoader worker processes")
parser.add_argument("--max-batches", type=int, default=5, help="Maximum number of batches to display")
return parser.parse_args()
def resolve_dataset(args):
if args.dataset_name and args.dataset_id:
raise ValueError("Provide either --dataset-name or --dataset-id, not both")
if args.version_name and args.version_id:
raise ValueError("Provide either --version-name or --version-id, not both")
if args.dataset_id:
dataset = HyperDatasetManagement.get(dataset_id=args.dataset_id, version_id=args.version_id)
else:
if not args.dataset_name:
raise ValueError("Either --dataset-id or --dataset-name must be supplied")
# Resolve ClearML dataset/version identifiers
dataset = HyperDatasetManagement.get(
dataset_name=args.dataset_name,
version_name=args.version_name,
project_name=args.project,
)
return dataset
def main():
args = parse_args()
dataset = resolve_dataset(args)
# ClearML query parameters targeting the chosen HyperDataset version
query_kwargs = {
"project_id": dataset.project_id or "*",
"dataset_id": dataset.dataset_id,
"version_id": dataset.version_id,
}
ds = HyperDatasetIterable(query_kwargs=query_kwargs, projection=args.projection)
loader = torch.utils.data.DataLoader(ds, batch_size=args.batch_size, num_workers=args.num_workers)
print(
f"Streaming frames from dataset_id={dataset.dataset_id} version_id={dataset.version_id} "
f"with batch_size={args.batch_size} num_workers={args.num_workers}"
)
for batch_index, batch in enumerate(loader):
if batch_index >= args.max_batches:
break
if isinstance(batch, dict):
summary = {k: v if isinstance(v, (list, tuple)) else type(v).__name__ for k, v in batch.items()}
else:
summary = batch
print(f"Batch {batch_index} -> {summary}")
if __name__ == "__main__":
os.environ.setdefault("CLEARML_LOG_LEVEL", "INFO")
main()
| HyperDatasetIterable |
python | PrefectHQ__prefect | tests/runtime/test_task_run.py | {
"start": 3818,
"end": 4359
} | class ____:
async def test_run_count_is_attribute(self):
assert "run_count" in dir(task_run)
async def test_run_count_is_zero_when_not_set(self):
assert task_run.run_count == 0
async def test_run_count_returns_run_count_when_present_dynamically(self):
assert task_run.run_count == 0
with TaskRunContext.model_construct(
task_run=TaskRun.model_construct(id="foo", run_count=10)
):
assert task_run.run_count == 10
assert task_run.run_count == 0
| TestRunCount |
python | coleifer__peewee | tests/manytomany.py | {
"start": 570,
"end": 827
} | class ____(TestModel):
user = ForeignKeyField(User, backref='_xx_rel')
note = ForeignKeyField(AltNote, backref='_xx_rel')
class Meta:
primary_key = CompositeKey('user', 'note')
AltThroughDeferred.set_model(AltThroughModel)
| AltThroughModel |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/relativity/relativity.py | {
"start": 14921,
"end": 24477
} | class ____:
def __init__(self, clocks, ref, duration, dt):
self.clocks = clocks
self.ref = ref
self.duration = duration
self.dt = dt
@staticmethod
def hypTStep(dt, v0, x0, tau0, g):
## Hyperbolic step.
## If an object has proper acceleration g and starts at position x0 with speed v0 and proper time tau0
## as seen from an inertial frame, then return the new v, x, tau after time dt has elapsed.
if g == 0:
return v0, x0 + v0*dt, tau0 + dt * (1. - v0**2)**0.5
v02 = v0**2
g2 = g**2
tinit = v0 / (g * (1 - v02)**0.5)
B = (1 + (g2 * (dt+tinit)**2))**0.5
v1 = g * (dt+tinit) / B
dtau = (np.arcsinh(g * (dt+tinit)) - np.arcsinh(g * tinit)) / g
tau1 = tau0 + dtau
x1 = x0 + (1.0 / g) * ( B - 1. / (1.-v02)**0.5 )
return v1, x1, tau1
@staticmethod
def tStep(dt, v0, x0, tau0, g):
## Linear step.
## Probably not as accurate as hyperbolic step, but certainly much faster.
gamma = (1. - v0**2)**-0.5
dtau = dt / gamma
return v0 + dtau * g, x0 + v0*dt, tau0 + dtau
@staticmethod
def tauStep(dtau, v0, x0, t0, g):
## linear step in proper time of clock.
## If an object has proper acceleration g and starts at position x0 with speed v0 at time t0
## as seen from an inertial frame, then return the new v, x, t after proper time dtau has elapsed.
## Compute how much t will change given a proper-time step of dtau
gamma = (1. - v0**2)**-0.5
if g == 0:
dt = dtau * gamma
else:
v0g = v0 * gamma
dt = (np.sinh(dtau * g + np.arcsinh(v0g)) - v0g) / g
#return v0 + dtau * g, x0 + v0*dt, t0 + dt
v1, x1, t1 = Simulation.hypTStep(dt, v0, x0, t0, g)
return v1, x1, t0+dt
@staticmethod
def hypIntersect(x0r, t0r, vr, x0, t0, v0, g):
## given a reference clock (seen from inertial frame) has rx, rt, and rv,
## and another clock starts at x0, t0, and v0, with acceleration g,
## compute the intersection time of the object clock's hyperbolic path with
## the reference plane.
## I'm sure we can simplify this...
if g == 0: ## no acceleration, path is linear (and hyperbola is undefined)
#(-t0r + t0 v0 vr - vr x0 + vr x0r)/(-1 + v0 vr)
t = (-t0r + t0 *v0 *vr - vr *x0 + vr *x0r)/(-1 + v0 *vr)
return t
gamma = (1.0-v0**2)**-0.5
sel = (1 if g>0 else 0) + (1 if vr<0 else 0)
sel = sel%2
if sel == 0:
#(1/(g^2 (-1 + vr^2)))(-g^2 t0r + g gamma vr + g^2 t0 vr^2 -
#g gamma v0 vr^2 - g^2 vr x0 +
#g^2 vr x0r + \[Sqrt](g^2 vr^2 (1 + gamma^2 (v0 - vr)^2 - vr^2 +
#2 g gamma (v0 - vr) (-t0 + t0r + vr (x0 - x0r)) +
#g^2 (t0 - t0r + vr (-x0 + x0r))^2)))
t = (1./(g**2 *(-1. + vr**2)))*(-g**2 *t0r + g *gamma *vr + g**2 *t0 *vr**2 - g *gamma *v0 *vr**2 - g**2 *vr *x0 + g**2 *vr *x0r + np.sqrt(g**2 *vr**2 *(1. + gamma**2 *(v0 - vr)**2 - vr**2 + 2 *g *gamma *(v0 - vr)* (-t0 + t0r + vr *(x0 - x0r)) + g**2 *(t0 - t0r + vr* (-x0 + x0r))**2)))
else:
#-(1/(g^2 (-1 + vr^2)))(g^2 t0r - g gamma vr - g^2 t0 vr^2 +
#g gamma v0 vr^2 + g^2 vr x0 -
#g^2 vr x0r + \[Sqrt](g^2 vr^2 (1 + gamma^2 (v0 - vr)^2 - vr^2 +
#2 g gamma (v0 - vr) (-t0 + t0r + vr (x0 - x0r)) +
#g^2 (t0 - t0r + vr (-x0 + x0r))^2)))
t = -(1./(g**2 *(-1. + vr**2)))*(g**2 *t0r - g *gamma* vr - g**2 *t0 *vr**2 + g *gamma *v0 *vr**2 + g**2* vr* x0 - g**2 *vr *x0r + np.sqrt(g**2* vr**2 *(1. + gamma**2 *(v0 - vr)**2 - vr**2 + 2 *g *gamma *(v0 - vr) *(-t0 + t0r + vr *(x0 - x0r)) + g**2 *(t0 - t0r + vr *(-x0 + x0r))**2)))
return t
def run(self):
nPts = int(self.duration/self.dt)+1
for cl in self.clocks.values():
cl.init(nPts)
if self.ref is None:
self.runInertial(nPts)
else:
self.runReference(nPts)
def runInertial(self, nPts):
clocks = self.clocks
dt = self.dt
tVals = np.linspace(0, dt*(nPts-1), nPts)
for cl in self.clocks.values():
for i in range(1,nPts):
nextT = tVals[i]
while True:
tau1, tau2 = cl.accelLimits()
x = cl.x
v = cl.v
tau = cl.pt
g = cl.acceleration()
v1, x1, tau1 = self.hypTStep(dt, v, x, tau, g)
if tau1 > tau2:
dtau = tau2-tau
cl.v, cl.x, cl.t = self.tauStep(dtau, v, x, cl.t, g)
cl.pt = tau2
else:
cl.v, cl.x, cl.pt = v1, x1, tau1
cl.t += dt
if cl.t >= nextT:
cl.refx = cl.x
cl.refv = cl.v
cl.reft = cl.t
cl.recordFrame(i)
break
def runReference(self, nPts):
clocks = self.clocks
ref = self.ref
dt = self.dt
dur = self.duration
## make sure reference clock is not present in the list of clocks--this will be handled separately.
clocks = clocks.copy()
for k,v in clocks.items():
if v is ref:
del clocks[k]
break
ref.refx = 0
ref.refv = 0
ref.refm = ref.m0
## These are the set of proper times (in the reference frame) that will be simulated
ptVals = np.linspace(ref.pt, ref.pt + dt*(nPts-1), nPts)
for i in range(1,nPts):
## step reference clock ahead one time step in its proper time
nextPt = ptVals[i] ## this is where (when) we want to end up
while True:
tau1, tau2 = ref.accelLimits()
dtau = min(nextPt-ref.pt, tau2-ref.pt) ## do not step past the next command boundary
g = ref.acceleration()
v, x, t = Simulation.tauStep(dtau, ref.v, ref.x, ref.t, g)
ref.pt += dtau
ref.v = v
ref.x = x
ref.t = t
ref.reft = ref.pt
if ref.pt >= nextPt:
break
#else:
#print "Stepped to", tau2, "instead of", nextPt
ref.recordFrame(i)
## determine plane visible to reference clock
## this plane goes through the point ref.x, ref.t and has slope = ref.v
## update all other clocks
for cl in clocks.values():
while True:
g = cl.acceleration()
tau1, tau2 = cl.accelLimits()
##Given current position / speed of clock, determine where it will intersect reference plane
#t1 = (ref.v * (cl.x - cl.v * cl.t) + (ref.t - ref.v * ref.x)) / (1. - cl.v)
t1 = Simulation.hypIntersect(ref.x, ref.t, ref.v, cl.x, cl.t, cl.v, g)
dt1 = t1 - cl.t
## advance clock by correct time step
v, x, tau = Simulation.hypTStep(dt1, cl.v, cl.x, cl.pt, g)
## check to see whether we have gone past an acceleration command boundary.
## if so, we must instead advance the clock to the boundary and start again
if tau < tau1:
dtau = tau1 - cl.pt
cl.v, cl.x, cl.t = Simulation.tauStep(dtau, cl.v, cl.x, cl.t, g)
cl.pt = tau1-0.000001
continue
if tau > tau2:
dtau = tau2 - cl.pt
cl.v, cl.x, cl.t = Simulation.tauStep(dtau, cl.v, cl.x, cl.t, g)
cl.pt = tau2
continue
## Otherwise, record the new values and exit the loop
cl.v = v
cl.x = x
cl.pt = tau
cl.t = t1
cl.m = None
break
## transform position into reference frame
x = cl.x - ref.x
t = cl.t - ref.t
gamma = (1.0 - ref.v**2) ** -0.5
vg = -ref.v * gamma
cl.refx = gamma * (x - ref.v * t)
cl.reft = ref.pt # + gamma * (t - ref.v * x) # this term belongs here, but it should always be equal to 0.
cl.refv = (cl.v - ref.v) / (1.0 - cl.v * ref.v)
cl.refm = None
cl.recordFrame(i)
t += dt
def plot(self, plot):
plot.clear()
for cl in self.clocks.values():
c, p = cl.getCurve()
plot.addItem(c)
plot.addItem(p)
| Simulation |
python | getsentry__sentry | src/sentry/data_export/endpoints/data_export_details.py | {
"start": 669,
"end": 2678
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.DATA_BROWSING
permission_classes = (OrganizationDataExportPermission,)
def get(
self, request: Request, organization: Organization, data_export_id: str
) -> Response | StreamingHttpResponse:
"""
Retrieve information about the temporary file record.
Used to populate page emailed to the user.
"""
try:
data_export = ExportedData.objects.get(id=data_export_id, organization=organization)
except ExportedData.DoesNotExist:
return Response(status=404)
# Check data export permissions
if data_export.query_info.get("project"):
project_ids = [int(project) for project in data_export.query_info.get("project", [])]
projects = Project.objects.filter(organization=organization, id__in=project_ids)
if any(p for p in projects if not request.access.has_project_access(p)):
raise PermissionDenied(
detail="You don't have permissions to view some of the data this export contains."
)
# Ignore the download parameter unless we have a file to stream
if request.GET.get("download") is not None and data_export._get_file() is not None:
return self.download(data_export)
return Response(serialize(data_export, request.user))
def download(self, data_export: ExportedData) -> StreamingHttpResponse:
metrics.incr("dataexport.download", sample_rate=1.0)
file = data_export._get_file()
assert file is not None
raw_file = file.getfile()
response = StreamingHttpResponse(
iter(lambda: raw_file.read(4096), b""), content_type="text/csv"
)
response["Content-Length"] = file.size
response["Content-Disposition"] = f'attachment; filename="{file.name}"'
return response
| DataExportDetailsEndpoint |
python | numba__numba | numba/tests/test_ufuncs.py | {
"start": 2560,
"end": 5451
} | class ____(MemoryLeakMixin):
def setUp(self):
super(BaseUFuncTest, self).setUp()
self.inputs = [
(np.uint32(0), types.uint32),
(np.uint32(1), types.uint32),
(np.int32(-1), types.int32),
(np.int32(0), types.int32),
(np.int32(1), types.int32),
(np.uint64(0), types.uint64),
(np.uint64(1), types.uint64),
(np.int64(-1), types.int64),
(np.int64(0), types.int64),
(np.int64(1), types.int64),
(np.float32(-0.5), types.float32),
(np.float32(0.0), types.float32),
(np.float32(0.5), types.float32),
(np.float64(-0.5), types.float64),
(np.float64(0.0), types.float64),
(np.float64(0.5), types.float64),
(np.array([0,1], dtype='u4'), types.Array(types.uint32, 1, 'C')),
(np.array([0,1], dtype='u8'), types.Array(types.uint64, 1, 'C')),
(np.array([-1,0,1], dtype='i4'), types.Array(types.int32, 1, 'C')),
(np.array([-1,0,1], dtype='i8'), types.Array(types.int64, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f4'),
types.Array(types.float32, 1, 'C')),
(np.array([-0.5, 0.0, 0.5], dtype='f8'),
types.Array(types.float64, 1, 'C')),
(np.array([0,1], dtype=np.int8), types.Array(types.int8, 1, 'C')),
(np.array([0,1], dtype=np.int16), types.Array(types.int16, 1, 'C')),
(np.array([0,1], dtype=np.uint8), types.Array(types.uint8, 1, 'C')),
(np.array([0,1], dtype=np.uint16),
types.Array(types.uint16, 1, 'C')),
]
@functools.lru_cache(maxsize=None)
def _compile(self, pyfunc, args, nrt=False):
# NOTE: to test the implementation of Numpy ufuncs, we disable
# rewriting of array expressions.
return njit(args, _nrt=nrt, no_rewrites=True)(pyfunc)
def _determine_output_type(self, input_type, int_output_type=None,
float_output_type=None):
ty = input_type
if isinstance(ty, types.Array):
ndim = ty.ndim
ty = ty.dtype
else:
ndim = 1
if ty in types.signed_domain:
if int_output_type:
output_type = types.Array(int_output_type, ndim, 'C')
else:
output_type = types.Array(ty, ndim, 'C')
elif ty in types.unsigned_domain:
if int_output_type:
output_type = types.Array(int_output_type, ndim, 'C')
else:
output_type = types.Array(ty, ndim, 'C')
else:
if float_output_type:
output_type = types.Array(float_output_type, ndim, 'C')
else:
output_type = types.Array(ty, ndim, 'C')
return output_type
| BaseUFuncTest |
python | ansible__ansible | lib/ansible/_internal/_ssh/_ssh_agent.py | {
"start": 15377,
"end": 15625
} | class ____(PublicKeyMsg):
type: KeyAlgo
p: mpint
q: mpint
g: mpint
y: mpint
comments: unicode_string = dataclasses.field(default=unicode_string(''), compare=False)
@dataclasses.dataclass(order=True, slots=True)
| DSAPublicKeyMsg |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 56369,
"end": 56468
} | class ____(_ConfigBase):
b: float
k1: float
BM25Config = _BM25Config
@dataclass
| _BM25Config |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/conv2d_transpose_test.py | {
"start": 1418,
"end": 13419
} | class ____(test.TestCase):
def testConv2DTransposeSingleStride(self):
with self.cached_session():
for dtype in (dtypes.float32, dtypes.int32):
strides = [1, 1, 1, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 6, 4, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(1, shape=x_shape, name="x", dtype=dtype)
f = constant_op.constant(1, shape=f_shape, name="filter", dtype=dtype)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
# We count the number of cells being added at the locations in the
# output.
# At the center, #cells=kernel_height * kernel_width
# At the corners, #cells=ceil(kernel_height/2) * ceil(kernel_width/2)
# At the borders, #cells=ceil(kernel_height/2)*kernel_width or
# kernel_height * ceil(kernel_width/2)
for n in range(x_shape[0]):
for k in range(f_shape[2]):
for w in range(y_shape[2]):
for h in range(y_shape[1]):
target = 4 * 3
h_in = h > 0 and h < y_shape[1] - 1
w_in = w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 5 * 3
elif h_in or w_in:
target += 2 * 3
if dtype.is_integer:
self.assertAllEqual(target, value[n, h, w, k])
else:
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeSame(self):
with self.cached_session():
for dtype in (dtypes.float32, dtypes.int32):
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 12, 8, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(1, shape=x_shape, name="x", dtype=dtype)
f = constant_op.constant(1, shape=f_shape, name="filter", dtype=dtype)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
value = self.evaluate(output)
for n in range(x_shape[0]):
for k in range(f_shape[2]):
for w in range(y_shape[2]):
for h in range(y_shape[1]):
target = 3
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > 0 and h < y_shape[1] - 1
w_in = w % strides[2] == 0 and w > 0 and w < y_shape[2] - 1
if h_in and w_in:
target += 9
elif h_in or w_in:
target += 3
if dtype.is_integer:
self.assertAllEqual(target, value[n, h, w, k])
else:
self.assertAllClose(target, value[n, h, w, k])
def testConv2DTransposeValid(self):
with self.cached_session():
for dtype in (dtypes.float32, dtypes.int32):
strides = [1, 2, 2, 1]
# Input, output: [batch, height, width, depth]
x_shape = [2, 6, 4, 3]
y_shape = [2, 13, 9, 2]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(1, shape=x_shape, name="x", dtype=dtype)
f = constant_op.constant(1, shape=f_shape, name="filter", dtype=dtype)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in range(x_shape[0]):
for k in range(f_shape[2]):
for w in range(pad, y_shape[2] - pad):
for h in range(pad, y_shape[1] - pad):
target = 3
# We add a case for locations divisible by the stride.
h_in = h % strides[1] == 0 and h > pad and h < y_shape[
1] - 1 - pad
w_in = w % strides[2] == 0 and w > pad and w < y_shape[
2] - 1 - pad
if h_in and w_in:
target += 9
elif h_in or w_in:
target += 3
cache_values[n, h, w, k] = target
# copy values in the border
cache_values[n, :, 0, k] = cache_values[n, :, 1, k]
cache_values[n, :, -1, k] = cache_values[n, :, -2, k]
cache_values[n, 0, :, k] = cache_values[n, 1, :, k]
cache_values[n, -1, :, k] = cache_values[n, -2, :, k]
if dtype.is_integer:
self.assertAllEqual(cache_values, value)
else:
self.assertAllClose(cache_values, value)
@test_util.run_deprecated_v1
def testGradient(self):
self.skipTest("b/262851489: Fix nightly build for GPU.")
x_shape = [2, 6, 4, 3]
f_shape = [3, 3, 2, 3]
y_shape = [2, 12, 8, 2]
strides = [1, 2, 2, 1]
np.random.seed(1) # Make it reproducible.
x_val = np.random.random_sample(x_shape).astype(np.float64)
f_val = np.random.random_sample(f_shape).astype(np.float64)
with self.cached_session():
x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
f = constant_op.constant(f_val, name="f", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME")
err = gradient_checker.compute_gradient_error([x, f], [x_shape, f_shape],
output, y_shape)
print("conv2d_transpose gradient err = %g " % err)
err_tolerance = 0.0006
self.assertLess(err, err_tolerance)
def testConv2DTransposeSingleStrideNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session():
strides = [1, 1, 1, 1]
# Input, output: [batch, depth, height, width, depth]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 6, 4]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = self.evaluate(output)
for n in range(x_shape[0]):
for k in range(f_shape[2]):
for w in range(y_shape[3]):
for h in range(y_shape[2]):
target = 4 * 3.0
h_in = h > 0 and h < y_shape[2] - 1
w_in = w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 5 * 3.0
elif h_in or w_in:
target += 2 * 3.0
self.assertAllClose(target, value[n, k, h, w])
def testConv2DTransposeSameNCHW(self):
# `NCHW` data format is only supported for CUDA device.
if test.is_gpu_available(cuda_only=True):
with self.session():
strides = [1, 1, 2, 2]
# Input, output: [batch, depth, height, width]
x_shape = [2, 3, 6, 4]
y_shape = [2, 2, 12, 8]
# Filter: [kernel_height, kernel_width, output_depth, input_depth]
f_shape = [3, 3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv2d_transpose(
x, f, y_shape, strides=strides, padding="SAME", data_format="NCHW")
value = self.evaluate(output)
for n in range(x_shape[0]):
for k in range(f_shape[2]):
for w in range(y_shape[3]):
for h in range(y_shape[2]):
target = 3.0
# We add a case for locations divisible by the stride.
h_in = h % strides[2] == 0 and h > 0 and h < y_shape[2] - 1
w_in = w % strides[3] == 0 and w > 0 and w < y_shape[3] - 1
if h_in and w_in:
target += 9.0
elif h_in or w_in:
target += 3.0
self.assertAllClose(target, value[n, k, h, w])
  def testConv2DTransposeValidNCHW(self):
    """VALID padding, stride 2, NCHW layout: compare against a hand-built
    expected tensor, including the un-padded border rows/columns."""
    # `NCHW` data format is only supported for CUDA device.
    if test.is_gpu_available(cuda_only=True):
      with self.session():
        strides = [1, 1, 2, 2]
        # Input, output: [batch, depth, height, width]
        x_shape = [2, 3, 6, 4]
        y_shape = [2, 2, 13, 9]
        # Filter: [kernel_height, kernel_width, output_depth, input_depth]
        f_shape = [3, 3, 2, 3]
        x = constant_op.constant(
            1.0, shape=x_shape, name="x", dtype=dtypes.float32)
        f = constant_op.constant(
            1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
        output = nn_ops.conv2d_transpose(
            x, f, y_shape, strides=strides, padding="VALID", data_format="NCHW")
        value = self.evaluate(output)

        # Expected values are built interior-first, then the borders are
        # copied from the adjacent interior row/column below.
        cache_values = np.zeros(y_shape, dtype=np.float32)

        # The amount of padding added
        pad = 1

        for n in range(x_shape[0]):
          for k in range(f_shape[2]):
            for w in range(pad, y_shape[3] - pad):
              for h in range(pad, y_shape[2] - pad):
                # Every interior pixel sees at least 3 input contributions.
                target = 3.0
                # We add a case for locations divisible by the stride.
                h_in = h % strides[2] == 0 and h > pad and h < y_shape[
                    2] - 1 - pad
                w_in = w % strides[3] == 0 and w > pad and w < y_shape[
                    3] - 1 - pad
                if h_in and w_in:
                  target += 9.0
                elif h_in or w_in:
                  target += 3.0
                cache_values[n, k, h, w] = target

            # copy values in the border
            cache_values[n, k, :, 0] = cache_values[n, k, :, 1]
            cache_values[n, k, :, -1] = cache_values[n, k, :, -2]
            cache_values[n, k, 0, :] = cache_values[n, k, 1, :]
            cache_values[n, k, -1, :] = cache_values[n, k, -2, :]

        self.assertAllClose(cache_values, value)
def testConv2DTransposeShapeInference(self):
# Test case for 8972
initializer = random_ops.truncated_normal(
[3, 3, 5, 1], mean=0.0, stddev=0.01, dtype=dtypes.float32)
x = variables.Variable(random_ops.random_normal([3, 10, 5, 1]))
f = variable_scope.get_variable("f", initializer=initializer)
f_shape = array_ops_stack.stack([array_ops.shape(x)[0], 10, 5, 5])
output = nn_ops.conv2d_transpose(
x, f, f_shape, strides=[1, 1, 1, 1], padding="SAME")
self.assertEqual(output.get_shape().as_list(), [3, 10, 5, 5])
def testConv2DTransposeInvalidOutputShape(self):
with self.session():
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
op = nn_ops.conv2d_transpose(
input=np.ones((1, 1, 1, 1)),
filters=np.ones((1, 1, 1, 1)),
output_shape=[2, -2],
strides=[1])
self.evaluate(op)
def testConv2DTransposeLargeOutputShape(self):
# On GPU, this test does try to allocate the output tensor and OOMs.
with test_util.device(use_gpu=False):
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
op = nn_ops.conv2d_transpose(
input=np.ones((2, 2, 2, 2)),
output_shape=[114078056, 179835296],
strides=[10],
filters=[[[[1]]]])
self.evaluate(op)
# Standard TensorFlow test entry point: discover and run all tests above.
if __name__ == "__main__":
  test.main()
| Conv2DTransposeTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/index_store/keyval_index_store.py | {
"start": 419,
"end": 4144
class ____(BaseIndexStore):
    """
    Key-Value Index store.

    Persists each serialized ``IndexStruct`` under its ``index_id`` inside a
    single collection named ``<namespace><collection_suffix>``.

    Args:
        kvstore (BaseKVStore): key-value store
        namespace (str): namespace for the index store
        collection_suffix (str): suffix for the collection name

    """

    def __init__(
        self,
        kvstore: BaseKVStore,
        namespace: Optional[str] = None,
        collection_suffix: Optional[str] = None,
    ) -> None:
        """Init a KVIndexStore."""
        self._kvstore = kvstore
        # Falsy values (None/empty) fall back to the module defaults.
        self._namespace = namespace or DEFAULT_NAMESPACE
        self._collection_suffix = collection_suffix or DEFAULT_COLLECTION_SUFFIX
        self._collection = f"{self._namespace}{self._collection_suffix}"

    def add_index_struct(self, index_struct: IndexStruct) -> None:
        """
        Add an index struct.

        Args:
            index_struct (IndexStruct): index struct

        """
        self._kvstore.put(
            index_struct.index_id,
            index_struct_to_json(index_struct),
            collection=self._collection,
        )

    def delete_index_struct(self, key: str) -> None:
        """
        Delete an index struct.

        Args:
            key (str): index struct key

        """
        self._kvstore.delete(key, collection=self._collection)

    def get_index_struct(
        self, struct_id: Optional[str] = None
    ) -> Optional[IndexStruct]:
        """
        Get an index struct.

        Args:
            struct_id (Optional[str]): index struct id

        """
        if struct_id is not None:
            blob = self._kvstore.get(struct_id, collection=self._collection)
            return None if blob is None else json_to_index_struct(blob)
        # No id supplied: the store must contain exactly one struct.
        all_structs = self.index_structs()
        assert len(all_structs) == 1
        return all_structs[0]

    def index_structs(self) -> List[IndexStruct]:
        """
        Get all index structs.

        Returns:
            List[IndexStruct]: index structs

        """
        stored = self._kvstore.get_all(collection=self._collection)
        return [json_to_index_struct(blob) for blob in stored.values()]

    async def async_add_index_struct(self, index_struct: IndexStruct) -> None:
        """
        Asynchronously add an index struct.

        Args:
            index_struct (IndexStruct): index struct

        """
        await self._kvstore.aput(
            index_struct.index_id,
            index_struct_to_json(index_struct),
            collection=self._collection,
        )

    async def adelete_index_struct(self, key: str) -> None:
        """
        Asynchronously delete an index struct.

        Args:
            key (str): index struct key

        """
        await self._kvstore.adelete(key, collection=self._collection)

    async def aget_index_struct(
        self, struct_id: Optional[str] = None
    ) -> Optional[IndexStruct]:
        """
        Asynchronously get an index struct.

        Args:
            struct_id (Optional[str]): index struct id

        """
        if struct_id is not None:
            blob = await self._kvstore.aget(struct_id, collection=self._collection)
            return None if blob is None else json_to_index_struct(blob)
        all_structs = await self.async_index_structs()
        assert len(all_structs) == 1
        return all_structs[0]

    async def async_index_structs(self) -> List[IndexStruct]:
        """
        Asynchronously get all index structs.

        Returns:
            List[IndexStruct]: index structs

        """
        stored = await self._kvstore.aget_all(collection=self._collection)
        return [json_to_index_struct(blob) for blob in stored.values()]
| KVIndexStore |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_onboarding_continuation_email.py | {
"start": 488,
"end": 955
class ____(View):
    """Debug view: render a browser preview of the onboarding-continuation
    email using fixture data."""

    def get(self, request: HttpRequest) -> HttpResponse:
        # Platform list can be overridden via ?platforms=... query params.
        selected_platforms = request.GET.getlist(
            "platforms", ["javascript", "python", "flutter"]
        )
        organization = Organization(id=1, name="My Company")
        recipient = User(name="Ben")
        preview = MailPreviewAdapter(
            **get_request_builder_args(recipient, organization, selected_platforms)
        )
        return render_to_response("sentry/debug/mail/preview.html", {"preview": preview})
| DebugOrganizationOnboardingContinuationEmail |
python | joerick__pyinstrument | pyinstrument/frame_ops.py | {
"start": 391,
"end": 5015
class ____(ValueError):
    """Internal control-flow signal used by `build_frame_tree`: raised when
    the cached frame at the current stack depth doesn't match the incoming
    identifier, so the frame stack is trimmed and a fresh frame created."""

    pass
def build_frame_tree(
    frame_records: Sequence[FrameRecordType], context: FrameContext
) -> Frame | None:
    """Reconstruct a merged call tree from flat (frame_info_stack, time) records.

    Consecutive records that share a stack prefix reuse the same `Frame`
    objects, so repeated stacks accumulate time rather than duplicate nodes.
    Returns ``None`` when there are no records.
    """
    if len(frame_records) == 0:
        return None

    root_frame = Frame(identifier_or_frame_info=DUMMY_ROOT_FRAME_IDENTIFIER, context=context)
    # put the root frame at the bottom of the stack
    frame_stack: list[Frame] = [root_frame]

    for frame_info_stack, time in frame_records:
        stack_depth = 0
        root_frame.record_time_from_frame_info(DUMMY_ROOT_FRAME_IDENTIFIER, time)

        for stack_depth, frame_info in enumerate(frame_info_stack, start=1):
            frame_identifier = frame_info_get_identifier(frame_info)
            try:
                # reuse the frame from the previous record if it matches
                frame = frame_stack[stack_depth]

                if frame.identifier != frame_identifier:
                    # trim any frames after and including this one, and make a new frame
                    del frame_stack[stack_depth:]
                    raise IdentifierDoesntMatchException()
            except (IndexError, IdentifierDoesntMatchException):
                # create a new frame
                parent = frame_stack[stack_depth - 1]
                frame = Frame(identifier_or_frame_info=frame_info)
                parent.add_child(frame)

                assert len(frame_stack) == stack_depth
                frame_stack.append(frame)

            frame.record_time_from_frame_info(frame_info=frame_info, time=time)

        # trim any extra frames left over from a deeper previous record
        del frame_stack[stack_depth + 1 :]

        final_frame = frame_stack[-1]
        if not final_frame.is_synthetic_leaf:
            # record the self-time
            final_frame.add_child(
                Frame(identifier_or_frame_info=SELF_TIME_FRAME_IDENTIFIER, time=time)
            )

    if len(root_frame.children) == 1:
        # exactly one real root: drop the synthetic dummy root frame
        root_frame = root_frame.children[0]
        root_frame.remove_from_parent()

    return root_frame
def delete_frame_from_tree(
    frame: Frame, replace_with: LiteralStr["children", "self_time", "nothing"]
):
    """
    Delete a frame from the tree.

    :param frame: the frame to delete
    :param replace_with: what to replace the frame with - `children` replaces
        the frame with its children, `self_time` replaces the frame with a
        self-time frame, and `nothing` deletes the frame, absorbing the time
        into the parent.
    :raises ValueError: if `frame` has no parent (i.e. is the root frame)
    """
    parent = frame.parent

    if parent is None:
        raise ValueError("Cannot delete the root frame")

    if replace_with == "children":
        # splice the children into the parent at the deleted frame's position
        parent.add_children(frame.children, after=frame)
    elif replace_with == "self_time":
        parent.add_child(
            Frame(identifier_or_frame_info=SELF_TIME_FRAME_IDENTIFIER, time=frame.time),
            after=frame,
        )
    elif replace_with == "nothing":
        parent.absorbed_time += frame.time
    else:
        assert_never(replace_with)

    # the deleted frame's own absorbed time always folds into the parent
    parent.absorbed_time += frame.absorbed_time

    frame.remove_from_parent()
    # in this call, recursive is true, even when replace_with is 'children'.
    # When replace_with is 'self_time' or 'nothing', that's what we want. But
    # when it's 'children', by now, the children have been removed and added
    # to the parent, so recursive is irrelevant.
    remove_frame_from_groups(frame, recursive=True)
def combine_frames(frame: Frame, into: Frame):
    """
    Merge *frame* into its sibling *into*, then remove *frame* from the tree.

    The frames must have the same parent. Times, absorbed time, attribute
    times and children are all transferred onto *into*.

    :param frame: the frame to remove
    :param into: the frame to combine into
    """
    assert frame.parent is into.parent

    into.time += frame.time
    into.absorbed_time += frame.absorbed_time

    for attr_key, attr_time in frame.attributes.items():
        try:
            into.attributes[attr_key] += attr_time
        except KeyError:
            into.attributes[attr_key] = attr_time

    into.add_children(frame.children)
    frame.remove_from_parent()
    remove_frame_from_groups(frame, recursive=False)
def remove_frame_from_groups(frame: Frame, recursive: bool):
    """
    Removes frame from any groups that it is a member of. Should be used when
    removing a frame from a tree, so groups don't keep references to removed
    frames.

    :param frame: the frame to detach from its group
    :param recursive: also detach all of the frame's descendants
    """
    if recursive and frame.children:
        for child in frame.children:
            remove_frame_from_groups(child, recursive=True)

    if frame.group:
        group = frame.group
        group.remove_frame(frame)

        if len(group.frames) == 1:
            # a group with only one frame is meaningless, we'll remove it
            # entirely.
            group.remove_frame(group.frames[0])
| IdentifierDoesntMatchException |
python | doocs__leetcode | solution/1800-1899/1899.Merge Triplets to Form Target Triplet/Solution.py | {
"start": 0,
"end": 349
} | class ____:
def mergeTriplets(self, triplets: List[List[int]], target: List[int]) -> bool:
x, y, z = target
d = e = f = 0
for a, b, c in triplets:
if a <= x and b <= y and c <= z:
d = max(d, a)
e = max(e, b)
f = max(f, c)
return [d, e, f] == target
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.