language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pallets__flask | tests/test_async.py | {
"start": 207,
"end": 245
} | class ____(Exception):
pass
| AppError |
python | run-llama__llama_index | llama-index-core/llama_index/core/vector_stores/simple.py | {
"start": 1684,
"end": 12078
} | class ____(BasePydanticVectorStore):
"""
Simple Vector Store.
In this vector store, embeddings are stored within a simple, in-memory dictionary.
Args:
simple_vector_store_data_dict (Optional[dict]): data dict
containing the embeddings and doc_ids. See SimpleVectorStoreData
for more details.
"""
stores_text: bool = False
data: SimpleVectorStoreData = Field(default_factory=SimpleVectorStoreData)
_fs: fsspec.AbstractFileSystem = PrivateAttr()
def __init__(
self,
data: Optional[SimpleVectorStoreData] = None,
fs: Optional[fsspec.AbstractFileSystem] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
super().__init__(data=data or SimpleVectorStoreData()) # type: ignore[call-arg]
self._fs = fs or fsspec.filesystem("file")
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
namespace: str = DEFAULT_VECTOR_STORE,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "SimpleVectorStore":
"""Load from persist dir."""
persist_fname = f"{namespace}{NAMESPACE_SEP}{DEFAULT_PERSIST_FNAME}"
if fs is not None:
persist_path = concat_dirs(persist_dir, persist_fname)
else:
persist_path = os.path.join(persist_dir, persist_fname)
return cls.from_persist_path(persist_path, fs=fs)
@classmethod
def from_namespaced_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> Dict[str, BasePydanticVectorStore]:
"""Load from namespaced persist dir."""
listing_fn = os.listdir if fs is None else fs.listdir
vector_stores: Dict[str, BasePydanticVectorStore] = {}
try:
for fname in listing_fn(persist_dir):
if fname.endswith(DEFAULT_PERSIST_FNAME):
namespace = fname.split(NAMESPACE_SEP)[0]
# handle backwards compatibility with stores that were persisted
if namespace == DEFAULT_PERSIST_FNAME:
vector_stores[DEFAULT_VECTOR_STORE] = cls.from_persist_dir(
persist_dir=persist_dir, fs=fs
)
else:
vector_stores[namespace] = cls.from_persist_dir(
persist_dir=persist_dir, namespace=namespace, fs=fs
)
except Exception:
# failed to listdir, so assume there is only one store
try:
vector_stores[DEFAULT_VECTOR_STORE] = cls.from_persist_dir(
persist_dir=persist_dir, fs=fs, namespace=DEFAULT_VECTOR_STORE
)
except Exception:
# no namespace backwards compat
vector_stores[DEFAULT_VECTOR_STORE] = cls.from_persist_dir(
persist_dir=persist_dir, fs=fs
)
return vector_stores
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "SimpleVectorStore"
@property
def client(self) -> None:
"""Get client."""
return
@property
def _data(self) -> SimpleVectorStoreData:
"""Backwards compatibility."""
return self.data
def get(self, text_id: str) -> List[float]:
"""Get embedding."""
return self.data.embedding_dict[text_id]
def get_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
) -> List[BaseNode]:
"""Get nodes."""
raise NotImplementedError("SimpleVectorStore does not store nodes directly.")
def add(
self,
nodes: Sequence[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to index."""
for node in nodes:
self.data.embedding_dict[node.node_id] = node.get_embedding()
self.data.text_id_to_ref_doc_id[node.node_id] = node.ref_doc_id or "None"
metadata = node_to_metadata_dict(
node, remove_text=True, flat_metadata=False
)
metadata.pop("_node_content", None)
self.data.metadata_dict[node.node_id] = metadata
return [node.node_id for node in nodes]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
Delete nodes using with ref_doc_id.
Args:
ref_doc_id (str): The doc_id of the document to delete.
"""
text_ids_to_delete = set()
for text_id, ref_doc_id_ in self.data.text_id_to_ref_doc_id.items():
if ref_doc_id == ref_doc_id_:
text_ids_to_delete.add(text_id)
for text_id in text_ids_to_delete:
del self.data.embedding_dict[text_id]
del self.data.text_id_to_ref_doc_id[text_id]
# Handle metadata_dict not being present in stores that were persisted
# without metadata, or, not being present for nodes stored
# prior to metadata functionality.
if self.data.metadata_dict is not None:
self.data.metadata_dict.pop(text_id, None)
def delete_nodes(
self,
node_ids: Optional[List[str]] = None,
filters: Optional[MetadataFilters] = None,
**delete_kwargs: Any,
) -> None:
filter_fn = build_metadata_filter_fn(
lambda node_id: self.data.metadata_dict[node_id], filters
)
if node_ids is not None:
node_id_set = set(node_ids)
def node_filter_fn(node_id: str) -> bool:
return node_id in node_id_set and filter_fn(node_id)
else:
def node_filter_fn(node_id: str) -> bool:
return filter_fn(node_id)
for node_id in list(self.data.embedding_dict.keys()):
if node_filter_fn(node_id):
del self.data.embedding_dict[node_id]
del self.data.text_id_to_ref_doc_id[node_id]
self.data.metadata_dict.pop(node_id, None)
def clear(self) -> None:
"""Clear the store."""
self.data = SimpleVectorStoreData()
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Get nodes for response."""
# Prevent metadata filtering on stores that were persisted without metadata.
if (
query.filters is not None
and self.data.embedding_dict
and not self.data.metadata_dict
):
raise ValueError(
"Cannot filter stores that were persisted without metadata. "
"Please rebuild the store with metadata to enable filtering."
)
# Prefilter nodes based on the query filter and node ID restrictions.
query_filter_fn = build_metadata_filter_fn(
lambda node_id: self.data.metadata_dict[node_id], query.filters
)
if query.node_ids is not None:
available_ids = set(query.node_ids)
def node_filter_fn(node_id: str) -> bool:
return node_id in available_ids
else:
def node_filter_fn(node_id: str) -> bool:
return True
node_ids = []
embeddings = []
# TODO: consolidate with get_query_text_embedding_similarities
for node_id, embedding in self.data.embedding_dict.items():
if node_filter_fn(node_id) and query_filter_fn(node_id):
node_ids.append(node_id)
embeddings.append(embedding)
query_embedding = cast(List[float], query.query_embedding)
if query.mode in LEARNER_MODES:
top_similarities, top_ids = get_top_k_embeddings_learner(
query_embedding,
embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=node_ids,
)
elif query.mode == MMR_MODE:
mmr_threshold = kwargs.get("mmr_threshold")
top_similarities, top_ids = get_top_k_mmr_embeddings(
query_embedding,
embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=node_ids,
mmr_threshold=mmr_threshold,
)
elif query.mode == VectorStoreQueryMode.DEFAULT:
top_similarities, top_ids = get_top_k_embeddings(
query_embedding,
embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=node_ids,
)
else:
raise ValueError(f"Invalid query mode: {query.mode}")
return VectorStoreQueryResult(
similarities=top_similarities,
ids=top_ids,
)
def persist(
self,
persist_path: str = os.path.join(DEFAULT_PERSIST_DIR, DEFAULT_PERSIST_FNAME),
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> None:
"""Persist the SimpleVectorStore to a directory."""
fs = fs or self._fs
dirpath = os.path.dirname(persist_path)
if not fs.exists(dirpath):
fs.makedirs(dirpath)
with fs.open(persist_path, "w") as f:
json.dump(self.data.to_dict(), f)
@classmethod
def from_persist_path(
cls, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> "SimpleVectorStore":
"""Create a SimpleKVStore from a persist directory."""
fs = fs or fsspec.filesystem("file")
if not fs.exists(persist_path):
raise ValueError(
f"No existing {__name__} found at {persist_path}, skipping load."
)
logger.debug(f"Loading {__name__} from {persist_path}.")
with fs.open(persist_path, "rb") as f:
data_dict = json.load(f)
data = SimpleVectorStoreData.from_dict(data_dict)
return cls(data)
@classmethod
def from_dict(cls, data: Dict[str, Any], **kwargs: Any) -> "SimpleVectorStore":
save_data = SimpleVectorStoreData.from_dict(data)
return cls(save_data)
def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
return self.data.to_dict()
| SimpleVectorStore |
python | sympy__sympy | sympy/physics/mechanics/tests/test_actuator.py | {
"start": 12685,
"end": 15336
} | class ____():
r"""A single degree of freedom translational forced mass-spring-damper.
Notes
=====
This system is well known to have the governing equation:
.. math::
m \ddot{x} = F - k x - c \dot{x}
where $F$ is an externally applied force, $m$ is the mass of the particle
to which the spring and damper are attached, $k$ is the spring's stiffness,
$c$ is the dampers damping coefficient, and $x$ is the generalized
coordinate representing the system's single (translational) degree of
freedom.
"""
@pytest.fixture(autouse=True)
def _force_mass_spring_damper_model_fixture(self):
self.m = Symbol('m')
self.k = Symbol('k')
self.c = Symbol('c')
self.F = Symbol('F')
self.q = dynamicsymbols('q')
self.dq = dynamicsymbols('q', 1)
self.u = dynamicsymbols('u')
self.frame = ReferenceFrame('N')
self.origin = Point('pO')
self.origin.set_vel(self.frame, 0)
self.attachment = Point('pA')
self.attachment.set_pos(self.origin, self.q*self.frame.x)
self.mass = Particle('mass', self.attachment, self.m)
self.pathway = LinearPathway(self.origin, self.attachment)
self.kanes_method = KanesMethod(
self.frame,
q_ind=[self.q],
u_ind=[self.u],
kd_eqs=[self.dq - self.u],
)
self.bodies = [self.mass]
self.mass_matrix = Matrix([[self.m]])
self.forcing = Matrix([[self.F - self.c*self.u - self.k*self.q]])
def test_force_acuator(self):
stiffness = -self.k*self.pathway.length
spring = ForceActuator(stiffness, self.pathway)
damping = -self.c*self.pathway.extension_velocity
damper = ForceActuator(damping, self.pathway)
loads = [
(self.attachment, self.F*self.frame.x),
*spring.to_loads(),
*damper.to_loads(),
]
self.kanes_method.kanes_equations(self.bodies, loads)
assert self.kanes_method.mass_matrix == self.mass_matrix
assert self.kanes_method.forcing == self.forcing
def test_linear_spring_linear_damper(self):
spring = LinearSpring(self.k, self.pathway)
damper = LinearDamper(self.c, self.pathway)
loads = [
(self.attachment, self.F*self.frame.x),
*spring.to_loads(),
*damper.to_loads(),
]
self.kanes_method.kanes_equations(self.bodies, loads)
assert self.kanes_method.mass_matrix == self.mass_matrix
assert self.kanes_method.forcing == self.forcing
| TestForcedMassSpringDamperModel |
python | django__django | tests/admin_views/admin.py | {
"start": 16792,
"end": 16899
} | class ____(admin.ModelAdmin):
readonly_fields = ("chapter", "language", "user")
| ReadOnlyRelatedFieldAdmin |
python | bokeh__bokeh | tests/unit/bokeh/document/test_modules.py | {
"start": 1145,
"end": 1359
} | class ____:
__name__ = 'FakeMod'
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
| FakeMod |
python | pydata__xarray | xarray/tests/test_combine.py | {
"start": 47146,
"end": 54456
} | class ____:
def test_concat_along_existing_dim(self):
concat_dim = "dim1"
ds = create_test_data
with set_options(use_new_combine_kwarg_defaults=False):
old = concat([ds(0), ds(1)], dim=concat_dim)
with set_options(use_new_combine_kwarg_defaults=True):
new = concat([ds(0), ds(1)], dim=concat_dim)
assert_identical(old, new)
def test_concat_along_new_dim(self):
concat_dim = "new_dim"
ds = create_test_data
with set_options(use_new_combine_kwarg_defaults=False):
old = concat([ds(0), ds(1)], dim=concat_dim)
with set_options(use_new_combine_kwarg_defaults=True):
new = concat([ds(0), ds(1)], dim=concat_dim)
assert concat_dim in old.dims
assert concat_dim in new.dims
def test_nested_merge_with_overlapping_values(self):
ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.warns(
FutureWarning, match="will change from join='outer' to join='exact'"
):
with pytest.warns(
FutureWarning,
match="will change from compat='no_conflicts' to compat='override'",
):
old = combine_nested([ds1, ds2], concat_dim=None)
with set_options(use_new_combine_kwarg_defaults=True):
with pytest.raises(ValueError, match="might be related to new default"):
combine_nested([ds1, ds2], concat_dim=None)
assert_identical(old, expected)
def test_nested_merge_with_nan_order_matters(self):
ds1 = Dataset({"x": 0})
ds2 = Dataset({"x": np.nan})
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.warns(
FutureWarning,
match="will change from compat='no_conflicts' to compat='override'",
):
old = combine_nested([ds1, ds2], concat_dim=None)
with set_options(use_new_combine_kwarg_defaults=True):
new = combine_nested([ds1, ds2], concat_dim=None)
assert_identical(ds1, old)
assert_identical(old, new)
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.warns(
FutureWarning,
match="will change from compat='no_conflicts' to compat='override'",
):
old = combine_nested([ds2, ds1], concat_dim=None)
with set_options(use_new_combine_kwarg_defaults=True):
new = combine_nested([ds2, ds1], concat_dim=None)
assert_identical(ds1, old)
with pytest.raises(AssertionError):
assert_identical(old, new)
def test_nested_merge_with_concat_dim_explicitly_provided(self):
# Test the issue reported in GH #1988
objs = [Dataset({"x": 0, "y": 1})]
dim = DataArray([100], name="baz", dims="baz")
expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]})
with set_options(use_new_combine_kwarg_defaults=False):
old = combine_nested(objs, concat_dim=dim)
with set_options(use_new_combine_kwarg_defaults=True):
new = combine_nested(objs, concat_dim=dim)
assert_identical(expected, old)
assert_identical(old, new)
def test_combine_nested_missing_data_new_dim(self):
# Your data includes "time" and "station" dimensions, and each year's
# data has a different set of stations.
datasets = [
Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
]
expected = Dataset(
{"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]}
)
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.warns(
FutureWarning, match="will change from join='outer' to join='exact'"
):
old = combine_nested(datasets, concat_dim="t")
with set_options(use_new_combine_kwarg_defaults=True):
with pytest.raises(ValueError, match="might be related to new default"):
combine_nested(datasets, concat_dim="t")
new = combine_nested(datasets, concat_dim="t", join="outer")
assert_identical(expected, old)
assert_identical(expected, new)
def test_combine_by_coords_multiple_variables(self):
objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})]
expected = Dataset({"x": [0, 1], "y": [0, 1]})
with set_options(use_new_combine_kwarg_defaults=False):
with pytest.warns(
FutureWarning, match="will change from join='outer' to join='exact'"
):
old = combine_by_coords(objs)
with set_options(use_new_combine_kwarg_defaults=True):
with pytest.raises(ValueError, match="might be related to new default"):
combine_by_coords(objs)
assert_identical(old, expected)
@requires_cftime
def test_combine_by_coords_distant_cftime_dates():
# Regression test for https://github.com/pydata/xarray/issues/3535
import cftime
time_1 = [cftime.DatetimeGregorian(4500, 12, 31)]
time_2 = [cftime.DatetimeGregorian(4600, 12, 31)]
time_3 = [cftime.DatetimeGregorian(5100, 12, 31)]
da_1 = DataArray([0], dims=["time"], coords=[time_1], name="a").to_dataset()
da_2 = DataArray([1], dims=["time"], coords=[time_2], name="a").to_dataset()
da_3 = DataArray([2], dims=["time"], coords=[time_3], name="a").to_dataset()
result = combine_by_coords([da_1, da_2, da_3])
expected_time = np.concatenate([time_1, time_2, time_3])
expected = DataArray(
[0, 1, 2], dims=["time"], coords=[expected_time], name="a"
).to_dataset()
assert_identical(result, expected)
@requires_cftime
def test_combine_by_coords_raises_for_differing_calendars():
# previously failed with uninformative StopIteration instead of TypeError
# https://github.com/pydata/xarray/issues/4495
import cftime
time_1 = [cftime.DatetimeGregorian(2000, 1, 1)]
time_2 = [cftime.DatetimeProlepticGregorian(2001, 1, 1)]
da_1 = DataArray([0], dims=["time"], coords=[time_1], name="a").to_dataset()
da_2 = DataArray([1], dims=["time"], coords=[time_2], name="a").to_dataset()
error_msg = (
"Cannot combine along dimension 'time' with mixed types."
" Found:.*"
" If importing data directly from a file then setting"
" `use_cftime=True` may fix this issue."
)
with pytest.raises(TypeError, match=error_msg):
combine_by_coords([da_1, da_2])
def test_combine_by_coords_raises_for_differing_types():
# str and byte cannot be compared
da_1 = DataArray([0], dims=["time"], coords=[["a"]], name="a").to_dataset()
da_2 = DataArray([1], dims=["time"], coords=[[b"b"]], name="a").to_dataset()
with pytest.raises(
TypeError, match=r"Cannot combine along dimension 'time' with mixed types."
):
combine_by_coords([da_1, da_2])
| TestNewDefaults |
python | gevent__gevent | src/greentest/3.12/test_ssl.py | {
"start": 114714,
"end": 185001
} | class ____(unittest.TestCase):
@support.requires_resource('walltime')
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raise ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
# Allow for flexible libssl error messages.
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError, regex):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
# Allow for flexible libssl error messages.
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError, regex):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
with ThreadedEchoServer(context=server_context, chatty=True) as server:
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(UnicodeError):
context.wrap_socket(socket.socket(),
server_hostname='.pythontest.net')
with ThreadedEchoServer(context=server_context, chatty=True) as server:
with warnings_helper.check_no_resource_warning(self):
with self.assertRaises(UnicodeDecodeError):
context.wrap_socket(socket.socket(),
server_hostname=b'k\xf6nig.idn.pythontest.net')
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(
ssl.SSLError,
'alert unknown ca|EOF occurred|TLSV1_ALERT_UNKNOWN_CA'
):
# TLS 1.3 perform client cert exchange after handshake
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
self.fail("Expected connection failure")
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
# Allow for flexible libssl error messages.
regex = f"({msg}|CERTIFICATE_VERIFY_FAILED)"
self.assertRegex(repr(e), regex)
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
self.assertRegex(repr(e), regex)
def test_PROTOCOL_TLS(self):
    """Connecting to an SSLv23 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # Each try_protocol_combo(server_proto, client_proto, expected, ...)
    # asserts either the negotiated protocol name or False for "must fail".
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
    # Same matrix again with CERT_OPTIONAL on the client side.
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
    # And once more with CERT_REQUIRED.
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
    # Server with specific SSL options
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
    # Will choose TLSv1
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
                       server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
    if has_tls_version('TLSv1'):
        try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
                           server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
    """Connecting to an SSLv3 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # SSLv3<->SSLv3 must work at every client cert requirement level.
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
    # A client forbidding SSLv3 (or speaking only TLSv1) must fail.
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
    """Connecting to a TLSv1 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # TLSv1<->TLSv1 at each client cert requirement level.
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
    # Mismatched or explicitly-disabled protocol versions must fail.
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
    """Connecting to a TLSv1.1 server with various client options.
    Testing against older TLS versions."""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1_1)
    # Auto-negotiating client picks TLSv1.1 against a TLSv1.1-only server.
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
    # Fixed-version mismatches must fail in both directions.
    try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
    """Connecting to a TLSv1.2 server with various client options.
    Testing against older TLS versions."""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
                       server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
                       client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
    if has_tls_version('SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
                       client_options=ssl.OP_NO_TLSv1_2)
    # Auto-negotiating client picks TLSv1.2 against a TLSv1.2-only server.
    try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
    # Fixed-version mismatches against older TLS must fail both ways.
    if has_tls_protocol(ssl.PROTOCOL_TLSv1):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
    if has_tls_protocol(ssl.PROTOCOL_TLSv1_1):
        try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
        try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
    """Switching from clear text to encrypted and back again."""
    # Protocol script driven by message content: b"STARTTLS" asks the
    # echo server to switch to TLS, b"ENDTLS" to switch back. `wrapped`
    # tracks which transport (`conn` SSL socket vs `s` plain socket)
    # is currently active.
    msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
    server = ThreadedEchoServer(CERTFILE,
                                starttls_server=True,
                                chatty=True,
                                connectionchatty=True)
    wrapped = False
    with server:
        s = socket.socket()
        s.setblocking(True)
        s.connect((HOST, server.port))
        if support.verbose:
            sys.stdout.write("\n")
        for indata in msgs:
            if support.verbose:
                sys.stdout.write(
                    " client: sending %r...\n" % indata)
            if wrapped:
                conn.write(indata)
                outdata = conn.read()
            else:
                s.send(indata)
                outdata = s.recv(1024)
            msg = outdata.strip().lower()
            if indata == b"STARTTLS" and msg.startswith(b"ok"):
                # STARTTLS ok, switch to secure mode
                if support.verbose:
                    sys.stdout.write(
                        " client: read %r from server, starting TLS...\n"
                        % msg)
                conn = test_wrap_socket(s)
                wrapped = True
            elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                # ENDTLS ok, switch back to clear text
                if support.verbose:
                    sys.stdout.write(
                        " client: read %r from server, ending TLS...\n"
                        % msg)
                # unwrap() returns the underlying plain socket.
                s = conn.unwrap()
                wrapped = False
            else:
                if support.verbose:
                    sys.stdout.write(
                        " client: read %r from server\n" % msg)
        if support.verbose:
            sys.stdout.write(" client: closing connection.\n")
        # Say goodbye over whichever transport is currently active.
        if wrapped:
            conn.write(b"over\n")
        else:
            s.send(b"over\n")
        if wrapped:
            conn.close()
        else:
            s.close()
def test_socketserver(self):
    """Using socketserver to create and manage SSL connections."""
    server = make_https_server(self, certfile=SIGNED_CERTFILE)
    # try to connect
    if support.verbose:
        sys.stdout.write('\n')
    # Get this test file itself:
    with open(__file__, 'rb') as f:
        d1 = f.read()
    d2 = ''
    # now fetch the same data from the HTTPS server
    url = f'https://localhost:{server.port}/test_ssl.py'
    context = ssl.create_default_context(cafile=SIGNING_CA)
    f = urllib.request.urlopen(url, context=context)
    try:
        dlen = f.info().get("content-length")
        if dlen and (int(dlen) > 0):
            d2 = f.read(int(dlen))
            if support.verbose:
                sys.stdout.write(
                    " client: read %d bytes from remote server '%s'\n"
                    % (len(d2), server))
    finally:
        f.close()
    # Bytes served over HTTPS must equal the bytes read from disk.
    self.assertEqual(d1, d2)
def test_asyncore_server(self):
    """Check the example asyncore integration."""
    if support.verbose:
        sys.stdout.write("\n")
    indata = b"FOO\n"
    server = AsyncoreEchoServer(CERTFILE)
    with server:
        s = test_wrap_socket(socket.socket())
        s.connect(('127.0.0.1', server.port))
        if support.verbose:
            sys.stdout.write(
                " client: sending %r...\n" % indata)
        s.write(indata)
        outdata = s.read()
        if support.verbose:
            sys.stdout.write(" client: read %r\n" % outdata)
        # The echo server lower-cases what it receives.
        if outdata != indata.lower():
            self.fail(
                "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                % (outdata[:20], len(outdata),
                   indata[:20].lower(), len(indata)))
        s.write(b"over\n")
        if support.verbose:
            sys.stdout.write(" client: closing connection.\n")
        s.close()
        if support.verbose:
            sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
    """Test recv(), send() and friends.

    Exercises every send*/recv* variant on an SSLSocket against the
    threaded echo server: connection-oriented methods must work,
    address-taking methods (sendto/recvfrom*) must raise ValueError,
    and the msg-based APIs must raise NotImplementedError.
    """
    if support.verbose:
        sys.stdout.write("\n")
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                cacerts=CERTFILE,
                                chatty=True,
                                connectionchatty=False)
    with server:
        s = test_wrap_socket(socket.socket(),
                             server_side=False,
                             certfile=CERTFILE,
                             ca_certs=CERTFILE,
                             cert_reqs=ssl.CERT_NONE)
        s.connect((HOST, server.port))
        # helper methods for standardising recv* method signatures
        def _recv_into():
            b = bytearray(b"\0"*100)
            count = s.recv_into(b)
            return b[:count]
        def _recvfrom_into():
            b = bytearray(b"\0"*100)
            count, addr = s.recvfrom_into(b)
            return b[:count]
        # (name, method, expect success?, *args, return value func)
        send_methods = [
            ('send', s.send, True, [], len),
            ('sendto', s.sendto, False, ["some.address"], len),
            ('sendall', s.sendall, True, [], lambda x: None),
        ]
        # (name, method, whether to expect success, *args)
        recv_methods = [
            ('recv', s.recv, True, []),
            ('recvfrom', s.recvfrom, False, ["some.address"]),
            ('recv_into', _recv_into, True, []),
            ('recvfrom_into', _recvfrom_into, False, []),
        ]
        data_prefix = "PREFIX_"
        for (meth_name, send_meth, expect_success, args,
             ret_val_meth) in send_methods:
            indata = (data_prefix + meth_name).encode('ascii')
            try:
                ret = send_meth(indata, *args)
                msg = "sending with {}".format(meth_name)
                self.assertEqual(ret, ret_val_meth(indata), msg=msg)
                outdata = s.read()
                if outdata != indata.lower():
                    # BUGFIX: use "!r" conversions, not ":r" format specs
                    # -- bytes.__format__ rejects a non-empty spec, so the
                    # old strings raised TypeError instead of reporting.
                    self.fail(
                        "While sending with <<{name:s}>> bad data "
                        "<<{outdata!r}>> ({nout:d}) received; "
                        "expected <<{indata!r}>> ({nin:d})\n".format(
                            name=meth_name, outdata=outdata[:20],
                            nout=len(outdata),
                            indata=indata[:20], nin=len(indata)
                        )
                    )
            except ValueError as e:
                if expect_success:
                    self.fail(
                        "Failed to send with method <<{name:s}>>; "
                        "expected to succeed.\n".format(name=meth_name)
                    )
                if not str(e).startswith(meth_name):
                    # BUGFIX: "!s" conversion -- an exception object does
                    # not accept the "s" format spec.
                    self.fail(
                        "Method <<{name:s}>> failed with unexpected "
                        "exception message: {exp!s}\n".format(
                            name=meth_name, exp=e
                        )
                    )
        for meth_name, recv_meth, expect_success, args in recv_methods:
            indata = (data_prefix + meth_name).encode('ascii')
            try:
                s.send(indata)
                outdata = recv_meth(*args)
                if outdata != indata.lower():
                    # BUGFIX: "!r" conversions (see send loop above).
                    self.fail(
                        "While receiving with <<{name:s}>> bad data "
                        "<<{outdata!r}>> ({nout:d}) received; "
                        "expected <<{indata!r}>> ({nin:d})\n".format(
                            name=meth_name, outdata=outdata[:20],
                            nout=len(outdata),
                            indata=indata[:20], nin=len(indata)
                        )
                    )
            except ValueError as e:
                if expect_success:
                    self.fail(
                        "Failed to receive with method <<{name:s}>>; "
                        "expected to succeed.\n".format(name=meth_name)
                    )
                if not str(e).startswith(meth_name):
                    # BUGFIX: "!s" conversion (see send loop above).
                    self.fail(
                        "Method <<{name:s}>> failed with unexpected "
                        "exception message: {exp!s}\n".format(
                            name=meth_name, exp=e
                        )
                    )
                # consume data
                s.read()
        # read(-1, buffer) is supported, even though read(-1) is not
        data = b"data"
        s.send(data)
        buffer = bytearray(len(data))
        self.assertEqual(s.read(-1, buffer), len(data))
        self.assertEqual(buffer, data)
        # sendall accepts bytes-like objects
        if ctypes is not None:
            ubyte = ctypes.c_ubyte * len(data)
            byteslike = ubyte.from_buffer_copy(data)
            s.sendall(byteslike)
            self.assertEqual(s.read(), data)
        # Make sure sendmsg et al are disallowed to avoid
        # inadvertent disclosure of data and/or corruption
        # of the encrypted data stream
        self.assertRaises(NotImplementedError, s.dup)
        self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
        self.assertRaises(NotImplementedError, s.recvmsg, 100)
        self.assertRaises(NotImplementedError,
                          s.recvmsg_into, [bytearray(100)])
        s.write(b"over\n")
        self.assertRaises(ValueError, s.recv, -1)
        self.assertRaises(ValueError, s.read, -1)
        s.close()
def test_recv_zero(self):
    """recv(0)/read(0) return b'' immediately and never block."""
    server = ThreadedEchoServer(CERTFILE)
    self.enterContext(server)
    s = socket.create_connection((HOST, server.port))
    self.addCleanup(s.close)
    s = test_wrap_socket(s, suppress_ragged_eofs=False)
    self.addCleanup(s.close)
    # recv/read(0) should return no data
    s.send(b"data")
    self.assertEqual(s.recv(0), b"")
    self.assertEqual(s.read(0), b"")
    # The echoed payload is still intact after the zero-length reads.
    self.assertEqual(s.read(), b"data")
    # Should not block if the other end sends no data
    s.setblocking(False)
    self.assertEqual(s.recv(0), b"")
    # Zero-capacity buffer: recv_into must report 0 bytes, not block.
    self.assertEqual(s.recv_into(bytearray()), 0)
def test_recv_into_buffer_protocol_len(self):
    """recv_into() must size the read from the buffer protocol,
    not from the object's __len__()."""
    server = ThreadedEchoServer(CERTFILE)
    self.enterContext(server)
    s = socket.create_connection((HOST, server.port))
    self.addCleanup(s.close)
    s = test_wrap_socket(s, suppress_ragged_eofs=False)
    self.addCleanup(s.close)
    # An array of 2 unsigned ints: len() is 2 but the buffer is 8 bytes.
    s.send(b"data")
    buf = array.array('I', [0, 0])
    self.assertEqual(s.recv_into(buf), 4)
    self.assertEqual(bytes(buf)[:4], b"data")
    # A bytearray subclass whose __len__ raises: recv_into must not
    # call it (it would crash with ZeroDivisionError if it did).
    class B(bytearray):
        def __len__(self):
            1/0
    s.send(b"data")
    buf = B(6)
    self.assertEqual(s.recv_into(buf), 4)
    self.assertEqual(bytes(buf), b"data\0\0")
def test_nonblocking_send(self):
    """Sending on a non-blocking SSLSocket raises SSLWantWrite/ReadError
    once the transport buffers fill up."""
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                cacerts=CERTFILE,
                                chatty=True,
                                connectionchatty=False)
    with server:
        s = test_wrap_socket(socket.socket(),
                             server_side=False,
                             certfile=CERTFILE,
                             ca_certs=CERTFILE,
                             cert_reqs=ssl.CERT_NONE)
        s.connect((HOST, server.port))
        s.setblocking(False)
        # If we keep sending data, at some point the buffers
        # will be full and the call will block
        buf = bytearray(8192)
        def fill_buffer():
            while True:
                s.send(buf)
        # Either want-write (send buffer full) or want-read (renegotiation)
        # is acceptable here.
        self.assertRaises((ssl.SSLWantWriteError,
                           ssl.SSLWantReadError), fill_buffer)
        # Now read all the output and discard it
        s.setblocking(True)
        s.close()
def test_handshake_timeout(self):
    # Issue #5103: SSL handshake must respect the socket timeout
    # The server thread only accept()s and never speaks TLS, so the
    # client's handshake can never complete and must hit its timeout.
    server = socket.socket(socket.AF_INET)
    host = "127.0.0.1"
    port = socket_helper.bind_port(server)
    started = threading.Event()
    finish = False
    def serve():
        server.listen()
        started.set()
        conns = []
        while not finish:
            r, w, e = select.select([server], [], [], 0.1)
            if server in r:
                # Let the socket hang around rather than having
                # it closed by garbage collection.
                conns.append(server.accept()[0])
        for sock in conns:
            sock.close()
    t = threading.Thread(target=serve)
    t.start()
    started.wait()
    try:
        # Case 1: wrap after connect -- handshake happens in wrap.
        try:
            c = socket.socket(socket.AF_INET)
            c.settimeout(0.2)
            c.connect((host, port))
            # Will attempt handshake and time out
            self.assertRaisesRegex(TimeoutError, "timed out",
                                   test_wrap_socket, c)
        finally:
            c.close()
        # Case 2: wrap before connect -- handshake happens in connect.
        try:
            c = socket.socket(socket.AF_INET)
            c = test_wrap_socket(c)
            c.settimeout(0.2)
            # Will attempt handshake and time out
            self.assertRaisesRegex(TimeoutError, "timed out",
                                   c.connect, (host, port))
        finally:
            c.close()
    finally:
        # Rebinding `finish` is seen by the closure in serve().
        finish = True
        t.join()
        server.close()
def test_server_accept(self):
    # Issue #16357: accept() on a SSLSocket created through
    # SSLContext.wrap_socket().
    client_ctx, server_ctx, hostname = testing_context()
    server = socket.socket(socket.AF_INET)
    host = "127.0.0.1"
    port = socket_helper.bind_port(server)
    # Wrap the *listening* socket; accept() must yield SSLSockets.
    server = server_ctx.wrap_socket(server, server_side=True)
    self.assertTrue(server.server_side)
    evt = threading.Event()
    remote = None
    peer = None
    def serve():
        nonlocal remote, peer
        server.listen()
        # Block on the accept and wait on the connection to close.
        evt.set()
        remote, peer = server.accept()
        remote.send(remote.recv(4))
    t = threading.Thread(target=serve)
    t.start()
    # Client wait until server setup and perform a connect.
    evt.wait()
    client = client_ctx.wrap_socket(
        socket.socket(), server_hostname=hostname
    )
    client.connect((hostname, port))
    client.send(b'data')
    client.recv()
    client_addr = client.getsockname()
    client.close()
    t.join()
    remote.close()
    server.close()
    # Sanity checks.
    self.assertIsInstance(remote, ssl.SSLSocket)
    self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
    """getpeercert() on a never-connected SSLSocket fails with ENOTCONN."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    with ctx.wrap_socket(socket.socket()) as sock:
        with self.assertRaises(OSError) as cm:
            sock.getpeercert()
        self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
    """do_handshake() on a never-connected SSLSocket fails with ENOTCONN."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    with ctx.wrap_socket(socket.socket()) as sock:
        with self.assertRaises(OSError) as cm:
            sock.do_handshake()
        self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
    """Handshake fails when client and server share no cipher suites."""
    client_context, server_context, hostname = testing_context()
    # OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    # Force different suites on client and server
    client_context.set_ciphers("AES128")
    server_context.set_ciphers("AES256")
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaises(OSError):
                s.connect((HOST, server.port))
    # The server side records the handshake failure reason.
    self.assertIn("NO_SHARED_CIPHER", server.conn_errors[0])
def test_version_basic(self):
    """
    Basic tests for SSLSocket.version().
    More tests are done in the test_protocol_*() methods.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    with ThreadedEchoServer(CERTFILE,
                            ssl_version=ssl.PROTOCOL_TLS_SERVER,
                            chatty=False) as server:
        with context.wrap_socket(socket.socket()) as s:
            # Before the handshake there is no version and no _sslobj.
            self.assertIs(s.version(), None)
            self.assertIs(s._sslobj, None)
            s.connect((HOST, server.port))
            self.assertEqual(s.version(), 'TLSv1.3')
        # After close (context-manager exit) both revert to None.
        self.assertIs(s._sslobj, None)
        self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
    """A TLS 1.3-only client negotiates TLS 1.3 and a TLS 1.3 cipher."""
    client_context, server_context, hostname = testing_context()
    # Force TLS 1.3 by raising the client's floor.
    client_context.minimum_version = ssl.TLSVersion.TLSv1_3
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            # The negotiated suite must be one of the TLS 1.3 AEAD ciphers.
            self.assertIn(s.cipher()[0], {
                'TLS_AES_256_GCM_SHA384',
                'TLS_CHACHA20_POLY1305_SHA256',
                'TLS_AES_128_GCM_SHA256',
            })
            self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
    """Overlapping min/max ranges negotiate the highest common version."""
    client_context, server_context, hostname = testing_context()
    # client TLSv1.0 to 1.2
    client_context.minimum_version = ssl.TLSVersion.TLSv1
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    # server only TLSv1.2
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    server_context.maximum_version = ssl.TLSVersion.TLSv1_2
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            # Intersection of the two ranges is exactly TLSv1.2.
            self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
    """The server's lower maximum caps negotiation at TLSv1.1."""
    client_context, server_context, hostname = testing_context()
    # client 1.0 to 1.2, server 1.0 to 1.1
    client_context.minimum_version = ssl.TLSVersion.TLSv1
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    server_context.minimum_version = ssl.TLSVersion.TLSv1
    server_context.maximum_version = ssl.TLSVersion.TLSv1_1
    # Old protocols need the libssl security level lowered.
    seclevel_workaround(client_context, server_context)
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
    """Disjoint min/max ranges must fail the handshake with an alert."""
    client_context, server_context, hostname = testing_context()
    # client 1.0, server 1.2 (mismatch)
    server_context.maximum_version = ssl.TLSVersion.TLSv1_2
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    client_context.maximum_version = ssl.TLSVersion.TLSv1
    client_context.minimum_version = ssl.TLSVersion.TLSv1
    seclevel_workaround(client_context, server_context)
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            with self.assertRaises(ssl.SSLError) as e:
                s.connect((HOST, server.port))
            # Exact alert wording varies across libssl builds.
            self.assertRegex(str(e.exception), "(alert|ALERT)")
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
    """Pinning both sides to SSLv3 negotiates SSLv3."""
    client_context, server_context, hostname = testing_context()
    server_context.minimum_version = ssl.TLSVersion.SSLv3
    client_context.minimum_version = ssl.TLSVersion.SSLv3
    client_context.maximum_version = ssl.TLSVersion.SSLv3
    # SSLv3 is below the default libssl security level.
    seclevel_workaround(client_context, server_context)
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
    # Issue #21015: elliptic curve-based Diffie Hellman key exchange
    # should be enabled by default on SSL contexts.
    client_context, server_context, hostname = testing_context()
    # TLSv1.3 defaults to PFS key agreement and no longer has KEA in
    # cipher name.
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
    # explicitly using the 'ECCdraft' cipher alias. Otherwise,
    # our default cipher list should prefer ECDH-based ciphers
    # automatically.
    with ThreadedEchoServer(context=server_context) as server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            # Negotiated cipher name carries the key-exchange algorithm.
            self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                     "'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
    """Test tls-unique channel binding."""
    if support.verbose:
        sys.stdout.write("\n")
    client_context, server_context, hostname = testing_context()
    # tls-unique is not defined for TLSv1.3
    # https://datatracker.ietf.org/doc/html/rfc8446#appendix-C.5
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    server = ThreadedEchoServer(context=server_context,
                                chatty=True,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(
                socket.socket(),
                server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            # get the data
            cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(
                    " got channel binding data: {0!r}\n".format(cb_data))
            # check if it is sane
            self.assertIsNotNone(cb_data)
            if s.version() == 'TLSv1.3':
                self.assertEqual(len(cb_data), 48)
            else:
                self.assertEqual(len(cb_data), 12) # True for TLSv1
            # and compare with the peers version
            # (the echo server replies with repr() of its own binding data)
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(cb_data).encode("us-ascii"))
        # now, again
        with client_context.wrap_socket(
                socket.socket(),
                server_hostname=hostname) as s:
            s.connect((HOST, server.port))
            new_cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(
                    "got another channel binding data: {0!r}\n".format(
                        new_cb_data)
                )
            # is it really unique
            # (a fresh handshake must produce different binding data)
            self.assertNotEqual(cb_data, new_cb_data)
            self.assertIsNotNone(cb_data)
            if s.version() == 'TLSv1.3':
                self.assertEqual(len(cb_data), 48)
            else:
                self.assertEqual(len(cb_data), 12) # True for TLSv1
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
    """Negotiated compression is reported as None or a known scheme."""
    ctx_client, ctx_server, host = testing_context()
    stats = server_params_test(ctx_client, ctx_server, chatty=True,
                               connectionchatty=True, sni_name=host)
    compression = stats['compression']
    if support.verbose:
        sys.stdout.write(" got compression: {!r}\n".format(compression))
    self.assertIn(compression, {None, 'ZLIB', 'RLE'})
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
                     "ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
    """With OP_NO_COMPRESSION set on both sides, none is negotiated."""
    ctx_client, ctx_server, host = testing_context()
    for ctx in (ctx_client, ctx_server):
        ctx.options |= ssl.OP_NO_COMPRESSION
    stats = server_params_test(ctx_client, ctx_server, chatty=True,
                               connectionchatty=True, sni_name=host)
    self.assertIs(stats['compression'], None)
def test_legacy_server_connect(self):
    """Handshake succeeds with OP_LEGACY_SERVER_CONNECT enabled."""
    ctx_client, ctx_server, host = testing_context()
    ctx_client.options |= ssl.OP_LEGACY_SERVER_CONNECT
    server_params_test(ctx_client, ctx_server, chatty=True,
                       connectionchatty=True, sni_name=host)
def test_no_legacy_server_connect(self):
    """Handshake succeeds with OP_LEGACY_SERVER_CONNECT cleared."""
    ctx_client, ctx_server, host = testing_context()
    ctx_client.options &= ~ssl.OP_LEGACY_SERVER_CONNECT
    server_params_test(ctx_client, ctx_server, chatty=True,
                       connectionchatty=True, sni_name=host)
def test_dh_params(self):
    # Check we can get a connection with ephemeral finite-field
    # Diffie-Hellman (if supported).
    client_context, server_context, hostname = testing_context()
    dhe_aliases = {"ADH", "EDH", "DHE"}
    if not (supports_kx_alias(client_context, dhe_aliases)
            and supports_kx_alias(server_context, dhe_aliases)):
        self.skipTest("libssl doesn't support ephemeral DH")
    # test scenario needs TLS <= 1.2
    client_context.maximum_version = ssl.TLSVersion.TLSv1_2
    try:
        server_context.load_dh_params(DHFILE)
    except RuntimeError:
        if Py_DEBUG_WIN32:
            self.skipTest("not supported on Win32 debug build")
        raise
    server_context.set_ciphers("kEDH")
    server_context.maximum_version = ssl.TLSVersion.TLSv1_2
    stats = server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
    # Negotiated cipher name like "DHE-RSA-AES256-SHA" must contain a
    # DH key-exchange component.
    cipher = stats["cipher"][0]
    parts = cipher.split("-")
    if not dhe_aliases.intersection(parts):
        # BUGFIX: `cipher` is already a str; the old message used
        # `cipher[0]`, which printed only its first character.
        self.fail("Non-DH key exchange: " + cipher)
def test_ecdh_curve(self):
    """set_ecdh_curve(): auto-negotiation works; a hard mismatch fails."""
    # server secp384r1, client auto
    client_context, server_context, hostname = testing_context()
    server_context.set_ecdh_curve("secp384r1")
    server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    stats = server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
    # server auto, client secp384r1
    client_context, server_context, hostname = testing_context()
    client_context.set_ecdh_curve("secp384r1")
    server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    stats = server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
    # server / client curve mismatch
    client_context, server_context, hostname = testing_context()
    client_context.set_ecdh_curve("prime256v1")
    server_context.set_ecdh_curve("secp384r1")
    server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
    server_context.minimum_version = ssl.TLSVersion.TLSv1_2
    # Incompatible pinned curves: the handshake must fail.
    with self.assertRaises(ssl.SSLError):
        server_params_test(client_context, server_context,
                           chatty=True, connectionchatty=True,
                           sni_name=hostname)
def test_selected_alpn_protocol(self):
    """Without ALPN configured anywhere, the selected protocol is None."""
    ctx_client, ctx_server, host = testing_context()
    stats = server_params_test(ctx_client, ctx_server, chatty=True,
                               connectionchatty=True, sni_name=host)
    self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
    """ALPN on the server alone (client silent) selects nothing."""
    ctx_client, ctx_server, host = testing_context()
    ctx_server.set_alpn_protocols(['foo', 'bar'])
    stats = server_params_test(ctx_client, ctx_server, chatty=True,
                               connectionchatty=True, sni_name=host)
    self.assertIs(stats['client_alpn_protocol'], None)
def test_alpn_protocols(self):
    """ALPN negotiation: the server's preference order wins; no overlap
    selects nothing on either side."""
    server_protocols = ['foo', 'bar', 'milkshake']
    # (client offer, expected selection) pairs.
    protocol_tests = [
        (['foo', 'bar'], 'foo'),
        (['bar', 'foo'], 'foo'),
        (['milkshake'], 'milkshake'),
        (['http/3.0', 'http/4.0'], None)
    ]
    for client_protocols, expected in protocol_tests:
        client_context, server_context, hostname = testing_context()
        server_context.set_alpn_protocols(server_protocols)
        client_context.set_alpn_protocols(client_protocols)
        try:
            stats = server_params_test(client_context,
                                       server_context,
                                       chatty=True,
                                       connectionchatty=True,
                                       sni_name=hostname)
        except ssl.SSLError as e:
            stats = e
        # %% escapes leave two %s holes for (result, side) below.
        msg = "failed trying %s (s) and %s (c).\n" \
              "was expecting %s, but got %%s from the %%s" \
              % (str(server_protocols), str(client_protocols),
                 str(expected))
        client_result = stats['client_alpn_protocol']
        self.assertEqual(client_result, expected,
                         msg % (client_result, "client"))
        # Server records one selection per handshake; take the latest.
        server_result = stats['server_alpn_protocols'][-1] \
            if len(stats['server_alpn_protocols']) else 'nothing'
        self.assertEqual(server_result, expected,
                         msg % (server_result, "server"))
def test_npn_protocols(self):
    # NPN support was removed; ssl must never report it as available.
    assert not ssl.HAS_NPN
def sni_contexts(self):
    """Build the (server, alternate-server, client) contexts shared by
    the SNI callback tests below."""
    client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    client_context.load_verify_locations(SIGNING_CA)
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    other_context.load_cert_chain(SIGNED_CERTFILE2)
    return server_context, other_context, client_context
def check_common_name(self, stats, name):
    """Assert the peer certificate's subject commonName equals *name*."""
    subject = stats['peercert']['subject']
    self.assertIn((('commonName', name),), subject)
def test_sni_callback(self):
    """The servername callback receives the SNI name and may swap in a
    different server context; disabling it restores the default."""
    calls = []
    server_context, other_context, client_context = self.sni_contexts()
    client_context.check_hostname = False
    def servername_cb(ssl_sock, server_name, initial_context):
        calls.append((server_name, initial_context))
        if server_name is not None:
            # Serve the alternate certificate for named connections.
            ssl_sock.context = other_context
    server_context.set_servername_callback(servername_cb)
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='supermessage')
    # The hostname was fetched properly, and the certificate was
    # changed for the connection.
    self.assertEqual(calls, [("supermessage", server_context)])
    # CERTFILE4 was selected
    self.check_common_name(stats, 'fakehostname')
    calls = []
    # The callback is called with server_name=None
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name=None)
    self.assertEqual(calls, [(None, server_context)])
    self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
    # Check disabling the callback
    calls = []
    server_context.set_servername_callback(None)
    stats = server_params_test(client_context, server_context,
                               chatty=True,
                               sni_name='notfunny')
    # Certificate didn't change
    self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
    self.assertEqual(calls, [])
def test_sni_callback_alert(self):
    """A TLS alert returned by the SNI callback is delivered to the
    connecting client as an SSLError."""
    server_context, other_context, client_context = self.sni_contexts()
    def cb_returning_alert(ssl_sock, server_name, initial_context):
        return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
    server_context.set_servername_callback(cb_returning_alert)
    with self.assertRaises(ssl.SSLError) as cm:
        server_params_test(client_context, server_context,
                           chatty=False, sni_name='supermessage')
    self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
    # Raising fails the connection with a TLS handshake failure alert.
    server_context, other_context, client_context = self.sni_contexts()
    def cb_raising(ssl_sock, server_name, initial_context):
        # Deliberately raise ZeroDivisionError inside the callback.
        1/0
    server_context.set_servername_callback(cb_raising)
    # The callback's exception cannot propagate through the C layer, so
    # it surfaces as an "unraisable" exception; the client just sees a
    # failed handshake.
    with support.catch_unraisable_exception() as catch:
        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        # Allow for flexible libssl error messages.
        regex = "(SSLV3_ALERT_HANDSHAKE_FAILURE|NO_PRIVATE_VALUE)"
        self.assertRegex(cm.exception.reason, regex)
        self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
    # Returning the wrong return type terminates the TLS connection
    # with an internal error alert.
    server_context, other_context, client_context = self.sni_contexts()
    def cb_wrong_return_type(ssl_sock, server_name, initial_context):
        # Must return None or an int alert code; a str is invalid.
        return "foo"
    server_context.set_servername_callback(cb_wrong_return_type)
    # The TypeError surfaces as an unraisable exception; the client sees
    # an internal-error alert.
    with support.catch_unraisable_exception() as catch:
        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
        self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
    """shared_ciphers() reports only suites both sides can use."""
    client_context, server_context, hostname = testing_context()
    client_context.set_ciphers("AES128:AES256")
    server_context.set_ciphers("AES256:eNULL")
    # The intersection is AES256; TLS 1.3 suites are always present.
    expected_algs = [
        "AES256", "AES-256",
        # TLS 1.3 ciphers are always enabled
        "TLS_CHACHA20", "TLS_AES",
    ]
    stats = server_params_test(client_context, server_context,
                               sni_name=hostname)
    ciphers = stats['server_shared_ciphers'][0]
    self.assertGreater(len(ciphers), 0)
    for name, tls_version, bits in ciphers:
        if not any(alg in name for alg in expected_algs):
            self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
    """read() and write() on a closed SSLSocket raise ValueError."""
    ctx_client, ctx_server, host = testing_context()
    with ThreadedEchoServer(context=ctx_server, chatty=False) as server:
        sock = ctx_client.wrap_socket(socket.socket(),
                                      server_hostname=host)
        sock.connect((HOST, server.port))
        sock.close()
        self.assertRaises(ValueError, sock.read, 1024)
        self.assertRaises(ValueError, sock.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
| ThreadedTests |
python | fluentpython__example-code-2e | 24-class-metaprog/checked/metaclass/checked_demo.py | {
"start": 76,
"end": 604
} | class ____(Checked):
title: str
year: int
box_office: float
if __name__ == '__main__':
movie = Movie(title='The Godfather', year=1972, box_office=137)
print(movie)
print(movie.title)
# end::MOVIE_DEMO[]
try:
# remove the "type: ignore" comment to see Mypy error
movie.year = 'MCMLXXII' # type: ignore
except TypeError as e:
print(e)
try:
blockbuster = Movie(title='Avatar', year=2009, box_office='billions')
except TypeError as e:
print(e)
| Movie |
python | scrapy__scrapy | tests/spiders.py | {
"start": 783,
"end": 1020
} | class ____(MockServerSpider):
name = "meta"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.meta = {}
def closed(self, reason):
self.meta["close_reason"] = reason
| MetaSpider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/types.py | {
"start": 5617,
"end": 7066
} | class ____(type_api.NativeForEmulated, sqltypes._AbstractInterval):
"""PostgreSQL INTERVAL type."""
__visit_name__ = "INTERVAL"
native = True
def __init__(
self, precision: Optional[int] = None, fields: Optional[str] = None
) -> None:
"""Construct an INTERVAL.
:param precision: optional integer precision value
:param fields: string fields specifier. allows storage of fields
to be limited, such as ``"YEAR"``, ``"MONTH"``, ``"DAY TO HOUR"``,
etc.
"""
self.precision = precision
self.fields = fields
@classmethod
def adapt_emulated_to_native(
cls, interval: sqltypes.Interval, **kw: Any # type: ignore[override]
) -> INTERVAL:
return INTERVAL(precision=interval.second_precision)
@property
def _type_affinity(self) -> Type[sqltypes.Interval]:
return sqltypes.Interval
def as_generic(self, allow_nulltype: bool = False) -> sqltypes.Interval:
return sqltypes.Interval(native=True, second_precision=self.precision)
@property
def python_type(self) -> Type[dt.timedelta]:
return dt.timedelta
def literal_processor(
self, dialect: Dialect
) -> Optional[_LiteralProcessorType[dt.timedelta]]:
def process(value: dt.timedelta) -> str:
return f"make_interval(secs=>{value.total_seconds()})"
return process
PGInterval = INTERVAL
| INTERVAL |
python | simplejson__simplejson | simplejson/tests/test_float.py | {
"start": 182,
"end": 1983
} | class ____(TestCase):
def test_degenerates_allow(self):
for inf in (PosInf, NegInf):
self.assertEqual(json.loads(json.dumps(inf, allow_nan=True), allow_nan=True), inf)
# Python 2.5 doesn't have math.isnan
nan = json.loads(json.dumps(NaN, allow_nan=True), allow_nan=True)
self.assertTrue((0 + nan) != nan)
def test_degenerates_ignore(self):
for f in (PosInf, NegInf, NaN):
self.assertEqual(json.loads(json.dumps(f, ignore_nan=True)), None)
def test_degenerates_deny(self):
for f in (PosInf, NegInf, NaN):
self.assertRaises(ValueError, json.dumps, f, allow_nan=False)
for s in ('Infinity', '-Infinity', 'NaN'):
self.assertRaises(ValueError, json.loads, s, allow_nan=False)
self.assertRaises(ValueError, json.loads, s)
def test_floats(self):
for num in [1617161771.7650001, math.pi, math.pi**100,
math.pi**-100, 3.1]:
self.assertEqual(float(json.dumps(num)), num)
self.assertEqual(json.loads(json.dumps(num)), num)
self.assertEqual(json.loads(text_type(json.dumps(num))), num)
def test_ints(self):
for num in [1, long_type(1), 1<<32, 1<<64]:
self.assertEqual(json.dumps(num), str(num))
self.assertEqual(int(json.dumps(num)), num)
self.assertEqual(json.loads(json.dumps(num)), num)
self.assertEqual(json.loads(text_type(json.dumps(num))), num)
def test_float_range(self):
try:
float_range = [sys.float_info.min, sys.float_info.max]
except AttributeError:
float_range = [2.2250738585072014e-308, 1.7976931348623157e+308]
self.assertEqual(json.loads(json.dumps(float_range)), float_range)
| TestFloat |
python | getsentry__sentry | src/sentry/testutils/pytest/fixtures.py | {
"start": 6046,
"end": 6587
} | class ____(yaml.dumper.SafeDumper):
"""Disable pyyaml aliases for identical object references"""
def ignore_aliases(self, data) -> bool:
return True
def read_snapshot_file(reference_file: str) -> str:
with open(reference_file, encoding="utf-8") as f:
match = SNAPSHOT_REGEX.match(f.read())
if match is None:
raise OSError()
return match.group(1)
InequalityComparator = Callable[[str, str], bool | str]
default_comparator = lambda refval, output: refval != output
| ReadableYamlDumper |
python | huggingface__transformers | src/transformers/models/markuplm/modeling_markuplm.py | {
"start": 1461,
"end": 3394
} | class ____(nn.Module):
"""Construct the embeddings from xpath tags and subscripts.
We drop tree-id in this version, as its info can be covered by xpath.
"""
def __init__(self, config):
super().__init__()
self.max_depth = config.max_depth
self.xpath_unitseq2_embeddings = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.activation = nn.ReLU()
self.xpath_unitseq2_inner = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, 4 * config.hidden_size)
self.inner2emb = nn.Linear(4 * config.hidden_size, config.hidden_size)
self.xpath_tag_sub_embeddings = nn.ModuleList(
[
nn.Embedding(config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size)
for _ in range(self.max_depth)
]
)
self.xpath_subs_sub_embeddings = nn.ModuleList(
[
nn.Embedding(config.max_xpath_subs_unit_embeddings, config.xpath_unit_hidden_size)
for _ in range(self.max_depth)
]
)
def forward(self, xpath_tags_seq=None, xpath_subs_seq=None):
xpath_tags_embeddings = []
xpath_subs_embeddings = []
for i in range(self.max_depth):
xpath_tags_embeddings.append(self.xpath_tag_sub_embeddings[i](xpath_tags_seq[:, :, i]))
xpath_subs_embeddings.append(self.xpath_subs_sub_embeddings[i](xpath_subs_seq[:, :, i]))
xpath_tags_embeddings = torch.cat(xpath_tags_embeddings, dim=-1)
xpath_subs_embeddings = torch.cat(xpath_subs_embeddings, dim=-1)
xpath_embeddings = xpath_tags_embeddings + xpath_subs_embeddings
xpath_embeddings = self.inner2emb(self.dropout(self.activation(self.xpath_unitseq2_inner(xpath_embeddings))))
return xpath_embeddings
| XPathEmbeddings |
python | django__django | tests/sites_tests/tests.py | {
"start": 13146,
"end": 13422
} | class ____(TestCase):
def test_request(self):
def get_response(request):
return HttpResponse(str(request.site.id))
response = CurrentSiteMiddleware(get_response)(HttpRequest())
self.assertContains(response, settings.SITE_ID)
| MiddlewareTest |
python | numba__numba | numba/core/controlflow.py | {
"start": 1348,
"end": 1956
} | class ____(collections.namedtuple("Loop",
("entries", "exits", "header", "body"))):
"""
A control flow loop, as detected by a CFGraph object.
"""
__slots__ = ()
# The loop header is enough to detect that two loops are really
# the same, assuming they belong to the same graph.
# (note: in practice, only one loop instance is created per graph
# loop, so identity would be fine)
def __eq__(self, other):
return isinstance(other, Loop) and other.header == self.header
def __hash__(self):
return hash(self.header)
| Loop |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 761076,
"end": 778820
} | class ____(
MarkPropDefnumberArray, NumericArrayMarkPropDef
):
r"""
FieldOrDatumDefWithConditionMarkPropFieldDefnumberArray schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
condition : dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberArrayExprRef`, :class:`ConditionalParameterValueDefnumberArrayExprRef`, :class:`ConditionalPredicateValueDefnumberArrayExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
legend : dict, :class:`Legend`, None
An object defining properties of the legend. If ``null``, the legend for the
encoding channel will be removed.
**Default value:** If undefined, default `legend properties
<https://vega.github.io/vega-lite/docs/legend.html>`__ are applied.
**See also:** `legend <https://vega.github.io/vega-lite/docs/legend.html>`__
documentation.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_schema = {
"$ref": "#/definitions/FieldOrDatumDefWithCondition<MarkPropFieldDef,number[]>"
}
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[bool | SchemaBase | Map | None] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map | None] = Undefined,
scale: Optional[SchemaBase | Map | None] = Undefined,
sort: Optional[
SchemaBase
| Sequence[str]
| Sequence[bool]
| Sequence[float]
| Sequence[Temporal | SchemaBase | Map]
| Map
| AllSortString_T
| None
] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | StandardType_T] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
condition=condition,
field=field,
legend=legend,
scale=scale,
sort=sort,
timeUnit=timeUnit,
title=title,
type=type,
**kwds,
)
| FieldOrDatumDefWithConditionMarkPropFieldDefnumberArray |
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 134333,
"end": 139197
} | class ____(PreTrainedModel):
config: SpeechT5HifiGanConfig
main_input_name = "spectrogram"
def __init__(self, config: SpeechT5HifiGanConfig):
super().__init__(config)
self.num_kernels = len(config.resblock_kernel_sizes)
self.num_upsamples = len(config.upsample_rates)
self.conv_pre = nn.Conv1d(
config.model_in_dim,
config.upsample_initial_channel,
kernel_size=7,
stride=1,
padding=3,
)
self.upsampler = nn.ModuleList()
for i, (upsample_rate, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes)):
self.upsampler.append(
nn.ConvTranspose1d(
config.upsample_initial_channel // (2**i),
config.upsample_initial_channel // (2 ** (i + 1)),
kernel_size=kernel_size,
stride=upsample_rate,
padding=(kernel_size - upsample_rate) // 2,
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.upsampler)):
channels = config.upsample_initial_channel // (2 ** (i + 1))
for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes):
self.resblocks.append(HifiGanResidualBlock(channels, kernel_size, dilation, config.leaky_relu_slope))
self.conv_post = nn.Conv1d(channels, 1, kernel_size=7, stride=1, padding=3)
self.register_buffer("mean", torch.zeros(config.model_in_dim))
self.register_buffer("scale", torch.ones(config.model_in_dim))
# Initialize weights and apply final processing
self.post_init()
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
weight_norm(self.conv_pre)
for layer in self.upsampler:
weight_norm(layer)
for layer in self.resblocks:
layer.apply_weight_norm()
weight_norm(self.conv_post)
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.conv_pre)
for layer in self.upsampler:
nn.utils.remove_weight_norm(layer)
for layer in self.resblocks:
layer.remove_weight_norm()
nn.utils.remove_weight_norm(self.conv_post)
@auto_docstring(
custom_intro="""
Converts a log-mel spectrogram into a speech waveform. Passing a batch of log-mel spectrograms returns a batch
of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a single, un-batched speech
waveform.
"""
)
def forward(self, spectrogram: torch.FloatTensor) -> torch.FloatTensor:
r"""
spectrogram (`torch.FloatTensor`):
Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
config.model_in_dim)`, or un-batched and of shape `(sequence_length, config.model_in_dim)`.
Returns:
`torch.FloatTensor`: Tensor containing the speech waveform. If the input spectrogram is batched, will be of
shape `(batch_size, num_frames,)`. If un-batched, will be of shape `(num_frames,)`.
"""
if self.config.normalize_before:
spectrogram = (spectrogram - self.mean) / self.scale
is_batched = spectrogram.dim() == 3
if not is_batched:
spectrogram = spectrogram.unsqueeze(0)
hidden_states = spectrogram.transpose(2, 1)
hidden_states = self.conv_pre(hidden_states)
for i in range(self.num_upsamples):
hidden_states = nn.functional.leaky_relu(hidden_states, self.config.leaky_relu_slope)
hidden_states = self.upsampler[i](hidden_states)
res_state = self.resblocks[i * self.num_kernels](hidden_states)
for j in range(1, self.num_kernels):
res_state += self.resblocks[i * self.num_kernels + j](hidden_states)
hidden_states = res_state / self.num_kernels
hidden_states = nn.functional.leaky_relu(hidden_states)
hidden_states = self.conv_post(hidden_states)
hidden_states = torch.tanh(hidden_states)
if not is_batched:
# remove batch dim and collapse tensor to 1-d audio waveform
waveform = hidden_states.squeeze(0).transpose(1, 0).view(-1)
else:
# remove seq-len dim since this collapses to 1
waveform = hidden_states.squeeze(1)
return waveform
__all__ = [
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
| SpeechT5HifiGan |
python | EpistasisLab__tpot | tpot/builtin_modules/genetic_encoders.py | {
"start": 5530,
"end": 6822
} | class ____(TransformerMixin, BaseEstimator ):
"""This class contains the function definition for encoding the input features as a Over Dominance genetic model.
The encoding used is AA(0)->1, Aa(1)->2, aa(2)->0. """
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged.
Dummy function to fit in with the sklearn API and hence work in pipelines.
Parameters
----------
X : array-like
"""
return self
def transform(self, X, y=None):
"""Transform the data by applying the Heterosis encoding.
Parameters
----------
X : numpy ndarray, {n_samples, n_components}
New data, where n_samples is the number of samples (number of individuals)
and n_components is the number of components (number of features).
y : None
Unused
Returns
-------
X_transformed: numpy ndarray, {n_samples, n_components}
The encoded feature set
"""
X = check_array(X)
map = {0: 1, 1: 2, 2: 0}
mapping_function = np.vectorize(lambda i: map[i] if i in map else i)
X_transformed = mapping_function(X)
return X_transformed
| OverDominanceEncoder |
python | pydata__xarray | asv_bench/benchmarks/polyfit.py | {
"start": 133,
"end": 819
} | class ____:
def setup(self, *args, **kwargs):
self.xs = {nx: xr.DataArray(randn((nx,)), dims="x", name="x") for nx in NX}
self.coeffs = {
ndeg: xr.DataArray(
randn((ndeg,)), dims="degree", coords={"degree": np.arange(ndeg)}
)
for ndeg in NDEGS
}
@parameterized(["nx", "ndeg"], [NX, NDEGS])
def time_polyval(self, nx, ndeg):
x = self.xs[nx]
c = self.coeffs[ndeg]
xr.polyval(x, c).compute()
@parameterized(["nx", "ndeg"], [NX, NDEGS])
def peakmem_polyval(self, nx, ndeg):
x = self.xs[nx]
c = self.coeffs[ndeg]
xr.polyval(x, c).compute()
| Polyval |
python | django__django | tests/invalid_models_tests/test_ordinary_fields.py | {
"start": 594,
"end": 2170
} | class ____(SimpleTestCase):
def test_valid_case(self):
class Model(models.Model):
id = models.AutoField(primary_key=True)
field = Model._meta.get_field("id")
self.assertEqual(field.check(), [])
def test_primary_key(self):
# primary_key must be True. Refs #12467.
class Model(models.Model):
field = models.AutoField(primary_key=False)
# Prevent Django from autocreating `id` AutoField, which would
# result in an error, because a model must have exactly one
# AutoField.
another = models.IntegerField(primary_key=True)
field = Model._meta.get_field("field")
self.assertEqual(
field.check(),
[
Error(
"AutoFields must set primary_key=True.",
obj=field,
id="fields.E100",
),
],
)
def test_max_length_warning(self):
class Model(models.Model):
auto = models.AutoField(primary_key=True, max_length=2)
field = Model._meta.get_field("auto")
self.assertEqual(
field.check(),
[
DjangoWarning(
"'max_length' is ignored when used with %s."
% field.__class__.__name__,
hint="Remove 'max_length' from field",
obj=field,
id="fields.W122",
),
],
)
@isolate_apps("invalid_models_tests")
| AutoFieldTests |
python | getsentry__sentry-python | tests/integrations/strawberry/test_strawberry.py | {
"start": 1347,
"end": 1530
} | class ____:
@strawberry.field
def hello(self) -> str:
return "Hello World"
@strawberry.field
def error(self) -> int:
return 1 / 0
@strawberry.type
| Query |
python | getsentry__sentry | src/sentry/uptime/endpoints/serializers.py | {
"start": 5769,
"end": 7027
} | class ____(Serializer):
def serialize(
self, obj: EapCheckEntry, attrs, user, **kwargs
) -> EapCheckEntrySerializerResponse:
check_status = cast(SerializedCheckStatus, obj.check_status)
# XXX: Translate the status from `failed` to `failed_incident` when the
# check is part of an incident.
if check_status == "failure" and obj.incident_status == IncidentStatus.IN_INCIDENT:
check_status = "failure_incident"
region_config = get_region_config(obj.region)
region_name = region_config.name if region_config else "Unknown"
return {
"uptimeCheckId": obj.uptime_check_id,
"timestamp": obj.timestamp.strftime("%Y-%m-%dT%H:%M:%SZ"),
"scheduledCheckTime": obj.scheduled_check_time.strftime("%Y-%m-%dT%H:%M:%SZ"),
"checkStatus": check_status,
"checkStatusReason": obj.check_status_reason,
"httpStatusCode": obj.http_status_code,
"durationMs": obj.duration_ms,
"traceId": obj.trace_id,
"incidentStatus": obj.incident_status,
"environment": obj.environment,
"region": obj.region,
"regionName": region_name,
}
| EapCheckEntrySerializer |
python | astropy__astropy | astropy/io/fits/hdu/groups.py | {
"start": 2632,
"end": 8311
} | class ____(FITS_rec):
"""
Random groups data object.
Allows structured access to FITS Group data in a manner analogous
to tables.
"""
_record_type = Group
def __new__(
cls,
input=None,
bitpix=None,
pardata=None,
parnames=[],
bscale=None,
bzero=None,
parbscales=None,
parbzeros=None,
):
"""
Parameters
----------
input : array or FITS_rec instance
input data, either the group data itself (a
`numpy.ndarray`) or a record array (`FITS_rec`) which will
contain both group parameter info and the data. The rest
of the arguments are used only for the first case.
bitpix : int
data type as expressed in FITS ``BITPIX`` value (8, 16, 32,
64, -32, or -64)
pardata : sequence of array
parameter data, as a list of (numeric) arrays.
parnames : sequence of str
list of parameter names.
bscale : int
``BSCALE`` of the data
bzero : int
``BZERO`` of the data
parbscales : sequence of int
list of bscales for the parameters
parbzeros : sequence of int
list of bzeros for the parameters
"""
if not isinstance(input, FITS_rec):
if pardata is None:
npars = 0
else:
npars = len(pardata)
if parbscales is None:
parbscales = [None] * npars
if parbzeros is None:
parbzeros = [None] * npars
if parnames is None:
parnames = [f"PAR{idx + 1}" for idx in range(npars)]
if len(parnames) != npars:
raise ValueError(
"The number of parameter data arrays does "
"not match the number of parameters."
)
unique_parnames = _unique_parnames(parnames + ["DATA"])
if bitpix is None:
bitpix = DTYPE2BITPIX[input.dtype.name]
fits_fmt = GroupsHDU._bitpix2tform[bitpix] # -32 -> 'E'
format = FITS2NUMPY[fits_fmt] # 'E' -> 'f4'
data_fmt = f"{input.shape[1:]}{format}"
formats = ",".join(([format] * npars) + [data_fmt])
gcount = input.shape[0]
cols = [
Column(
name=unique_parnames[idx],
format=fits_fmt,
bscale=parbscales[idx],
bzero=parbzeros[idx],
)
for idx in range(npars)
]
cols.append(
Column(
name=unique_parnames[-1],
format=fits_fmt,
bscale=bscale,
bzero=bzero,
)
)
coldefs = ColDefs(cols)
self = FITS_rec.__new__(
cls,
np.rec.array(None, formats=formats, names=coldefs.names, shape=gcount),
)
# By default the data field will just be 'DATA', but it may be
# uniquified if 'DATA' is already used by one of the group names
self._data_field = unique_parnames[-1]
self._coldefs = coldefs
self.parnames = parnames
for idx, name in enumerate(unique_parnames[:-1]):
column = coldefs[idx]
# Note: _get_scale_factors is used here and in other cases
# below to determine whether the column has non-default
# scale/zero factors.
# TODO: Find a better way to do this than using this interface
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(name, pardata[idx])
else:
np.rec.recarray.field(self, idx)[:] = pardata[idx]
column = coldefs[self._data_field]
scale, zero = self._get_scale_factors(column)[3:5]
if scale or zero:
self._cache_field(self._data_field, input)
else:
np.rec.recarray.field(self, npars)[:] = input
else:
self = FITS_rec.__new__(cls, input)
self.parnames = None
return self
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if isinstance(obj, GroupData):
self.parnames = obj.parnames
elif isinstance(obj, FITS_rec):
self.parnames = obj._coldefs.names
def __getitem__(self, key):
out = super().__getitem__(key)
if isinstance(out, GroupData):
out.parnames = self.parnames
return out
@property
def data(self):
"""
The raw group data represented as a multi-dimensional `numpy.ndarray`
array.
"""
# The last column in the coldefs is the data portion of the group
return self.field(self._coldefs.names[-1])
@lazyproperty
def _unique(self):
return _par_indices(self.parnames)
def par(self, parname):
"""
Get the group parameter values.
"""
if _is_int(parname):
return self.field(parname)
first_index, *others = self._unique[parname.upper()]
if not others:
return self.field(first_index)
# if more than one group parameter have the same name
result = self.field(first_index).astype("f8")
for i in others:
result += self.field(i)
return result
| GroupData |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 122142,
"end": 122425
} | class ____(sgqlc.types.Enum):
"""Properties by which user status connections can be ordered.
Enumeration Choices:
* `UPDATED_AT`: Order user statuses by when they were updated.
"""
__schema__ = github_schema
__choices__ = ("UPDATED_AT",)
| UserStatusOrderField |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_tableau_utils.py | {
"start": 2743,
"end": 4422
} | class ____(TableauComponent):
# Store mock data as a class variable to share between methods
_mock_data = None
async def write_state_to_path(self, state_path):
"""Override to use mock data - we store it directly."""
import dagster as dg
# Create a mock workspace with the same credentials
mock_workspace = MockTableauWorkspace(**self.workspace.model_dump())
# Fetch and store mock data
MockTableauComponent._mock_data = mock_workspace.fetch_tableau_workspace_data()
# Serialize and write to path
state_path.write_text(dg.serialize_value(MockTableauComponent._mock_data))
def test_mock_tableau_workspace() -> None:
"""Test that the mock Tableau workspace returns the expected data."""
workspace = MockTableauWorkspace(
connected_app_client_id="test_client_id",
connected_app_secret_id="test_secret_id",
connected_app_secret_value="test_secret_value",
username="test_username",
site_name="test_site",
pod_name="10ax",
)
workspace_data = workspace.fetch_tableau_workspace_data()
# Verify we have the expected content
assert len(workspace_data.workbooks_by_id) == 1
assert len(workspace_data.sheets_by_id) == 1
assert len(workspace_data.dashboards_by_id) == 1
assert len(workspace_data.data_sources_by_id) == 1
# Verify specific content
assert "test_workbook_id" in workspace_data.workbooks_by_id
assert "test_sheet_id" in workspace_data.sheets_by_id
assert "test_dashboard_id" in workspace_data.dashboards_by_id
assert "test_datasource_id" in workspace_data.data_sources_by_id
| MockTableauComponent |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/2_Q_Learning_maze/maze_env.py | {
"start": 592,
"end": 4307
} | class ____(tk.Tk, object):
def __init__(self):
super(Maze, self).__init__()
self.action_space = ['u', 'd', 'l', 'r']
self.n_actions = len(self.action_space)
self.title('maze')
self.geometry('{0}x{1}'.format(MAZE_W * UNIT, MAZE_H * UNIT))
self._build_maze()
def _build_maze(self):
self.canvas = tk.Canvas(self, bg='white',
height=MAZE_H * UNIT,
width=MAZE_W * UNIT)
# create grids
for c in range(0, MAZE_W * UNIT, UNIT):
x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
self.canvas.create_line(x0, y0, x1, y1)
for r in range(0, MAZE_H * UNIT, UNIT):
x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r
self.canvas.create_line(x0, y0, x1, y1)
# create origin
origin = np.array([20, 20])
# hell
hell1_center = origin + np.array([UNIT * 2, UNIT])
self.hell1 = self.canvas.create_rectangle(
hell1_center[0] - 15, hell1_center[1] - 15,
hell1_center[0] + 15, hell1_center[1] + 15,
fill='black')
# hell
hell2_center = origin + np.array([UNIT, UNIT * 2])
self.hell2 = self.canvas.create_rectangle(
hell2_center[0] - 15, hell2_center[1] - 15,
hell2_center[0] + 15, hell2_center[1] + 15,
fill='black')
# create oval
oval_center = origin + UNIT * 2
self.oval = self.canvas.create_oval(
oval_center[0] - 15, oval_center[1] - 15,
oval_center[0] + 15, oval_center[1] + 15,
fill='yellow')
# create red rect
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# pack all
self.canvas.pack()
def reset(self):
self.update()
time.sleep(0.5)
self.canvas.delete(self.rect)
origin = np.array([20, 20])
self.rect = self.canvas.create_rectangle(
origin[0] - 15, origin[1] - 15,
origin[0] + 15, origin[1] + 15,
fill='red')
# return observation
return self.canvas.coords(self.rect)
def step(self, action):
s = self.canvas.coords(self.rect)
base_action = np.array([0, 0])
if action == 0: # up
if s[1] > UNIT:
base_action[1] -= UNIT
elif action == 1: # down
if s[1] < (MAZE_H - 1) * UNIT:
base_action[1] += UNIT
elif action == 2: # right
if s[0] < (MAZE_W - 1) * UNIT:
base_action[0] += UNIT
elif action == 3: # left
if s[0] > UNIT:
base_action[0] -= UNIT
self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent
s_ = self.canvas.coords(self.rect) # next state
# reward function
if s_ == self.canvas.coords(self.oval):
reward = 1
done = True
s_ = 'terminal'
elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
reward = -1
done = True
s_ = 'terminal'
else:
reward = 0
done = False
return s_, reward, done
def render(self):
time.sleep(0.1)
self.update()
def update():
for t in range(10):
s = env.reset()
while True:
env.render()
a = 1
s, r, done = env.step(a)
if done:
break
if __name__ == '__main__':
env = Maze()
env.after(100, update)
env.mainloop() | Maze |
python | django__django | tests/model_fields/models.py | {
"start": 3080,
"end": 3149
} | class ____(models.Model):
s = models.SlugField(max_length=255)
| BigS |
python | explosion__spaCy | spacy/pipeline/spancat.py | {
"start": 5782,
"end": 24983
} | class ____(TrainablePipe):
"""Pipeline component to label spans of text.
DOCS: https://spacy.io/api/spancategorizer
"""
def __init__(
self,
vocab: Vocab,
model: Model[Tuple[List[Doc], Ragged], Floats2d],
suggester: Suggester,
name: str = "spancat",
*,
add_negative_label: bool = False,
spans_key: str = "spans",
negative_weight: Optional[float] = 1.0,
allow_overlap: Optional[bool] = True,
max_positive: Optional[int] = None,
threshold: Optional[float] = 0.5,
scorer: Optional[Callable] = spancat_score,
) -> None:
"""Initialize the multi-label or multi-class span categorizer.
vocab (Vocab): The shared vocabulary.
model (thinc.api.Model): The Thinc Model powering the pipeline component.
For multi-class classification (single label per span) we recommend
using a Softmax classifier as a the final layer, while for multi-label
classification (multiple possible labels per span) we recommend Logistic.
suggester (Callable[[Iterable[Doc], Optional[Ops]], Ragged]): A function that suggests spans.
Spans are returned as a ragged array with two integer columns, for the
start and end positions.
name (str): The component instance name, used to add entries to the
losses during training.
spans_key (str): Key of the Doc.spans dict to save the spans under.
During initialization and training, the component will look for
spans on the reference document under the same key. Defaults to
`"spans"`.
add_negative_label (bool): Learn to predict a special 'negative_label'
when a Span is not annotated.
threshold (Optional[float]): Minimum probability to consider a prediction
positive. Defaults to 0.5. Spans with a positive prediction will be saved
on the Doc.
max_positive (Optional[int]): Maximum number of labels to consider
positive per span. Defaults to None, indicating no limit.
negative_weight (float): Multiplier for the loss terms.
Can be used to downweight the negative samples if there are too many
when add_negative_label is True. Otherwise its unused.
allow_overlap (bool): If True the data is assumed to contain overlapping spans.
Otherwise it produces non-overlapping spans greedily prioritizing
higher assigned label scores. Only used when max_positive is 1.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_spans for the Doc.spans[spans_key] with overlapping
spans allowed.
DOCS: https://spacy.io/api/spancategorizer#init
"""
self.cfg = {
"labels": [],
"spans_key": spans_key,
"threshold": threshold,
"max_positive": max_positive,
"negative_weight": negative_weight,
"allow_overlap": allow_overlap,
}
self.vocab = vocab
self.suggester = suggester
self.model = model
self.name = name
self.scorer = scorer
self.add_negative_label = add_negative_label
if not allow_overlap and max_positive is not None and max_positive > 1:
raise ValueError(Errors.E1051.format(max_positive=max_positive))
@property
def key(self) -> str:
"""Key of the doc.spans dict to save the spans under. During
initialization and training, the component will look for spans on the
reference document under the same key.
"""
return str(self.cfg["spans_key"])
def _allow_extra_label(self) -> None:
"""Raise an error if the component can not add any more labels."""
nO = None
if self.model.has_dim("nO"):
nO = self.model.get_dim("nO")
elif self.model.has_ref("output_layer") and self.model.get_ref(
"output_layer"
).has_dim("nO"):
nO = self.model.get_ref("output_layer").get_dim("nO")
if nO is not None and nO == self._n_labels:
if not self.is_resizable:
raise ValueError(
Errors.E922.format(name=self.name, nO=self.model.get_dim("nO"))
)
def add_label(self, label: str) -> int:
"""Add a new label to the pipe.
label (str): The label to add.
RETURNS (int): 0 if label is already present, otherwise 1.
DOCS: https://spacy.io/api/spancategorizer#add_label
"""
if not isinstance(label, str):
raise ValueError(Errors.E187)
if label in self.labels:
return 0
self._allow_extra_label()
self.cfg["labels"].append(label) # type: ignore
self.vocab.strings.add(label)
return 1
@property
def labels(self) -> Tuple[str]:
"""RETURNS (Tuple[str]): The labels currently added to the component.
DOCS: https://spacy.io/api/spancategorizer#labels
"""
return tuple(self.cfg["labels"]) # type: ignore
@property
def label_data(self) -> List[str]:
"""RETURNS (List[str]): Information about the component's labels.
DOCS: https://spacy.io/api/spancategorizer#label_data
"""
return list(self.labels)
@property
def _label_map(self) -> Dict[str, int]:
"""RETURNS (Dict[str, int]): The label map."""
return {label: i for i, label in enumerate(self.labels)}
@property
def _n_labels(self) -> int:
"""RETURNS (int): Number of labels."""
if self.add_negative_label:
return len(self.labels) + 1
else:
return len(self.labels)
@property
def _negative_label_i(self) -> Union[int, None]:
"""RETURNS (Union[int, None]): Index of the negative label."""
if self.add_negative_label:
return len(self.label_data)
else:
return None
def predict(self, docs: Iterable[Doc]):
"""Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict.
RETURNS: The models prediction for each document.
DOCS: https://spacy.io/api/spancategorizer#predict
"""
indices = self.suggester(docs, ops=self.model.ops)
if indices.lengths.sum() == 0:
scores = self.model.ops.alloc2f(0, 0)
else:
scores = self.model.predict((docs, indices)) # type: ignore
return indices, scores
def set_candidates(
self, docs: Iterable[Doc], *, candidates_key: str = "candidates"
) -> None:
"""Use the spancat suggester to add a list of span candidates to a list of docs.
This method is intended to be used for debugging purposes.
docs (Iterable[Doc]): The documents to modify.
candidates_key (str): Key of the Doc.spans dict to save the candidate spans under.
DOCS: https://spacy.io/api/spancategorizer#set_candidates
"""
suggester_output = self.suggester(docs, ops=self.model.ops)
for candidates, doc in zip(suggester_output, docs): # type: ignore
doc.spans[candidates_key] = []
for index in candidates.dataXd:
doc.spans[candidates_key].append(doc[index[0] : index[1]])
def set_annotations(self, docs: Iterable[Doc], indices_scores) -> None:
"""Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
scores: The scores to set, produced by SpanCategorizer.predict.
DOCS: https://spacy.io/api/spancategorizer#set_annotations
"""
indices, scores = indices_scores
offset = 0
for i, doc in enumerate(docs):
indices_i = indices[i].dataXd
allow_overlap = cast(bool, self.cfg["allow_overlap"])
if self.cfg["max_positive"] == 1:
doc.spans[self.key] = self._make_span_group_singlelabel(
doc,
indices_i,
scores[offset : offset + indices.lengths[i]],
allow_overlap,
)
else:
doc.spans[self.key] = self._make_span_group_multilabel(
doc,
indices_i,
scores[offset : offset + indices.lengths[i]],
)
offset += indices.lengths[i]
def update(
self,
examples: Iterable[Example],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None,
) -> Dict[str, float]:
"""Learn from a batch of documents and gold-standard information,
updating the pipe's model. Delegates to predict and get_loss.
examples (Iterable[Example]): A batch of Example objects.
drop (float): The dropout rate.
sgd (thinc.api.Optimizer): The optimizer.
losses (Dict[str, float]): Optional record of the loss during training.
Updated using the component name as the key.
RETURNS (Dict[str, float]): The updated losses dictionary.
DOCS: https://spacy.io/api/spancategorizer#update
"""
if losses is None:
losses = {}
losses.setdefault(self.name, 0.0)
validate_examples(examples, "SpanCategorizer.update")
self._validate_categories(examples)
if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples):
# Handle cases where there are no tokens in any docs.
return losses
docs = [eg.predicted for eg in examples]
spans = self.suggester(docs, ops=self.model.ops)
if spans.lengths.sum() == 0:
return losses
set_dropout_rate(self.model, drop)
scores, backprop_scores = self.model.begin_update((docs, spans))
loss, d_scores = self.get_loss(examples, (spans, scores))
backprop_scores(d_scores) # type: ignore
if sgd is not None:
self.finish_update(sgd)
losses[self.name] += loss
return losses
def get_loss(
self, examples: Iterable[Example], spans_scores: Tuple[Ragged, Floats2d]
) -> Tuple[float, float]:
"""Find the loss and gradient of loss for the batch of documents and
their predicted scores.
examples (Iterable[Examples]): The batch of examples.
spans_scores: Scores representing the model's predictions.
RETURNS (Tuple[float, float]): The loss and the gradient.
DOCS: https://spacy.io/api/spancategorizer#get_loss
"""
spans, scores = spans_scores
spans = Ragged(
self.model.ops.to_numpy(spans.data), self.model.ops.to_numpy(spans.lengths)
)
target = numpy.zeros(scores.shape, dtype=scores.dtype)
if self.add_negative_label:
negative_spans = numpy.ones((scores.shape[0]))
offset = 0
label_map = self._label_map
for i, eg in enumerate(examples):
# Map (start, end) offset of spans to the row in the d_scores array,
# so that we can adjust the gradient for predictions that were
# in the gold standard.
spans_index = {}
spans_i = spans[i].dataXd
for j in range(spans.lengths[i]):
start = int(spans_i[j, 0]) # type: ignore
end = int(spans_i[j, 1]) # type: ignore
spans_index[(start, end)] = offset + j
for gold_span in self._get_aligned_spans(eg):
key = (gold_span.start, gold_span.end)
if key in spans_index:
row = spans_index[key]
k = label_map[gold_span.label_]
target[row, k] = 1.0
if self.add_negative_label:
# delete negative label target.
negative_spans[row] = 0.0
# The target is a flat array for all docs. Track the position
# we're at within the flat array.
offset += spans.lengths[i]
target = self.model.ops.asarray(target, dtype="f") # type: ignore
if self.add_negative_label:
negative_samples = numpy.nonzero(negative_spans)[0]
target[negative_samples, self._negative_label_i] = 1.0 # type: ignore
# The target will have the values 0 (for untrue predictions) or 1
# (for true predictions).
# The scores should be in the range [0, 1].
# If the prediction is 0.9 and it's true, the gradient
# will be -0.1 (0.9 - 1.0).
# If the prediction is 0.9 and it's false, the gradient will be
# 0.9 (0.9 - 0.0)
d_scores = scores - target
if self.add_negative_label:
neg_weight = cast(float, self.cfg["negative_weight"])
if neg_weight != 1.0:
d_scores[negative_samples] *= neg_weight
loss = float((d_scores**2).sum())
return loss, d_scores
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
labels: Optional[List[str]] = None,
) -> None:
"""Initialize the pipe for training, using a representative set
of data examples.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Optional[Language]): The current nlp object the component is part of.
labels (Optional[List[str]]): The labels to add to the component, typically generated by the
`init labels` command. If no labels are provided, the get_examples
callback is used to extract the labels from the data.
DOCS: https://spacy.io/api/spancategorizer#initialize
"""
subbatch: List[Example] = []
if labels is not None:
for label in labels:
self.add_label(label)
for eg in get_examples():
if labels is None:
for span in eg.reference.spans.get(self.key, []):
self.add_label(span.label_)
if len(subbatch) < 10:
subbatch.append(eg)
self._require_labels()
if subbatch:
docs = [eg.x for eg in subbatch]
spans = build_ngram_suggester(sizes=[1])(docs)
Y = self.model.ops.alloc2f(spans.dataXd.shape[0], self._n_labels)
self.model.initialize(X=(docs, spans), Y=Y)
else:
self.model.initialize()
def _validate_categories(self, examples: Iterable[Example]):
# TODO
pass
def _get_aligned_spans(self, eg: Example):
return eg.get_aligned_spans_y2x(
eg.reference.spans.get(self.key, []), allow_overlap=True
)
def _make_span_group_multilabel(
self,
doc: Doc,
indices: Ints2d,
scores: Floats2d,
) -> SpanGroup:
"""Find the top-k labels for each span (k=max_positive)."""
spans = SpanGroup(doc, name=self.key)
if scores.size == 0:
return spans
scores = self.model.ops.to_numpy(scores)
indices = self.model.ops.to_numpy(indices)
threshold = self.cfg["threshold"]
max_positive = self.cfg["max_positive"]
keeps = scores >= threshold
if max_positive is not None:
assert isinstance(max_positive, int)
if self.add_negative_label:
negative_scores = numpy.copy(scores[:, self._negative_label_i])
scores[:, self._negative_label_i] = -numpy.inf
ranked = (scores * -1).argsort() # type: ignore
scores[:, self._negative_label_i] = negative_scores
else:
ranked = (scores * -1).argsort() # type: ignore
span_filter = ranked[:, max_positive:]
for i, row in enumerate(span_filter):
keeps[i, row] = False
attrs_scores = []
for i in range(indices.shape[0]):
start = indices[i, 0]
end = indices[i, 1]
for j, keep in enumerate(keeps[i]):
if keep:
if j != self._negative_label_i:
spans.append(Span(doc, start, end, label=self.labels[j]))
attrs_scores.append(scores[i, j])
spans.attrs["scores"] = numpy.array(attrs_scores)
return spans
def _make_span_group_singlelabel(
self,
doc: Doc,
indices: Ints2d,
scores: Floats2d,
allow_overlap: bool = True,
) -> SpanGroup:
"""Find the argmax label for each span."""
# Handle cases when there are zero suggestions
if scores.size == 0:
return SpanGroup(doc, name=self.key)
scores = self.model.ops.to_numpy(scores)
indices = self.model.ops.to_numpy(indices)
predicted = scores.argmax(axis=1)
argmax_scores = numpy.take_along_axis(
scores, numpy.expand_dims(predicted, 1), axis=1
)
keeps = numpy.ones(predicted.shape, dtype=bool)
# Remove samples where the negative label is the argmax.
if self.add_negative_label:
keeps = numpy.logical_and(keeps, predicted != self._negative_label_i)
# Filter samples according to threshold.
threshold = self.cfg["threshold"]
if threshold is not None:
keeps = numpy.logical_and(keeps, (argmax_scores >= threshold).squeeze())
# Sort spans according to argmax probability
if not allow_overlap:
# Get the probabilities
sort_idx = (argmax_scores.squeeze() * -1).argsort()
argmax_scores = argmax_scores[sort_idx]
predicted = predicted[sort_idx]
indices = indices[sort_idx]
keeps = keeps[sort_idx]
seen = _Intervals()
spans = SpanGroup(doc, name=self.key)
attrs_scores = []
for i in range(indices.shape[0]):
if not keeps[i]:
continue
label = predicted[i]
start = indices[i, 0]
end = indices[i, 1]
if not allow_overlap:
if (start, end) in seen:
continue
else:
seen.add(start, end)
attrs_scores.append(argmax_scores[i])
spans.append(Span(doc, start, end, label=self.labels[label]))
spans.attrs["scores"] = numpy.array(attrs_scores)
return spans
# Setup backwards compatibility hook for factories
def __getattr__(name):
if name == "make_spancat":
module = importlib.import_module("spacy.pipeline.factories")
return module.make_spancat
elif name == "make_spancat_singlelabel":
module = importlib.import_module("spacy.pipeline.factories")
return module.make_spancat_singlelabel
raise AttributeError(f"module {__name__} has no attribute {name}")
| SpanCategorizer |
python | django__django | django/contrib/admin/exceptions.py | {
"start": 194,
"end": 335
} | class ____(SuspiciousOperation):
"""Invalid to_field was passed to admin view via URL query string"""
pass
| DisallowedModelAdminToField |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/schemas/test_user_schema.py | {
"start": 2328,
"end": 3447
} | class ____(TestUserBase):
def test_serialize(self):
user_model = User(
first_name="Foo",
last_name="Bar",
username="test",
password="test",
email=TEST_EMAIL,
created_on=timezone.parse(DEFAULT_TIME),
changed_on=timezone.parse(DEFAULT_TIME),
)
self.session.add(user_model)
user_model.roles = [self.role]
self.session.commit()
user = self.session.scalars(select(User).where(User.email == TEST_EMAIL)).first()
deserialized_user = user_collection_item_schema.dump(user)
# No user_id and password in dump
assert deserialized_user == {
"created_on": DEFAULT_TIME,
"email": "test@example.org",
"changed_on": DEFAULT_TIME,
"active": True,
"last_login": None,
"last_name": "Bar",
"fail_login_count": None,
"first_name": "Foo",
"username": "test",
"login_count": None,
"roles": [{"name": "TestRole"}],
}
| TestUserCollectionItemSchema |
python | tensorflow__tensorflow | tensorflow/python/distribute/mirrored_strategy.py | {
"start": 7598,
"end": 11267
} | class ____(distribute_lib.Strategy):
"""Synchronous training across multiple replicas on one machine.
This strategy is typically used for training on one
machine with multiple GPUs. For TPUs, use
`tf.distribute.TPUStrategy`. To use `MirroredStrategy` with multiple workers,
please refer to `tf.distribute.experimental.MultiWorkerMirroredStrategy`.
For example, a variable created under a `MirroredStrategy` is a
`MirroredVariable`. If no devices are specified in the constructor argument of
the strategy then it will use all the available GPUs. If no GPUs are found, it
will use the available CPUs. Note that TensorFlow treats all CPUs on a
machine as a single device, and uses threads internally for parallelism.
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> with strategy.scope():
... x = tf.Variable(1.)
>>> x
MirroredVariable:{
0: <tf.Variable ... shape=() dtype=float32, numpy=1.0>,
1: <tf.Variable ... shape=() dtype=float32, numpy=1.0>
}
While using distribution strategies, all the variable creation should be done
within the strategy's scope. This will replicate the variables across all the
replicas and keep them in sync using an all-reduce algorithm.
Variables created inside a `MirroredStrategy` which is wrapped with a
`tf.function` are still `MirroredVariables`.
>>> x = []
>>> @tf.function # Wrap the function with tf.function.
... def create_variable():
... if not x:
... x.append(tf.Variable(1.))
... return x[0]
>>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])
>>> with strategy.scope():
... _ = create_variable()
... print(x[0])
MirroredVariable:{
0: <tf.Variable ... shape=() dtype=float32, numpy=1.0>,
1: <tf.Variable ... shape=() dtype=float32, numpy=1.0>
}
`experimental_distribute_dataset` can be used to distribute the dataset across
the replicas when writing your own training loop. If you are using `.fit` and
`.compile` methods available in `tf.keras`, then `tf.keras` will handle the
distribution for you.
For example:
```python
my_strategy = tf.distribute.MirroredStrategy()
with my_strategy.scope():
@tf.function
def distribute_train_epoch(dataset):
def replica_fn(input):
# process input and return result
return result
total_result = 0
for x in dataset:
per_replica_result = my_strategy.run(replica_fn, args=(x,))
total_result += my_strategy.reduce(tf.distribute.ReduceOp.SUM,
per_replica_result, axis=None)
return total_result
dist_dataset = my_strategy.experimental_distribute_dataset(dataset)
for _ in range(EPOCHS):
train_result = distribute_train_epoch(dist_dataset)
```
Args:
devices: a list of device strings such as `['/gpu:0', '/gpu:1']`. If
`None`, all available GPUs are used. If no GPUs are found, CPU is used.
cross_device_ops: optional, a descendant of `CrossDeviceOps`. If this is not
set, `NcclAllReduce()` will be used by default. One would customize this
if NCCL isn't available or if a special implementation that exploits
the particular hardware is available.
"""
# Only set this in tests.
_collective_key_base = 0
def __init__(self, devices=None, cross_device_ops=None):
extended = MirroredExtended(
self, devices=devices, cross_device_ops=cross_device_ops)
super(MirroredStrategy, self).__init__(extended)
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MirroredStrategy")
@tf_export(v1=["distribute.MirroredStrategy"])
| MirroredStrategy |
python | getsentry__sentry | src/sentry/models/deletedentry.py | {
"start": 174,
"end": 796
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
actor_label = models.CharField(max_length=64, null=True)
# if the entry was created via a user
actor_id = BoundedBigIntegerField(null=True)
# if the entry was created via an api key
actor_key = models.CharField(max_length=32, null=True)
ip_address = models.GenericIPAddressField(null=True, unpack_ipv4=True)
date_deleted = models.DateTimeField(default=timezone.now)
date_created = models.DateTimeField(null=True)
reason = models.TextField(blank=True, null=True)
class Meta:
abstract = True
| DeletedEntry |
python | spack__spack | lib/spack/spack/test/llnl/util/file_list.py | {
"start": 1637,
"end": 4759
} | class ____:
def test_repr(self, library_list):
x = eval(repr(library_list))
assert library_list == x
def test_joined_and_str(self, library_list):
s1 = library_list.joined()
expected = " ".join(
[
"/dir1/liblapack.%s" % plat_static_ext,
"/dir2/libpython3.6.%s"
% (plat_apple_shared_ext if sys.platform != "win32" else "dll"),
"/dir1/libblas.%s" % plat_static_ext,
"/dir3/libz.%s" % plat_shared_ext,
"libmpi.%s.20.10.1" % plat_shared_ext,
]
)
assert s1 == expected
s2 = str(library_list)
assert s1 == s2
s3 = library_list.joined(";")
expected = ";".join(
[
"/dir1/liblapack.%s" % plat_static_ext,
"/dir2/libpython3.6.%s"
% (plat_apple_shared_ext if sys.platform != "win32" else "dll"),
"/dir1/libblas.%s" % plat_static_ext,
"/dir3/libz.%s" % plat_shared_ext,
"libmpi.%s.20.10.1" % plat_shared_ext,
]
)
assert s3 == expected
def test_flags(self, library_list):
search_flags = library_list.search_flags
assert "-L/dir1" in search_flags
assert "-L/dir2" in search_flags
assert "-L/dir3" in search_flags
assert isinstance(search_flags, str)
assert search_flags == "-L/dir1 -L/dir2 -L/dir3"
link_flags = library_list.link_flags
assert "-llapack" in link_flags
assert "-lpython3.6" in link_flags
assert "-lblas" in link_flags
assert "-lz" in link_flags
assert "-lmpi" in link_flags
assert isinstance(link_flags, str)
assert link_flags == "-llapack -lpython3.6 -lblas -lz -lmpi"
ld_flags = library_list.ld_flags
assert isinstance(ld_flags, str)
assert ld_flags == search_flags + " " + link_flags
def test_paths_manipulation(self, library_list):
names = library_list.names
assert names == ["lapack", "python3.6", "blas", "z", "mpi"]
directories = library_list.directories
assert directories == ["/dir1", "/dir2", "/dir3"]
def test_get_item(self, library_list):
a = library_list[0]
assert a == "/dir1/liblapack.%s" % plat_static_ext
b = library_list[:]
assert type(b) is type(library_list)
assert library_list == b
assert library_list is not b
def test_add(self, library_list):
pylist = [
"/dir1/liblapack.%s" % plat_static_ext, # removed from the final list
"/dir2/libmpi.%s" % plat_shared_ext,
"/dir4/libnew.%s" % plat_static_ext,
]
another = LibraryList(pylist)
both = library_list + another
assert len(both) == 7
# Invariant
assert both == both + both
# Always produce an instance of LibraryList
assert type(library_list + pylist) is type(library_list)
assert type(pylist + library_list) is type(library_list)
| TestLibraryList |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 82071,
"end": 82215
} | class ____(BaseModel, extra="forbid"):
"""
Payload field
"""
key: str = Field(..., description="Payload field name")
| PayloadField |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 18197,
"end": 19443
} | class ____(BaseModel):
"""
Schema for DagRun model with minimal required fields needed for Runtime.
"""
model_config = ConfigDict(
extra="forbid",
)
dag_id: Annotated[str, Field(title="Dag Id")]
run_id: Annotated[str, Field(title="Run Id")]
logical_date: Annotated[AwareDatetime | None, Field(title="Logical Date")] = None
data_interval_start: Annotated[AwareDatetime | None, Field(title="Data Interval Start")] = None
data_interval_end: Annotated[AwareDatetime | None, Field(title="Data Interval End")] = None
run_after: Annotated[AwareDatetime, Field(title="Run After")]
start_date: Annotated[AwareDatetime, Field(title="Start Date")]
end_date: Annotated[AwareDatetime | None, Field(title="End Date")] = None
clear_number: Annotated[int | None, Field(title="Clear Number")] = 0
run_type: DagRunType
state: DagRunState
conf: Annotated[dict[str, Any] | None, Field(title="Conf")] = None
triggering_user_name: Annotated[str | None, Field(title="Triggering User Name")] = None
consumed_asset_events: Annotated[list[AssetEventDagRunReference], Field(title="Consumed Asset Events")]
partition_key: Annotated[str | None, Field(title="Partition Key")] = None
| DagRun |
python | facebook__pyre-check | tools/generate_taint_models/tests/test_functions.py | {
"start": 402,
"end": 573
} | class ____:
def __init__(self) -> None:
...
def methodA(self, x: int) -> None:
...
def methodB(self, *args: str) -> None:
...
| TestClass |
python | getsentry__sentry | src/sentry/integrations/discord/actions/issue_alert/notification.py | {
"start": 940,
"end": 4582
} | class ____(IntegrationEventAction):
id = "sentry.integrations.discord.notify_action.DiscordNotifyServiceAction"
label = "Send a notification to the {server} Discord server in the channel with ID or URL: {channel_id} and show tags {tags} in the notification."
prompt = "Send a Discord notification"
provider = IntegrationProviderSlug.DISCORD.value
integration_key = "server"
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.form_fields = {
"server": {
"type": "choice",
"choices": [(i.id, i.name) for i in self.get_integrations()],
},
"channel_id": {"type": "string", "placeholder": "paste channel ID or URL here"},
"tags": {"type": "string", "placeholder": "e.g., environment,user,my_tag"},
}
def after(
self, event: GroupEvent, notification_uuid: str | None = None
) -> Generator[CallbackFuture]:
channel_id = self.get_option("channel_id")
tags = set(self.get_tags_list())
integration = self.get_integration()
if not integration:
# Integration removed, but rule still active
return
def send_notification(event: GroupEvent, futures: Sequence[RuleFuture]) -> None:
rules = [f.rule for f in futures]
message = DiscordIssuesMessageBuilder(
event.group, event=event, tags=tags, rules=rules
).build(notification_uuid=notification_uuid)
client = DiscordClient()
with MessagingInteractionEvent(
interaction_type=MessagingInteractionType.SEND_ISSUE_ALERT_NOTIFICATION,
spec=DiscordMessagingSpec(),
).capture() as lifecycle:
try:
lifecycle.add_extras({"integration_id": integration.id, "channel": channel_id})
client.send_message(channel_id, message)
except ApiError as error:
# Errors that we recieve from the Discord API
record_lifecycle_termination_level(lifecycle, error)
except Exception as e:
lifecycle.add_extras(
{
"project_id": event.project_id,
"event_id": event.event_id,
"guild_id": integration.external_id,
"channel_id": channel_id,
}
)
lifecycle.record_failure(e)
rule = rules[0] if rules else None
self.record_notification_sent(event, channel_id, rule, notification_uuid)
key = f"discord:{integration.id}:{channel_id}"
metrics.incr(
"notifications.sent",
instance="discord.notifications",
tags={
"issue_type": event.group.issue_type.slug,
},
skip_internal=False,
)
yield self.future(send_notification, key=key)
def render_label(self) -> str:
tags = self.get_tags_list()
return self.label.format(
server=self.get_integration_name(),
channel_id=self.get_option("channel_id"),
tags="[{}]".format(", ".join(tags)),
)
def get_tags_list(self) -> Sequence[str]:
return [s.strip() for s in self.get_option("tags", "").split(",")]
def get_form_instance(self) -> DiscordNotifyServiceForm:
return DiscordNotifyServiceForm(self.data, integrations=self.get_integrations())
| DiscordNotifyServiceAction |
python | python__mypy | mypy/test/testipc.py | {
"start": 774,
"end": 3966
} | class ____(TestCase):
def setUp(self) -> None:
if sys.platform == "linux":
# The default "fork" start method is potentially unsafe
self.ctx = get_context("forkserver")
else:
self.ctx = get_context("spawn")
def test_transaction_large(self) -> None:
queue: Queue[str] = self.ctx.Queue()
msg = "t" * 200000 # longer than the max read size of 100_000
p = self.ctx.Process(target=server, args=(msg, queue), daemon=True)
p.start()
connection_name = queue.get()
with IPCClient(connection_name, timeout=1) as client:
assert client.read() == msg
client.write("test")
queue.close()
queue.join_thread()
p.join()
def test_connect_twice(self) -> None:
queue: Queue[str] = self.ctx.Queue()
msg = "this is a test message"
p = self.ctx.Process(target=server, args=(msg, queue), daemon=True)
p.start()
connection_name = queue.get()
with IPCClient(connection_name, timeout=1) as client:
assert client.read() == msg
client.write("") # don't let the server hang up yet, we want to connect again.
with IPCClient(connection_name, timeout=1) as client:
assert client.read() == msg
client.write("test")
queue.close()
queue.join_thread()
p.join()
assert p.exitcode == 0
def test_multiple_messages(self) -> None:
queue: Queue[str] = self.ctx.Queue()
p = self.ctx.Process(target=server_multi_message_echo, args=(queue,), daemon=True)
p.start()
connection_name = queue.get()
with IPCClient(connection_name, timeout=1) as client:
# "foo bar" with extra accents on letters.
# In UTF-8 encoding so we don't confuse editors opening this file.
fancy_text = b"f\xcc\xb6o\xcc\xb2\xf0\x9d\x91\x9c \xd0\xb2\xe2\xb7\xa1a\xcc\xb6r\xcc\x93\xcd\x98\xcd\x8c"
client.write(fancy_text.decode("utf-8"))
assert client.read() == fancy_text.decode("utf-8")
client.write("Test with spaces")
client.write("Test write before reading previous")
time.sleep(0) # yield to the server to force reading of all messages by server.
assert client.read() == "Test with spaces"
assert client.read() == "Test write before reading previous"
client.write("quit")
assert client.read() == "quit"
queue.close()
queue.join_thread()
p.join()
assert p.exitcode == 0
# Run test_connect_twice a lot, in the hopes of finding issues.
# This is really slow, so it is skipped, but can be enabled if
# needed to debug IPC issues.
@pytest.mark.skip
def test_connect_alot(self) -> None:
t0 = time.time()
for i in range(1000):
try:
print(i, "start")
self.test_connect_twice()
finally:
t1 = time.time()
print(i, t1 - t0)
sys.stdout.flush()
t0 = t1
if __name__ == "__main__":
main()
| IPCTests |
python | realpython__materials | geoshops/nearbyshops/views.py | {
"start": 541,
"end": 784
} | class ____(generic.ListView):
model = Shop
context_object_name = "shops"
queryset = Shop.objects.annotate(
distance=Distance("location", user_location)
).order_by("distance")[0:6]
template_name = "shops/index.html"
| Home |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 14916,
"end": 16188
} | class ____(Layer):
"""Applies an activation function to an output.
Args:
activation: Activation function, such as `tf.nn.relu`, or string name of
built-in activation function, such as "relu".
Usage:
>>> layer = tf.keras.layers.Activation('relu')
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[0.0, 0.0, 0.0, 2.0]
>>> layer = tf.keras.layers.Activation(tf.nn.relu)
>>> output = layer([-3.0, -1.0, 0.0, 2.0])
>>> list(output.numpy())
[0.0, 0.0, 0.0, 2.0]
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the batch axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| Activation |
python | django-import-export__django-import-export | tests/core/models.py | {
"start": 1697,
"end": 2140
} | class ____(models.Manager):
"""
Added to enable get_by_natural_key method
NOTE: Manager classes are only required to enable
using the natural key functionality of ForeignKeyWidget
"""
def get_by_natural_key(self, name, author):
"""
Django pattern function for returning a book by its natural key
"""
return self.get(name=name, author=Author.objects.get_by_natural_key(author))
| BookManager |
python | astropy__astropy | astropy/io/ascii/basic.py | {
"start": 1630,
"end": 1862
} | class ____(BasicHeader):
"""
Reader for table header without a header.
Set the start of header line number to `None`, which tells the basic
reader there is no header line.
"""
start_line = None
| NoHeaderHeader |
python | plotly__plotly.py | plotly/graph_objs/scattersmith/_hoverlabel.py | {
"start": 233,
"end": 11276
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattersmith"
_path_str = "scattersmith.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattersmith.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scattersmith.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattersmith.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattersmith.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattersmith.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | ipython__ipython | tests/test_oinspect.py | {
"start": 4233,
"end": 15988
} | class ____(object):
"""Attribute accesses always get another copy of the same class.
unittest.mock.call does something similar, but it's not ideal for testing
as the failure mode is to eat all your RAM. This gives up after 10k levels.
"""
def __init__(self, max_fibbing_twig, lies_told=0):
if lies_told > 10000:
raise RuntimeError("Nose too long, honesty is the best policy")
self.max_fibbing_twig = max_fibbing_twig
self.lies_told = lies_told
max_fibbing_twig[0] = max(max_fibbing_twig[0], lies_told)
def __getattr__(self, item):
return SerialLiar(self.max_fibbing_twig, self.lies_told + 1)
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
def test_info():
"Check that Inspector.info fills out various fields as expected."
i = inspector.info(Call, oname="Call")
assert i["type_name"] == "type"
expected_class = str(type(type)) # <class 'type'> (Python 3) or <type 'type'>
assert i["base_class"] == expected_class
assert re.search(
"<class 'tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>",
i["string_form"],
)
fname = __file__
if fname.endswith(".pyc"):
fname = fname[:-1]
# case-insensitive comparison needed on some filesystems
# e.g. Windows:
assert i["file"].lower() == compress_user(fname).lower()
assert i["definition"] == None
assert i["docstring"] == Call.__doc__
assert i["source"] == None
assert i["isclass"] is True
assert i["init_definition"] == "Call(x, y=1)"
assert i["init_docstring"] == Call.__init__.__doc__
i = inspector.info(Call, detail_level=1)
assert i["source"] is not None
assert i["docstring"] == None
c = Call(1)
c.__doc__ = "Modified instance docstring"
i = inspector.info(c)
assert i["type_name"] == "Call"
assert i["docstring"] == "Modified instance docstring"
assert i["class_docstring"] == Call.__doc__
assert i["init_docstring"] == Call.__init__.__doc__
assert i["call_docstring"] == Call.__call__.__doc__
def test_class_signature():
info = inspector.info(HasSignature, "HasSignature")
assert info["init_definition"] == "HasSignature(test)"
assert info["init_docstring"] == HasSignature.__init__.__doc__
def test_info_awkward():
# Just test that this doesn't throw an error.
inspector.info(Awkward())
def test_bool_raise():
inspector.info(NoBoolCall())
def test_info_serialliar():
fib_tracker = [0]
inspector.info(SerialLiar(fib_tracker))
# Nested attribute access should be cut off at 100 levels deep to avoid
# infinite loops: https://github.com/ipython/ipython/issues/9122
assert fib_tracker[0] < 9000
def support_function_one(x, y=2, *a, **kw):
"""A simple function."""
def test_calldef_none():
# We should ignore __call__ for all of these.
for obj in [support_function_one, SimpleClass().method, any, str.upper]:
i = inspector.info(obj)
assert i["call_def"] is None
def f_kwarg(pos, *, kwonly):
pass
def test_definition_kwonlyargs():
i = inspector.info(f_kwarg, oname="f_kwarg") # analysis:ignore
assert i["definition"] == "f_kwarg(pos, *, kwonly)"
def test_getdoc():
class A(object):
"""standard docstring"""
pass
class B(object):
"""standard docstring"""
def getdoc(self):
return "custom docstring"
class C(object):
"""standard docstring"""
def getdoc(self):
return None
a = A()
b = B()
c = C()
assert oinspect.getdoc(a) == "standard docstring"
assert oinspect.getdoc(b) == "custom docstring"
assert oinspect.getdoc(c) == "standard docstring"
def test_empty_property_has_no_source():
i = inspector.info(property(), detail_level=1)
assert i["source"] is None
def test_property_sources():
# A simple adder whose source and signature stays
# the same across Python distributions
def simple_add(a, b):
"Adds two numbers"
return a + b
class A(object):
@property
def foo(self):
return "bar"
foo = foo.setter(lambda self, v: setattr(self, "bar", v))
dname = property(oinspect.getdoc)
adder = property(simple_add)
i = inspector.info(A.foo, detail_level=1)
assert "def foo(self):" in i["source"]
assert "lambda self, v:" in i["source"]
i = inspector.info(A.dname, detail_level=1)
assert "def getdoc(obj)" in i["source"]
i = inspector.info(A.adder, detail_level=1)
assert "def simple_add(a, b)" in i["source"]
def test_property_docstring_is_in_info_for_detail_level_0():
class A(object):
@property
def foobar(self):
"""This is `foobar` property."""
pass
ip.user_ns["a_obj"] = A()
assert (
"This is `foobar` property."
== ip.object_inspect("a_obj.foobar", detail_level=0)["docstring"]
)
ip.user_ns["a_cls"] = A
assert (
"This is `foobar` property."
== ip.object_inspect("a_cls.foobar", detail_level=0)["docstring"]
)
def test_pdef():
# See gh-1914
def foo():
pass
inspector.pdef(foo, "foo")
@contextmanager
def cleanup_user_ns(**kwargs):
"""
On exit delete all the keys that were not in user_ns before entering.
It does not restore old values !
Parameters
----------
**kwargs
used to update ip.user_ns
"""
try:
known = set(ip.user_ns.keys())
ip.user_ns.update(kwargs)
yield
finally:
added = set(ip.user_ns.keys()) - known
for k in added:
del ip.user_ns[k]
def test_pinfo_bool_raise():
"""
Test that bool method is not called on parent.
"""
class RaiseBool:
attr = None
def __bool__(self):
raise ValueError("pinfo should not access this method")
raise_bool = RaiseBool()
with cleanup_user_ns(raise_bool=raise_bool):
ip._inspect("pinfo", "raise_bool.attr", detail_level=0)
def test_pinfo_getindex():
def dummy():
"""
MARKER
"""
container = [dummy]
with cleanup_user_ns(container=container):
with AssertPrints("MARKER"):
ip._inspect("pinfo", "container[0]", detail_level=0)
assert "container" not in ip.user_ns.keys()
def test_qmark_getindex():
def dummy():
"""
MARKER 2
"""
container = [dummy]
with cleanup_user_ns(container=container):
with AssertPrints("MARKER 2"):
ip.run_cell("container[0]?")
assert "container" not in ip.user_ns.keys()
def test_qmark_getindex_negatif():
def dummy():
"""
MARKER 3
"""
container = [dummy]
with cleanup_user_ns(container=container):
with AssertPrints("MARKER 3"):
ip.run_cell("container[-1]?")
assert "container" not in ip.user_ns.keys()
def test_pinfo_nonascii():
# See gh-1177
from . import nonascii2
ip.user_ns["nonascii2"] = nonascii2
ip._inspect("pinfo", "nonascii2", detail_level=1)
def test_pinfo_type():
"""
type can fail in various edge case, for example `type.__subclass__()`
"""
ip._inspect("pinfo", "type")
def test_pinfo_docstring_no_source():
"""Docstring should be included with detail_level=1 if there is no source"""
with AssertPrints("Docstring:"):
ip._inspect("pinfo", "str.format", detail_level=0)
with AssertPrints("Docstring:"):
ip._inspect("pinfo", "str.format", detail_level=1)
def test_pinfo_no_docstring_if_source():
"""Docstring should not be included with detail_level=1 if source is found"""
def foo():
"""foo has a docstring"""
ip.user_ns["foo"] = foo
with AssertPrints("Docstring:"):
ip._inspect("pinfo", "foo", detail_level=0)
with AssertPrints("Source:"):
ip._inspect("pinfo", "foo", detail_level=1)
with AssertNotPrints("Docstring:"):
ip._inspect("pinfo", "foo", detail_level=1)
def test_pinfo_docstring_if_detail_and_no_source():
"""Docstring should be displayed if source info not available"""
obj_def = '''class Foo(object):
""" This is a docstring for Foo """
def bar(self):
""" This is a docstring for Foo.bar """
pass
'''
ip.run_cell(obj_def)
ip.run_cell("foo = Foo()")
with AssertNotPrints("Source:"):
with AssertPrints("Docstring:"):
ip._inspect("pinfo", "foo", detail_level=0)
with AssertPrints("Docstring:"):
ip._inspect("pinfo", "foo", detail_level=1)
with AssertPrints("Docstring:"):
ip._inspect("pinfo", "foo.bar", detail_level=0)
with AssertNotPrints("Docstring:"):
with AssertPrints("Source:"):
ip._inspect("pinfo", "foo.bar", detail_level=1)
@pytest.mark.xfail(
sys.version_info.releaselevel not in ("final", "candidate"),
reason="fails on 3.13.dev",
strict=True,
)
def test_pinfo_docstring_dynamic(capsys):
obj_def = """class Bar:
__custom_documentations__ = {
"prop" : "cdoc for prop",
"non_exist" : "cdoc for non_exist",
}
@property
def prop(self):
'''
Docstring for prop
'''
return self._prop
@prop.setter
def prop(self, v):
self._prop = v
"""
ip.run_cell(obj_def)
ip.run_cell("b = Bar()")
ip.run_line_magic("pinfo", "b.prop")
captured = capsys.readouterr()
assert re.search(r"Docstring:\s+cdoc for prop", captured.out)
ip.run_line_magic("pinfo", "b.non_exist")
captured = capsys.readouterr()
assert re.search(r"Docstring:\s+cdoc for non_exist", captured.out)
ip.run_cell("b.prop?")
captured = capsys.readouterr()
assert re.search(r"Docstring:\s+cdoc for prop", captured.out)
ip.run_cell("b.non_exist?")
captured = capsys.readouterr()
assert re.search(r"Docstring:\s+cdoc for non_exist", captured.out)
ip.run_cell("b.undefined?")
captured = capsys.readouterr()
assert re.search(r"Type:\s+NoneType", captured.out)
def test_pinfo_magic():
with AssertPrints("Docstring:"):
ip._inspect("pinfo", "lsmagic", detail_level=0)
with AssertPrints("Source:"):
ip._inspect("pinfo", "lsmagic", detail_level=1)
def test_init_colors():
# ensure colors are not present in signature info
info = inspector.info(HasSignature)
init_def = info["init_definition"]
assert "[0m" not in init_def
def test_builtin_init():
info = inspector.info(list)
init_def = info["init_definition"]
assert init_def is not None
def test_render_signature_short():
def short_fun(a=1):
pass
sig = oinspect._render_signature(
signature(short_fun),
short_fun.__name__,
)
assert sig == "short_fun(a=1)"
def test_render_signature_long():
from typing import Optional
def long_function(
a_really_long_parameter: int,
and_another_long_one: bool = False,
let_us_make_sure_this_is_looong: Optional[str] = None,
) -> bool:
pass
sig = oinspect._render_signature(
signature(long_function),
long_function.__name__,
)
expected = """\
long_function(
a_really_long_parameter: int,
and_another_long_one: bool = False,
let_us_make_sure_this_is_looong: Optional[str] = None,
) -> bool\
"""
if sys.version_info >= (3, 14):
expected = expected.replace("Optional[str]", "str | None")
assert sig == expected
| SerialLiar |
python | gevent__gevent | src/greentest/3.14/test_weakref.py | {
"start": 41625,
"end": 71445
} | class ____(TestBase):
COUNT = 10
if support.check_sanitizer(thread=True) and support.Py_GIL_DISABLED:
# Reduce iteration count to get acceptable latency
NUM_THREADED_ITERATIONS = 1000
else:
NUM_THREADED_ITERATIONS = 100000
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
gc_collect() # For PyPy or other GCs.
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
# Since underlying dict is ordered, first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_valued_union_operators(self):
a = C()
b = C()
c = C()
wvd1 = weakref.WeakValueDictionary({1: a})
wvd2 = weakref.WeakValueDictionary({1: b, 2: a})
wvd3 = wvd1.copy()
d1 = {1: c, 3: b}
pairs = [(5, c), (6, b)]
tmp1 = wvd1 | wvd2 # Between two WeakValueDictionaries
self.assertEqual(dict(tmp1), dict(wvd1) | dict(wvd2))
self.assertIs(type(tmp1), weakref.WeakValueDictionary)
wvd1 |= wvd2
self.assertEqual(wvd1, tmp1)
tmp2 = wvd2 | d1 # Between WeakValueDictionary and mapping
self.assertEqual(dict(tmp2), dict(wvd2) | d1)
self.assertIs(type(tmp2), weakref.WeakValueDictionary)
wvd2 |= d1
self.assertEqual(wvd2, tmp2)
tmp3 = wvd3.copy() # Between WeakValueDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wvd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakValueDictionary)
tmp4 = d1 | wvd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wvd3))
self.assertIs(type(tmp4), weakref.WeakValueDictionary)
del a
self.assertNotIn(2, tmp1)
self.assertNotIn(2, tmp2)
self.assertNotIn(1, tmp3)
self.assertNotIn(1, tmp4)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
o1 = C()
o2 = C()
o3 = C()
wkd1 = weakref.WeakKeyDictionary({o1: 1, o2: 2})
wkd2 = weakref.WeakKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: '5', o3: '6'}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), weakref.WeakKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), weakref.WeakKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), weakref.WeakKeyDictionary)
del o1
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
@threading_helper.requires_working_threading()
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(self.NUM_THREADED_ITERATIONS):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
@threading_helper.requires_working_threading()
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(self.NUM_THREADED_ITERATIONS):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
@threading_helper.requires_working_threading()
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(2 * self.NUM_THREADED_ITERATIONS):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
@support.cpython_only
def test_weak_valued_consistency(self):
# A single-threaded, deterministic repro for issue #28427: old keys
# should not remove new values from WeakValueDictionary. This relies on
# an implementation detail of CPython's WeakValueDictionary (its
# underlying dictionary of KeyedRefs) to reproduce the issue.
d = weakref.WeakValueDictionary()
with support.disable_gc():
d[10] = RefCycle()
# Keep the KeyedRef alive after it's replaced so that GC will invoke
# the callback.
wr = d.data[10]
# Replace the value with something that isn't cyclic garbage
o = RefCycle()
d[10] = o
# Trigger GC, which will invoke the callback for `wr`
gc.collect()
self.assertEqual(len(d), 1)
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
@threading_helper.requires_working_threading()
@support.requires_resource('cpu')
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
@threading_helper.requires_working_threading()
@support.requires_resource('cpu')
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
@threading_helper.requires_working_threading()
@support.requires_resource('cpu')
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
@threading_helper.requires_working_threading()
@support.requires_resource('cpu')
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
from test import mapping_tests
| MappingTestCase |
python | Pylons__pyramid | tests/test_scripts/test_pshell.py | {
"start": 14055,
"end": 14378
} | class ____:
def __init__(self, entry_points):
self._entry_points = entry_points
if sys.version_info >= (3, 10):
def select(self, **kw):
return iter(self._entry_points)
else: # pragma no cover
def get(self, key):
return list(self._entry_points)
| DummyEntryPoints |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 20000,
"end": 20494
} | class ____(models.Model):
foreign_key = models.ForeignKey(ForeignKeyTargetModel, related_name='reverse_foreign_key', on_delete=models.CASCADE)
many_to_many = models.ManyToManyField(ManyToManyTargetModel, related_name='reverse_many_to_many')
one_to_one = models.OneToOneField(OneToOneTargetModel, related_name='reverse_one_to_one', on_delete=models.CASCADE)
through = models.ManyToManyField(ThroughTargetModel, through=Supplementary, related_name='reverse_through')
| RelationalModel |
python | kamyu104__LeetCode-Solutions | Python/minimum-operations-to-make-elements-within-k-subarrays-equal.py | {
"start": 106,
"end": 2364
} | class ____(object):
def minOperations(self, nums, x, k):
"""
:type nums: List[int]
:type x: int
:type k: int
:rtype: int
"""
class SlidingWindow(object):
def __init__(self):
self.left = SortedList()
self.right = SortedList()
self.total1 = self.total2 = 0
def add(self, val):
if not self.left or val <= self.left[-1]:
self.left.add(val)
self.total1 += val
else:
self.right.add(val)
self.total2 += val
self.rebalance()
def remove(self, val):
if val <= self.left[-1]:
self.left.remove(val)
self.total1 -= val
else:
self.right.remove(val)
self.total2 -= val
self.rebalance()
def rebalance(self):
if len(self.left) < len(self.right):
self.total2 -= self.right[0]
self.total1 += self.right[0]
self.left.add(self.right[0])
self.right.pop(0)
elif len(self.left) > len(self.right)+1:
self.total1 -= self.left[-1]
self.total2 += self.left[-1]
self.right.add(self.left[-1])
self.left.pop()
def median(self):
return self.left[-1]
INF = float("inf")
sw = SlidingWindow()
cost = [INF]*(len(nums)+1)
for i in xrange(len(nums)):
if i-x >= 0:
sw.remove(nums[i-x])
sw.add(nums[i])
if i >= x-1:
cost[i+1] = (sw.median()*len(sw.left)-sw.total1) + (sw.total2-sw.median()*len(sw.right))
dp = [0]*(len(nums)+1)
for i in xrange(k):
new_dp = [INF]*(len(nums)+1)
for j in xrange((i+1)*x, len(nums)+1):
new_dp[j] = min(new_dp[j-1], dp[j-x]+cost[j])
dp = new_dp
return dp[-1]
# Time: O(nlogx + k * n)
# Space: O(n)
import heapq
import collections
# two heaps, dp
| Solution |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_deployments.py | {
"start": 109446,
"end": 114061
} | class ____:
@pytest.fixture
async def deployment_schedule(self, session, deployment):
schedules = await models.deployments.read_deployment_schedules(
session=session, deployment_id=deployment.id
)
assert len(schedules) == 1
return schedules[0]
async def test_schedule_deployment(
self, client, session, deployment, deployment_schedule
):
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 0
await client.post(f"/deployments/{deployment.id}/schedule")
runs = await models.flow_runs.read_flow_runs(session)
expected_dates = await deployment_schedule.schedule.get_dates(
n=PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS.value(),
start=now_fn("UTC"),
end=now_fn("UTC")
+ PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME.value(),
)
actual_dates = {r.state.state_details.scheduled_time for r in runs}
assert actual_dates == set(expected_dates)
async def test_schedule_deployment_provide_runs(
self, client, session, deployment, deployment_schedule
):
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 0
await client.post(
f"/deployments/{deployment.id}/schedule", json=dict(min_runs=5)
)
runs = await models.flow_runs.read_flow_runs(session)
expected_dates = await deployment_schedule.schedule.get_dates(
n=5,
start=now_fn("UTC"),
end=now_fn("UTC")
+ PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME.value(),
)
actual_dates = {r.state.state_details.scheduled_time for r in runs}
assert actual_dates == set(expected_dates)
async def test_schedule_deployment_start_time(
self, client, session, deployment, deployment_schedule
):
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 0
await client.post(
f"/deployments/{deployment.id}/schedule",
json=dict(start_time=str(now_fn("UTC") + datetime.timedelta(days=120))),
)
runs = await models.flow_runs.read_flow_runs(session)
expected_dates = await deployment_schedule.schedule.get_dates(
n=PREFECT_API_SERVICES_SCHEDULER_MIN_RUNS.value(),
start=now_fn("UTC") + datetime.timedelta(days=120),
end=now_fn("UTC")
+ datetime.timedelta(days=120)
+ PREFECT_API_SERVICES_SCHEDULER_MAX_SCHEDULED_TIME.value(),
)
actual_dates = {r.state.state_details.scheduled_time for r in runs}
assert actual_dates == set(expected_dates)
async def test_schedule_deployment_end_time(
self, client, session, deployment, deployment_schedule
):
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 0
await client.post(
f"/deployments/{deployment.id}/schedule",
json=dict(
end_time=str(now_fn("UTC") + datetime.timedelta(days=7)),
# schedule a large number of min runs to see the effect of end_time
min_runs=100,
),
)
runs = await models.flow_runs.read_flow_runs(session)
expected_dates = await deployment_schedule.schedule.get_dates(
n=100,
start=now_fn("UTC"),
end=now_fn("UTC") + datetime.timedelta(days=7),
)
actual_dates = {r.state.state_details.scheduled_time for r in runs}
assert actual_dates == set(expected_dates)
assert len(actual_dates) == 7
async def test_schedule_deployment_backfill(
self, client, session, deployment, deployment_schedule
):
n_runs = await models.flow_runs.count_flow_runs(session)
assert n_runs == 0
await client.post(
f"/deployments/{deployment.id}/schedule",
json=dict(
start_time=str(now_fn("UTC") - datetime.timedelta(days=20)),
end_time=str(now_fn("UTC")),
min_runs=100,
),
)
runs = await models.flow_runs.read_flow_runs(session)
expected_dates = await deployment_schedule.schedule.get_dates(
n=100,
start=now_fn("UTC") - datetime.timedelta(days=20),
end=now_fn("UTC"),
)
actual_dates = {r.state.state_details.scheduled_time for r in runs}
assert actual_dates == set(expected_dates)
assert len(actual_dates) == 20
| TestScheduleDeployment |
python | astropy__astropy | astropy/io/fits/column.py | {
"start": 74760,
"end": 77196
} | class ____(ColDefs):
"""ColDefs implementation for ASCII tables."""
_padding_byte = " "
_col_format_cls = _AsciiColumnFormat
def __init__(self, input, ascii=True):
super().__init__(input)
# if the format of an ASCII column has no width, add one
if not isinstance(input, _AsciiColDefs):
self._update_field_metrics()
else:
for idx, s in enumerate(input.starts):
self.columns[idx].start = s
self._spans = input.spans
self._width = input._width
@lazyproperty
def dtype(self):
dtype = {}
for j in range(len(self)):
data_type = "S" + str(self.spans[j])
dtype[self.names[j]] = (data_type, self.starts[j] - 1)
return np.dtype(dtype)
@property
def spans(self):
"""A list of the widths of each field in the table."""
return self._spans
@lazyproperty
def _recformats(self):
if len(self) == 1:
widths = []
else:
widths = [y - x for x, y in pairwise(self.starts)]
# Widths is the width of each field *including* any space between
# fields; this is so that we can map the fields to string records in a
# Numpy recarray
widths.append(self._width - self.starts[-1] + 1)
return ["S" + str(w) for w in widths]
def add_col(self, column):
super().add_col(column)
self._update_field_metrics()
def del_col(self, col_name):
super().del_col(col_name)
self._update_field_metrics()
def _update_field_metrics(self):
"""
Updates the list of the start columns, the list of the widths of each
field, and the total width of each record in the table.
"""
spans = [0] * len(self.columns)
end_col = 0 # Refers to the ASCII text column, not the table col
for idx, col in enumerate(self.columns):
width = col.format.width
# Update the start columns and column span widths taking into
# account the case that the starting column of a field may not
# be the column immediately after the previous field
if not col.start:
col.start = end_col + 1
end_col = col.start + width - 1
spans[idx] = width
self._spans = spans
self._width = end_col
# Utilities
| _AsciiColDefs |
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 28749,
"end": 28823
} | class ____(_TargetExprVarArgs):
op = "call_function"
| CallFunctionVarArgs |
python | tensorflow__tensorflow | tensorflow/python/training/saver_large_partitioned_variable_test.py | {
"start": 1181,
"end": 2309
} | class ____(test.TestCase):
# Need to do this in a separate test because of the amount of memory needed
# to run this test.
def testLargePartitionedVariables(self):
save_path = os.path.join(self.get_temp_dir(), "large_variable")
var_name = "my_var"
# Saving large partition variable.
with session.Session("", graph=ops.Graph()) as sess:
with ops.device("/cpu:0"):
# Create a partitioned variable which is larger than int32 size but
# split into smaller sized variables.
init = lambda shape, dtype, partition_info: constant_op.constant(
True, dtype, shape)
partitioned_var = list(variable_scope.get_variable(
var_name,
shape=[1 << 31],
partitioner=partitioned_variables.fixed_size_partitioner(4),
initializer=init,
dtype=dtypes.bool))
self.evaluate(variables.global_variables_initializer())
save = saver.Saver(partitioned_var)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
if __name__ == "__main__":
test.main()
| SaverLargePartitionedVariableTest |
python | getsentry__sentry | src/sentry/release_health/release_monitor/base.py | {
"start": 262,
"end": 1060
} | class ____(Service):
CHUNK_SIZE = 1000
MAX_SECONDS = 360
__all__ = ("fetch_projects_with_recent_sessions", "fetch_project_release_health_totals")
def fetch_projects_with_recent_sessions(self) -> Mapping[int, Sequence[int]]:
"""
Fetches all projects that have had session data in the last 6 hours, grouped by
organization_id. Returned as a dict in format {organization_id: <list of project ids>}.
"""
raise NotImplementedError
def fetch_project_release_health_totals(
self, org_id: int, project_ids: Sequence[int]
) -> Totals:
"""
Fetches release health totals for the passed project_ids, which must be related to the
passed org id.
"""
raise NotImplementedError
| BaseReleaseMonitorBackend |
python | pydata__xarray | xarray/tests/test_backends_datatree.py | {
"start": 19608,
"end": 21247
} | class ____(NetCDFIOBase):
engine: T_DataTreeNetcdfEngine | None = "h5netcdf"
def test_phony_dims_warning(self, tmpdir) -> None:
filepath = tmpdir + "/phony_dims.nc"
import h5py
foo_data = np.arange(125).reshape(5, 5, 5)
bar_data = np.arange(625).reshape(25, 5, 5)
var = {"foo1": foo_data, "foo2": bar_data, "foo3": foo_data, "foo4": bar_data}
with h5py.File(filepath, "w") as f:
grps = ["bar", "baz"]
for grp in grps:
fx = f.create_group(grp)
for k, v in var.items():
fx.create_dataset(k, data=v)
with pytest.warns(UserWarning, match="The 'phony_dims' kwarg"):
with open_datatree(filepath, engine=self.engine) as tree:
assert tree.bar.dims == {
"phony_dim_0": 5,
"phony_dim_1": 5,
"phony_dim_2": 5,
"phony_dim_3": 25,
}
def test_roundtrip_using_filelike_object(self, tmpdir, simple_datatree) -> None:
original_dt = simple_datatree
filepath = tmpdir + "/test.nc"
# h5py requires both read and write access when writing, it will
# work with file-like objects provided they support both, and are
# seekable.
with open(filepath, "wb+") as file:
original_dt.to_netcdf(file, engine=self.engine)
with open(filepath, "rb") as file:
with open_datatree(file, engine=self.engine) as roundtrip_dt:
assert_equal(original_dt, roundtrip_dt)
@network
@requires_pydap
| TestH5NetCDFDatatreeIO |
python | huggingface__transformers | tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py | {
"start": 1604,
"end": 9518
} | class ____:
def __init__(
self,
parent,
batch_size=3,
feat_seq_length=30,
num_channels=3,
image_size=14,
seq_length=39,
vision_config={
"depth": 2,
"embed_dim": 32,
"hidden_act": "quick_gelu",
"hidden_size": 32,
"out_hidden_size": 32,
"intermediate_size": 24,
"mlp_ratio": 4,
"num_heads": 4,
"patch_size": 14,
"spatial_merge_size": 1,
"temporal_patch_size": 2,
"fullatt_block_indexes": [0],
"initializer_range": 0.02,
},
audio_config={
"model_type": "qwen_omni_thinker_audio_encoder",
"d_model": 32,
"encoder_attention_heads": 4,
"encoder_ffn_dim": 32,
"encoder_layers": 2,
"num_mel_bins": 20,
"max_source_positions": 1500,
"initializer_range": 0.02,
"n_window": 100,
"output_dim": 32,
},
text_config={
"rope_parameters": {"mrope_section": [1, 1, 2], "rope_type": "default", "type": "default"},
"vocab_size": 99,
"hidden_size": 32,
"intermediate_size": 37,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_key_value_heads": 2,
"hidden_act": "silu",
"max_position_embeddings": 1024,
"rms_norm_eps": 1e-06,
"use_cache": True,
"tie_word_embeddings": False,
"rope_theta": 1000000.0,
"use_sliding_window": False,
"sliding_window": 50,
"max_window_layers": 3,
"attention_dropout": 0.0,
"pad_token_id": 0,
"initializer_range": 0.02,
},
audio_token_index=1,
image_token_index=2,
video_token_index=3,
position_id_per_seconds=25,
seconds_per_chunk=2,
audio_start_token_id=4,
audio_end_token_id=5,
user_token_id=6,
vision_start_token_id=7,
vision_end_token_id=8,
initializer_range=0.02,
):
self.parent = parent
self.audio_config = audio_config
self.vision_config = vision_config
self.text_config = text_config
self.audio_token_index = audio_token_index
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.position_id_per_seconds = position_id_per_seconds
self.seconds_per_chunk = seconds_per_chunk
self.audio_start_token_id = audio_start_token_id
self.audio_end_token_id = audio_end_token_id
self.vision_start_token_id = vision_start_token_id
self.vision_end_token_id = vision_end_token_id
self.user_token_id = user_token_id
self.initializer_range = initializer_range
self.batch_size = batch_size
self.feat_seq_length = feat_seq_length
self.num_channels = num_channels
self.image_size = image_size
self.seq_length = seq_length
self.is_training = False
# Used from `self.model_tester` by common model tests
self.num_hidden_layers = self.text_config["num_hidden_layers"]
self.hidden_size = self.text_config["hidden_size"]
self.num_attention_heads = self.text_config["num_attention_heads"]
self.vocab_size = self.text_config["vocab_size"]
def get_config(self):
return Qwen2_5OmniThinkerConfig(
audio_config=self.audio_config,
vision_config=self.vision_config,
text_config=self.text_config,
audio_token_index=self.audio_token_index,
image_token_index=self.image_token_index,
video_token_index=self.video_token_index,
position_id_per_seconds=self.position_id_per_seconds,
seconds_per_chunk=self.seconds_per_chunk,
audio_start_token_id=self.audio_start_token_id,
audio_end_token_id=self.audio_end_token_id,
vision_start_token_id=self.vision_start_token_id,
vision_end_token_id=self.vision_end_token_id,
user_token_id=self.user_token_id,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs(self):
config = self.get_config()
patch_size = config.vision_config.patch_size
temporal_patch_size = config.vision_config.temporal_patch_size
pixel_values = floats_tensor(
[
self.batch_size * (self.image_size**2) // (patch_size**2),
self.num_channels * (patch_size**2) * temporal_patch_size,
]
)
pixel_grid_thw = torch.LongTensor(
[[1, self.image_size / patch_size, self.image_size / patch_size]] * self.batch_size
).to(pixel_values.device)
input_features_values = floats_tensor(
[self.batch_size, self.audio_config["num_mel_bins"], self.feat_seq_length]
)
feature_attention_mask = torch.ones([self.batch_size, self.feat_seq_length], dtype=torch.long).to(torch_device)
return config, pixel_values, pixel_grid_thw, input_features_values, feature_attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, pixel_grid_thw, input_features_values, feature_attention_mask = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.get_text_config().vocab_size - 3) + 3
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
# Make sure no other tokens are set to special, to prevetn flakiness
tokens_to_replace = torch.tensor(
[
config.image_token_index,
config.audio_token_index,
config.audio_start_token_id,
config.audio_end_token_id,
config.vision_start_token_id,
config.vision_end_token_id,
],
device=input_ids.device,
)
input_ids[torch.isin(input_ids, tokens_to_replace)] = config.text_config.pad_token_id
attention_mask[:, :1] = 0
# Audio token placeholders should be wrapped in start and end token ids
audio_feat_length = ((self.feat_seq_length - 1) // 2 + 1 - 2) // 2 + 1
input_ids[:, 1] = config.audio_start_token_id
input_ids[:, 2 : (2 + audio_feat_length)] = config.audio_token_index
input_ids[:, 2 + audio_feat_length] = config.audio_end_token_id
# Image token placeholders should be wrapped in start and end token ids
input_ids[:, -4:-1] = torch.tensor(
[config.vision_start_token_id, config.image_token_index, config.vision_end_token_id]
)
inputs_dict = {
"input_features": input_features_values,
"feature_attention_mask": feature_attention_mask,
"input_ids": input_ids,
"attention_mask": attention_mask,
"image_grid_thw": pixel_grid_thw,
"pixel_values": pixel_values,
}
return config, inputs_dict
def create_and_check_qwenomnithinker_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask):
model = Qwen2_5OmniThinkerForConditionalGeneration(config=config)
model.to(torch_device)
model.eval()
with torch.autocast(device_type=torch_device, dtype=torch.float16):
logits = model(
input_ids=input_ids,
attention_mask=attention_mask,
pixel_values=pixel_values.to(torch.bfloat16),
return_dict=True,
)["logits"]
self.parent.assertFalse(torch.isnan(logits).any().item())
@require_torch
| Qwen2_5OmniThinkerForConditionalGenerationTester |
python | django__django | tests/prefetch_related/models.py | {
"start": 903,
"end": 1261
} | class ____(models.Model):
author = models.ForeignKey(
Author, models.CASCADE, to_field="name", related_name="i_like"
)
likes_author = models.ForeignKey(
Author, models.CASCADE, to_field="name", related_name="likes_me"
)
is_active = models.BooleanField(default=True)
class Meta:
ordering = ["id"]
| FavoriteAuthors |
python | apache__airflow | providers/http/tests/unit/http/sensors/test_http.py | {
"start": 9289,
"end": 10004
} | class ____:
def __init__(self):
self.response = requests.Response()
self.response.status_code = 200
self.response._content = "apache/airflow".encode("ascii", "ignore")
self.proxies = None
self.stream = None
self.verify = False
self.cert = None
def send(self, *args, **kwargs):
return self.response
def prepare_request(self, request):
if "date" in request.params:
self.response._content += ("/" + request.params["date"]).encode("ascii", "ignore")
return self.response
def merge_environment_settings(self, _url, **kwargs):
return kwargs
def mount(self, prefix, adapter):
pass
| FakeSession |
python | keras-team__keras | keras/src/layers/preprocessing/data_layer.py | {
"start": 270,
"end": 5766
} | class ____(Layer):
"""Layer designed for safe use in `tf.data` or `grain` pipeline.
This layer overrides the `__call__` method to ensure that the correct
backend is used and that computation is performed on the CPU.
The `call()` method in subclasses should use `self.backend` ops. If
randomness is needed, define both `seed` and `generator` in `__init__` and
retrieve the running seed using `self._get_seed_generator()`. If the layer
has weights in `__init__` or `build()`, use `convert_weight()` to ensure
they are in the correct backend.
**Note:** This layer and its subclasses only support a single input tensor.
Examples:
**Custom `DataLayer` subclass:**
```python
from keras.src.layers.preprocessing.data_layer import DataLayer
from keras.src.random import SeedGenerator
class BiasedRandomRGBToHSVLayer(DataLayer):
def __init__(self, seed=None, **kwargs):
super().__init__(**kwargs)
self.probability_bias = ops.convert_to_tensor(0.01)
self.seed = seed
self.generator = SeedGenerator(seed)
def call(self, inputs):
images_shape = self.backend.shape(inputs)
batch_size = 1 if len(images_shape) == 3 else images_shape[0]
seed = self._get_seed_generator(self.backend._backend)
probability = self.backend.random.uniform(
shape=(batch_size,),
minval=0.0,
maxval=1.0,
seed=seed,
)
probability = self.backend.numpy.add(
probability, self.convert_weight(self.probability_bias)
)
hsv_images = self.backend.image.rgb_to_hsv(inputs)
return self.backend.numpy.where(
probability[:, None, None, None] > 0.5,
hsv_images,
inputs,
)
def compute_output_shape(self, input_shape):
return input_shape
```
**Using as a regular Keras layer:**
```python
import numpy as np
x = np.random.uniform(size=(1, 16, 16, 3)).astype("float32")
print(BiasedRandomRGBToHSVLayer()(x).shape) # (1, 16, 16, 3)
```
**Using in a `tf.data` pipeline:**
```python
import tensorflow as tf
tf_ds = tf.data.Dataset.from_tensors(x)
tf_ds = tf_ds.map(BiasedRandomRGBToHSVLayer())
print([x.shape for x in tf_ds]) # [(1, 16, 16, 3)]
```
**Using in a `grain` pipeline:**
```python
import grain
grain_ds = grain.MapDataset.source([x])
grain_ds = grain_ds.map(BiasedRandomRGBToHSVLayer())
print([x.shape for x in grain_ds]) # [(1, 16, 16, 3)]
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.backend = backend_utils.DynamicBackend()
self._allow_non_tensor_positional_args = True
def __call__(self, inputs, **kwargs):
sample_input = tree.flatten(inputs)[0]
if (
not isinstance(sample_input, keras.KerasTensor)
and backend_utils.in_tf_graph()
and not jax_utils.is_in_jax_tracing_scope(sample_input)
):
# We're in a TF graph, e.g. a tf.data pipeline.
self.backend.set_backend("tensorflow")
inputs = tree.map_structure(
lambda x: self.backend.convert_to_tensor(
x, dtype=self.compute_dtype
),
inputs,
)
switch_convert_input_args = False
if self._convert_input_args:
self._convert_input_args = False
switch_convert_input_args = True
try:
outputs = super().__call__(inputs, **kwargs)
finally:
self.backend.reset()
if switch_convert_input_args:
self._convert_input_args = True
return outputs
elif (
not isinstance(sample_input, keras.KerasTensor)
and backend_utils.in_grain_data_pipeline()
):
# We're in a Grain data pipeline. Force computation and data
# placement to CPU.
with keras.src.backend.device_scope("cpu"):
return super().__call__(inputs, **kwargs)
else:
return super().__call__(inputs, **kwargs)
@tracking.no_automatic_dependency_tracking
def _get_seed_generator(self, backend=None):
if not hasattr(self, "seed") or not hasattr(self, "generator"):
raise ValueError(
"The `seed` and `generator` variable must be set in the "
"`__init__` method before calling `_get_seed_generator()`."
)
if backend is None or backend == keras.backend.backend():
return self.generator
if not hasattr(self, "_backend_generators"):
self._backend_generators = {}
if backend in self._backend_generators:
return self._backend_generators[backend]
seed_generator = SeedGenerator(self.seed, backend=self.backend)
self._backend_generators[backend] = seed_generator
return seed_generator
def convert_weight(self, weight):
"""Convert the weight if it is from the a different backend."""
if self.backend.name == keras.backend.backend():
return weight
else:
weight = keras.ops.convert_to_numpy(weight)
return self.backend.convert_to_tensor(weight)
| DataLayer |
python | getsentry__sentry | src/sentry/migrations/0915_add_user_email_unique_column.py | {
"start": 155,
"end": 1486
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0914_increase_orgmember_user_email_max_length"),
]
operations = [
migrations.AddField(
model_name="user",
name="email_unique",
field=models.EmailField(null=True, max_length=200, unique=True),
),
]
| Migration |
python | optuna__optuna | tests/samplers_tests/tpe_tests/test_multi_objective_sampler.py | {
"start": 281,
"end": 18417
} | class ____:
def __init__(self) -> None:
self.value: dict[str, dict] = {}
def set_trial_system_attr(self, _: int, key: str, value: dict) -> None:
self.value[key] = value
def suggest(
sampler: optuna.samplers.BaseSampler,
study: optuna.Study,
trial: optuna.trial.FrozenTrial,
distribution: optuna.distributions.BaseDistribution,
past_trials: list[optuna.trial.FrozenTrial],
) -> float:
attrs = MockSystemAttr()
with (
patch.object(study._storage, "get_all_trials", return_value=past_trials),
patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
),
patch.object(study._storage, "get_trial", return_value=trial),
patch("optuna.trial.Trial.system_attrs", new_callable=PropertyMock) as mock1,
patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2,
):
mock1.return_value = attrs.value
mock2.return_value = attrs.value
suggestion = sampler.sample_independent(study, trial, "param-a", distribution)
return suggestion
def test_multi_objective_sample_independent_seed_fix() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
suggestion = suggest(sampler, study, trial, dist, past_trials)
sampler = TPESampler(seed=0)
assert suggest(sampler, study, trial, dist, past_trials) == suggestion
sampler = TPESampler(seed=1)
assert suggest(sampler, study, trial, dist, past_trials) != suggestion
def test_multi_objective_sample_independent_prior() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
suggestion = suggest(sampler, study, trial, dist, past_trials)
sampler = TPESampler(prior_weight=0.5, seed=0)
assert suggest(sampler, study, trial, dist, past_trials) != suggestion
def test_multi_objective_sample_independent_n_startup_trial() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
trial = frozen_trial_factory(16, [0, 0])
def _suggest_and_return_call_count(
sampler: optuna.samplers.BaseSampler,
past_trials: list[optuna.trial.FrozenTrial],
) -> int:
attrs = MockSystemAttr()
with (
patch.object(study._storage, "get_all_trials", return_value=past_trials),
patch.object(
study._storage, "set_trial_system_attr", side_effect=attrs.set_trial_system_attr
),
patch.object(study._storage, "get_trial", return_value=trial),
patch("optuna.trial.Trial.system_attrs", new_callable=PropertyMock) as mock1,
patch(
"optuna.trial.FrozenTrial.system_attrs",
new_callable=PropertyMock,
) as mock2,
patch.object(
optuna.samplers.RandomSampler,
"sample_independent",
return_value=1.0,
) as sample_method,
):
mock1.return_value = attrs.value
mock2.return_value = attrs.value
sampler.sample_independent(study, trial, "param-a", dist)
study._thread_local.cached_all_trials = None
return sample_method.call_count
sampler = TPESampler(n_startup_trials=16, seed=0)
assert _suggest_and_return_call_count(sampler, past_trials[:-1]) == 1
sampler = TPESampler(n_startup_trials=16, seed=0)
assert _suggest_and_return_call_count(sampler, past_trials) == 0
def test_multi_objective_sample_independent_misc_arguments() -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(32)]
# Prepare a trial and a sample for later checks.
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
suggestion = suggest(sampler, study, trial, dist, past_trials)
# Test misc. parameters.
sampler = TPESampler(n_ei_candidates=13, seed=0)
assert suggest(sampler, study, trial, dist, past_trials) != suggestion
sampler = TPESampler(gamma=lambda _: 1, seed=0)
assert suggest(sampler, study, trial, dist, past_trials) != suggestion
@pytest.mark.parametrize("log, step", [(False, None), (True, None), (False, 0.1)])
def test_multi_objective_sample_independent_float_distributions(
log: bool, step: float | None
) -> None:
# Prepare sample from float distribution for checking other distributions.
study = optuna.create_study(directions=["minimize", "maximize"])
random.seed(128)
float_dist = optuna.distributions.FloatDistribution(1.0, 100.0, log=log, step=step)
value_fn: Callable[[int], float] | None = None
if float_dist.step:
def value_fn(number: int) -> float:
return int(random.random() * 1000) * 0.1
past_trials = [
frozen_trial_factory(
i, [random.random(), random.random()], dist=float_dist, value_fn=value_fn
)
for i in range(16)
]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
float_suggestion = suggest(sampler, study, trial, float_dist, past_trials)
assert 1.0 <= float_suggestion < 100.0
if float_dist.step == 0.1:
assert abs(int(float_suggestion * 10) - float_suggestion * 10) < 1e-3
# Test sample is different when `float_dist.log` is True or float_dist.step != 1.0.
random.seed(128)
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(16)]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
suggestion = suggest(sampler, study, trial, dist, past_trials)
if float_dist.log or float_dist.step == 0.1:
assert float_suggestion != suggestion
else:
assert float_suggestion == suggestion
def test_multi_objective_sample_independent_categorical_distributions() -> None:
"""Test samples are drawn from the specified category."""
study = optuna.create_study(directions=["minimize", "maximize"])
random.seed(128)
categories = [i * 0.3 + 1.0 for i in range(330)]
def cat_value_fn(idx: int) -> float:
return categories[random.randint(0, len(categories) - 1)]
cat_dist = optuna.distributions.CategoricalDistribution(categories)
past_trials = [
frozen_trial_factory(
i, [random.random(), random.random()], dist=cat_dist, value_fn=cat_value_fn
)
for i in range(16)
]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
categorical_suggestion = suggest(sampler, study, trial, cat_dist, past_trials)
assert categorical_suggestion in categories
@pytest.mark.parametrize(
"log, step",
[
(False, 1),
(True, 1),
(False, 2),
],
)
def test_multi_objective_sample_int_distributions(log: bool, step: int) -> None:
"""Test sampling from int distribution returns integer."""
study = optuna.create_study(directions=["minimize", "maximize"])
random.seed(128)
def int_value_fn(idx: int) -> float:
return random.randint(1, 99)
int_dist = optuna.distributions.IntDistribution(1, 99, log, step)
past_trials = [
frozen_trial_factory(
i, [random.random(), random.random()], dist=int_dist, value_fn=int_value_fn
)
for i in range(16)
]
trial = frozen_trial_factory(16, [0, 0])
sampler = TPESampler(seed=0)
int_suggestion = suggest(sampler, study, trial, int_dist, past_trials)
assert 1 <= int_suggestion <= 99
assert isinstance(int_suggestion, int)
@pytest.mark.parametrize(
"state",
[
(optuna.trial.TrialState.FAIL,),
(optuna.trial.TrialState.PRUNED,),
(optuna.trial.TrialState.RUNNING,),
(optuna.trial.TrialState.WAITING,),
],
)
def test_multi_objective_sample_independent_handle_unsuccessful_states(
state: optuna.trial.TrialState,
) -> None:
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
random.seed(128)
# Prepare sampling result for later tests.
past_trials = [frozen_trial_factory(i, [random.random(), random.random()]) for i in range(32)]
trial = frozen_trial_factory(32, [0, 0])
sampler = TPESampler(seed=0)
all_success_suggestion = suggest(sampler, study, trial, dist, past_trials)
study._thread_local.cached_all_trials = None
# Test unsuccessful trials are handled differently.
state_fn = build_state_fn(state)
past_trials = [
frozen_trial_factory(i, [random.random(), random.random()], state_fn=state_fn)
for i in range(32)
]
trial = frozen_trial_factory(32, [0, 0])
sampler = TPESampler(seed=0)
partial_unsuccessful_suggestion = suggest(sampler, study, trial, dist, past_trials)
assert partial_unsuccessful_suggestion != all_success_suggestion
def test_multi_objective_sample_independent_ignored_states() -> None:
"""Tests FAIL, RUNNING, and WAITING states are equally."""
study = optuna.create_study(directions=["minimize", "maximize"])
dist = optuna.distributions.FloatDistribution(1.0, 100.0)
suggestions = []
for state in [
optuna.trial.TrialState.FAIL,
optuna.trial.TrialState.RUNNING,
optuna.trial.TrialState.WAITING,
]:
random.seed(128)
state_fn = build_state_fn(state)
past_trials = [
frozen_trial_factory(i, [random.random(), random.random()], state_fn=state_fn)
for i in range(32)
]
trial = frozen_trial_factory(32, [0, 0])
sampler = TPESampler(seed=0)
suggestions.append(suggest(sampler, study, trial, dist, past_trials))
assert len(set(suggestions)) == 1
@pytest.mark.parametrize("direction0", ["minimize", "maximize"])
@pytest.mark.parametrize("direction1", ["minimize", "maximize"])
def test_split_complete_trials_multi_objective(direction0: str, direction1: str) -> None:
study = optuna.create_study(directions=(direction0, direction1))
for values in ([-2.0, -1.0], [3.0, 3.0], [0.0, 1.0], [-1.0, 0.0]):
value0, value1 = values
if direction0 == "maximize":
value0 = -value0
if direction1 == "maximize":
value1 = -value1
study.add_trial(
optuna.create_trial(
state=optuna.trial.TrialState.COMPLETE,
values=(value0, value1),
params={"x": 0},
distributions={"x": optuna.distributions.FloatDistribution(-1.0, 1.0)},
)
)
below_trials, above_trials = _tpe.sampler._split_complete_trials_multi_objective(
study.trials,
study,
2,
)
assert [trial.number for trial in below_trials] == [0, 3]
assert [trial.number for trial in above_trials] == [1, 2]
def test_split_complete_trials_multi_objective_empty() -> None:
study = optuna.create_study(directions=("minimize", "minimize"))
assert _tpe.sampler._split_complete_trials_multi_objective([], study, 0) == ([], [])
def test_calculate_weights_below_for_multi_objective() -> None:
# No sample.
study = optuna.create_study(directions=["minimize", "minimize"])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(study, [], None)
assert len(weights_below) == 0
# One sample.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[0.2, 0.5])
study.add_trials([trial0])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study, [trial0], None
)
assert len(weights_below) == 1
assert sum(weights_below) > 0
# Two samples.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[0.2, 0.5])
trial1 = optuna.create_trial(values=[0.9, 0.4])
study.add_trials([trial0, trial1])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study,
[trial0, trial1],
None,
)
assert len(weights_below) == 2
assert weights_below[0] > weights_below[1]
assert sum(weights_below) > 0
# Two equally contributed samples.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[0.2, 0.8])
trial1 = optuna.create_trial(values=[0.8, 0.2])
study.add_trials([trial0, trial1])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study,
[trial0, trial1],
None,
)
assert len(weights_below) == 2
assert weights_below[0] == weights_below[1]
assert sum(weights_below) > 0
# Duplicated samples.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[0.2, 0.8])
trial1 = optuna.create_trial(values=[0.2, 0.8])
study.add_trials([trial0, trial1])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study,
[trial0, trial1],
None,
)
assert len(weights_below) == 2
assert weights_below[0] == weights_below[1]
assert sum(weights_below) > 0
# Three samples.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[0.3, 0.3])
trial1 = optuna.create_trial(values=[0.2, 0.8])
trial2 = optuna.create_trial(values=[0.8, 0.2])
study.add_trials([trial0, trial1, trial2])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study,
[trial0, trial1, trial2],
None,
)
assert len(weights_below) == 3
assert weights_below[0] > weights_below[1]
assert weights_below[0] > weights_below[2]
assert weights_below[1] == weights_below[2]
assert sum(weights_below) > 0
# Zero/negative objective values.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[-0.3, -0.3])
trial1 = optuna.create_trial(values=[0.0, -0.8])
trial2 = optuna.create_trial(values=[-0.8, 0.0])
study.add_trials([trial0, trial1, trial2])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study,
[trial0, trial1, trial2],
None,
)
assert len(weights_below) == 3
assert weights_below[0] > weights_below[1]
assert weights_below[0] > weights_below[2]
assert np.isclose(weights_below[1], weights_below[2])
assert sum(weights_below) > 0
# +/-inf objective values.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[-float("inf"), -float("inf")])
trial1 = optuna.create_trial(values=[0.0, -float("inf")])
trial2 = optuna.create_trial(values=[-float("inf"), 0.0])
study.add_trials([trial0, trial1, trial2])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study,
[trial0, trial1, trial2],
None,
)
assert len(weights_below) == 3
assert not any([np.isnan(w) for w in weights_below])
assert sum(weights_below) > 0
# Three samples with two infeasible trials.
study = optuna.create_study(directions=["minimize", "minimize"])
trial0 = optuna.create_trial(values=[0.3, 0.3], system_attrs={"constraints": 2})
trial1 = optuna.create_trial(values=[0.2, 0.8], system_attrs={"constraints": 8})
trial2 = optuna.create_trial(values=[0.8, 0.2], system_attrs={"constraints": 0})
study.add_trials([trial0, trial1, trial2])
weights_below = _tpe.sampler._calculate_weights_below_for_multi_objective(
study,
[trial0, trial1, trial2],
lambda trial: [trial.system_attrs["constraints"]],
)
assert len(weights_below) == 3
assert weights_below[0] == _tpe.sampler.EPS
assert weights_below[1] == _tpe.sampler.EPS
assert weights_below[2] > 0
def frozen_trial_factory(
number: int,
values: list[float],
dist: optuna.distributions.BaseDistribution = optuna.distributions.FloatDistribution(
1.0, 100.0
),
value_fn: Callable[[int], int | float] | None = None,
state_fn: Callable[
[int], optuna.trial.TrialState
] = lambda _: optuna.trial.TrialState.COMPLETE,
) -> optuna.trial.FrozenTrial:
if value_fn is None:
value = random.random() * 99.0 + 1.0
else:
value = value_fn(number)
trial = optuna.trial.FrozenTrial(
number=number,
trial_id=number,
state=optuna.trial.TrialState.COMPLETE,
value=None,
datetime_start=None,
datetime_complete=None,
params={"param-a": value},
distributions={"param-a": dist},
user_attrs={},
system_attrs={},
intermediate_values={},
values=values,
)
return trial
def build_state_fn(state: optuna.trial.TrialState) -> Callable[[int], optuna.trial.TrialState]:
def state_fn(idx: int) -> optuna.trial.TrialState:
return [optuna.trial.TrialState.COMPLETE, state][idx % 2]
return state_fn
| MockSystemAttr |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/versions/v2025_11_05.py | {
"start": 1013,
"end": 1731
} | class ____(VersionChange):
"""Add the `triggering_user_name` field to DagRun model."""
description = __doc__
instructions_to_migrate_to_previous_version = (schema(DagRun).field("triggering_user_name").didnt_exist,)
@convert_response_to_previous_version_for(TIRunContext) # type: ignore[arg-type]
def remove_triggering_user_name_from_dag_run(response: ResponseInfo) -> None: # type: ignore[misc]
"""Remove the `triggering_user_name` field from the dag_run object when converting to the previous version."""
if "dag_run" in response.body and isinstance(response.body["dag_run"], dict):
response.body["dag_run"].pop("triggering_user_name", None)
| AddTriggeringUserNameField |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataproc_metastore.py | {
"start": 12392,
"end": 13784
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dataproc_metastore.DataprocMetastoreHook")
@mock.patch("airflow.providers.google.cloud.operators.dataproc_metastore.Backup")
def test_assert_valid_hook_call(self, mock_backup, mock_hook) -> None:
task = DataprocMetastoreListBackupsOperator(
task_id=TASK_ID,
project_id=GCP_PROJECT_ID,
region=GCP_LOCATION,
service_id=TEST_SERVICE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.wait_for_operation.return_value = None
mock_backup.return_value.to_dict.return_value = None
task.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.list_backups.assert_called_once_with(
project_id=GCP_PROJECT_ID,
region=GCP_LOCATION,
service_id=TEST_SERVICE_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
filter=None,
order_by=None,
page_size=None,
page_token=None,
)
| TestDataprocMetastoreListBackupsOperator |
python | jmcnamara__XlsxWriter | xlsxwriter/test/worksheet/test_write_custom_filter.py | {
"start": 301,
"end": 844
} | class ____(unittest.TestCase):
"""
Test the Worksheet _write_custom_filter() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_custom_filter(self):
"""Test the _write_custom_filter() method"""
self.worksheet._write_custom_filter(4, 3000)
exp = """<customFilter operator="greaterThan" val="3000"/>"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteCustomFilter |
python | plotly__plotly.py | plotly/graph_objs/choropleth/marker/_line.py | {
"start": 233,
"end": 5288
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choropleth.marker"
_path_str = "choropleth.marker.line"
_valid_props = {"color", "colorsrc", "width", "widthsrc"}
@property
def color(self):
"""
Sets the marker.line color. It accepts either a specific color
or an array of numbers that are mapped to the colorscale
relative to the max and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def width(self):
"""
Sets the width (in px) of the lines bounding the marker points.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def widthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `width`.
The 'widthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["widthsrc"]
@widthsrc.setter
def widthsrc(self, val):
self["widthsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
"""
def __init__(
self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choropleth.marker.Line`
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are mapped
to the colorscale relative to the max and min values of
the array or relative to `marker.line.cmin` and
`marker.line.cmax` if set.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
width
Sets the width (in px) of the lines bounding the marker
points.
widthsrc
Sets the source reference on Chart Studio Cloud for
`width`.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choropleth.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.marker.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("width", arg, width)
self._set_property("widthsrc", arg, widthsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | openai__openai-python | tests/test_transform.py | {
"start": 1877,
"end": 2235
} | class ____(TypedDict):
my_field: Annotated[str, PropertyInfo(alias="myField")]
@parametrize
@pytest.mark.asyncio
async def test_list_of_typeddict(use_async: bool) -> None:
result = await transform({"things": [{"my_field": "foo"}, {"my_field": "foo2"}]}, Foo3, use_async)
assert result == {"things": [{"myField": "foo"}, {"myField": "foo2"}]}
| Bar3 |
python | PyCQA__pylint | tests/functional/n/name/name_styles.py | {
"start": 766,
"end": 1434
} | class ____:
"""Class with a good name."""
def __init__(self):
self._good_private_name = 10
self.__good_real_private_name = 11
self.good_attribute_name = 12
self._Bad_AtTR_name = None # [invalid-name]
self.Bad_PUBLIC_name = None # [invalid-name]
zz = 'Why Was It Bad Class Attribute?'
GOOD_CLASS_ATTR = 'Good Class Attribute'
def BadMethodName(self): # [invalid-name]
"""A Method with a bad name."""
def good_method_name(self):
"""A method with a good name."""
def __DunDER_IS_not_free_for_all__(self): # [invalid-name]
"""Another badly named method."""
| CorrectClassName |
python | gevent__gevent | examples/portforwarder.py | {
"start": 605,
"end": 3504
} | class ____(StreamServer):
def __init__(self, listener, dest, **kwargs):
StreamServer.__init__(self, listener, **kwargs)
self.dest = dest
def handle(self, source, address): # pylint:disable=method-hidden
log('%s:%s accepted', *address[:2])
try:
dest = create_connection(self.dest)
except IOError as ex:
log('%s:%s failed to connect to %s:%s: %s', address[0], address[1], self.dest[0], self.dest[1], ex)
return
forwarders = (gevent.spawn(forward, source, dest, self),
gevent.spawn(forward, dest, source, self))
# if we return from this method, the stream will be closed out
# from under us, so wait for our children
gevent.joinall(forwarders)
def close(self):
if self.closed:
sys.exit('Multiple exit signals received - aborting.')
else:
log('Closing listener socket')
StreamServer.close(self)
def forward(source, dest, server):
try:
source_address = '%s:%s' % source.getpeername()[:2]
dest_address = '%s:%s' % dest.getpeername()[:2]
except socket.error as e:
# We could be racing signals that close the server
# and hence a socket.
log("Failed to get all peer names: %s", e)
return
try:
while True:
try:
data = source.recv(1024)
log('%s->%s: %r', source_address, dest_address, data)
if not data:
break
dest.sendall(data)
except KeyboardInterrupt:
# On Windows, a Ctrl-C signal (sent by a program) usually winds
# up here, not in the installed signal handler.
if not server.closed:
server.close()
break
except socket.error:
if not server.closed:
server.close()
break
finally:
source.close()
dest.close()
server = None
def parse_address(address):
try:
hostname, port = address.rsplit(':', 1)
port = int(port)
except ValueError:
sys.exit('Expected HOST:PORT: %r' % address)
return gethostbyname(hostname), port
def main():
args = sys.argv[1:]
if len(args) != 2:
sys.exit('Usage: %s source-address destination-address' % __file__)
source = args[0]
dest = parse_address(args[1])
server = PortForwarder(source, dest)
log('Starting port forwarder %s:%s -> %s:%s', *(server.address[:2] + dest))
gevent.signal_handler(signal.SIGTERM, server.close)
gevent.signal_handler(signal.SIGINT, server.close)
server.start()
gevent.wait()
def log(message, *args):
message = message % args
sys.stderr.write(message + '\n')
if __name__ == '__main__':
main()
| PortForwarder |
python | jupyterlab__jupyterlab | jupyterlab/handlers/announcements.py | {
"start": 927,
"end": 1497
} | class ____:
"""Notification
Attributes:
createdAt: Creation date
message: Notification message
modifiedAt: Modification date
type: Notification type — ["default", "error", "info", "success", "warning"]
link: Notification link button as a tuple (label, URL)
options: Notification options
"""
createdAt: float # noqa
message: str
modifiedAt: float # noqa
type: str = "default"
link: tuple[str, str] = field(default_factory=tuple)
options: dict = field(default_factory=dict)
| Notification |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 4796,
"end": 5449
} | class ____(nn.Module):
r"""Model with only Conv2d layers, all without bias, some in a Sequential and some following.
Used to test pruned Conv2d-Conv2d fusion."""
def __init__(self) -> None:
super().__init__()
self.seq = nn.Sequential(
nn.Conv2d(1, 32, 3, 1, bias=False),
nn.Conv2d(32, 64, 3, 1, bias=False),
)
self.conv2d1 = nn.Conv2d(64, 48, 3, 1, bias=False)
self.conv2d2 = nn.Conv2d(48, 52, 3, 1, bias=False)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.seq(x)
x = self.conv2d1(x)
x = self.conv2d2(x)
return x
| SimpleConv2d |
python | kamyu104__LeetCode-Solutions | Python/asteroid-collision.py | {
"start": 29,
"end": 607
} | class ____(object):
def asteroidCollision(self, asteroids):
"""
:type asteroids: List[int]
:rtype: List[int]
"""
result = []
for x in asteroids:
if x > 0:
result.append(x)
continue
while result and 0 < result[-1] < -x:
result.pop()
if result and 0 < result[-1]:
if result[-1] == -x:
result.pop()
continue
result.append(x)
return result
# Time: O(n)
# Space: O(n)
| Solution |
python | huggingface__transformers | src/transformers/models/d_fine/modular_d_fine.py | {
"start": 54741,
"end": 56555
} | class ____(RTDetrHybridEncoder):
def __init__(self, config: DFineConfig):
nn.Module.__init__(self)
self.config = config
self.in_channels = config.encoder_in_channels
self.num_fpn_stages = len(self.in_channels) - 1
self.feat_strides = config.feat_strides
self.encoder_hidden_dim = config.encoder_hidden_dim
self.encode_proj_layers = config.encode_proj_layers
self.positional_encoding_temperature = config.positional_encoding_temperature
self.eval_size = config.eval_size
self.out_channels = [self.encoder_hidden_dim for _ in self.in_channels]
self.out_strides = self.feat_strides
# encoder transformer
self.encoder = nn.ModuleList([DFineEncoder(config) for _ in range(len(self.encode_proj_layers))])
# top-down fpn
self.lateral_convs = nn.ModuleList()
self.fpn_blocks = nn.ModuleList()
for _ in range(len(self.in_channels) - 1, 0, -1):
lateral_layer = DFineConvNormLayer(config, self.encoder_hidden_dim, self.encoder_hidden_dim, 1, 1)
self.lateral_convs.append(lateral_layer)
num_blocks = round(3 * config.depth_mult)
fpn_layer = DFineRepNCSPELAN4(config, numb_blocks=num_blocks)
self.fpn_blocks.append(fpn_layer)
# bottom-up pan
self.downsample_convs = nn.ModuleList()
self.pan_blocks = nn.ModuleList()
for _ in range(len(self.in_channels) - 1):
self.downsample_convs.append(DFineSCDown(config, 3, 2))
num_blocks = round(3 * config.depth_mult)
self.pan_blocks.append(DFineRepNCSPELAN4(config, numb_blocks=num_blocks))
__all__ = [
"DFineConfig",
"DFineModel",
"DFinePreTrainedModel",
"DFineForObjectDetection",
]
| DFineHybridEncoder |
python | pyparsing__pyparsing | examples/eval_arith.py | {
"start": 1921,
"end": 2317
} | class ____:
"Class to evaluate addition and subtraction expressions"
def __init__(self, tokens):
self.value = tokens[0]
def eval(self):
sum = self.value[0].eval()
for op, val in operatorOperands(self.value[1:]):
if op == "+":
sum += val.eval()
if op == "-":
sum -= val.eval()
return sum
| EvalAddOp |
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/concurrency.py | {
"start": 346,
"end": 1607
} | class ____:
concurrency_key: str
slot_status: ConcurrencySlotStatus
priority: Optional[int] = None
assigned_timestamp: Optional[datetime] = None
enqueued_timestamp: Optional[datetime] = None
sleep_interval: Optional[float] = None
@property
def is_claimed(self):
return self.slot_status == ConcurrencySlotStatus.CLAIMED
@property
def is_assigned(self):
return self.assigned_timestamp is not None
def with_slot_status(self, slot_status: ConcurrencySlotStatus):
return ConcurrencyClaimStatus(
concurrency_key=self.concurrency_key,
slot_status=slot_status,
priority=self.priority,
assigned_timestamp=self.assigned_timestamp,
enqueued_timestamp=self.enqueued_timestamp,
sleep_interval=self.sleep_interval,
)
def with_sleep_interval(self, interval: float):
return ConcurrencyClaimStatus(
concurrency_key=self.concurrency_key,
slot_status=self.slot_status,
priority=self.priority,
assigned_timestamp=self.assigned_timestamp,
enqueued_timestamp=self.enqueued_timestamp,
sleep_interval=interval,
)
@record
| ConcurrencyClaimStatus |
python | sphinx-doc__sphinx | sphinx/ext/doctest.py | {
"start": 7918,
"end": 8486
} | class ____:
def __init__(
self,
code: str,
type: str,
filename: str,
lineno: int,
options: dict[int, bool] | None = None,
) -> None:
self.code = code
self.type = type
self.filename = filename
self.lineno = lineno
self.options: dict[int, bool] = options or {}
def __repr__(self) -> str:
return (
f'TestCode({self.code!r}, {self.type!r}, filename={self.filename!r}, '
f'lineno={self.lineno!r}, options={self.options!r})'
)
| TestCode |
python | huggingface__transformers | src/transformers/models/plbart/modeling_plbart.py | {
"start": 3058,
"end": 5112
} | class ____(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
# PLBart is set up so that if padding_idx is specified then offset the embedding ids by 2
# and adjust num_embeddings appropriately. Other models don't have this hack
self.offset = 2
super().__init__(num_embeddings + self.offset, embedding_dim)
def forward(
self, input_ids: torch.Tensor, past_key_values_length: int = 0, position_ids: Optional[torch.Tensor] = None
):
"""`input_ids' shape is expected to be [bsz x seqlen]."""
if position_ids is None:
bsz, seq_len = input_ids.shape[:2]
position_ids = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
).expand(bsz, -1)
else:
position_ids = position_ids.unsqueeze(0)
return super().forward(position_ids + self.offset)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| PLBartLearnedPositionalEmbedding |
python | tensorflow__tensorflow | tensorflow/python/keras/utils/object_identity.py | {
"start": 2346,
"end": 2594
} | class ____(_ObjectIdentityWrapper):
__slots__ = ()
def __init__(self, wrapped):
super(_WeakObjectIdentityWrapper, self).__init__(weakref.ref(wrapped))
@property
def unwrapped(self):
return self._wrapped()
| _WeakObjectIdentityWrapper |
python | huggingface__transformers | src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py | {
"start": 1643,
"end": 2898
} | class ____(VideosKwargs, total=False):
min_pixels: int
max_pixels: int
patch_size: int
temporal_patch_size: int
merge_size: int
min_frames: int
max_frames: int
@add_start_docstrings(
"Constructs a fast Qwen2-VL image processor that dynamically resizes videos based on the original videos.",
BASE_VIDEO_PROCESSOR_DOCSTRING,
"""
min_pixels (`int`, *optional*, defaults to `56 * 56`):
The min pixels of the image to resize the image.
max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
The max pixels of the image to resize the image.
patch_size (`int`, *optional*, defaults to 14):
The spacial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 2):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 2):
The merge size of the vision encoder to llm encoder.
min_frames (`int`, *optional*, defaults to 4):
The minimum number of frames that can be sampled.
max_frames (`int`, *optional*, defaults to 768):
The maximum number of frames that can be sampled.
""",
)
| Qwen2VLVideoProcessorInitKwargs |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/comprehend.py | {
"start": 2856,
"end": 5895
} | class ____(ComprehendBaseSensor):
"""
Poll the state of the pii entities detection job until it reaches a completed state; fails if the job fails.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:ComprehendStartPiiEntitiesDetectionJobCompletedSensor`
:param job_id: The id of the Comprehend pii entities detection job.
:param deferrable: If True, the sensor will operate in deferrable mode. This mode requires aiobotocore
module to be installed.
(default: False, but can be overridden in config file by setting default_deferrable to True)
:param poke_interval: Polling period in seconds to check for the status of the job. (default: 120)
:param max_retries: Number of times before returning the current state. (default: 75)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
INTERMEDIATE_STATES: tuple[str, ...] = ("IN_PROGRESS",)
FAILURE_STATES: tuple[str, ...] = ("FAILED", "STOP_REQUESTED", "STOPPED")
SUCCESS_STATES: tuple[str, ...] = ("COMPLETED",)
FAILURE_MESSAGE = "Comprehend start pii entities detection job sensor failed."
template_fields: Sequence[str] = aws_template_fields("job_id")
def __init__(
self,
*,
job_id: str,
max_retries: int = 75,
poke_interval: int = 120,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.max_retries = max_retries
self.poke_interval = poke_interval
def execute(self, context: Context) -> Any:
if self.deferrable:
self.defer(
trigger=ComprehendPiiEntitiesDetectionJobCompletedTrigger(
job_id=self.job_id,
waiter_delay=int(self.poke_interval),
waiter_max_attempts=self.max_retries,
aws_conn_id=self.aws_conn_id,
),
method_name="poke",
)
else:
super().execute(context=context)
def get_state(self) -> str:
return self.hook.conn.describe_pii_entities_detection_job(JobId=self.job_id)[
"PiiEntitiesDetectionJobProperties"
]["JobStatus"]
| ComprehendStartPiiEntitiesDetectionJobCompletedSensor |
python | crytic__slither | slither/detectors/statements/unary.py | {
"start": 1766,
"end": 3765
} | class ____(AbstractDetector):
"""
Incorrect Unary Expression detector
"""
ARGUMENT = "incorrect-unary"
HELP = "Dangerous unary expressions"
IMPACT = DetectorClassification.LOW
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#dangerous-unary-expressions"
)
WIKI_TITLE = "Dangerous unary expressions"
WIKI_DESCRIPTION = "Unary expressions such as `x=+1` probably typos."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```Solidity
contract Bug{
uint public counter;
function increase() public returns(uint){
counter=+1;
return counter;
}
}
```
`increase()` uses `=+` instead of `+=`, so `counter` will never exceed 1."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Remove the unary expression."
def _detect(self) -> List[Output]:
"""
Detect the incorrect use of unary expressions
"""
results = []
for c in self.contracts:
for variable in c.state_variables:
if (
variable.expression
and InvalidUnaryStateVariableDetector(variable.expression).result
):
info: DETECTOR_INFO = [
variable,
f" uses an dangerous unary operator: {variable.expression}\n",
]
json = self.generate_result(info)
results.append(json)
for f in c.functions_and_modifiers_declared:
for node in f.nodes:
if node.expression and InvalidUnaryExpressionDetector(node.expression).result:
info = [node.function, " uses an dangerous unary operator: ", node, "\n"]
res = self.generate_result(info)
results.append(res)
return results
| IncorrectUnaryExpressionDetection |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 4873,
"end": 6649
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = nn.MultiheadAttention(config.hidden_size, config.hidden_size // 64)
self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = nn.ModuleDict(
OrderedDict(
[
("c_fc", nn.Linear(config.hidden_size, config.hidden_size * 4)),
("gelu", QuickGELUActivation()),
("c_proj", nn.Linear(config.hidden_size * 4, config.hidden_size)),
]
)
)
self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn_mask = None
def attention(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor):
if attention_mask is not None:
attention_mask = attention_mask.to(dtype=torch.bool, device=hidden_state.device)
self.attn_mask = (
self.attn_mask.to(dtype=hidden_state.dtype, device=hidden_state.device)
if self.attn_mask is not None
else None
)
return self.attn(
hidden_state,
hidden_state,
hidden_state,
need_weights=False,
attn_mask=self.attn_mask,
key_padding_mask=attention_mask,
)[0]
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
residual_state = hidden_state + self.attention(self.ln_1(hidden_state), attention_mask)
hidden_state = self.ln_2(residual_state)
for layer in self.mlp.values():
hidden_state = layer(hidden_state)
hidden_state = residual_state + hidden_state
return hidden_state
| BridgeTowerResidualAttention |
python | vyperlang__vyper | vyper/compiler/input_bundle.py | {
"start": 2705,
"end": 5888
} | class ____:
# a list of search paths
search_paths: list[PathLike]
_cache: Any
def __init__(self, search_paths):
self.search_paths = search_paths
self._source_id_counter = 0
self._source_ids: dict[PathLike, int] = {}
# this is a little bit cursed, but it allows consumers to cache data
# that share the same lifetime as this input bundle.
self._cache = _Cache()
def _normalize_path(self, path):
raise NotImplementedError(f"not implemented! {self.__class__}._normalize_path()")
def _load_from_path(self, resolved_path, path):
raise NotImplementedError(f"not implemented! {self.__class__}._load_from_path()")
def _generate_source_id(self, resolved_path: PathLike) -> int:
# Note: it is possible for a file to get in here more than once,
# e.g. by symlink
if resolved_path not in self._source_ids:
self._source_ids[resolved_path] = self._source_id_counter
self._source_id_counter += 1
return self._source_ids[resolved_path]
def load_file(self, path: PathLike | str) -> FileInput:
# search path precedence
tried = []
if isinstance(path, str):
path = PurePath(path)
for sp in reversed(self.search_paths):
# note from pathlib docs:
# > If the argument is an absolute path, the previous path is ignored.
# Path("/a") / Path("/b") => Path("/b")
to_try = sp / path
try:
to_try = self._normalize_path(to_try)
res = self._load_from_path(to_try, path)
break
except _NotFound:
tried.append(to_try)
else:
formatted_search_paths = "\n".join([" " + str(p) for p in tried])
raise FileNotFoundError(
f"could not find {path} in any of the following locations:\n"
f"{formatted_search_paths}"
)
return res
def load_json_file(self, path: PathLike | str) -> JSONInput:
file_input = self.load_file(path)
return JSONInput.from_file_input(file_input)
# temporarily add something to the search path (within the
# scope of the context manager) with highest precedence.
# if `path` is None, do nothing
@contextlib.contextmanager
def search_path(self, path: Optional[PathLike]) -> Iterator[None]:
if path is None:
yield # convenience, so caller does not have to handle null path
else:
self.search_paths.append(path)
try:
yield
finally:
self.search_paths.pop()
# temporarily set search paths to a given list
@contextlib.contextmanager
def temporary_search_paths(self, new_paths: list[PathLike]) -> Iterator[None]:
original_paths = self.search_paths
self.search_paths = new_paths
try:
yield
finally:
self.search_paths = original_paths
# regular input. takes a search path(s), and `load_file()` will search all
# search paths for the file and read it from the filesystem
| InputBundle |
python | mwaskom__seaborn | tests/test_rcmod.py | {
"start": 8010,
"end": 8965
} | class ____(RCParamFixtures):
_no_verdana = not has_verdana()
@pytest.mark.skipif(_no_verdana, reason="Verdana font is not present")
def test_set_font(self):
rcmod.set_theme(font="Verdana")
_, ax = plt.subplots()
ax.set_xlabel("foo")
assert ax.xaxis.label.get_fontname() == "Verdana"
rcmod.set_theme()
def test_set_serif_font(self):
rcmod.set_theme(font="serif")
_, ax = plt.subplots()
ax.set_xlabel("foo")
assert ax.xaxis.label.get_fontname() in mpl.rcParams["font.serif"]
rcmod.set_theme()
@pytest.mark.skipif(_no_verdana, reason="Verdana font is not present")
def test_different_sans_serif(self):
rcmod.set_theme()
rcmod.set_style(rc={"font.sans-serif": ["Verdana"]})
_, ax = plt.subplots()
ax.set_xlabel("foo")
assert ax.xaxis.label.get_fontname() == "Verdana"
rcmod.set_theme()
| TestFonts |
python | arrow-py__arrow | arrow/locales.py | {
"start": 28887,
"end": 29498
} | class ____(Locale):
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]]
def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str:
form = self.timeframes[timeframe]
delta = abs(delta)
if isinstance(form, Mapping):
if delta % 10 == 1 and delta % 100 != 11:
form = form["singular"]
elif 2 <= delta % 10 <= 4 and (delta % 100 < 10 or delta % 100 >= 20):
form = form["dual"]
else:
form = form["plural"]
return form.format(delta)
| SlavicBaseLocale |
python | pytorch__pytorch | torch/distributed/tensor/parallel/fsdp.py | {
"start": 11066,
"end": 13708
} | class ____(FSDPExtensions):
"""
DTensorExtension is the TensorFlattener extension needed for 2D FSDP + TP.
This is the implementation for FSDPExtensions defined in
https://github.com/pytorch/pytorch/blob/main/torch/distributed/fsdp/_fsdp_extensions.py
"""
def __init__(self, device_handle) -> None:
super().__init__()
self.compute_stream = None
self.device_handle = device_handle
# we have to use the dynamo disable this way to disable dynamo as the decorator way would
# trigger build failure with torch deploy...
self.post_unflatten_transform = torch._dynamo.disable( # type: ignore[method-assign]
self.post_unflatten_transform
)
def pre_flatten_transform(
self,
tensor: torch.Tensor,
) -> tuple[torch.Tensor, Any | None]:
return _flatten_tensor(tensor)
def post_unflatten_transform(
self, tensor: torch.Tensor, param_extension: Any
) -> torch.Tensor:
stream = self.compute_stream or self.device_handle.current_stream()
with self.device_handle.stream(stream):
# runtime we put the unflattened tensor call on the compute stream since
# the unflattened tensor might contain computations in fwd/bwd where we
# need to sync properly.
# TODO: this is a short term fix and we should make the get_unflat_views
# directly happen in the compute stream.
result = _unflatten_tensor(
tensor,
param_extension,
device_handle=self.device_handle,
compute_stream=self.compute_stream,
)
_set_fsdp_flattened(result)
return result
def chunk_tensor(
self,
tensor: torch.Tensor,
rank: int,
world_size: int,
num_devices_per_node: int,
pg: dist.ProcessGroup,
device: torch.device | None = None,
) -> torch.Tensor:
return _chunk_tensor(tensor, rank, world_size, num_devices_per_node, pg)
def chunk_dtensor(
self,
tensor: torch.Tensor,
rank: int,
device_mesh: DeviceMesh,
) -> torch.Tensor:
return _chunk_dtensor(tensor, rank, device_mesh)
def pre_load_state_dict_transform(
self,
tensor: torch.Tensor,
) -> tuple[torch.Tensor, list[Shard]]:
return _pre_load_state_dict(tensor)
def all_gather_dtensor(
self,
tensor: DTensor,
parent_mesh: DeviceMesh | None,
) -> torch.Tensor:
return _all_gather_dtensor(tensor, parent_mesh)
| DTensorExtensions |
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 52702,
"end": 54061
} | class ____(Node):
""" Represents a function prototype
Allows the user to generate forward declaration in e.g. C/C++.
Parameters
==========
return_type : Type
name : str
parameters: iterable of Variable instances
attrs : iterable of Attribute instances
Examples
========
>>> from sympy import ccode, symbols
>>> from sympy.codegen.ast import real, FunctionPrototype
>>> x, y = symbols('x y', real=True)
>>> fp = FunctionPrototype(real, 'foo', [x, y])
>>> ccode(fp)
'double foo(double x, double y)'
"""
__slots__ = ('return_type', 'name', 'parameters')
_fields: tuple[str, ...] = __slots__ + Node._fields
_construct_return_type = Type
_construct_name = String
@staticmethod
def _construct_parameters(args):
def _var(arg):
if isinstance(arg, Declaration):
return arg.variable
elif isinstance(arg, Variable):
return arg
else:
return Variable.deduced(arg)
return Tuple(*map(_var, args))
@classmethod
def from_FunctionDefinition(cls, func_def):
if not isinstance(func_def, FunctionDefinition):
raise TypeError("func_def is not an instance of FunctionDefinition")
return cls(**func_def.kwargs(exclude=('body',)))
| FunctionPrototype |
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/Parameter.py | {
"start": 1652,
"end": 36218
} | class ____(QtCore.QObject):
"""
A Parameter is the basic unit of data in a parameter tree. Each parameter has
a name, a type, a value, and several other properties that modify the behavior of the
Parameter. Parameters may have parent / child / sibling relationships to construct
organized hierarchies. Parameters generally do not have any inherent GUI or visual
interpretation; instead they manage ParameterItem instances which take care of
display and user interaction.
Note: It is fairly uncommon to use the Parameter class directly; mostly you
will use subclasses which provide specialized type and data handling. The static
method Parameter.create(...) is an easy way to generate instances of these subclasses.
For more Parameter types, see ParameterTree.parameterTypes module.
=================================== =========================================================
**Signals:**
sigStateChanged(self, change, info) Emitted when anything changes about this parameter at
all.
The second argument is a string indicating what changed
('value', 'childAdded', etc..)
The third argument can be any extra information about
the change
sigTreeStateChanged(self, changes) Emitted when any child in the tree changes state
(but only if monitorChildren() is called)
the format of *changes* is [(param, change, info), ...]
sigValueChanged(self, value) Emitted when value is finished changing
sigValueChanging(self, value) Emitted immediately for all value changes,
including during editing.
sigChildAdded(self, child, index) Emitted when a child is added
sigChildRemoved(self, child) Emitted when a child is removed
sigRemoved(self) Emitted when this parameter is removed
sigParentChanged(self, parent) Emitted when this parameter's parent has changed
sigLimitsChanged(self, limits) Emitted when this parameter's limits have changed
sigDefaultChanged(self, default) Emitted when this parameter's default value has changed
sigNameChanged(self, name) Emitted when this parameter's name has changed
sigOptionsChanged(self, opts) Emitted when any of this parameter's options have changed
sigContextMenu(self, name) Emitted when a context menu was clicked
=================================== =========================================================
"""
## name, type, limits, etc.
## can also carry UI hints (slider vs spinbox, etc.)
sigValueChanged = QtCore.Signal(object, object) ## self, value emitted when value is finished being edited
sigValueChanging = QtCore.Signal(object, object) ## self, value emitted as value is being edited
sigChildAdded = QtCore.Signal(object, object, object) ## self, child, index
sigChildRemoved = QtCore.Signal(object, object) ## self, child
sigRemoved = QtCore.Signal(object) ## self
sigParentChanged = QtCore.Signal(object, object) ## self, parent
sigLimitsChanged = QtCore.Signal(object, object) ## self, limits
sigDefaultChanged = QtCore.Signal(object, object) ## self, default
sigNameChanged = QtCore.Signal(object, object) ## self, name
sigOptionsChanged = QtCore.Signal(object, object) ## self, {opt:val, ...}
## Emitted when anything changes about this parameter at all.
## The second argument is a string indicating what changed ('value', 'childAdded', etc..)
## The third argument can be any extra information about the change
sigStateChanged = QtCore.Signal(object, object, object) ## self, change, info
## emitted when any child in the tree changes state
## (but only if monitorChildren() is called)
sigTreeStateChanged = QtCore.Signal(object, object) # self, changes
# changes = [(param, change, info), ...]
sigContextMenu = QtCore.Signal(object, object) # self, name
# bad planning.
#def __new__(cls, *args, **opts):
#try:
#cls = PARAM_TYPES[opts['type']]
#except KeyError:
#pass
#return QtCore.QObject.__new__(cls, *args, **opts)
@classmethod
def create(cls, **opts):
"""
Static method that creates a new Parameter (or subclass) instance using
opts['type'] to select the appropriate class.
All options are passed directly to the new Parameter's __init__ method.
Use registerParameterType() to add new class types.
"""
typ = opts.get('type', None)
if typ is None:
klass = cls
else:
klass = PARAM_TYPES[opts['type']]
return klass(**opts)
def __init__(self, **opts):
"""
Initialize a Parameter object. Although it is rare to directly create a
Parameter instance, the options available to this method are also allowed
by most Parameter subclasses.
======================= =========================================================
**Keyword Arguments:**
name The name to give this Parameter. This is the name that
will appear in the left-most column of a ParameterTree
for this Parameter.
value The value to initially assign to this Parameter.
default The default value for this Parameter (most Parameters
provide an option to 'reset to default').
children A list of children for this Parameter. Children
may be given either as a Parameter instance or as a
dictionary to pass to Parameter.create(). In this way,
it is possible to specify complex hierarchies of
Parameters from a single nested data structure.
readonly If True, the user will not be allowed to edit this
Parameter. (default=False)
enabled If False, any widget(s) for this parameter will appear
disabled. (default=True)
visible If False, the Parameter will not appear when displayed
in a ParameterTree. (default=True)
renamable If True, the user may rename this Parameter.
(default=False)
removable If True, the user may remove this Parameter.
(default=False)
expanded If True, the Parameter will initially be expanded in
ParameterTrees: Its children will be visible.
(default=True)
syncExpanded If True, the `expanded` state of this Parameter is
synchronized with all ParameterTrees it is displayed in.
(default=False)
title (str or None) If specified, then the parameter will be
displayed to the user using this string as its name.
However, the parameter will still be referred to
internally using the *name* specified above. Note that
this option is not compatible with renamable=True.
(default=None; added in version 0.9.9)
======================= =========================================================
"""
super().__init__()
self.opts = {
'type': None,
'readonly': False,
'visible': True,
'enabled': True,
'renamable': False,
'removable': False,
'strictNaming': False, # forces name to be usable as a python variable
'expanded': True,
'syncExpanded': False,
'title': None,
# The following intentionally excluded; each parameter type may have a different data type for limits.
# 'limits': None,
}
try:
name = opts['name']
except KeyError:
raise KeyError("Parameter must have a name specified")
self.opts.update(opts)
self.opts['name'] = None
self.childs = []
self.names = {} ## map name:child
self.items = weakref.WeakKeyDictionary() ## keeps track of tree items representing this parameter
self._parent = None
self.treeStateChanges = [] ## cache of tree state changes to be delivered on next emit
self.blockTreeChangeEmit = 0
self.setName(name)
self.addChildren(self.opts.pop('children', []))
if 'value' in self.opts and 'default' not in self.opts:
self.opts['default'] = self.opts['value']
value = self.opts.get('value', self.opts.get('default'))
modified = 'value' in self.opts
if value is not None:
self.setValue(value)
self._modifiedSinceReset = modified
# Connect all state changed signals to the general sigStateChanged
self.sigValueChanged.connect(self._emitValueChanged)
self.sigChildAdded.connect(self._emitChildAddedChanged)
self.sigChildRemoved.connect(self._emitChildRemovedChanged)
self.sigParentChanged.connect(self._emitParentChanged)
self.sigLimitsChanged.connect(self._emitLimitsChanged)
self.sigDefaultChanged.connect(self._emitDefaultChanged)
self.sigNameChanged.connect(self._emitNameChanged)
self.sigOptionsChanged.connect(self._emitOptionsChanged)
self.sigContextMenu.connect(self._emitContextMenuChanged)
@property
def itemClass(self):
"""
The class of ParameterItem to use when displaying this parameter in a ParameterTree.
"""
return ParameterItem
def name(self):
"""Return the name of this Parameter."""
return self.opts['name']
def title(self):
"""Return the title of this Parameter.
By default, the title is the same as the name unless it has been explicitly specified
otherwise."""
title = self.opts.get('title', None)
if title is None:
title = self.name()
return title
def contextMenu(self, name):
""""A context menu entry was clicked"""
self.sigContextMenu.emit(self, name)
def setName(self, name):
"""Attempt to change the name of this parameter; return the actual name.
(The parameter may reject the name change or automatically pick a different name)"""
if self.opts['strictNaming'] and (len(name) < 1 or re.search(r'\W', name) or re.match(r'\d', name[0])):
raise ValueError(
f"Parameter name '{name}' is invalid. (Must contain only alphanumeric and underscore characters and "
f"may not start with a number)")
parent = self.parent()
if parent is not None:
name = parent._renameChild(self, name) # first ask parent if it's ok to rename
if self.opts['name'] != name:
self.opts['name'] = name
self.sigNameChanged.emit(self, name)
return name
def type(self):
"""Return the type string for this Parameter."""
return self.opts['type']
def isType(self, typ):
"""
Return True if this parameter type matches the name *typ*.
This can occur either of two ways:
- If self.type() == *typ*
- If this parameter's class is registered with the name *typ*
"""
if self.type() == typ:
return True
global PARAM_TYPES
cls = PARAM_TYPES.get(typ, None)
if cls is None:
raise ValueError(f"Type name '{typ}' is not registered.")
return self.__class__ is cls
def childPath(self, child):
"""
Return the path of parameter names from self to child.
If child is not a (grand)child of self, return None.
"""
path = []
while child is not self:
path.insert(0, child.name())
child = child.parent()
if child is None:
return None
return path
def setValue(self, value, blockSignal=None):
"""
Set the value of this Parameter; return the actual value that was set.
(this may be different from the value that was requested)
"""
value = self._interpretValue(value)
if fn.eq(self.opts.get('value', None), value):
return value
self._modifiedSinceReset = True
self.opts['value'] = value
if not blockSignal:
self.sigValueChanged.emit(self, value) # value might change after signal is received by tree item
return self.opts['value']
def _interpretValue(self, v):
return v
def hasValue(self):
"""Return True if this Parameter has a value set."""
return 'value' in self.opts
def value(self):
"""
Return the value of this Parameter. Raises ValueError if no value has been set.
"""
try:
return self.opts['value']
except KeyError:
raise ValueError("No Value has been set")
def getValues(self):
"""
Return a tree of all values that are children of this parameter. Raises ValueError if any child has no value.
"""
vals = OrderedDict()
for ch in self:
vals[ch.name()] = (ch.value(), ch.getValues())
return vals
def saveState(self, filter=None):
"""
Return a structure representing the entire state of the parameter tree.
The tree state may be restored from this structure using restoreState().
If *filter* is set to 'user', then only user-settable data will be included in the
returned state.
"""
if filter is None:
state = self.opts.copy()
if state['type'] is None:
global PARAM_NAMES
state['type'] = PARAM_NAMES.get(type(self), None)
elif filter == 'user':
if self.hasValue():
state = {'value': self.value()}
else:
state = {}
else:
raise ValueError(f"Unrecognized filter argument: '{filter}'")
ch = OrderedDict([(ch.name(), ch.saveState(filter=filter)) for ch in self])
if len(ch) > 0:
state['children'] = ch
return state
def restoreState(self, state, recursive=True, addChildren=True, removeChildren=True, blockSignals=True):
"""
Restore the state of this parameter and its children from a structure generated using saveState()
If recursive is True, then attempt to restore the state of child parameters as well.
If addChildren is True, then any children which are referenced in the state object will be
created if they do not already exist.
If removeChildren is True, then any children which are not referenced in the state object will
be removed.
If blockSignals is True, no signals will be emitted until the tree has been completely restored.
This prevents signal handlers from responding to a partially-rebuilt network.
"""
state = state.copy()
childState = state.pop('children', [])
## list of children may be stored either as list or dict.
if isinstance(childState, dict):
cs = []
for k,v in childState.items():
cs.append(v.copy())
cs[-1].setdefault('name', k)
childState = cs
if blockSignals:
self.blockTreeChangeSignal()
try:
self.setOpts(**state)
if not recursive:
return
ptr = 0 ## pointer to first child that has not been restored yet
foundChilds = set()
for ch in childState:
name = ch['name']
#typ = ch.get('type', None)
#print('child: %s, %s' % (self.name()+'.'+name, typ))
## First, see if there is already a child with this name
gotChild = False
for i, ch2 in enumerate(self.childs[ptr:]):
#print " ", ch2.name(), ch2.type()
if ch2.name() != name: # or not ch2.isType(typ):
continue
gotChild = True
#print " found it"
if i != 0: ## move parameter to next position
#self.removeChild(ch2)
self.insertChild(ptr, ch2)
#print " moved to position", ptr
ch2.restoreState(ch, recursive=recursive, addChildren=addChildren, removeChildren=removeChildren)
foundChilds.add(ch2)
break
if not gotChild:
if not addChildren:
#print " ignored child"
continue
#print " created new"
ch2 = Parameter.create(**ch)
self.insertChild(ptr, ch2)
foundChilds.add(ch2)
ptr += 1
if removeChildren:
for ch in self.childs[:]:
if ch not in foundChilds:
#print " remove:", ch
self.removeChild(ch)
finally:
if blockSignals:
self.unblockTreeChangeSignal()
def valueModifiedSinceResetToDefault(self):
"""Return True if this parameter's value has been changed since the last time
it was reset to its default value."""
return self._modifiedSinceReset
def defaultValue(self):
"""Return the default value for this parameter. Raises ValueError if no default."""
return self.opts.get('default')
def setDefault(self, val, updatePristineValues=False):
"""Set the default value for this parameter. If updatePristineValues is True, then
any values that haven't been modified since the last time they were reset to default
will be updated to the new default value (default: False)."""
if self.opts.get('default') == val:
return
self.opts['default'] = val
if 'value' not in self.opts or (updatePristineValues and not self.valueModifiedSinceResetToDefault()):
self.setToDefault()
if not self.valueIsDefault():
self._modifiedSinceReset = True
self.sigDefaultChanged.emit(self, val)
def setToDefault(self):
"""Set this parameter's value to the default. Raises ValueError if no default is set."""
with self.treeChangeBlocker():
self.setValue(self.defaultValue())
self._modifiedSinceReset = False
def hasDefault(self):
"""Returns True if this parameter has a default value."""
return self.opts.get('default') is not None
def valueIsDefault(self):
"""Returns True if this parameter's value is equal to the default value."""
if not self.hasValue() or not self.hasDefault():
return False
return fn.eq(self.value(), self.defaultValue())
def setLimits(self, limits):
"""Set limits on the acceptable values for this parameter.
The format of limits depends on the type of the parameter and
some parameters do not make use of limits at all."""
if 'limits' in self.opts and fn.eq(self.opts['limits'], limits):
return
self.opts['limits'] = limits
self.sigLimitsChanged.emit(self, limits)
return limits
def writable(self):
"""
Returns True if this parameter's value can be changed by the user.
Note that the value of the parameter can *always* be changed by
calling setValue().
"""
return not self.readonly()
def setWritable(self, writable=True):
"""Set whether this Parameter should be editable by the user. (This is
exactly the opposite of setReadonly)."""
self.setOpts(readonly=not writable)
def readonly(self):
"""
Return True if this parameter is read-only. (this is the opposite of writable())
"""
return self.opts.get('readonly', False)
def setReadonly(self, readonly=True):
"""Set whether this Parameter's value may be edited by the user
(this is the opposite of setWritable())."""
self.setOpts(readonly=readonly)
def setOpts(self, **opts):
"""
Set any arbitrary options on this parameter.
The exact behavior of this function will depend on the parameter type, but
most parameters will accept a common set of options: value, name, limits,
default, readonly, removable, renamable, visible, enabled, expanded and
syncExpanded.
See :func:`Parameter.__init__ <pyqtgraph.parametertree.Parameter.__init__>`
for more information on default options.
"""
changed = OrderedDict()
for k in opts:
if k == 'value':
self.setValue(opts[k])
elif k == 'name':
self.setName(opts[k])
elif k == 'limits':
self.setLimits(opts[k])
elif k == 'default':
self.setDefault(opts[k])
elif k not in self.opts or not fn.eq(self.opts[k], opts[k]):
self.opts[k] = opts[k]
changed[k] = opts[k]
if len(changed) > 0:
self.sigOptionsChanged.emit(self, changed)
def emitStateChanged(self, changeDesc, data):
## Emits stateChanged signal and
## requests emission of new treeStateChanged signal
self.sigStateChanged.emit(self, changeDesc, data)
#self.treeStateChanged(self, changeDesc, data)
self.treeStateChanges.append((self, changeDesc, data))
self.emitTreeChanges()
@QtCore.Slot(object, object)
def _emitValueChanged(self, param, data):
self.emitStateChanged("value", data)
@QtCore.Slot(object, object, object)
def _emitChildAddedChanged(self, param, *data):
self.emitStateChanged("childAdded", data)
@QtCore.Slot(object, object)
def _emitChildRemovedChanged(self, param, data):
self.emitStateChanged("childRemoved", data)
@QtCore.Slot(object, object)
def _emitParentChanged(self, param, data):
self.emitStateChanged("parent", data)
@QtCore.Slot(object, object)
def _emitLimitsChanged(self, param, data):
self.emitStateChanged("limits", data)
@QtCore.Slot(object, object)
def _emitDefaultChanged(self, param, data):
self.emitStateChanged("default", data)
@QtCore.Slot(object, object)
def _emitNameChanged(self, param, data):
self.emitStateChanged("name", data)
@QtCore.Slot(object, object)
def _emitOptionsChanged(self, param, data):
self.emitStateChanged("options", data)
@QtCore.Slot(object, object)
def _emitContextMenuChanged(self, param, data):
self.emitStateChanged("contextMenu", data)
def makeTreeItem(self, depth):
"""
Return a TreeWidgetItem suitable for displaying/controlling the content of
this parameter. This is called automatically when a ParameterTree attempts
to display this Parameter.
Most subclasses will want to override this function.
"""
# Default to user-specified itemClass. If not present, check for a registered item class. Finally,
# revert to ParameterItem if both fail
itemClass = self.itemClass or _PARAM_ITEM_TYPES.get(self.opts['type'], ParameterItem)
return itemClass(self, depth)
def addChild(self, child, autoIncrementName=None, existOk=False):
"""
Add another parameter to the end of this parameter's child list.
See insertChild() for a description of the *autoIncrementName* and *existOk*
arguments.
"""
return self.insertChild(len(self.childs), child, autoIncrementName=autoIncrementName, existOk=existOk)
def addChildren(self, children):
"""
Add a list or dict of children to this parameter. This method calls
addChild once for each value in *children*.
"""
## If children was specified as dict, then assume keys are the names.
if isinstance(children, dict):
ch2 = []
for name, opts in children.items():
if isinstance(opts, dict) and 'name' not in opts:
opts = opts.copy()
opts['name'] = name
ch2.append(opts)
children = ch2
for chOpts in children:
#print self, "Add child:", type(chOpts), id(chOpts)
self.addChild(chOpts)
def insertChild(self, pos, child, autoIncrementName=None, existOk=False):
"""
Insert a new child at pos.
If pos is a Parameter, then insert at the position of that Parameter.
If child is a dict, then a parameter is constructed using
:func:`Parameter.create <pyqtgraph.parametertree.Parameter.create>`.
By default, the child's 'autoIncrementName' option determines whether
the name will be adjusted to avoid prior name collisions. This
behavior may be overridden by specifying the *autoIncrementName*
argument. This argument was added in version 0.9.9.
If 'autoIncrementName' is *False*, an error is raised when the inserted child already exists. However, if
'existOk' is *True*, the existing child will be returned instead, and this child will *not* be inserted.
"""
if isinstance(child, dict):
child = Parameter.create(**child)
name = child.name()
if name in self.names and child is not self.names[name]:
if autoIncrementName is True or (autoIncrementName is None and child.opts.get('autoIncrementName', False)):
name = self.incrementName(name)
child.setName(name)
elif existOk:
return self.names[name]
else:
raise ValueError("Already have child named %s" % str(name))
if isinstance(pos, Parameter):
pos = self.childs.index(pos)
with self.treeChangeBlocker():
if child.parent() is not None:
child.remove()
self.names[name] = child
self.childs.insert(pos, child)
child.parentChanged(self)
child.sigTreeStateChanged.connect(self.treeStateChanged)
self.sigChildAdded.emit(self, child, pos)
return child
def removeChild(self, child):
"""Remove a child parameter."""
name = child.name()
if name not in self.names or self.names[name] is not child:
raise Exception("Parameter %s is not my child; can't remove." % str(child))
del self.names[name]
self.childs.pop(self.childs.index(child))
child.parentChanged(None)
try:
child.sigTreeStateChanged.disconnect(self.treeStateChanged)
except (TypeError, RuntimeError): ## already disconnected
pass
self.sigChildRemoved.emit(self, child)
def clearChildren(self):
"""Remove all child parameters."""
for ch in self.childs[:]:
self.removeChild(ch)
def children(self):
"""Return a list of this parameter's children.
Warning: this overrides QObject.children
"""
return self.childs[:]
def hasChildren(self):
"""Return True if this Parameter has children."""
return len(self.childs) > 0
def parentChanged(self, parent):
"""This method is called when the parameter's parent has changed.
It may be useful to extend this method in subclasses."""
self._parent = parent
self.sigParentChanged.emit(self, parent)
def parent(self):
"""Return the parent of this parameter."""
return self._parent
def remove(self):
"""Remove this parameter from its parent's child list"""
parent = self.parent()
if parent is None:
raise Exception("Cannot remove; no parent.")
parent.removeChild(self)
self.sigRemoved.emit(self)
def incrementName(self, name):
## return an unused name by adding a number to the name given
base, num = re.match(r'([^\d]*)(\d*)', name).groups()
numLen = len(num)
if numLen == 0:
num = 2
numLen = 1
else:
num = int(num)
while True:
newName = base + ("%%0%dd"%numLen) % num
if newName not in self.names:
return newName
num += 1
def __iter__(self):
yield from self.childs
def __getitem__(self, names):
"""Get the value of a child parameter. The name may also be a tuple giving
the path to a sub-parameter::
value = param[('child', 'grandchild')]
Raises ValueError if the child value is not set.
"""
if not isinstance(names, tuple):
names = (names,)
return self.param(*names).value()
def __setitem__(self, names, value):
"""Set the value of a child parameter. The name may also be a tuple giving
the path to a sub-parameter::
param[('child', 'grandchild')] = value
"""
if isinstance(names, str):
names = (names,)
return self.param(*names).setValue(value)
def keys(self):
return self.names
def child(self, *names):
"""Return a child parameter.
Accepts the name of the child or a tuple (path, to, child)
Added in version 0.9.9. Earlier versions used the 'param' method, which is still
implemented for backward compatibility.
"""
try:
param = self.names[names[0]]
except KeyError as e:
raise KeyError(f"Parameter {self.name()} has no child named {names[0]}") from e
if len(names) > 1:
return param.child(*names[1:])
else:
return param
def param(self, *names):
# for backward compatibility.
return self.child(*names)
def __repr__(self):
return "<%s '%s' at 0x%x>" % (self.__class__.__name__, self.name(), id(self))
def _renameChild(self, child, name):
## Only to be called from Parameter.rename
if name in self.names:
return child.name()
self.names[name] = child
del self.names[child.name()]
return name
def registerItem(self, item):
self.items[item] = None
def hide(self):
"""Hide this parameter. It and its children will no longer be visible in any ParameterTree
widgets it is connected to."""
self.show(False)
def show(self, s=True):
"""Show this parameter. """
self.opts['visible'] = s
self.sigOptionsChanged.emit(self, {'visible': s})
def treeChangeBlocker(self):
"""
Return an object that can be used to temporarily block and accumulate
sigTreeStateChanged signals. This is meant to be used when numerous changes are
about to be made to the tree and only one change signal should be
emitted at the end.
Example::
with param.treeChangeBlocker():
param.addChild(...)
param.removeChild(...)
param.setValue(...)
"""
return SignalBlocker(self.blockTreeChangeSignal, self.unblockTreeChangeSignal)
def blockTreeChangeSignal(self):
"""
Used to temporarily block and accumulate tree change signals.
*You must remember to unblock*, so it is advisable to use treeChangeBlocker() instead.
"""
self.blockTreeChangeEmit += 1
def unblockTreeChangeSignal(self):
"""Unblocks enission of sigTreeStateChanged and flushes the changes out through a single signal."""
self.blockTreeChangeEmit -= 1
self.emitTreeChanges()
@QtCore.Slot(object, object)
def treeStateChanged(self, param, changes):
"""
Called when the state of any sub-parameter has changed.
============== ================================================================
**Arguments:**
param The immediate child whose tree state has changed.
note that the change may have originated from a grandchild.
changes List of tuples describing all changes that have been made
in this event: (param, changeDescr, data)
============== ================================================================
This function can be extended to react to tree state changes.
"""
self.treeStateChanges.extend(changes)
self.emitTreeChanges()
def emitTreeChanges(self):
if self.blockTreeChangeEmit == 0:
changes = self.treeStateChanges
self.treeStateChanges = []
if len(changes) > 0:
self.sigTreeStateChanged.emit(self, changes)
| Parameter |
python | uqfoundation__dill | dill/tests/test_properties.py | {
"start": 402,
"end": 1346
} | class ____(object):
def __init__(self):
self._data = 1
def _get_data(self):
return self._data
def _set_data(self, x):
self._data = x
data = property(_get_data, _set_data)
def test_data_not_none():
FooS = dill.copy(Foo)
assert FooS.data.fget is not None
assert FooS.data.fset is not None
assert FooS.data.fdel is None
def test_data_unchanged():
FooS = dill.copy(Foo)
try:
res = FooS().data
except Exception:
e = sys.exc_info()[1]
raise AssertionError(str(e))
else:
assert res == 1
def test_data_changed():
FooS = dill.copy(Foo)
try:
f = FooS()
f.data = 1024
res = f.data
except Exception:
e = sys.exc_info()[1]
raise AssertionError(str(e))
else:
assert res == 1024
if __name__ == '__main__':
test_data_not_none()
test_data_unchanged()
test_data_changed()
| Foo |
python | doocs__leetcode | solution/0000-0099/0067.Add Binary/Solution2.py | {
"start": 0,
"end": 386
} | class ____:
def addBinary(self, a: str, b: str) -> str:
ans = []
i, j, carry = len(a) - 1, len(b) - 1, 0
while i >= 0 or j >= 0 or carry:
carry += (0 if i < 0 else int(a[i])) + (0 if j < 0 else int(b[j]))
carry, v = divmod(carry, 2)
ans.append(str(v))
i, j = i - 1, j - 1
return ''.join(ans[::-1])
| Solution |
python | pennersr__django-allauth | allauth/socialaccount/providers/kakao/provider.py | {
"start": 760,
"end": 1660
} | class ____(OAuth2Provider):
id = "kakao"
name = "Kakao"
account_class = KakaoAccount
oauth2_adapter_class = KakaoOAuth2Adapter
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
email = data.get("kakao_account", {}).get("email")
nickname = data.get("kakao_account", {}).get("profile", {}).get("nickname")
return dict(email=email, username=nickname)
def extract_email_addresses(self, data):
ret = []
data = data.get("kakao_account", {})
email = data.get("email")
if email:
verified = data.get("is_email_verified")
# data['is_email_verified'] imply the email address is
# verified
ret.append(EmailAddress(email=email, verified=verified, primary=True))
return ret
provider_classes = [KakaoProvider]
| KakaoProvider |
python | fsspec__filesystem_spec | fsspec/transaction.py | {
"start": 1789,
"end": 2398
} | class ____(Transaction):
def __init__(self, fs):
"""
Parameters
----------
fs: FileSystem instance
"""
import distributed
super().__init__(fs)
client = distributed.default_client()
self.files = client.submit(FileActor, actor=True).result()
def complete(self, commit=True):
"""Finish transaction: commit or discard all deferred files"""
if commit:
self.files.commit().result()
else:
self.files.discard().result()
self.fs._intrans = False
self.fs = None
| DaskTransaction |
python | pennersr__django-allauth | allauth/socialaccount/providers/tiktok/provider.py | {
"start": 687,
"end": 1439
} | class ____(OAuth2Provider):
id = "tiktok"
name = "TikTok"
account_class = TikTokAccount
oauth2_adapter_class = TikTokOAuth2Adapter
pkce_enabled_default = False
def extract_uid(self, data):
return str(data["open_id"])
def extract_common_fields(self, data):
# TikTok does not provide an email address
return {
"username": data.get("username") or data.get("display_name"),
"name": data.get("display_name"),
}
def get_default_scope(self):
# Requires LoginKit and Scopes with user.info.basic and user.info.profile enabled
return [TikTokScope.user_info_basic.value, TikTokScope.user_info_profile.value]
provider_classes = [TikTokProvider]
| TikTokProvider |
python | ApeWorX__ape | src/ape/managers/project.py | {
"start": 84450,
"end": 107705
} | class ____(Project):
"""
Manage project(s).
Usage example::
from ape import project, Project
# Interact with local project contracts.
project.MyToken.deploy(sender=...)
# Interact with projects located elsewhere.
other_project = Project("Path/somewhere/else")
other_project.TokenSwapper.deploy(sender=...)
"""
def __init__(
self,
path: Union[Path, str],
manifest_path: Optional[Path] = None,
config_override: Optional[dict] = None,
) -> None:
self._session_source_change_check: set[str] = set()
# NOTE: Set this before super() because needed for self.config read.
self._config_override = config_override or {}
self._base_path = Path(path).resolve()
# A local project uses a special manifest.
self.manifest_path = manifest_path or self._base_path / ".build" / "__local__.json"
manifest = self.load_manifest()
super().__init__(manifest, config_override=self._config_override)
# NOTE: Avoid pointlessly adding info to the __local__ manifest.
# This is mainly for dependencies.
if self.manifest_path.stem != "__local__" and not manifest.sources:
# Perform initial manifest updates.
data: dict = {}
if (
self.name
and self.version
and (self.version != self.manifest.version or self.name != self.manifest.name)
):
# Ensure name / version is in the manifest correctly.
data["name"] = self.name.lower().replace("_", "-")
data["version"] = self.version
try:
src_dict = dict(self.sources.items())
except Exception as err:
logger.error(str(err))
else:
if src_dict and not self.manifest.sources:
# Sources file can be added.
# NOTE: Is also updated after compile changes and
# before publishing.
data["sources"] = src_dict
if data:
self.update_manifest(**data)
# Ensure any custom networks will work, otherwise Ape's network manager
# only knows about the "local" project's.
if custom_nets := (config_override or {}).get("networks", {}).get("custom", []):
self.network_manager._custom_networks.extend(custom_nets)
@log_instead_of_fail(default="<ProjectManager>")
def __repr__(self):
path = f" {clean_path(self._base_path)}"
# NOTE: 'Project' is meta for 'ProjectManager' (mixin magic).
return f"<ProjectManager{path}>"
def __contains__(self, name: str) -> bool:
return name in dir(self) or name in self.contracts
@only_raise_attribute_error
def __getattr__(self, item: str) -> Any:
try:
return get_attribute_with_extras(self, item)
except AttributeError as err:
message = getattr(err, "message", str(err))
did_append = False
if item not in (self.manifest.contract_types or {}):
all_files = get_all_files_in_directory(self.contracts_folder)
for path in all_files:
# Possibly, the user was trying to use a file name instead.
if path.stem != item:
continue
if message and message[-1] not in (".", "?", "!"):
message = f"{message}."
message = (
f"{message} However, there is a source file named '{path.name}'. "
"This file may not be compiling (see error above), or maybe you meant "
"to reference a contract name from this source file?"
)
did_append = True
break
# Possibly, the user does not have compiler plugins installed or working.
missing_exts = set()
for path in all_files:
if ext := get_full_extension(path):
if ext not in self.compiler_manager.registered_compilers:
missing_exts.add(ext)
if missing_exts:
start = "Else, could" if did_append else "Could"
message = (
f"{message} {start} it be from one of the "
"missing compilers for extensions: " + f"{', '.join(sorted(missing_exts))}?"
)
# NOTE: Purposely discard the stack-trace and raise a new exception.
# This shows a better stack-trace to the user (rather than weird
# BaseModel internals).
raise AttributeError(message)
@cached_property
def path(self) -> Path:
"""
The path to the project's "base" (where contract source IDs are relative to).
"""
return self._base_path / (self.config.base_path or "")
@property
def _contract_sources(self) -> list[ContractSource]:
sources = []
for contract in self.contracts.values():
if contract_src := self._create_contract_source(contract.contract_type):
sources.append(contract_src)
return sources
@cached_property
def _deduced_contracts_folder(self) -> Path:
# NOTE: This helper is only called if not configured and not ordinary.
return self._deduce_contracts_folder()
def _deduce_contracts_folder(self) -> Path:
if not self.path.is_dir():
# Not even able to try.
return self.path
common_names = ("contracts", "sources", "src")
for name in common_names:
if (self.path / name).is_dir():
return self.path / name
exts_not_json = {
k for k in self.compiler_manager.registered_compilers.keys() if k != ".json"
}
if not exts_not_json:
# Not really able to look anywhere else.
return self.path
def find_in_subs(pth):
for sub_directory in pth.iterdir():
if not sub_directory.is_dir():
continue
if directory := _find_directory_with_extension(sub_directory, exts_not_json):
return directory
if res := find_in_subs(self.path):
return res
if _find_directory_with_extension(self.path, exts_not_json, recurse=False):
return self.path
# Doesn't exist. Return non-existent default directory.
return self.path / "contracts"
@cached_property
def project_api(self) -> ProjectAPI:
"""
The 'type' of project this is, such as an Ape project
or a Brownie project (or something else).
"""
default_project = self._get_ape_project_api()
valid_apis: list[ProjectAPI] = [default_project] if default_project else []
# ape-config.yaml does no exist. Check for another ProjectAPI type.
project_classes: Iterator[type[ProjectAPI]] = (
t[1]
for t in self.plugin_manager.projects # type: ignore
)
plugins = (t for t in project_classes if not issubclass(t, ApeProject))
for api in plugins:
if instance := api.attempt_validate(path=self._base_path):
valid_apis.append(instance)
num_apis = len(valid_apis)
if num_apis == 1:
# Only 1 valid API- we can proceed from here.
return valid_apis[0]
elif num_apis == 0:
# Invalid project: not a likely scenario, as ApeProject should always work.
raise ProjectError(f"'{self._base_path.name}' is not recognized as a project.")
# More than 1 valid API. Remove default unless its config exists.
if valid_apis[0] == default_project:
if default_project.config_file.is_file():
# If Ape is configured for real, we want these changes at the end of the list,
# since they are most final.
valid_apis = [*valid_apis[1:], valid_apis[0]]
else:
# Remove, as we have others that are _actually_ valid, and the default is not needed.
valid_apis = valid_apis[1:]
if len(valid_apis) == 1:
# After removing the unnecessary default project type, there is only 1 valid project type left.
return valid_apis[0]
# If we get here, there are more than 1 project types we should use.
return MultiProject(apis=valid_apis, path=self._base_path)
def _get_ape_project_api(self) -> Optional[ApeProject]:
if instance := ApeProject.attempt_validate(path=self._base_path):
return cast(ApeProject, instance)
return None
@property
def name(self) -> str:
if name := self.config.get("name"):
return name
elif name := self.manifest.name:
return name
return self._base_path.name.replace("_", "-").lower()
@cached_property
def config(self) -> ApeConfig:
"""
The project configuration (including global defaults).
"""
# NOTE: Accessing the config this way allows us
# to be a different project than the cwd project.
project_config = self.project_api.extract_config(**self._config_override)
return self.config_manager.merge_with_global(project_config)
@cached_property
def contracts(self) -> ContractManager: # type: ignore[override]
"""
Container for managing contracts from local sources.
"""
return ContractManager(self, self.sources)
@property
def contracts_folder(self) -> Path:
"""
The root contract source directory.
"""
if sub_path := self.config.contracts_folder:
return self.path / sub_path
return self._deduced_contracts_folder
@cached_property
def deployments(self) -> DeploymentManager:
"""
Project deployment manager for adding and reading
deployments.
"""
return DeploymentManager(self)
@property
def exclusions(self) -> set[Union[str, Pattern]]:
"""
Source-file exclusion glob patterns.
"""
return {*self.config.compile.exclude, *SOURCE_EXCLUDE_PATTERNS}
@cached_property
def interfaces_folder(self) -> Path:
"""
The root interface source directory.
"""
name = self.config.interfaces_folder
for base in (self.path, self.contracts_folder):
path = base / name
if path.is_dir():
return path
# Defaults to non-existing path / interfaces
return self.path / name
@property
def in_tempdir(self) -> bool:
"""
``True`` when this project is in the temporary directory,
meaning existing only in the temporary directory
namespace.
"""
if not self.path:
return False
return in_tempdir(self.path)
@property
def meta(self) -> PackageMeta:
"""
Metadata about the active project as per EIP
https://eips.ethereum.org/EIPS/eip-2678#the-package-meta-object
Use when publishing your package manifest.
"""
return self.config.meta
@property
def scripts_folder(self) -> Path:
return self.path / "scripts"
@cached_property
def sources(self) -> SourceManager: # type: ignore[override]
"""
All the sources in the project.
"""
return SourceManager(
self.path, lambda: self.contracts_folder, exclude_globs=self.exclusions
)
@property
def tests_folder(self) -> Path:
return self.path / "tests"
@contextmanager
def isolate_in_tempdir(self, **config_override) -> Iterator["LocalProject"]:
"""
Clone this project to a temporary directory and return
its project.vers_settings["outputSelection"]
"""
sources = dict(self.sources.items())
if self.in_tempdir:
# Already in a tempdir.
if config_override:
self.reconfigure(**config_override)
self.manifest.sources = sources
yield self
else:
with super().isolate_in_tempdir(**config_override) as project:
# Add sources to manifest memory, in case they are missing.
project.manifest.sources = sources
yield project
def unpack(self, destination: Path, config_override: Optional[dict] = None) -> "LocalProject":
config_override = {**self._config_override, **(config_override or {})}
def copytree(src, dst):
try:
shutil.copytree(src, dst, dirs_exist_ok=True)
except Exception as err:
logger.error(f"Failed to unpack '{src}' to '{dst}': {err}")
pass
# Unpack contracts.
if self.contracts_folder.is_dir():
contracts_path = get_relative_path(self.contracts_folder, self.path)
contracts_destination = destination / contracts_path
copytree(self.contracts_folder, contracts_destination)
# Unpack config file.
if not (destination / "ape-config.yaml").is_file():
self.config.write_to_disk(destination / "ape-config.yaml")
# Unpack scripts folder.
if self.scripts_folder.is_dir():
copytree(self.scripts_folder, destination / "scripts")
# Unpack tests folder.
if self.tests_folder.is_dir():
copytree(self.tests_folder, destination / "tests")
# Unpack interfaces folder. Avoid double unpacking if already covered in contracts folder.
if self.interfaces_folder.is_dir() and not self.interfaces_folder.is_relative_to(
self.contracts_folder
):
prefix = get_relative_path(self.interfaces_folder.parent, self.path)
interfaces_destination = destination / prefix / self.config.interfaces_folder
interfaces_destination.parent.mkdir(parents=True, exist_ok=True)
copytree(self.interfaces_folder, interfaces_destination)
# Unpack build folder (to avoid needless re-compiling).
if self.manifest_path.parent.is_dir() and self.manifest_path.parent.name == ".build":
build_destination = destination / ".build"
build_destination.parent.mkdir(parents=True, exist_ok=True)
copytree(self.manifest_path.parent, build_destination)
return LocalProject(destination, config_override=config_override)
def load_manifest(self) -> PackageManifest:
"""
Load a publish-able manifest.
Returns:
ethpm_types.PackageManifest
"""
if not self.manifest_path.is_file():
return PackageManifest()
try:
manifest = _load_manifest(self.manifest_path)
except Exception as err:
logger.error(f"__local__.json manifest corrupted! Re-building.\nFull error: {err}.")
self.manifest_path.unlink(missing_ok=True)
manifest = PackageManifest()
self._manifest = manifest
return manifest
def get_contract(self, name: str) -> Any:
if name in self._session_source_change_check:
check_for_changes = False
else:
check_for_changes = True
self._session_source_change_check.add(name)
contract = self.contracts.get(name, check_for_changes=check_for_changes)
if contract:
contract.base_path = self.path
return contract
def update_manifest(self, **kwargs):
# Update the manifest in memory.
super().update_manifest(**kwargs)
# Write updates to disk.
self.manifest_path.unlink(missing_ok=True)
manifest_text = self.manifest.model_dump_json(mode="json", by_alias=True)
self.manifest_path.parent.mkdir(parents=True, exist_ok=True)
self.manifest_path.write_text(manifest_text, encoding="utf8")
def load_contracts(
self,
*source_ids: Union[str, Path],
use_cache: bool = True,
excluded_compilers: Optional[list[str]] = None,
) -> dict[str, ContractContainer]:
paths: Iterable[Path]
starting: dict[str, ContractContainer] = {}
if source_ids:
paths = [(self.path / src_id) for src_id in source_ids]
else:
starting = {
n: ContractContainer(ct)
for n, ct in (self.manifest.contract_types or {}).items()
if use_cache and ct.source_id and (self.path / ct.source_id).is_file()
}
paths = self.sources.paths
new_types = {
c.contract_type.name: c
for c in self.contracts._compile(
paths, use_cache=use_cache, excluded_compilers=excluded_compilers
)
if c.contract_type.name
}
return {**starting, **new_types}
def extract_manifest(self) -> PackageManifest:
"""
Get a finalized manifest for publishing.
Returns:
PackageManifest
"""
sources = dict(self.sources)
contract_types = {
n: c.contract_type
for n, c in self.load_contracts().items()
if c.contract_type.source_id in sources
}
# Add any remaining data to the manifest here.
self.update_manifest(
contract_types=contract_types,
dependencies=self.dependencies.uri_map,
deployments=self.deployments.instance_map,
meta=self.meta,
name=self.name,
sources=sources,
version=self.version,
)
return self.manifest
def clean(self):
super().clean()
if self.manifest_path.name == "__local__.json":
self.manifest_path.unlink(missing_ok=True)
self._manifest = PackageManifest()
self.sources._path_cache = None
self._clear_cached_config()
def chdir(self, path: Optional[Path] = None):
"""
Change the local project to the new path.
Args:
path (Path): The path of the new project.
If not given, defaults to the project's path.
"""
return ChangeDirectory(
self.path,
path or self.path,
on_push=self._handle_path_changed,
on_pop=self._handle_path_restored,
)
def _handle_path_changed(self, path: Path) -> dict:
cache: dict = {
"__dict__": {},
"_session_source_change_check": self._session_source_change_check,
"_config_override": self._config_override,
"_base_path": self._base_path,
"manifest_path": self.manifest_path,
"_manifest": self._manifest,
}
# New path: clear cached properties.
for attr in list(self.__dict__.keys()):
if isinstance(getattr(type(self), attr, None), cached_property):
cache["__dict__"][attr] = self.__dict__.pop(attr)
self._session_source_change_check = set()
self._config_override = {}
self._base_path = Path(path).resolve()
if self.manifest_path.name == "__local__.json":
self.manifest_path = self._base_path / ".build" / "__local__.json"
self._manifest = self.load_manifest()
return cache
def _handle_path_restored(self, cache: dict) -> None:
self.__dict__ = {**(self.__dict__ or {}), **cache.get("__dict__", {})}
self._session_source_change_check = cache.get("_session_source_change_check", set())
self._config_override = cache.get("_config_override", {})
if base_path := self._base_path:
self._base_path = base_path
if manifest_path := cache.get("manifest_path"):
self.manifest_path = manifest_path
if manifest := cache.get("_manifest"):
self._manifest = manifest
@contextmanager
def within_project_path(self):
"""
A context-manager for changing the current working directory to the
project's ``.path``. Then, switching back to whatever the current
directory was before calling this method.
"""
with within_directory(self.path):
yield
def reload_config(self):
"""
Reload the local ape-config.yaml file.
This is useful if the file was modified in the
active python session.
"""
self._clear_cached_config()
_ = self.config
def refresh_sources(self):
"""
Check for file-changes. Typically, you don't need to call this method.
This method exists for when changing files mid-session, you can "refresh"
and Ape will know about the changes.
"""
self._session_source_change_check = set()
self.sources.refresh()
def _clear_cached_config(self):
if "config" in self.__dict__:
del self.__dict__["config"]
def _create_contract_source(self, contract_type: ContractType) -> Optional[ContractSource]:
if not (source_id := contract_type.source_id):
return None
elif not (src := self.sources.get(source_id)):
# Not found in this project's sources.
try:
cwd = Path.cwd()
except Exception:
# Happens when left in a cleaned-up temp path maybe?
cwd = None
if cwd is not None and self.path != cwd:
root_project = self.Project(cwd)
if src := root_project._create_contract_source(contract_type):
return src
return None
try:
return ContractSource.create(contract_type, src, self.path)
except (ValueError, FileNotFoundError):
return None
def _update_contract_types(self, contract_types: dict[str, ContractType]):
super()._update_contract_types(contract_types)
if "ABI" in [x.value for x in self.config.compile.output_extra]:
abi_folder = self.manifest_path.parent / "abi"
shutil.rmtree(abi_folder, ignore_errors=True)
abi_folder.mkdir(parents=True, exist_ok=True)
for name, ct in (self.manifest.contract_types or {}).items():
file = abi_folder / f"{name}.json"
abi_json = json.dumps([x.model_dump(by_alias=True, mode="json") for x in ct.abi])
file.write_text(abi_json, encoding="utf8")
def _find_directory_with_extension(
path: Path, extensions: set[str], recurse: bool = True
) -> Optional[Path]:
if not path.is_dir():
return None
for file in path.iterdir():
if file.is_file() and get_full_extension(file) in extensions:
return file.parent
elif recurse and file.is_dir():
return _find_directory_with_extension(file, extensions)
return None
| LocalProject |
python | getsentry__sentry | src/sentry/api/serializers/models/apikey.py | {
"start": 116,
"end": 495
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs):
return {
"id": str(obj.id),
"label": obj.label,
"key": obj.key,
"scope_list": obj.scope_list,
"status": obj.status,
"allowed_origins": "" if obj.allowed_origins is None else str(obj.allowed_origins),
}
| ApiKeySerializer |
python | tensorflow__tensorflow | tensorflow/lite/examples/experimental_new_converter/stack_trace_example.py | {
"start": 998,
"end": 2573
} | class ____(tf.Module):
"""The test model has unsupported op."""
@tf.function(input_signature=[tf.TensorSpec(shape=[3, 3], dtype=tf.float32)])
def model(self, x):
y = tf.math.reciprocal(x) # Not supported
return y + y
# comment out the `@suppress_exception` to display the stack trace
@suppress_exception
def test_from_saved_model():
"""displaying stack trace when converting saved model."""
test_model = TestModule()
saved_model_path = '/tmp/test.saved_model'
save_options = tf.saved_model.SaveOptions(save_debug_info=True)
tf.saved_model.save(test_model, saved_model_path, options=save_options)
# load the model and convert
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_path)
converter.convert()
# comment out the `@suppress_exception` to display the stack trace
# @suppress_exception
def test_from_concrete_function():
"""displaying stack trace when converting concrete function."""
@tf.function(input_signature=[tf.TensorSpec(shape=[3, 3], dtype=tf.float32)])
def model(x):
y = tf.math.reciprocal(x) # not supported
return y + y
func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([func], model)
converter.convert()
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
sys.stdout.write('==== Testing from_concrete_functions ====\n')
test_from_concrete_function()
sys.stdout.write('==== Testing from_saved_model ====\n')
test_from_saved_model()
if __name__ == '__main__':
app.run(main)
| TestModule |
python | django__django | django/db/migrations/serializer.py | {
"start": 3037,
"end": 4548
} | class ____(BaseSerializer):
@staticmethod
def serialize_deconstructed(path, args, kwargs):
name, imports = DeconstructibleSerializer._serialize_path(path)
strings = []
for arg in args:
arg_string, arg_imports = serializer_factory(arg).serialize()
strings.append(arg_string)
imports.update(arg_imports)
non_ident_kwargs = {}
for kw, arg in sorted(kwargs.items()):
if kw.isidentifier():
arg_string, arg_imports = serializer_factory(arg).serialize()
imports.update(arg_imports)
strings.append("%s=%s" % (kw, arg_string))
else:
non_ident_kwargs[kw] = arg
if non_ident_kwargs:
# Serialize non-identifier keyword arguments as a dict.
kw_string, kw_imports = serializer_factory(non_ident_kwargs).serialize()
strings.append(f"**{kw_string}")
imports.update(kw_imports)
return "%s(%s)" % (name, ", ".join(strings)), imports
@staticmethod
def _serialize_path(path):
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = {"from django.db import models"}
name = "models.%s" % name
else:
imports = {"import %s" % module}
name = path
return name, imports
def serialize(self):
return self.serialize_deconstructed(*self.value.deconstruct())
| DeconstructibleSerializer |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 44461,
"end": 46881
} | class ____(system_info):
section = 'mkl'
dir_env_var = 'MKLROOT'
_lib_mkl = ['mkl_rt']
def get_mkl_rootdir(self):
mklroot = os.environ.get('MKLROOT', None)
if mklroot is not None:
return mklroot
paths = os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)
ld_so_conf = '/etc/ld.so.conf'
if os.path.isfile(ld_so_conf):
with open(ld_so_conf) as f:
for d in f:
d = d.strip()
if d:
paths.append(d)
intel_mkl_dirs = []
for path in paths:
path_atoms = path.split(os.sep)
for m in path_atoms:
if m.startswith('mkl'):
d = os.sep.join(path_atoms[:path_atoms.index(m) + 2])
intel_mkl_dirs.append(d)
break
for d in paths:
dirs = glob(os.path.join(d, 'mkl', '*'))
dirs += glob(os.path.join(d, 'mkl*'))
for sub_dir in dirs:
if os.path.isdir(os.path.join(sub_dir, 'lib')):
return sub_dir
return None
def __init__(self):
mklroot = self.get_mkl_rootdir()
if mklroot is None:
system_info.__init__(self)
else:
from .cpuinfo import cpu
if cpu.is_Itanium():
plt = '64'
elif cpu.is_Intel() and cpu.is_64bit():
plt = 'intel64'
else:
plt = '32'
system_info.__init__(
self,
default_lib_dirs=[os.path.join(mklroot, 'lib', plt)],
default_include_dirs=[os.path.join(mklroot, 'include')])
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
opt = self.get_option_single('mkl_libs', 'libraries')
mkl_libs = self.get_libs(opt, self._lib_mkl)
info = self.check_libs2(lib_dirs, mkl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
if sys.platform == 'win32':
pass # win32 has no pthread library
else:
dict_append(info, libraries=['pthread'])
self.set_info(**info)
| mkl_info |
python | gevent__gevent | src/gevent/libuv/watcher.py | {
"start": 19230,
"end": 20685
} | class ____(object):
_watcher_skip_ffi = True
def __init__(self, loop, *args, **kwargs):
self._async = loop.async_()
try:
super(_SimulatedWithAsyncMixin, self).__init__(loop, *args, **kwargs)
except:
self._async.close()
raise
def _watcher_create(self, _args):
return
@property
def _watcher_handle(self):
return None
def _watcher_ffi_init(self, _args):
return
def _watcher_ffi_set_init_ref(self, ref):
self._async.ref = ref
@property
def active(self):
return self._async.active
def start(self, cb, *args):
assert self._async is not None
self._register_loop_callback()
self.callback = cb
self.args = args
self._async.start(cb, *args)
def stop(self):
self._unregister_loop_callback()
self.callback = None
self.args = None
if self._async is not None:
# If we're stop() after close().
# That should be allowed.
self._async.stop()
def close(self):
if self._async is not None:
a = self._async
self._async = None
a.close()
def _register_loop_callback(self):
# called from start()
raise NotImplementedError()
def _unregister_loop_callback(self):
# called from stop
raise NotImplementedError()
| _SimulatedWithAsyncMixin |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-appsflyer/source_appsflyer/source.py | {
"start": 11981,
"end": 12268
} | class ____(InAppEvents):
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return f"raw-data/export/app/{self.app_id}/in-app-events-retarget/v5"
| RetargetingInAppEvents |
python | google__jax | tests/pallas/pallas_vmap_test.py | {
"start": 1831,
"end": 8403
} | class ____(PallasBaseTest):
def setUp(self):
super().setUp()
if jtu.test_device_matches(["tpu"]):
# TODO: most tests fail on TPU in non-interpret mode
self.skipTest("On TPU the test works only in interpret mode")
def test_vmap_of_simple_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), intx),
)
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
out = jax.vmap(add_one)(jnp.arange(8))
out_ref = jnp.arange(1, 9)
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_simple_kernel_with_in_axes_None(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), intx),
)
def add(x_ref, y_ref, o_ref):
o_ref[()] = x_ref[()] + y_ref[()]
out = jax.vmap(add, in_axes=(0, None))(jnp.arange(8), 1)
out_ref = jnp.arange(1, 9)
np.testing.assert_allclose(out, out_ref)
def test_double_vmap_of_simple_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), intx),
)
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
out = jax.vmap(jax.vmap(add_one))(jnp.arange(8).reshape((4, 2)))
out_ref = jnp.arange(1, 9).reshape((4, 2))
np.testing.assert_allclose(out, out_ref)
def test_quadruple_vmap_of_simple_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), intx),
)
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
out = jax.vmap(jax.vmap(jax.vmap(jax.vmap(add_one))))(
jnp.arange(15 * 8).reshape((5, 3, 4, 2)))
out_ref = jnp.arange(1, 15 * 8 + 1).reshape((5, 3, 4, 2))
np.testing.assert_allclose(out, out_ref)
def test_quadruple_vmap_of_batched_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((7,), intx),
grid=(7,))
def add_one(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = x_ref[i] + 1
out = jax.vmap(jax.vmap(jax.vmap(jax.vmap(add_one))))(
jnp.arange(15 * 8 * 7).reshape((5, 3, 4, 2, 7)))
out_ref = jnp.arange(1, 15 * 8 * 7 + 1).reshape((5, 3, 4, 2, 7))
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_slicing_kernel(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), intx),
grid=(2,))
def add_one(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = x_ref[i] + 1
out = jax.vmap(add_one)(jnp.arange(8).reshape((4, 2)))
out_ref = jnp.arange(1, 9).reshape((4, 2))
np.testing.assert_allclose(out, out_ref)
def test_vmap_with_const_args(self):
if config.use_simplified_jaxpr_constants.value:
self.skipTest("TODO: decide if we want to keep these errors")
to_store = np.arange(128, dtype=np.float32).reshape((1, 128))
x = np.arange(4 * 16 * 128, dtype=np.float32).reshape((4, 16, 128))
@jax.vmap
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((64, 128), x.dtype),
grid=(2,),
in_specs=[pl.BlockSpec((8, 128), lambda i: (i, 0))],
out_specs=pl.BlockSpec((32, 128), lambda i: (i, 0)),
)
def kernel(src, dst):
dst[0:1] = to_store
with self.assertRaisesRegex(
ValueError,
"The kernel function .* captures constants"):
kernel(x)
def test_vmap_of_kernel_with_input_output_aliases(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((), intx),
input_output_aliases={1:0},
grid=())
def add(x_ref, _, o_ref):
o_ref[()] = x_ref[()] + o_ref[()] + 1
out = jax.vmap(add, in_axes=(0, None))(jnp.arange(8), 1)
out_ref = jnp.arange(2, 10)
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_kernel_with_input_output_aliases_different_axes(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((4,), intx),
input_output_aliases={0: 0},
grid=(),
)
def add(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
out = jax.vmap(add, in_axes=1)(jnp.arange(8).reshape((4, 2)))
out_ref = jnp.arange(1, 9).reshape((4, 2)).swapaxes(0, 1)
np.testing.assert_allclose(out, out_ref)
def test_vmap_of_slicing_kernel_different_axes(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), intx),
grid=(2,))
def add_one(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = x_ref[i] + 1
add_one_ref = lambda x: x + 1
x = jnp.arange(8).reshape((2, 4))
out = jax.vmap(add_one, in_axes=1, out_axes=1)(x)
out_ref = jax.vmap(add_one_ref, in_axes=1, out_axes=1)(x)
np.testing.assert_allclose(out, out_ref)
out = jax.vmap(add_one, in_axes=1, out_axes=0)(x)
out_ref = jax.vmap(add_one_ref, in_axes=1, out_axes=0)(x)
np.testing.assert_allclose(out, out_ref)
def test_double_vmap_of_slicing_kernel_different_axes(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((4,), floatx),
grid=(4,))
def sin(x_ref, o_ref):
i = pl.program_id(0)
o_ref[i] = jnp.sin(x_ref[i])
sin_ref = jnp.sin
x = jnp.arange(64.).reshape((8, 4, 2))
out = jax.vmap(jax.vmap(sin, in_axes=1), in_axes=0)(x)
out_ref = jax.vmap(jax.vmap(sin_ref, in_axes=1), in_axes=0)(x)
np.testing.assert_allclose(out, out_ref, atol=1e-3, rtol=1e-3)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
@jtu.skip_on_devices("cpu") # Test is very slow on CPU
def test_small_large_vmap(self):
# Catches https://github.com/jax-ml/jax/issues/18361
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), intx),
grid=(2,))
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
add_one = jax.vmap(jax.vmap(add_one))
add_one_ref = lambda x: x + 1
x = random.randint(random.key(0), (4, 65536, 2), 0, 10000)
out = add_one(x)
out_ref = add_one_ref(x)
np.testing.assert_allclose(out, out_ref)
@jtu.skip_on_devices("cpu") # Test is very slow on CPU
def test_small_small_large_vmap(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((2,), intx),
grid=(2,))
def add_one(x_ref, o_ref):
o_ref[()] = x_ref[()] + 1
add_one = jax.vmap(jax.vmap(jax.vmap(add_one)))
add_one_ref = lambda x: x + 1
x = random.randint(random.key(0), (2, 2, 65536, 2), 0, 10000)
out = add_one(x)
out_ref = add_one_ref(x)
np.testing.assert_allclose(out, out_ref)
| PallasCallVmapTest |
python | falconry__falcon | tests/test_wsgiref_inputwrapper_with_size.py | {
"start": 56,
"end": 513
} | class ____(testing.SimpleTestResource):
"""A simple resource to return the posted request body."""
@falcon.before(testing.capture_responder_args)
def on_post(self, req, resp, **kwargs):
resp.status = falcon.HTTP_200
# NOTE(masterkale): No size needs to be specified here because we're
# emulating a stream read in production.
resp.text = json.dumps({'data': req.bounded_stream.read().decode('utf-8')})
| TypeResource |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.