language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
ray-project__ray
|
python/ray/train/v2/_internal/execution/scaling_policy/scaling_policy.py
|
{
"start": 421,
"end": 526
}
|
class ____(ScalingDecision):
num_workers: int
resources_per_worker: Dict[str, float]
|
ResizeDecision
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/filters.py
|
{
"start": 17852,
"end": 19776
}
|
class ____:
def __init__(self, target: _TargetRefs) -> None:
self.__target = target
self.__last_target = self.__target # use this to append to the end of the chain
def by_ref(self, link_on: str) -> "_FilterByRef":
"""Filter on the given reference."""
self.__last_target.target = _SingleTargetRef(link_on=link_on)
self.__last_target = self.__last_target.target
return self
def by_ref_multi_target(self, reference: str, target_collection: str) -> "_FilterByRef":
"""Filter on the given multi-target reference."""
target_collection = _capitalize_first_letter(target_collection)
self.__last_target.target = _MultiTargetRef(
link_on=reference, target_collection=target_collection
)
self.__last_target = self.__last_target.target
return self
def by_ref_count(self, link_on: str) -> _FilterByCount:
"""Filter on the given reference."""
return _FilterByCount(link_on, self.__target)
def by_id(self) -> _FilterById:
"""Define a filter based on the uuid to be used when querying and deleting from a collection."""
return _FilterById(self.__target)
def by_creation_time(self) -> _FilterByCreationTime:
"""Define a filter based on the creation time to be used when querying and deleting from a collection."""
return _FilterByCreationTime(self.__target)
def by_update_time(self) -> _FilterByUpdateTime:
"""Define a filter based on the update time to be used when querying and deleting from a collection."""
return _FilterByUpdateTime(self.__target)
def by_property(self, name: str, length: bool = False) -> _FilterByProperty:
"""Define a filter based on a property to be used when querying and deleting from a collection."""
return _FilterByProperty(prop=name, length=length, target=self.__target)
|
_FilterByRef
|
python
|
getsentry__sentry
|
tests/sentry/integrations/api/endpoints/test_data_forwarding.py
|
{
"start": 5900,
"end": 14492
}
|
class ____(DataForwardingIndexEndpointTest):
method = "POST"
def test_without_revamp_feature_flag_access(self) -> None:
with self.feature(
{
"organizations:data-forwarding-revamp-access": False,
"organizations:data-forwarding": True,
}
):
response = self.client.post(reverse(self.endpoint, args=(self.organization.slug,)))
assert response.status_code == 403
def test_without_data_forwarding_feature_flag_access(self) -> None:
with self.feature(
{
"organizations:data-forwarding-revamp-access": True,
"organizations:data-forwarding": False,
}
):
response = self.client.post(reverse(self.endpoint, args=(self.organization.slug,)))
assert response.status_code == 403
def test_create_segment_data_forwarder(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_segment_key"},
"is_enabled": True,
"enroll_new_projects": False,
"project_ids": [],
}
response = self.get_success_response(self.organization.slug, status_code=201, **payload)
assert response.data["provider"] == DataForwarderProviderSlug.SEGMENT
assert response.data["config"] == {"write_key": "test_segment_key"}
assert response.data["isEnabled"] is True
assert response.data["enrollNewProjects"] is False
data_forwarder = DataForwarder.objects.get(id=response.data["id"])
assert data_forwarder.organization_id == self.organization.id
assert data_forwarder.provider == DataForwarderProviderSlug.SEGMENT
assert data_forwarder.config == {"write_key": "test_segment_key"}
def test_create_sqs_data_forwarder(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SQS,
"config": {
"queue_url": "https://sqs.us-east-1.amazonaws.com/123456789012/test-queue",
"region": "us-east-1",
"access_key": "AKIAIOSFODNN7EXAMPLE",
"secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
},
"project_ids": [],
}
response = self.get_success_response(self.organization.slug, status_code=201, **payload)
assert response.data["provider"] == DataForwarderProviderSlug.SQS
data_forwarder = DataForwarder.objects.get(id=response.data["id"])
assert data_forwarder.provider == DataForwarderProviderSlug.SQS
def test_create_splunk_data_forwarder(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SPLUNK,
"config": {
"instance_url": "https://splunk.example.com:8089",
"index": "main",
"source": "sentry",
"token": "12345678-1234-1234-1234-123456789abc",
},
"project_ids": [],
}
response = self.get_success_response(self.organization.slug, status_code=201, **payload)
assert response.data["provider"] == DataForwarderProviderSlug.SPLUNK
data_forwarder = DataForwarder.objects.get(id=response.data["id"])
assert data_forwarder.provider == DataForwarderProviderSlug.SPLUNK
def test_create_with_default_values(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_key"},
"project_ids": [],
}
response = self.get_success_response(self.organization.slug, status_code=201, **payload)
assert response.data["isEnabled"] is True
assert response.data["enrollNewProjects"] is False
def test_create_duplicate_provider_fails(self) -> None:
self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "existing_key"},
)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "new_key"},
"project_ids": [],
}
response = self.get_error_response(self.organization.slug, status_code=400, **payload)
assert "already exists" in str(response.data).lower()
def test_create_different_providers_succeeds(self) -> None:
self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "segment_key"},
)
payload = {
"provider": DataForwarderProviderSlug.SQS,
"config": {
"queue_url": "https://sqs.us-east-1.amazonaws.com/123456789012/test-queue",
"region": "us-east-1",
"access_key": "AKIAIOSFODNN7EXAMPLE",
"secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
},
"project_ids": [],
}
response = self.get_success_response(self.organization.slug, status_code=201, **payload)
assert response.data["provider"] == DataForwarderProviderSlug.SQS
def test_create_missing_required_fields(self) -> None:
payload = {
"config": {"write_key": "test_key"},
}
response = self.get_error_response(self.organization.slug, status_code=400, **payload)
assert "provider" in str(response.data).lower()
def test_create_invalid_config(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "invalid key"},
"project_ids": [],
}
response = self.get_error_response(self.organization.slug, status_code=400, **payload)
assert "config" in str(response.data).lower()
def test_create_requires_write_permission(self) -> None:
user_without_permission = self.create_user()
self.login_as(user=user_without_permission)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_key"},
"project_ids": [],
}
self.get_error_response(self.organization.slug, status_code=403, **payload)
def test_create_invalid_provider(self) -> None:
payload = {
"provider": "invalid_provider",
"config": {"write_key": "test_key"},
"project_ids": [],
}
response = self.get_error_response(self.organization.slug, status_code=400, **payload)
assert "provider" in str(response.data).lower()
def test_create_duplicate_provider_returns_error(self) -> None:
self.create_data_forwarder(
provider=DataForwarderProviderSlug.SEGMENT,
config={"write_key": "existing_key"},
)
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "new_key"},
"project_ids": [],
}
response = self.get_error_response(self.organization.slug, status_code=400, **payload)
assert "already exists" in str(response.data).lower()
def test_create_missing_config(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
}
response = self.get_error_response(self.organization.slug, status_code=400, **payload)
assert "config" in str(response.data).lower()
def test_create_without_project_ids(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SEGMENT,
"config": {"write_key": "test_key"},
}
response = self.get_success_response(self.organization.slug, status_code=201, **payload)
assert response.data["provider"] == DataForwarderProviderSlug.SEGMENT
data_forwarder = DataForwarder.objects.get(id=response.data["id"])
assert data_forwarder.projects.count() == 0
def test_create_sqs_fifo_queue_validation(self) -> None:
payload = {
"provider": DataForwarderProviderSlug.SQS,
"config": {
"queue_url": "https://sqs.us-east-1.amazonaws.com/123456789012/test-queue.fifo",
"region": "us-east-1",
"access_key": "AKIAIOSFODNN7EXAMPLE",
"secret_key": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
},
"project_ids": [],
}
response = self.get_error_response(self.organization.slug, status_code=400, **payload)
assert "message_group_id" in str(response.data).lower()
|
DataForwardingIndexPostTest
|
python
|
cython__cython
|
Cython/Debugger/libpython.py
|
{
"start": 88479,
"end": 89556
}
|
class ____(PythonCodeExecutor):
"""
Context manager that fetches the error indicator in the inferior and
restores it on exit.
"""
def __init__(self):
self.sizeof_PyObjectPtr = gdb.lookup_type('PyObject').pointer().sizeof
self.pointer = self.malloc(self.sizeof_PyObjectPtr * 3)
type = self.pointer
value = self.pointer + self.sizeof_PyObjectPtr
traceback = self.pointer + self.sizeof_PyObjectPtr * 2
self.errstate = type, value, traceback
def __enter__(self):
gdb.parse_and_eval("PyErr_Fetch(%d, %d, %d)" % self.errstate)
def __exit__(self, *args):
if gdb.parse_and_eval("(int) PyErr_Occurred()"):
gdb.parse_and_eval("PyErr_Print()")
pyerr_restore = ("PyErr_Restore("
"(PyObject *) *%d,"
"(PyObject *) *%d,"
"(PyObject *) *%d)")
try:
gdb.parse_and_eval(pyerr_restore % self.errstate)
finally:
self.free(self.pointer)
|
FetchAndRestoreError
|
python
|
pyparsing__pyparsing
|
pyparsing/core.py
|
{
"start": 151533,
"end": 152063
}
|
class ____(PositionToken):
"""Matches if current position is at the beginning of the parse
string
"""
def __init__(self) -> None:
super().__init__()
self.set_name("start of text")
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
# see if entire string up to here is just whitespace and ignoreables
if loc != 0 and loc != self.preParse(instring, 0):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
|
StringStart
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-tplcentral/source_tplcentral/streams.py
|
{
"start": 2337,
"end": 3051
}
|
class ____(TplcentralStream):
# https://api.3plcentral.com/rels/inventory/stocksummaries
collection_field = "Summaries"
primary_key = ["FacilityId", "_item_identifier_id"]
page_size = 500
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return "inventory/stocksummaries"
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
records = super().parse_response(response, **kwargs)
for record in records:
record["_item_identifier_id"] = deep_get(record, "ItemIdentifier.Id")
yield record
|
StockSummaries
|
python
|
scipy__scipy
|
scipy/cluster/tests/test_hierarchy.py
|
{
"start": 11901,
"end": 14123
}
|
class ____:
@make_xp_test_case(single, is_isomorphic)
@pytest.mark.parametrize("criterion,t",
[("inconsistent", t) for t in hierarchy_test_data.fcluster_inconsistent]
+ [("distance", t) for t in hierarchy_test_data.fcluster_distance]
+ [("maxclust", t) for t in hierarchy_test_data.fcluster_maxclust]
)
def test_fcluster(self, t, criterion, xp):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = xp.asarray(getattr(hierarchy_test_data, 'fcluster_' + criterion)[t])
Z = single(xp.asarray(hierarchy_test_data.Q_X))
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
@make_xp_test_case(single, is_isomorphic, maxdists)
@pytest.mark.parametrize("t", hierarchy_test_data.fcluster_distance)
def test_fcluster_monocrit(self, t, xp):
expectedT = xp.asarray(hierarchy_test_data.fcluster_distance[t])
Z = single(xp.asarray(hierarchy_test_data.Q_X))
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
@make_xp_test_case(single, is_isomorphic, maxdists)
@pytest.mark.parametrize("t", hierarchy_test_data.fcluster_maxclust)
def test_fcluster_maxclust_monocrit(self, t, xp):
expectedT = xp.asarray(hierarchy_test_data.fcluster_maxclust[t])
Z = single(xp.asarray(hierarchy_test_data.Q_X))
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
@make_xp_test_case(single)
def test_fcluster_maxclust_gh_12651(self, xp):
y = xp.asarray([[1], [4], [5]])
Z = single(y)
assert_array_equal(fcluster(Z, t=1, criterion="maxclust"),
xp.asarray([1, 1, 1]))
assert_array_equal(fcluster(Z, t=2, criterion="maxclust"),
xp.asarray([2, 1, 1]))
assert_array_equal(fcluster(Z, t=3, criterion="maxclust"),
xp.asarray([1, 2, 3]))
assert_array_equal(fcluster(Z, t=5, criterion="maxclust"),
xp.asarray([1, 2, 3]))
@make_xp_test_case(leaders)
|
TestFcluster
|
python
|
pytorch__pytorch
|
test/dynamo/cpython/3_13/test_set.py
|
{
"start": 41295,
"end": 43697
}
|
class ____(__TestCase):
def setUp(self):
self.set = set((2, 4, 6))
super().setUp()
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
|
TestBinaryOps
|
python
|
RaRe-Technologies__gensim
|
gensim/matutils.py
|
{
"start": 18261,
"end": 36601
}
|
class ____:
"""Convert a matrix in scipy.sparse format into a streaming Gensim corpus.
See Also
--------
:func:`~gensim.matutils.corpus2csc`
Convert gensim corpus format to `scipy.sparse.csc` matrix
:class:`~gensim.matutils.Dense2Corpus`
Convert dense matrix to gensim corpus.
"""
def __init__(self, sparse, documents_columns=True):
"""
Parameters
----------
sparse : `scipy.sparse`
Corpus scipy sparse format
documents_columns : bool, optional
Documents will be column?
"""
if documents_columns:
self.sparse = sparse.tocsc()
else:
self.sparse = sparse.tocsr().T # make sure shape[1]=number of docs (needed in len())
def __iter__(self):
"""
Yields
------
list of (int, float)
Document in BoW format.
"""
for indprev, indnow in zip(self.sparse.indptr, self.sparse.indptr[1:]):
yield list(zip(self.sparse.indices[indprev:indnow], self.sparse.data[indprev:indnow]))
def __len__(self):
return self.sparse.shape[1]
def __getitem__(self, key):
"""
Retrieve a document vector or subset from the corpus by key.
Parameters
----------
key: int, ellipsis, slice, iterable object
Index of the document retrieve.
Less commonly, the key can also be a slice, ellipsis, or an iterable
to retrieve multiple documents.
Returns
-------
list of (int, number), Sparse2Corpus
Document in BoW format when `key` is an integer. Otherwise :class:`~gensim.matutils.Sparse2Corpus`.
"""
sparse = self.sparse
if isinstance(key, int):
iprev = self.sparse.indptr[key]
inow = self.sparse.indptr[key + 1]
return list(zip(sparse.indices[iprev:inow], sparse.data[iprev:inow]))
sparse = self.sparse.__getitem__((slice(None, None, None), key))
return Sparse2Corpus(sparse)
def veclen(vec):
"""Calculate L2 (euclidean) length of a vector.
Parameters
----------
vec : list of (int, number)
Input vector in sparse bag-of-words format.
Returns
-------
float
Length of `vec`.
"""
if len(vec) == 0:
return 0.0
length = 1.0 * math.sqrt(sum(val**2 for _, val in vec))
assert length > 0.0, "sparse documents must not contain any explicit zero entries"
return length
def ret_normalized_vec(vec, length):
"""Normalize a vector in L2 (Euclidean unit norm).
Parameters
----------
vec : list of (int, number)
Input vector in BoW format.
length : float
Length of vector
Returns
-------
list of (int, number)
L2-normalized vector in BoW format.
"""
if length != 1.0:
return [(termid, val / length) for termid, val in vec]
else:
return list(vec)
def ret_log_normalize_vec(vec, axis=1):
log_max = 100.0
if len(vec.shape) == 1:
max_val = np.max(vec)
log_shift = log_max - np.log(len(vec) + 1.0) - max_val
tot = np.sum(np.exp(vec + log_shift))
log_norm = np.log(tot) - log_shift
vec -= log_norm
else:
if axis == 1: # independently normalize each sample
max_val = np.max(vec, 1)
log_shift = log_max - np.log(vec.shape[1] + 1.0) - max_val
tot = np.sum(np.exp(vec + log_shift[:, np.newaxis]), 1)
log_norm = np.log(tot) - log_shift
vec = vec - log_norm[:, np.newaxis]
elif axis == 0: # normalize each feature
k = ret_log_normalize_vec(vec.T)
return k[0].T, k[1]
else:
raise ValueError("'%s' is not a supported axis" % axis)
return vec, log_norm
blas_nrm2 = blas('nrm2', np.array([], dtype=float))
blas_scal = blas('scal', np.array([], dtype=float))
def unitvec(vec, norm='l2', return_norm=False):
"""Scale a vector to unit length.
Parameters
----------
vec : {numpy.ndarray, scipy.sparse, list of (int, float)}
Input vector in any format
norm : {'l1', 'l2', 'unique'}, optional
Metric to normalize in.
return_norm : bool, optional
Return the length of vector `vec`, in addition to the normalized vector itself?
Returns
-------
numpy.ndarray, scipy.sparse, list of (int, float)}
Normalized vector in same format as `vec`.
float
Length of `vec` before normalization, if `return_norm` is set.
Notes
-----
Zero-vector will be unchanged.
"""
supported_norms = ('l1', 'l2', 'unique')
if norm not in supported_norms:
raise ValueError("'%s' is not a supported norm. Currently supported norms are %s." % (norm, supported_norms))
if scipy.sparse.issparse(vec):
vec = vec.tocsr()
if norm == 'l1':
veclen = np.sum(np.abs(vec.data))
if norm == 'l2':
veclen = np.sqrt(np.sum(vec.data ** 2))
if norm == 'unique':
veclen = vec.nnz
if veclen > 0.0:
if np.issubdtype(vec.dtype, np.integer):
vec = vec.astype(float)
vec /= veclen
if return_norm:
return vec, veclen
else:
return vec
else:
if return_norm:
return vec, 1.0
else:
return vec
if isinstance(vec, np.ndarray):
if norm == 'l1':
veclen = np.sum(np.abs(vec))
if norm == 'l2':
if vec.size == 0:
veclen = 0.0
else:
veclen = blas_nrm2(vec)
if norm == 'unique':
veclen = np.count_nonzero(vec)
if veclen > 0.0:
if np.issubdtype(vec.dtype, np.integer):
vec = vec.astype(float)
if return_norm:
return blas_scal(1.0 / veclen, vec).astype(vec.dtype), veclen
else:
return blas_scal(1.0 / veclen, vec).astype(vec.dtype)
else:
if return_norm:
return vec, 1.0
else:
return vec
try:
first = next(iter(vec)) # is there at least one element?
except StopIteration:
if return_norm:
return vec, 1.0
else:
return vec
if isinstance(first, (tuple, list)) and len(first) == 2: # gensim sparse format
if norm == 'l1':
length = float(sum(abs(val) for _, val in vec))
if norm == 'l2':
length = 1.0 * math.sqrt(sum(val ** 2 for _, val in vec))
if norm == 'unique':
length = 1.0 * len(vec)
assert length > 0.0, "sparse documents must not contain any explicit zero entries"
if return_norm:
return ret_normalized_vec(vec, length), length
else:
return ret_normalized_vec(vec, length)
else:
raise ValueError("unknown input type")
def cossim(vec1, vec2):
"""Get cosine similarity between two sparse vectors.
Cosine similarity is a number between `<-1.0, 1.0>`, higher means more similar.
Parameters
----------
vec1 : list of (int, float)
Vector in BoW format.
vec2 : list of (int, float)
Vector in BoW format.
Returns
-------
float
Cosine similarity between `vec1` and `vec2`.
"""
vec1, vec2 = dict(vec1), dict(vec2)
if not vec1 or not vec2:
return 0.0
vec1len = 1.0 * math.sqrt(sum(val * val for val in vec1.values()))
vec2len = 1.0 * math.sqrt(sum(val * val for val in vec2.values()))
assert vec1len > 0.0 and vec2len > 0.0, "sparse documents must not contain any explicit zero entries"
if len(vec2) < len(vec1):
vec1, vec2 = vec2, vec1 # swap references so that we iterate over the shorter vector
result = sum(value * vec2.get(index, 0.0) for index, value in vec1.items())
result /= vec1len * vec2len # rescale by vector lengths
return result
def isbow(vec):
"""Checks if a vector is in the sparse Gensim bag-of-words format.
Parameters
----------
vec : object
Object to check.
Returns
-------
bool
Is `vec` in BoW format.
"""
if scipy.sparse.issparse(vec):
vec = vec.todense().tolist()
try:
id_, val_ = vec[0] # checking first value to see if it is in bag of words format by unpacking
int(id_), float(val_)
except IndexError:
return True # this is to handle the empty input case
except (ValueError, TypeError):
return False
return True
def _convert_vec(vec1, vec2, num_features=None):
if scipy.sparse.issparse(vec1):
vec1 = vec1.toarray()
if scipy.sparse.issparse(vec2):
vec2 = vec2.toarray() # converted both the vectors to dense in case they were in sparse matrix
if isbow(vec1) and isbow(vec2): # if they are in bag of words format we make it dense
if num_features is not None: # if not None, make as large as the documents drawing from
dense1 = sparse2full(vec1, num_features)
dense2 = sparse2full(vec2, num_features)
return dense1, dense2
else:
max_len = max(len(vec1), len(vec2))
dense1 = sparse2full(vec1, max_len)
dense2 = sparse2full(vec2, max_len)
return dense1, dense2
else:
# this conversion is made because if it is not in bow format, it might be a list within a list after conversion
# the scipy implementation of Kullback fails in such a case so we pick up only the nested list.
if len(vec1) == 1:
vec1 = vec1[0]
if len(vec2) == 1:
vec2 = vec2[0]
return vec1, vec2
def kullback_leibler(vec1, vec2, num_features=None):
"""Calculate Kullback-Leibler distance between two probability distributions using `scipy.stats.entropy`.
Parameters
----------
vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
num_features : int, optional
Number of features in the vectors.
Returns
-------
float
Kullback-Leibler distance between `vec1` and `vec2`.
Value in range [0, +∞) where values closer to 0 mean less distance (higher similarity).
"""
vec1, vec2 = _convert_vec(vec1, vec2, num_features=num_features)
return entropy(vec1, vec2)
def jensen_shannon(vec1, vec2, num_features=None):
"""Calculate Jensen-Shannon distance between two probability distributions using `scipy.stats.entropy`.
Parameters
----------
vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
num_features : int, optional
Number of features in the vectors.
Returns
-------
float
Jensen-Shannon distance between `vec1` and `vec2`.
Notes
-----
This is a symmetric and finite "version" of :func:`gensim.matutils.kullback_leibler`.
"""
vec1, vec2 = _convert_vec(vec1, vec2, num_features=num_features)
avg_vec = 0.5 * (vec1 + vec2)
return 0.5 * (entropy(vec1, avg_vec) + entropy(vec2, avg_vec))
def hellinger(vec1, vec2):
"""Calculate Hellinger distance between two probability distributions.
Parameters
----------
vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
Returns
-------
float
Hellinger distance between `vec1` and `vec2`.
Value in range `[0, 1]`, where 0 is min distance (max similarity) and 1 is max distance (min similarity).
"""
if scipy.sparse.issparse(vec1):
vec1 = vec1.toarray()
if scipy.sparse.issparse(vec2):
vec2 = vec2.toarray()
if isbow(vec1) and isbow(vec2):
# if it is a BoW format, instead of converting to dense we use dictionaries to calculate appropriate distance
vec1, vec2 = dict(vec1), dict(vec2)
indices = set(list(vec1.keys()) + list(vec2.keys()))
sim = np.sqrt(
0.5 * sum((np.sqrt(vec1.get(index, 0.0)) - np.sqrt(vec2.get(index, 0.0)))**2 for index in indices)
)
return sim
else:
sim = np.sqrt(0.5 * ((np.sqrt(vec1) - np.sqrt(vec2))**2).sum())
return sim
def jaccard(vec1, vec2):
"""Calculate Jaccard distance between two vectors.
Parameters
----------
vec1 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
vec2 : {scipy.sparse, numpy.ndarray, list of (int, float)}
Distribution vector.
Returns
-------
float
Jaccard distance between `vec1` and `vec2`.
Value in range `[0, 1]`, where 0 is min distance (max similarity) and 1 is max distance (min similarity).
"""
# converting from sparse for easier manipulation
if scipy.sparse.issparse(vec1):
vec1 = vec1.toarray()
if scipy.sparse.issparse(vec2):
vec2 = vec2.toarray()
if isbow(vec1) and isbow(vec2):
# if it's in bow format, we use the following definitions:
# union = sum of the 'weights' of both the bags
# intersection = lowest weight for a particular id; basically the number of common words or items
union = sum(weight for id_, weight in vec1) + sum(weight for id_, weight in vec2)
vec1, vec2 = dict(vec1), dict(vec2)
intersection = 0.0
for feature_id, feature_weight in vec1.items():
intersection += min(feature_weight, vec2.get(feature_id, 0.0))
return 1 - float(intersection) / float(union)
else:
# if it isn't in bag of words format, we can use sets to calculate intersection and union
if isinstance(vec1, np.ndarray):
vec1 = vec1.tolist()
if isinstance(vec2, np.ndarray):
vec2 = vec2.tolist()
vec1 = set(vec1)
vec2 = set(vec2)
intersection = vec1 & vec2
union = vec1 | vec2
return 1 - float(len(intersection)) / float(len(union))
def jaccard_distance(set1, set2):
"""Calculate Jaccard distance between two sets.
Parameters
----------
set1 : set
Input set.
set2 : set
Input set.
Returns
-------
float
Jaccard distance between `set1` and `set2`.
Value in range `[0, 1]`, where 0 is min distance (max similarity) and 1 is max distance (min similarity).
"""
union_cardinality = len(set1 | set2)
if union_cardinality == 0: # Both sets are empty
return 1.
return 1. - float(len(set1 & set2)) / float(union_cardinality)
try:
# try to load fast, cythonized code if possible
from gensim._matutils import logsumexp, mean_absolute_difference, dirichlet_expectation
except ImportError:
def logsumexp(x):
"""Log of sum of exponentials.
Parameters
----------
x : numpy.ndarray
Input 2d matrix.
Returns
-------
float
log of sum of exponentials of elements in `x`.
Warnings
--------
For performance reasons, doesn't support NaNs or 1d, 3d, etc arrays like :func:`scipy.special.logsumexp`.
"""
x_max = np.max(x)
x = np.log(np.sum(np.exp(x - x_max)))
x += x_max
return x
def mean_absolute_difference(a, b):
"""Mean absolute difference between two arrays.
Parameters
----------
a : numpy.ndarray
Input 1d array.
b : numpy.ndarray
Input 1d array.
Returns
-------
float
mean(abs(a - b)).
"""
return np.mean(np.abs(a - b))
def dirichlet_expectation(alpha):
"""Expected value of log(theta) where theta is drawn from a Dirichlet distribution.
Parameters
----------
alpha : numpy.ndarray
Dirichlet parameter 2d matrix or 1d vector, if 2d - each row is treated as a separate parameter vector.
Returns
-------
numpy.ndarray
Log of expected values, dimension same as `alpha.ndim`.
"""
if len(alpha.shape) == 1:
result = psi(alpha) - psi(np.sum(alpha))
else:
result = psi(alpha) - psi(np.sum(alpha, 1))[:, np.newaxis]
return result.astype(alpha.dtype, copy=False) # keep the same precision as input
def qr_destroy(la):
"""Get QR decomposition of `la[0]`.
Parameters
----------
la : list of numpy.ndarray
Run QR decomposition on the first elements of `la`. Must not be empty.
Returns
-------
(numpy.ndarray, numpy.ndarray)
Matrices :math:`Q` and :math:`R`.
Notes
-----
Using this function is less memory intense than calling `scipy.linalg.qr(la[0])`,
because the memory used in `la[0]` is reclaimed earlier. This makes a difference when
decomposing very large arrays, where every memory copy counts.
Warnings
--------
Content of `la` as well as `la[0]` gets destroyed in the process. Again, for memory-effiency reasons.
"""
a = np.asfortranarray(la[0])
del la[0], la # now `a` is the only reference to the input matrix
m, n = a.shape
# perform q, r = QR(a); code hacked out of scipy.linalg.qr
logger.debug("computing QR of %s dense matrix", str(a.shape))
geqrf, = get_lapack_funcs(('geqrf',), (a,))
qr, tau, work, info = geqrf(a, lwork=-1, overwrite_a=True)
qr, tau, work, info = geqrf(a, lwork=work[0], overwrite_a=True)
del a # free up mem
assert info >= 0
r = triu(qr[:n, :n])
if m < n: # rare case, #features < #topics
qr = qr[:, :m] # retains fortran order
gorgqr, = get_lapack_funcs(('orgqr',), (qr,))
q, work, info = gorgqr(qr, tau, lwork=-1, overwrite_a=True)
q, work, info = gorgqr(qr, tau, lwork=work[0], overwrite_a=True)
assert info >= 0, "qr failed"
assert q.flags.f_contiguous
return q, r
|
Sparse2Corpus
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-trainer-plugin/mlagents_trainer_plugin/a2c/a2c_optimizer.py
|
{
"start": 801,
"end": 1285
}
|
class ____(OnPolicyHyperparamSettings):
beta: float = 5.0e-3
lambd: float = 0.95
num_epoch: int = attr.ib(default=1) # A2C does just one pass
shared_critic: bool = False
@num_epoch.validator
def _check_num_epoch_one(self, attribute, value):
if value != 1:
raise TrainerConfigError("A2C requires num_epoch = 1")
learning_rate_schedule: ScheduleType = ScheduleType.LINEAR
beta_schedule: ScheduleType = ScheduleType.LINEAR
|
A2CSettings
|
python
|
ansible__ansible
|
lib/ansible/utils/collection_loader/_collection_finder.py
|
{
"start": 32649,
"end": 36214
}
|
class ____(_AnsibleCollectionPkgLoaderBase):
# HACK: stash this in a better place
_redirected_package_map = {} # type: dict[str, str]
_allows_package_code = True
def _validate_args(self):
super(_AnsibleCollectionLoader, self)._validate_args()
if len(self._split_name) < 4:
raise ValueError('this loader is only for sub-collection modules/packages, not {0}'.format(self._fullname))
def _get_candidate_paths(self, path_list):
if len(path_list) != 1 and self._split_name[1:3] != ['ansible', 'builtin']:
raise ValueError('this loader requires exactly one path to search')
return path_list
def _get_subpackage_search_paths(self, candidate_paths):
collection_name = '.'.join(self._split_name[1:3])
collection_meta = _get_collection_metadata(collection_name)
# check for explicit redirection, as well as ancestor package-level redirection (only load the actual code once!)
redirect = None
explicit_redirect = False
routing_entry = _nested_dict_get(collection_meta, ['import_redirection', self._fullname])
if routing_entry:
redirect = routing_entry.get('redirect')
if redirect:
explicit_redirect = True
else:
redirect = _get_ancestor_redirect(self._redirected_package_map, self._fullname)
# NB: package level redirection requires hooking all future imports beneath the redirected source package
# in order to ensure sanity on future relative imports. We always import everything under its "real" name,
# then add a sys.modules entry with the redirected name using the same module instance. If we naively imported
# the source for each redirection, most submodules would import OK, but we'd have N runtime copies of the module
# (one for each name), and relative imports that ascend above the redirected package would break (since they'd
# see the redirected ancestor package contents instead of the package where they actually live).
if redirect:
# FIXME: wrap this so we can be explicit about a failed redirection
self._redirect_module = import_module(redirect)
if explicit_redirect and hasattr(self._redirect_module, '__path__') and self._redirect_module.__path__:
# if the import target looks like a package, store its name so we can rewrite future descendent loads
self._redirected_package_map[self._fullname] = redirect
# if we redirected, don't do any further custom package logic
return None
# we're not doing a redirect- try to find what we need to actually load a module/package
# this will raise ImportError if we can't find the requested module/package at all
if not candidate_paths:
# noplace to look, just ImportError
raise ImportError('package has no paths')
found_path, has_code, package_path = self._module_file_from_path(self._package_to_load, candidate_paths[0])
# still here? we found something to load...
if has_code:
self._source_code_path = found_path
if package_path:
return [package_path] # always needs to be a list
return None
# This loader only answers for intercepted Ansible Python modules. Normal imports will fail here and be picked up later
# by our path_hook importer (which proxies the built-in import mechanisms, allowing normal caching etc to occur)
|
_AnsibleCollectionLoader
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/models.py
|
{
"start": 72076,
"end": 73336
}
|
class ____(Request):
"""
Convert public models to private
:param ids: IDs of the models to convert. Only the models originated by the
company can be converted
:type ids: Sequence[str]
"""
_service = "models"
_action = "make_private"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Ids of the models to convert. Only the models originated by the company can be converted",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(MakePrivateRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> Optional[List[str]]:
return self._property_ids
@ids.setter
def ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
|
MakePrivateRequest
|
python
|
oauthlib__oauthlib
|
tests/oauth1/rfc5849/endpoints/test_base.py
|
{
"start": 14008,
"end": 16520
}
|
class ____(TestCase):
def setUp(self):
v = ClientValidator()
self.e = BaseEndpoint(v)
self.uri = 'https://example.com/'
self.sig = ('oauth_signature=%s&'
'oauth_timestamp=1234567890&'
'oauth_nonce=abcdefghijklmnopqrstuvwxyz&'
'oauth_version=1.0&'
'oauth_signature_method=%s&'
'oauth_token=abcdefghijklmnopqrstuvxyz&'
'oauth_consumer_key=foo')
def test_signature_too_short(self):
short_sig = ('oauth_signature=fmrXnTF4lO4o%2BD0%2FlZaJHP%2FXqEY&'
'oauth_timestamp=1234567890&'
'oauth_nonce=abcdefghijklmnopqrstuvwxyz&'
'oauth_version=1.0&oauth_signature_method=HMAC-SHA1&'
'oauth_token=abcdefghijklmnopqrstuvxyz&'
'oauth_consumer_key=foo')
r = self.e._create_request(self.uri, 'GET', short_sig, URLENCODED)
self.assertFalse(self.e._check_signature(r))
plain = ('oauth_signature=correctlengthbutthewrongcontent1111&'
'oauth_timestamp=1234567890&'
'oauth_nonce=abcdefghijklmnopqrstuvwxyz&'
'oauth_version=1.0&oauth_signature_method=PLAINTEXT&'
'oauth_token=abcdefghijklmnopqrstuvxyz&'
'oauth_consumer_key=foo')
r = self.e._create_request(self.uri, 'GET', plain, URLENCODED)
self.assertFalse(self.e._check_signature(r))
def test_hmac_signature(self):
hmac_sig = "fmrXnTF4lO4o%2BD0%2FlZaJHP%2FXqEY%3D"
sig = self.sig % (hmac_sig, "HMAC-SHA1")
r = self.e._create_request(self.uri, 'GET', sig, URLENCODED)
self.assertTrue(self.e._check_signature(r))
def test_rsa_signature(self):
rsa_sig = ("fxFvCx33oKlR9wDquJ%2FPsndFzJphyBa3RFPPIKi3flqK%2BJ7yIrMVbH"
"YTM%2FLHPc7NChWz4F4%2FzRA%2BDN1k08xgYGSBoWJUOW6VvOQ6fbYhMA"
"FkOGYbuGDbje487XMzsAcv6ZjqZHCROSCk5vofgLk2SN7RZ3OrgrFzf4in"
"xetClqA%3D")
sig = self.sig % (rsa_sig, "RSA-SHA1")
r = self.e._create_request(self.uri, 'GET', sig, URLENCODED)
self.assertTrue(self.e._check_signature(r))
def test_plaintext_signature(self):
plain_sig = "super%252520secret%26even%252520more%252520secret"
sig = self.sig % (plain_sig, "PLAINTEXT")
r = self.e._create_request(self.uri, 'GET', sig, URLENCODED)
self.assertTrue(self.e._check_signature(r))
|
SignatureVerificationTest
|
python
|
python-attrs__attrs
|
tests/test_dunders.py
|
{
"start": 27351,
"end": 28742
}
|
class ____:
def test_filenames(self):
"""
The created dunder methods have a "consistent" filename.
"""
assert (
OriginalC.__init__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C>"
)
assert (
OriginalC.__eq__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C>"
)
assert (
OriginalC.__hash__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C>"
)
assert (
CopyC.__init__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C>"
)
assert (
CopyC.__eq__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C>"
)
assert (
CopyC.__hash__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C>"
)
assert (
C.__init__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C-1>"
)
assert (
C.__eq__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C-1>"
)
assert (
C.__hash__.__code__.co_filename
== "<attrs generated methods tests.test_dunders.C-1>"
)
|
TestFilenames
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_linalg.py
|
{
"start": 1911,
"end": 2316
}
|
class ____(Benchmark):
params = sorted(set(TYPES1) - {'float16'})
param_names = ['dtype']
def setup(self, typename):
np.seterr(all='ignore')
self.a = get_squares_()[typename]
def time_svd(self, typename):
np.linalg.svd(self.a)
def time_pinv(self, typename):
np.linalg.pinv(self.a)
def time_det(self, typename):
np.linalg.det(self.a)
|
Linalg
|
python
|
dask__dask
|
dask/dataframe/dask_expr/_expr.py
|
{
"start": 92340,
"end": 92425
}
|
class ____(Binop):
operation = operator.floordiv
_operator_repr = "//"
|
FloorDiv
|
python
|
huggingface__transformers
|
src/transformers/models/maskformer/modeling_maskformer_swin.py
|
{
"start": 19467,
"end": 20128
}
|
class ____(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->MaskFormerSwin
|
MaskFormerSwinIntermediate
|
python
|
scikit-learn__scikit-learn
|
sklearn/compose/tests/test_column_transformer.py
|
{
"start": 1553,
"end": 1860
}
|
class ____(BaseEstimator):
def __init__(self, csr_container):
self.csr_container = csr_container
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
n_samples = len(X)
return self.csr_container(sparse.eye(n_samples, n_samples))
|
SparseMatrixTrans
|
python
|
numba__numba
|
numba/tests/test_nested_calls.py
|
{
"start": 1433,
"end": 4082
}
|
class ____(TestCase):
def compile_func(self, pyfunc, objmode=False):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
flags = dict(forceobj=True) if objmode else dict(nopython=True)
f = jit(**flags)(pyfunc)
return f, check
def test_boolean_return(self):
@jit(nopython=True)
def inner(x):
return not x
@jit(nopython=True)
def outer(x):
if inner(x):
return True
else:
return False
self.assertFalse(outer(True))
self.assertTrue(outer(False))
def test_named_args(self, objmode=False):
"""
Test a nested function call with named (keyword) arguments.
"""
cfunc, check = self.compile_func(f, objmode)
check(1, 2, 3)
check(1, y=2, z=3)
def test_named_args_objmode(self):
self.test_named_args(objmode=True)
def test_default_args(self, objmode=False):
"""
Test a nested function call using default argument values.
"""
cfunc, check = self.compile_func(g, objmode)
check(1, 2, 3)
check(1, y=2, z=3)
def test_default_args_objmode(self):
self.test_default_args(objmode=True)
def test_star_args(self):
"""
Test a nested function call to a function with *args in its signature.
"""
cfunc, check = self.compile_func(star)
check(1, 2, 3)
def test_star_call(self, objmode=False):
"""
Test a function call with a *args.
"""
cfunc, check = self.compile_func(star_call, objmode)
check(1, (2,), (3,))
def test_star_call_objmode(self):
self.test_star_call(objmode=True)
def test_argcast(self):
"""
Issue #1488: implicitly casting an argument variable should not
break nested calls.
"""
cfunc, check = self.compile_func(argcast)
check(1, 0)
check(1, 1)
def test_call_generated(self):
"""
Test a nested function call to a generated jit function.
"""
cfunc = jit(nopython=True)(call_generated)
self.assertPreciseEqual(cfunc(1, 2), (-4, 2))
self.assertPreciseEqual(cfunc(1j, 2), (1j + 5, 2))
def test_nested_annotated(self):
"""
Tested a nested function with annotations.
"""
cfunc = njit()(nested_annotated)
# should not fail
cfunc()
if __name__ == '__main__':
unittest.main()
|
TestNestedCall
|
python
|
scipy__scipy
|
scipy/fft/_pocketfft/tests/test_basic.py
|
{
"start": 28718,
"end": 28853
}
|
class ____:
def __init__(self, data):
self._data = data
self.__array_interface__ = data.__array_interface__
|
FakeArray
|
python
|
huggingface__transformers
|
tests/models/moshi/test_modeling_moshi.py
|
{
"start": 16284,
"end": 22824
}
|
class ____:
def __init__(
self,
parent,
batch_size=4, # need batch_size != num_hidden_layers
seq_length=7,
is_training=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=8,
intermediate_size=4,
hidden_act="silu",
rms_norm_eps=0.001,
ffn_dim=32,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=100,
pad_token_id=25,
bos_token_id=25,
num_codebooks=4,
audio_encoder_type="mimi",
attn_implementation="eager",
depth_hidden_size=16,
depth_num_hidden_layers=2,
depth_max_position_embeddings=5,
depth_num_attention_heads=8,
depth_ffn_dim=16,
depth_sliding_window=4,
mimi_intermediate_size=40,
mimi_hidden_size=32,
mimi_num_filters=8,
mimi_num_residual_layers=1,
mimi_upsampling_ratios=[8, 4],
mimi_codebook_size=64,
mimi_vector_quantization_hidden_dimension=64,
mimi_codebook_dim=64,
mimi_upsample_groups=32,
mimi_num_hidden_layers=2,
mimi_num_attention_heads=2,
mimi_num_key_value_heads=2,
mimi_sliding_window=3,
sampling_rate=800,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.rms_norm_eps = rms_norm_eps
self.ffn_dim = ffn_dim
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.num_codebooks = num_codebooks
self.attn_implementation = attn_implementation
self.depth_hidden_size = depth_hidden_size
self.depth_num_hidden_layers = depth_num_hidden_layers
self.depth_max_position_embeddings = depth_max_position_embeddings
self.depth_num_attention_heads = depth_num_attention_heads
self.depth_ffn_dim = depth_ffn_dim
self.depth_sliding_window = depth_sliding_window
self.audio_encoder_type = audio_encoder_type
self.mimi_intermediate_size = mimi_intermediate_size
self.mimi_hidden_size = mimi_hidden_size
self.mimi_num_filters = mimi_num_filters
self.mimi_num_residual_layers = mimi_num_residual_layers
self.mimi_upsampling_ratios = mimi_upsampling_ratios
self.mimi_codebook_size = mimi_codebook_size
self.mimi_vector_quantization_hidden_dimension = mimi_vector_quantization_hidden_dimension
self.mimi_codebook_dim = mimi_codebook_dim
self.mimi_upsample_groups = mimi_upsample_groups
self.mimi_num_hidden_layers = mimi_num_hidden_layers
self.mimi_num_attention_heads = mimi_num_attention_heads
self.mimi_num_key_value_heads = mimi_num_key_value_heads
self.mimi_sliding_window = mimi_sliding_window
self.sampling_rate = sampling_rate
self.num_hidden_states_types = 2
def prepare_config_and_inputs(self, batch_size=None):
batch_size = self.batch_size if batch_size is None else batch_size
input_ids = ids_tensor([batch_size, self.seq_length], self.vocab_size)
moshi_audio_codes = ids_tensor([batch_size, self.num_codebooks, self.seq_length], self.mimi_codebook_size)
user_audio_codes = ids_tensor([batch_size, self.num_codebooks, self.seq_length], self.mimi_codebook_size)
attention_mask = input_ids.ne(self.pad_token_id)
config = self.get_config()
inputs_dict = {
"input_ids": input_ids,
"moshi_audio_codes": moshi_audio_codes,
"user_audio_codes": user_audio_codes,
"attention_mask": attention_mask,
}
return config, inputs_dict
def get_config(self):
mimi_dict_config = {
"model_type": self.audio_encoder_type,
"audio_channels": 1,
"hidden_size": self.mimi_hidden_size,
"num_filters": self.mimi_num_filters,
"num_residual_layers": self.mimi_num_residual_layers,
"upsampling_ratios": self.mimi_upsampling_ratios,
"codebook_size": self.mimi_codebook_size,
"vector_quantization_hidden_dimension": self.mimi_vector_quantization_hidden_dimension,
"upsample_groups": self.mimi_upsample_groups,
"num_hidden_layers": self.mimi_num_hidden_layers,
"num_attention_heads": self.mimi_num_attention_heads,
"num_key_value_heads": self.mimi_num_key_value_heads,
"sliding_window": self.mimi_sliding_window,
"codebook_dim": self.mimi_codebook_dim,
"use_cache": False,
"sampling_rate": self.sampling_rate,
}
depth_dict_config = {
"hidden_size": self.depth_hidden_size,
"num_hidden_layers": self.depth_num_hidden_layers,
"max_position_embeddings": self.depth_max_position_embeddings,
"num_attention_heads": self.depth_num_attention_heads,
"ffn_dim": self.depth_ffn_dim,
"sliding_window": self.depth_sliding_window,
}
config = MoshiConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
d_ff=self.intermediate_size,
num_codebooks=self.num_codebooks,
rms_norm_eps=self.rms_norm_eps,
tie_word_embeddings=False,
pad_token_id=self.pad_token_id,
bos_token_id=self.bos_token_id,
ffn_dim=self.ffn_dim,
audio_encoder_config=mimi_dict_config,
depth_decoder_config=depth_dict_config,
attn_implementation=self.attn_implementation,
)
return config
def prepare_config_and_inputs_for_common(self, batch_size=None):
config, inputs_dict = self.prepare_config_and_inputs(batch_size)
return config, inputs_dict
@require_torch
|
MoshiTester
|
python
|
django__django
|
tests/admin_views/tests.py
|
{
"start": 148866,
"end": 149818
}
|
class ____(TestCase):
"""Regression test for #17333"""
@classmethod
def setUpTestData(cls):
# User who can change Reports
cls.changeuser = User.objects.create_user(
username="changeuser", password="secret", is_staff=True
)
cls.changeuser.user_permissions.add(
get_perm(Report, get_permission_codename("change", Report._meta))
)
def test_no_standard_modeladmin_urls(self):
"""
Admin index views don't break when user's ModelAdmin removes standard
urls
"""
self.client.force_login(self.changeuser)
r = self.client.get(reverse("admin:index"))
# we shouldn't get a 500 error caused by a NoReverseMatch
self.assertEqual(r.status_code, 200)
self.client.post(reverse("admin:logout"))
@skipUnlessDBFeature("can_defer_constraint_checks")
@override_settings(ROOT_URLCONF="admin_views.urls")
|
AdminViewsNoUrlTest
|
python
|
getsentry__sentry
|
tests/sentry/utils/locking/backends/test_redis.py
|
{
"start": 2191,
"end": 2382
}
|
class ____(RedisBackendTestCaseBase, TestCase):
backend_class = RedisLockBackend
@cached_property
def cluster(self):
return clusters.get("default")
|
RedisLockBackendTestCase
|
python
|
dask__distributed
|
distributed/worker.py
|
{
"start": 7434,
"end": 124705
}
|
class ____(BaseWorker, ServerNode):
"""Worker node in a Dask distributed cluster
Workers perform two functions:
1. **Serve data** from a local dictionary
2. **Perform computation** on that data and on data from peers
Workers keep the scheduler informed of their data and use that scheduler to
gather data from other workers when necessary to perform a computation.
You can start a worker with the ``dask worker`` command line application::
$ dask worker scheduler-ip:port
Use the ``--help`` flag to see more options::
$ dask worker --help
The rest of this docstring is about the internal state that the worker uses
to manage and track internal computations.
**State**
**Informational State**
These attributes don't change significantly during execution.
* **nthreads:** ``int``:
Number of nthreads used by this worker process
* **executors:** ``dict[str, concurrent.futures.Executor]``:
Executors used to perform computation. Always contains the default
executor.
* **local_directory:** ``path``:
Path on local machine to store temporary files
* **scheduler:** ``PooledRPCCall``:
Location of scheduler. See ``.ip/.port`` attributes.
* **name:** ``string``:
Alias
* **services:** ``{str: Server}``:
Auxiliary web servers running on this worker
* **service_ports:** ``{str: port}``:
* **transfer_outgoing_count_limit**: ``int``
The maximum number of concurrent outgoing data transfers.
See also
:attr:`distributed.worker_state_machine.WorkerState.transfer_incoming_count_limit`.
* **batched_stream**: ``BatchedSend``
A batched stream along which we communicate to the scheduler
* **log**: ``[(message)]``
A structured and queryable log. See ``Worker.story``
**Volatile State**
These attributes track the progress of tasks that this worker is trying to
complete. In the descriptions below a ``key`` is the name of a task that
we want to compute and ``dep`` is the name of a piece of dependent data
that we want to collect from others.
* **threads**: ``{key: int}``
The ID of the thread on which the task ran
* **active_threads**: ``{int: key}``
The keys currently running on active threads
* **state**: ``WorkerState``
Encapsulated state machine. See
:class:`~distributed.worker_state_machine.BaseWorker` and
:class:`~distributed.worker_state_machine.WorkerState`
Parameters
----------
scheduler_ip: str, optional
scheduler_port: int, optional
scheduler_file: str, optional
host: str, optional
data: MutableMapping, type, None
The object to use for storage, builds a disk-backed LRU dict by default.
If a callable to construct the storage object is provided, it
will receive the worker's attr:``local_directory`` as an
argument if the calling signature has an argument named
``worker_local_directory``.
nthreads: int, optional
local_directory: str, optional
Directory where we place local resources
name: str, optional
memory_limit: int, float, string
Number of bytes of memory that this worker should use.
Set to zero for no limit. Set to 'auto' to calculate
as system.MEMORY_LIMIT * min(1, nthreads / total_cores)
Use strings or numbers like 5GB or 5e9
memory_target_fraction: float or False
Fraction of memory to try to stay beneath
(default: read from config key distributed.worker.memory.target)
memory_spill_fraction: float or False
Fraction of memory at which we start spilling to disk
(default: read from config key distributed.worker.memory.spill)
memory_pause_fraction: float or False
Fraction of memory at which we stop running new tasks
(default: read from config key distributed.worker.memory.pause)
max_spill: int, string or False
Limit of number of bytes to be spilled on disk.
(default: read from config key distributed.worker.memory.max-spill)
executor: concurrent.futures.Executor, dict[str, concurrent.futures.Executor], "offload"
The executor(s) to use. Depending on the type, it has the following meanings:
- Executor instance: The default executor.
- Dict[str, Executor]: mapping names to Executor instances. If the
"default" key isn't in the dict, a "default" executor will be created
using ``ThreadPoolExecutor(nthreads)``.
- Str: The string "offload", which refer to the same thread pool used for
offloading communications. This results in the same thread being used
for deserialization and computation.
resources: dict
Resources that this worker has like ``{'GPU': 2}``
nanny: str
Address on which to contact nanny, if it exists
lifetime: str
Amount of time like "1 hour" after which we gracefully shut down the worker.
This defaults to None, meaning no explicit shutdown time.
lifetime_stagger: str
Amount of time like "5 minutes" to stagger the lifetime value
The actual lifetime will be selected uniformly at random between
lifetime +/- lifetime_stagger
lifetime_restart: bool
Whether or not to restart a worker after it has reached its lifetime
Default False
kwargs: optional
Additional parameters to ServerNode constructor
Examples
--------
Use the command line to start a worker::
$ dask scheduler
Start scheduler at 127.0.0.1:8786
$ dask worker 127.0.0.1:8786
Start worker at: 127.0.0.1:1234
Registered with scheduler at: 127.0.0.1:8786
See Also
--------
distributed.scheduler.Scheduler
distributed.nanny.Nanny
"""
_instances: ClassVar[weakref.WeakSet[Worker]] = weakref.WeakSet()
_initialized_clients: ClassVar[weakref.WeakSet[Client]] = weakref.WeakSet()
nanny: Nanny | None
_lock: threading.Lock
transfer_outgoing_count_limit: int
threads: dict[Key, int] # {ts.key: thread ID}
active_threads_lock: threading.Lock
active_threads: dict[int, Key] # {thread ID: ts.key}
active_keys: set[Key]
profile_keys: defaultdict[str, dict[str, Any]]
profile_keys_history: deque[tuple[float, dict[str, dict[str, Any]]]]
profile_recent: dict[str, Any]
profile_history: deque[tuple[float, dict[str, Any]]]
transfer_incoming_log: deque[dict[str, Any]]
transfer_outgoing_log: deque[dict[str, Any]]
#: Total number of data transfers to other workers since the worker was started
transfer_outgoing_count_total: int
#: Total size of data transfers to other workers (including in-progress and failed transfers)
transfer_outgoing_bytes_total: int
#: Current total size of open data transfers to other workers
transfer_outgoing_bytes: int
#: Current number of open data transfers to other workers
transfer_outgoing_count: int
bandwidth: float
latency: float
profile_cycle_interval: float
workspace: WorkSpace
_client: Client | None
bandwidth_workers: defaultdict[str, tuple[float, int]]
bandwidth_types: defaultdict[type, tuple[float, int]]
preloads: preloading.PreloadManager
contact_address: str | None
_start_port: int | str | Collection[int] | None = None
_start_host: str | None
_interface: str | None
_protocol: str
_dashboard_address: str | None
_dashboard: bool
_http_prefix: str
death_timeout: float | None
lifetime: float | None
lifetime_stagger: float | None
lifetime_restart: bool
extensions: dict
security: Security
connection_args: dict[str, Any]
loop: IOLoop
executors: dict[str, Executor]
batched_stream: BatchedSend
name: Any
scheduler_delay: float
stream_comms: dict[str, BatchedSend]
heartbeat_interval: float
services: dict[str, Any] = {}
service_specs: dict[str, Any]
metrics: dict[str, Callable[[Worker], Any]]
startup_information: dict[str, Callable[[Worker], Any]]
low_level_profiler: bool
scheduler: PooledRPCCall
execution_state: dict[str, Any]
plugins: dict[str, WorkerPlugin]
_pending_plugins: tuple[WorkerPlugin, ...]
def __init__(
self,
scheduler_ip: str | None = None,
scheduler_port: int | None = None,
*,
scheduler_file: str | None = None,
nthreads: int | None = None,
loop: IOLoop | None = None, # Deprecated
local_directory: str | None = None,
services: dict | None = None,
name: Any | None = None,
reconnect: bool | None = None,
executor: Executor | dict[str, Executor] | Literal["offload"] | None = None,
resources: dict[str, float] | None = None,
silence_logs: int | None = None,
death_timeout: Any | None = None,
preload: list[str] | None = None,
preload_argv: list[str] | list[list[str]] | None = None,
security: Security | dict[str, Any] | None = None,
contact_address: str | None = None,
heartbeat_interval: Any = "1s",
extensions: dict[str, type] | None = None,
metrics: Mapping[str, Callable[[Worker], Any]] = DEFAULT_METRICS,
startup_information: Mapping[
str, Callable[[Worker], Any]
] = DEFAULT_STARTUP_INFORMATION,
interface: str | None = None,
host: str | None = None,
port: int | str | Collection[int] | None = None,
protocol: str | None = None,
dashboard_address: str | None = None,
dashboard: bool = False,
http_prefix: str = "/",
nanny: Nanny | None = None,
plugins: tuple[WorkerPlugin, ...] = (),
low_level_profiler: bool | None = None,
validate: bool | None = None,
profile_cycle_interval=None,
lifetime: Any | None = None,
lifetime_stagger: Any | None = None,
lifetime_restart: bool | None = None,
transition_counter_max: int | Literal[False] = False,
###################################
# Parameters to WorkerMemoryManager
memory_limit: str | float = "auto",
# Allow overriding the dict-like that stores the task outputs.
# This is meant for power users only. See WorkerMemoryManager for details.
data: WorkerDataParameter = None,
# Deprecated parameters; please use dask config instead.
memory_target_fraction: float | Literal[False] | None = None,
memory_spill_fraction: float | Literal[False] | None = None,
memory_pause_fraction: float | Literal[False] | None = None,
###################################
# Parameters to Server
scheduler_sni: str | None = None,
WorkerStateClass: type = WorkerState,
**kwargs,
):
if reconnect is not None:
if reconnect:
raise ValueError(
"The `reconnect=True` option for `Worker` has been removed. "
"To improve cluster stability, workers now always shut down in the face of network disconnects. "
"For details, or if this is an issue for you, see https://github.com/dask/distributed/issues/6350."
)
else:
warnings.warn(
"The `reconnect` argument to `Worker` is deprecated, and will be removed in a future release. "
"Worker reconnection is now always disabled, so passing `reconnect=False` is unnecessary. "
"See https://github.com/dask/distributed/issues/6350 for details.",
DeprecationWarning,
stacklevel=2,
)
if loop is not None:
warnings.warn(
"The `loop` argument to `Worker` is ignored, and will be removed in a future release. "
"The Worker always binds to the current loop",
DeprecationWarning,
stacklevel=2,
)
self.__exit_stack = stack = contextlib.ExitStack()
self.nanny = nanny
self._lock = threading.Lock()
transfer_incoming_count_limit = dask.config.get(
"distributed.worker.connections.outgoing"
)
self.transfer_outgoing_count_limit = dask.config.get(
"distributed.worker.connections.incoming"
)
transfer_message_bytes_limit = parse_bytes(
dask.config.get("distributed.worker.transfer.message-bytes-limit")
)
self.threads = {}
self.active_threads_lock = threading.Lock()
self.active_threads = {}
self.active_keys = set()
self.profile_keys = defaultdict(profile.create)
maxlen = dask.config.get("distributed.admin.low-level-log-length")
self.profile_keys_history = deque(maxlen=maxlen)
self.profile_history = deque(maxlen=maxlen)
self.profile_recent = profile.create()
if validate is None:
validate = dask.config.get("distributed.worker.validate")
self.transfer_incoming_log = deque(maxlen=maxlen)
self.transfer_outgoing_log = deque(maxlen=maxlen)
self.transfer_outgoing_count_total = 0
self.transfer_outgoing_bytes_total = 0
self.transfer_outgoing_bytes = 0
self.transfer_outgoing_count = 0
self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
self.bandwidth_workers = defaultdict(
lambda: (0, 0)
) # bw/count recent transfers
self.bandwidth_types = defaultdict(lambda: (0, 0)) # bw/count recent transfers
self.latency = 0.001
self._client = None
if profile_cycle_interval is None:
profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle")
profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms")
assert profile_cycle_interval
self._setup_logging(logger)
self.death_timeout = parse_timedelta(death_timeout)
self.contact_address = contact_address
self._start_port = port
self._start_host = host
if host:
# Helpful error message if IPv6 specified incorrectly
_, host_address = parse_address(host)
if host_address.count(":") > 1 and not host_address.startswith("["):
raise ValueError(
"Host address with IPv6 must be bracketed like '[::1]'; "
f"got {host_address}"
)
self._interface = interface
nthreads = nthreads or CPU_COUNT
if resources is None:
resources = dask.config.get("distributed.worker.resources")
assert isinstance(resources, dict)
self.extensions = {}
if silence_logs:
stack.enter_context(silence_logging_cmgr(level=silence_logs))
if isinstance(security, dict):
security = Security(**security)
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args("worker")
self.loop = self.io_loop = IOLoop.current()
if scheduler_sni:
self.connection_args["server_hostname"] = scheduler_sni
self.name = name
executor_pool_prefix = f"{self.name}-" if self.name else ""
# Common executors always available
self.executors = {
"offload": utils._offload_executor,
"actor": ThreadPoolExecutor(
1, thread_name_prefix=f"{executor_pool_prefix}Dask-Actor-Threads"
),
}
# Find the default executor
if executor == "offload":
self.executors["default"] = self.executors["offload"]
elif isinstance(executor, dict):
self.executors.update(executor)
elif executor is not None:
self.executors["default"] = executor
if "default" not in self.executors:
self.executors["default"] = ThreadPoolExecutor(
nthreads,
thread_name_prefix=f"{executor_pool_prefix}Dask-Default-Threads",
)
self.batched_stream = BatchedSend(interval="2ms", loop=self.loop)
self.scheduler_delay = 0
self.stream_comms = {}
self.plugins = {}
self._pending_plugins = plugins
self.services = {}
self.service_specs = services or {}
self._dashboard_address = dashboard_address
self._dashboard = dashboard
self._http_prefix = http_prefix
self.metrics = dict(metrics) if metrics else {}
self.startup_information = (
dict(startup_information) if startup_information else {}
)
if low_level_profiler is None:
low_level_profiler = dask.config.get("distributed.worker.profile.low-level")
self.low_level_profiler = low_level_profiler
handlers = {
"gather": self.gather,
"run": self.run,
"run_coroutine": self.run_coroutine,
"get_data": self.get_data,
"update_data": self.update_data,
"free_keys": self._handle_remote_stimulus(FreeKeysEvent),
"terminate": self.close,
"ping": pingpong,
"upload_file": self.upload_file,
"call_stack": self.get_call_stack,
"profile": self.get_profile,
"profile_metadata": self.get_profile_metadata,
"get_logs": self.get_logs,
"keys": self.keys,
"versions": self.versions,
"actor_execute": self.actor_execute,
"actor_attribute": self.actor_attribute,
"plugin-add": self.plugin_add,
"plugin-remove": self.plugin_remove,
"get_monitor_info": self.get_monitor_info,
"benchmark_disk": self.benchmark_disk,
"benchmark_memory": self.benchmark_memory,
"benchmark_network": self.benchmark_network,
"get_story": self.get_story,
}
stream_handlers = {
"close": self.close,
"cancel-compute": self._handle_remote_stimulus(CancelComputeEvent),
"acquire-replicas": self._handle_remote_stimulus(AcquireReplicasEvent),
"compute-task": self._handle_remote_stimulus(ComputeTaskEvent),
"free-keys": self._handle_remote_stimulus(FreeKeysEvent),
"remove-replicas": self._handle_remote_stimulus(RemoveReplicasEvent),
"steal-request": self._handle_remote_stimulus(StealRequestEvent),
"refresh-who-has": self._handle_remote_stimulus(RefreshWhoHasEvent),
"worker-status-change": self.handle_worker_status_change,
"remove-worker": self._handle_remove_worker,
}
ServerNode.__init__(
self,
handlers=handlers,
stream_handlers=stream_handlers,
connection_args=self.connection_args,
local_directory=local_directory,
**kwargs,
)
if not preload:
preload = dask.config.get("distributed.worker.preload")
if not preload_argv:
preload_argv = dask.config.get("distributed.worker.preload-argv")
assert preload is not None
assert preload_argv is not None
self.preloads = preloading.process_preloads(
self, preload, preload_argv, file_dir=self.local_directory
)
if scheduler_file:
cfg = json_load_robust(scheduler_file, timeout=self.death_timeout)
scheduler_addr = cfg["address"]
elif scheduler_ip is None and dask.config.get("scheduler-address", None):
scheduler_addr = dask.config.get("scheduler-address")
elif scheduler_port is None:
scheduler_addr = coerce_to_address(scheduler_ip)
else:
scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
if protocol is None:
protocol_address = scheduler_addr.split("://")
if len(protocol_address) == 2:
protocol = protocol_address[0]
assert protocol
self._protocol = protocol
self.memory_manager = WorkerMemoryManager(
self,
data=data,
nthreads=nthreads,
memory_limit=memory_limit,
memory_target_fraction=memory_target_fraction,
memory_spill_fraction=memory_spill_fraction,
memory_pause_fraction=memory_pause_fraction,
)
transfer_incoming_bytes_limit = math.inf
transfer_incoming_bytes_fraction = dask.config.get(
"distributed.worker.memory.transfer"
)
if (
self.memory_manager.memory_limit is not None
and transfer_incoming_bytes_fraction is not False
):
transfer_incoming_bytes_limit = int(
self.memory_manager.memory_limit * transfer_incoming_bytes_fraction
)
state = WorkerStateClass(
nthreads=nthreads,
data=self.memory_manager.data,
threads=self.threads,
plugins=self.plugins,
resources=resources,
transfer_incoming_count_limit=transfer_incoming_count_limit,
validate=validate,
transition_counter_max=transition_counter_max,
transfer_incoming_bytes_limit=transfer_incoming_bytes_limit,
transfer_message_bytes_limit=transfer_message_bytes_limit,
)
BaseWorker.__init__(self, state)
self.scheduler = self.rpc(scheduler_addr)
self.execution_state = {
"scheduler": self.scheduler.address,
"ioloop": self.loop,
"worker": self,
}
self.heartbeat_interval = parse_timedelta(heartbeat_interval, default="ms")
pc = PeriodicCallback(self.heartbeat, self.heartbeat_interval * 1000)
self.periodic_callbacks["heartbeat"] = pc
pc = PeriodicCallback(lambda: self.batched_send({"op": "keep-alive"}), 60000)
self.periodic_callbacks["keep-alive"] = pc
pc = PeriodicCallback(self.find_missing, 1000)
self.periodic_callbacks["find-missing"] = pc
self._address = contact_address
if extensions is None:
extensions = DEFAULT_EXTENSIONS
self.extensions = {
name: extension(self) for name, extension in extensions.items()
}
setproctitle("dask worker [not started]")
if dask.config.get("distributed.worker.profile.enabled"):
profile_trigger_interval = parse_timedelta(
dask.config.get("distributed.worker.profile.interval"), default="ms"
)
pc = PeriodicCallback(self.trigger_profile, profile_trigger_interval * 1000)
self.periodic_callbacks["profile"] = pc
pc = PeriodicCallback(self.cycle_profile, profile_cycle_interval * 1000)
self.periodic_callbacks["profile-cycle"] = pc
if lifetime is None:
lifetime = dask.config.get("distributed.worker.lifetime.duration")
lifetime = parse_timedelta(lifetime)
if lifetime_stagger is None:
lifetime_stagger = dask.config.get("distributed.worker.lifetime.stagger")
lifetime_stagger = parse_timedelta(lifetime_stagger)
if lifetime_restart is None:
lifetime_restart = dask.config.get("distributed.worker.lifetime.restart")
self.lifetime_restart = lifetime_restart
if lifetime:
lifetime += (random.random() * 2 - 1) * lifetime_stagger
self.io_loop.call_later(
lifetime, self.close_gracefully, reason="worker-lifetime-reached"
)
self.lifetime = lifetime
Worker._instances.add(self)
################
# Memory manager
################
# Tracks memory limits, spill-to-disk and pause thresholds; constructed in
# __init__ from the memory_* configuration parameters.
memory_manager: WorkerMemoryManager
@property
def data(self) -> MutableMapping[Key, object]:
    """{task key: task payload} of all completed tasks, whether they were computed
    on this Worker or computed somewhere else and then transferred here over the
    network.

    When using the default configuration, this is a zict buffer that automatically
    spills to disk whenever the target threshold is exceeded.
    If spilling is disabled, it is a plain dict instead.
    It could also be a user-defined arbitrary dict-like passed when initialising
    the Worker or the Nanny.
    Worker logic should treat this opaquely and stick to the MutableMapping API.

    .. note::
        This same collection is also available at ``self.state.data`` and
        ``self.memory_manager.data``.
    """
    # Delegate to the memory manager so all three access paths share one mapping.
    return self.memory_manager.data
# Deprecated attributes moved to self.memory_manager.<name>
# (each descriptor emits a warning and forwards to the memory manager)
memory_limit = DeprecatedMemoryManagerAttribute()
memory_target_fraction = DeprecatedMemoryManagerAttribute()
memory_spill_fraction = DeprecatedMemoryManagerAttribute()
memory_pause_fraction = DeprecatedMemoryManagerAttribute()
memory_monitor = DeprecatedMemoryMonitor()
###########################
# State machine accessors #
###########################

# Deprecated attributes moved to self.state.<name>
# (each descriptor warns and forwards to WorkerState; `target=` gives the
# attribute's new name on WorkerState when it differs from the legacy name)
actors = DeprecatedWorkerStateAttribute()
available_resources = DeprecatedWorkerStateAttribute()
busy_workers = DeprecatedWorkerStateAttribute()
comm_nbytes = DeprecatedWorkerStateAttribute(target="transfer_incoming_bytes")
comm_threshold_bytes = DeprecatedWorkerStateAttribute(
    target="transfer_incoming_bytes_throttle_threshold"
)
constrained = DeprecatedWorkerStateAttribute()
data_needed_per_worker = DeprecatedWorkerStateAttribute(target="data_needed")
executed_count = DeprecatedWorkerStateAttribute()
executing_count = DeprecatedWorkerStateAttribute()
generation = DeprecatedWorkerStateAttribute()
has_what = DeprecatedWorkerStateAttribute()
incoming_count = DeprecatedWorkerStateAttribute(
    target="transfer_incoming_count_total"
)
in_flight_tasks = DeprecatedWorkerStateAttribute(target="in_flight_tasks_count")
in_flight_workers = DeprecatedWorkerStateAttribute()
log = DeprecatedWorkerStateAttribute()
long_running = DeprecatedWorkerStateAttribute()
nthreads = DeprecatedWorkerStateAttribute()
stimulus_log = DeprecatedWorkerStateAttribute()
stimulus_story = DeprecatedWorkerStateAttribute()
story = DeprecatedWorkerStateAttribute()
ready = DeprecatedWorkerStateAttribute()
tasks = DeprecatedWorkerStateAttribute()
target_message_size = DeprecatedWorkerStateAttribute(
    target="transfer_message_bytes_limit"
)
total_out_connections = DeprecatedWorkerStateAttribute(
    target="transfer_incoming_count_limit"
)
total_resources = DeprecatedWorkerStateAttribute()
transition_counter = DeprecatedWorkerStateAttribute()
transition_counter_max = DeprecatedWorkerStateAttribute()
validate = DeprecatedWorkerStateAttribute()
validate_task = DeprecatedWorkerStateAttribute()
@property
def data_needed(self) -> set[TaskState]:
    """Deprecated: flat view of ``Worker.state.data_needed`` across all peers."""
    warnings.warn(
        "The `Worker.data_needed` attribute has been removed; "
        "use `Worker.state.data_needed[address]`",
        FutureWarning,
    )
    # Flatten the per-worker task sets into one set.
    out = set()
    for tasks in self.state.data_needed.values():
        out.update(tasks)
    return out
@property
def waiting_for_data_count(self) -> int:
    """Deprecated: number of tasks currently in the ``waiting`` state."""
    warnings.warn(
        "The `Worker.waiting_for_data_count` attribute has been removed; "
        "use `len(Worker.state.waiting)`",
        FutureWarning,
    )
    waiting = self.state.waiting
    return len(waiting)
##################
# Administrative #
##################
def __repr__(self) -> str:
    """Human-readable summary: address, status and task-queue counts."""
    if self.name != self.address_safe:
        name = f", name: {self.name}"
    else:
        name = ""
    state = self.state
    return (
        f"<{self.__class__.__name__} {self.address_safe!r}{name}, "
        f"status: {self.status.name}, "
        f"stored: {len(self.data)}, "
        f"running: {state.executing_count}/{state.nthreads}, "
        f"ready: {len(state.ready)}, "
        f"comm: {state.in_flight_tasks_count}, "
        f"waiting: {len(state.waiting)}>"
    )
@property
def logs(self):
    # Most recent log records captured by this worker's deque-based log handler.
    return self._deque_handler.deque
def log_event(self, topic: str | Collection[str], msg: Any) -> None:
    """Log an event under a given topic

    Parameters
    ----------
    topic : str, list[str]
        Name of the topic under which to log an event. To log the same
        event under multiple topics, pass a list of topic names.
    msg
        Event message to log. Note this must be msgpack serializable.

    See also
    --------
    Client.log_event
    """
    # Reject payloads the wire format cannot carry.
    if not _is_dumpable(msg):
        raise TypeError(
            f"Message must be msgpack serializable. Got {type(msg)=} instead."
        )
    event = {
        "op": "log-event",
        "topic": topic,
        "msg": msg,
    }
    # batched_send is only safe from the event-loop thread; hop over otherwise.
    if threading.get_ident() != self.thread_id:
        self.loop.add_callback(self.batched_send, event)
    else:
        self.batched_send(event)
@property
def worker_address(self):
    """Alias of :attr:`address`, kept for API compatibility with Nanny."""
    return self.address
@property
def executor(self):
    """The default executor used to run tasks (``self.executors["default"]``)."""
    return self.executors["default"]
@ServerNode.status.setter  # type: ignore
def status(self, value: Status) -> None:
    """Override Server.status to notify the Scheduler of status changes.
    Also handles pausing/unpausing.
    """
    prev_status = self.status
    # Invoke the base-class descriptor directly since we are shadowing it here.
    ServerNode.status.__set__(self, value)  # type: ignore
    stimulus_id = f"worker-status-change-{time()}"
    # Fire-and-forget notification to the scheduler.
    self._send_worker_status_change(stimulus_id)
    # Leaving `running` pauses the state machine; returning from paused /
    # closing_gracefully to `running` unpauses it.
    if prev_status == Status.running and value != Status.running:
        self.handle_stimulus(PauseEvent(stimulus_id=stimulus_id))
    elif value == Status.running and prev_status in (
        Status.paused,
        Status.closing_gracefully,
    ):
        self.handle_stimulus(UnpauseEvent(stimulus_id=stimulus_id))
def _send_worker_status_change(self, stimulus_id: str) -> None:
    # Notify the scheduler (fire-and-forget over batched comms) that this
    # worker's status changed.
    msg = {
        "op": "worker-status-change",
        "status": self._status.name,
        "stimulus_id": stimulus_id,
    }
    self.batched_send(msg)
async def get_metrics(self) -> dict:
    """Assemble the metrics payload sent to the scheduler with each heartbeat.

    Combines spill counters, fine performance metrics digests, bandwidth
    estimates, transfer counters, system-monitor samples and any
    user-registered metrics callbacks into a single msgpack-friendly dict.
    """
    try:
        spilled_memory, spilled_disk = self.data.spilled_total  # type: ignore
    except AttributeError:
        # spilling is disabled
        spilled_memory, spilled_disk = 0, 0

    # Send Fine Performance Metrics
    # Swap the dictionary to avoid updates while we iterate over it
    digests_total_since_heartbeat = self.digests_total_since_heartbeat
    self.digests_total_since_heartbeat = defaultdict(int)

    spans_ext: SpansWorkerExtension | None = self.extensions.get("spans")
    if spans_ext:
        # Send metrics with disaggregated span_id
        spans_ext.collect_digests(digests_total_since_heartbeat)

    # Send metrics with squashed span_id
    # Don't cast int metrics to float
    digests: defaultdict[Hashable, float] = defaultdict(int)
    for k, v in digests_total_since_heartbeat.items():
        if isinstance(k, tuple) and k[0] in CONTEXTS_WITH_SPAN_ID:
            # Drop the span_id element from the key tuple.
            k = k[:1] + k[2:]
        digests[k] += v

    out: dict = dict(
        task_counts=self.state.task_counter.current_count(by_prefix=False),
        bandwidth={
            "total": self.bandwidth,
            "workers": dict(self.bandwidth_workers),
            "types": keymap(typename, self.bandwidth_types),
        },
        digests_total_since_heartbeat=dict(digests),
        managed_bytes=self.state.nbytes,
        spilled_bytes={
            "memory": spilled_memory,
            "disk": spilled_disk,
        },
        transfer={
            "incoming_bytes": self.state.transfer_incoming_bytes,
            "incoming_count": self.state.transfer_incoming_count,
            "incoming_count_total": self.state.transfer_incoming_count_total,
            "outgoing_bytes": self.transfer_outgoing_bytes,
            "outgoing_count": self.transfer_outgoing_count,
            "outgoing_count_total": self.transfer_outgoing_count_total,
        },
        event_loop_interval=self._tick_interval_observed,
    )

    monitor_recent = self.monitor.recent()
    # Convert {foo.bar: 123} to {foo: {bar: 123}}
    for k, v in monitor_recent.items():
        if "." in k:
            k0, _, k1 = k.partition(".")
            out.setdefault(k0, {})[k1] = v
        else:
            out[k] = v

    # User-registered metrics callbacks; each may be sync or async.
    for k, metric in self.metrics.items():
        try:
            result = metric(self)
            if isawaitable(result):
                result = await result
            # In case of collision, prefer core metrics
            out.setdefault(k, result)
        except Exception:  # TODO: log error once
            pass

    return out
async def get_startup_information(self):
    """Run the user-registered startup-information callbacks.

    Returns a dict mapping callback name to its (awaited, if necessary)
    result. Callbacks that raise are silently skipped.
    """
    info = {}
    for name, callback in self.startup_information.items():
        try:
            value = callback(self)
            if isawaitable(value):
                value = await value
        except Exception:  # TODO: log error once
            continue
        info[name] = value
    return info
def identity(self):
    """Minimal identifying metadata reported to the scheduler and clients."""
    info = dict(
        type=type(self).__name__,
        id=self.id,
        scheduler=self.scheduler.address,
        nthreads=self.state.nthreads,
        memory_limit=self.memory_manager.memory_limit,
    )
    return info
def _to_dict(self, *, exclude: Container[str] = ()) -> dict:
    """Dictionary representation for debugging purposes.

    Not type stable and not intended for roundtrips.

    See also
    --------
    Worker.identity
    Client.dump_cluster_state
    distributed.utils.recursive_to_dict
    """
    info = super()._to_dict(exclude=exclude)
    extra = {
        "status": self.status,
        "logs": self.get_logs(),
        "config": dask.config.config,
        "transfer_incoming_log": self.transfer_incoming_log,
        "transfer_outgoing_log": self.transfer_outgoing_log,
    }
    # Honour the exclusion list for the worker-level extras too.
    extra = {k: v for k, v in extra.items() if k not in exclude}
    info.update(extra)
    # Merge state-machine and memory-manager debug info in as flat keys.
    info.update(self.state._to_dict(exclude=exclude))
    info.update(self.memory_manager._to_dict(exclude=exclude))
    return recursive_to_dict(info, exclude=exclude)
#####################
# External Services #
#####################
def batched_send(self, msg: dict[str, Any]) -> None:
    """Implements BaseWorker abstract method.

    Send a fire-and-forget message to the scheduler through bulk comms.

    If we're not currently connected to the scheduler, the message will be silently
    dropped!

    See also
    --------
    distributed.worker_state_machine.BaseWorker.batched_send
    """
    stream = self.batched_stream
    if not stream:
        return
    comm = stream.comm
    if not comm or comm.closed():
        return
    stream.send(msg)
async def _register_with_scheduler(self) -> None:
    """Connect to the scheduler and register this worker, retrying until it succeeds.

    On success: starts the batched comms stream, flips the worker status to
    ``running``, installs scheduler-provided plugins, restarts the heartbeat
    and keep-alive callbacks, and hands the connection to ``handle_scheduler``.
    """
    # Suspend periodic traffic while (re)registering.
    self.periodic_callbacks["keep-alive"].stop()
    self.periodic_callbacks["heartbeat"].stop()
    start = time()
    if self.contact_address is None:
        self.contact_address = self.address
    logger.info("-" * 49)
    # Worker reconnection is not supported
    assert not self.data
    assert not self.state.tasks
    while True:
        try:
            _start = time()
            comm = await connect(self.scheduler.address, **self.connection_args)
            comm.name = "Worker->Scheduler"
            comm._server = weakref.ref(self)
            await comm.write(
                dict(
                    op="register-worker",
                    reply=False,
                    address=self.contact_address,
                    status=self.status.name,
                    nthreads=self.state.nthreads,
                    name=self.name,
                    now=time(),
                    resources=self.state.total_resources,
                    memory_limit=self.memory_manager.memory_limit,
                    local_directory=self.local_directory,
                    services=self.service_ports,
                    nanny=self.nanny,
                    pid=os.getpid(),
                    versions=get_versions(),
                    metrics=await self.get_metrics(),
                    extra=await self.get_startup_information(),
                    stimulus_id=f"worker-connect-{time()}",
                    server_id=self.id,
                ),
                serializers=["msgpack"],
            )
            future = comm.read(deserializers=["msgpack"])
            response = await future
            if response.get("warning"):
                logger.warning(response["warning"])
            # Estimate clock offset from the scheduler using the midpoint of
            # the request/response round trip.
            _end = time()
            middle = (_start + _end) / 2
            self._update_latency(_end - start)
            self.scheduler_delay = response["time"] - middle
            break
        except OSError:
            # Scheduler not up yet; retry shortly.
            logger.info("Waiting to connect to: %26s", self.scheduler.address)
            await asyncio.sleep(0.1)
        except TimeoutError:  # pragma: no cover
            logger.info("Timed out when connecting to scheduler")
    if response["status"] != "OK":
        await comm.close()
        msg = response["message"] if "message" in response else repr(response)
        logger.error(f"Unable to connect to scheduler: {msg}")
        raise ValueError(f"Unexpected response from register: {response!r}")
    self.batched_stream.start(comm)
    self.status = Status.running
    # Install any worker plugins the scheduler wants us to run.
    await asyncio.gather(
        *(
            self.plugin_add(name=name, plugin=plugin)
            for name, plugin in response["worker-plugins"].items()
        ),
    )
    logger.info(" Registered to: %26s", self.scheduler.address)
    logger.info("-" * 49)
    self.periodic_callbacks["keep-alive"].start()
    self.periodic_callbacks["heartbeat"].start()
    self.loop.add_callback(self.handle_scheduler, comm)
def _update_latency(self, latency: float) -> None:
    """Fold one round-trip sample into the smoothed scheduler latency."""
    # Exponentially-weighted moving average with alpha = 0.05.
    self.latency = self.latency * 0.95 + latency * 0.05
    self.digest_metric("latency", latency)
async def heartbeat(self) -> None:
    """Send a periodic heartbeat to the scheduler and process its reply.

    Reports metrics, currently-executing task durations and extension
    heartbeats; updates latency/clock-offset estimates and the heartbeat
    interval from the response. Shuts down if the scheduler no longer knows
    this worker or if an unexpected error occurs.
    """
    logger.debug("Heartbeat: %s", self.address)
    try:
        start = time()
        response = await retry_operation(
            self.scheduler.heartbeat_worker,
            address=self.contact_address,
            now=start,
            metrics=await self.get_metrics(),
            executing={
                key: start - cast(float, self.state.tasks[key].start_time)
                for key in self.active_keys
                if key in self.state.tasks
            },
            extensions={
                name: extension.heartbeat()
                for name, extension in self.extensions.items()
                if hasattr(extension, "heartbeat")
            },
        )
        end = time()
        middle = (start + end) / 2
        self._update_latency(end - start)
        if response["status"] == "missing":
            # Scheduler thought we left.
            # This is a common race condition when the scheduler calls
            # remove_worker(); there can be a heartbeat between when the scheduler
            # removes the worker on its side and when the {"op": "close"} command
            # arrives through batched comms to the worker.
            logger.warning("Scheduler was unaware of this worker; shutting down.")
            # We close here just for safety's sake - the {op: close} should
            # arrive soon anyway.
            await self.close(reason="worker-heartbeat-missing")
            return
        self.scheduler_delay = response["time"] - middle
        # The scheduler may dynamically adjust how often we heartbeat.
        self.periodic_callbacks["heartbeat"].callback_time = (
            response["heartbeat-interval"] * 1000
        )
        # Bandwidth estimates were just reported; start a fresh window.
        self.bandwidth_workers.clear()
        self.bandwidth_types.clear()
    except OSError:
        # Transient comms failure; the next heartbeat will retry.
        logger.exception("Failed to communicate with scheduler during heartbeat.")
    except Exception:
        logger.exception("Unexpected exception during heartbeat. Closing worker.")
        await self.close(reason="worker-heartbeat-error")
        raise
@fail_hard
async def handle_scheduler(self, comm: Comm) -> None:
    """Consume the scheduler's batched-comm stream until it breaks, then close.

    A broken scheduler connection is fatal for the worker; ``@fail_hard``
    additionally terminates the worker on any unexpected error here.
    """
    try:
        await self.handle_stream(comm)
    finally:
        await self.close(reason="worker-handle-scheduler-connection-broken")
def keys(self) -> list[Key]:
    """Return the keys of all task payloads currently held in ``self.data``."""
    return [key for key in self.data]
async def gather(self, who_has: dict[Key, list[str]]) -> dict[Key, object]:
    """Endpoint used by Scheduler.rebalance() and Scheduler.replicate()

    Fetch the given keys from peer workers and store them locally.
    Keys whose listed holders are all unreachable are reported back as
    ``partial-fail``; the holder list is refreshed from the scheduler
    between retries.
    """
    missing_keys = [k for k in who_has if k not in self.data]
    failed_keys = []
    missing_workers: set[str] = set()
    stimulus_id = f"gather-{time()}"
    while missing_keys:
        # Partition outstanding keys into those with at least one live
        # candidate holder vs. those whose holders have all gone missing.
        to_gather = {}
        for k in missing_keys:
            workers = set(who_has[k]) - missing_workers
            if workers:
                to_gather[k] = workers
            else:
                failed_keys.append(k)
        if not to_gather:
            break
        (
            data,
            missing_keys,
            new_failed_keys,
            new_missing_workers,
        ) = await gather_from_workers(
            who_has=to_gather, rpc=self.rpc, who=self.address
        )
        self.update_data(data, stimulus_id=stimulus_id)
        # Drop the local reference promptly to reduce memory footprint.
        del data
        failed_keys += new_failed_keys
        missing_workers.update(new_missing_workers)
        if missing_keys:
            # Refresh the holder list from the scheduler before retrying.
            who_has = await retry_operation(
                self.scheduler.who_has, keys=missing_keys
            )
    if failed_keys:
        logger.error("Could not find data: %s", failed_keys)
        return {"status": "partial-fail", "keys": list(failed_keys)}
    else:
        return {"status": "OK"}
def get_monitor_info(self, recent: bool = False, start: int = 0) -> dict[str, Any]:
    """Return system-monitor samples plus static GPU info when GPUs exist.

    Parameters
    ----------
    recent:
        If True return only the latest sample; otherwise a range query.
    start:
        Start index for the range query (ignored when ``recent`` is True).
    """
    if recent:
        samples = self.monitor.recent()
    else:
        samples = self.monitor.range_query(start=start)
    result = dict(
        range_query=samples,
        count=self.monitor.count,
        last_time=self.monitor.last_time,
    )
    if nvml.device_get_count() > 0:
        result["gpu_name"] = self.monitor.gpu_name
        result["gpu_memory_total"] = self.monitor.gpu_memory_total
    return result
#############
# Lifecycle #
#############
async def start_unsafe(self):
    """Bring the worker online: open listeners, start HTTP/dashboard services,
    run preloads, install pending plugins and register with the scheduler.

    Called by ``Server.start()``; exceptions raised here are handled by
    the caller.
    """
    await super().start_unsafe()
    enable_gc_diagnosis()
    # Try each candidate port until one binds successfully.
    ports = parse_ports(self._start_port)
    for port in ports:
        start_address = address_from_user_args(
            host=self._start_host,
            port=port,
            interface=self._interface,
            protocol=self._protocol,
            security=self.security,
        )
        kwargs = self.security.get_listen_args("worker")
        if self._protocol in ("tcp", "tls"):
            kwargs = kwargs.copy()
            # Default to the interface that routes to the scheduler.
            kwargs["default_host"] = get_ip(
                get_address_host(self.scheduler.address)
            )
        try:
            await self.listen(start_address, **kwargs)
        except OSError as e:
            # Port taken: try the next candidate if there is one.
            if len(ports) > 1 and e.errno == errno.EADDRINUSE:
                continue
            else:
                raise
        else:
            self._start_address = start_address
            break
    else:
        raise ValueError(
            f"Could not start Worker on host {self._start_host} "
            f"with port {self._start_port}"
        )
    # Start HTTP server associated with this Worker node
    routes = get_handlers(
        server=self,
        modules=dask.config.get("distributed.worker.http.routes"),
        prefix=self._http_prefix,
    )
    self.start_http_server(routes, self._dashboard_address)
    if self._dashboard:
        try:
            import distributed.dashboard.worker
        except ImportError:
            logger.debug("To start diagnostics web server please install Bokeh")
        else:
            distributed.dashboard.worker.connect(
                self.http_application,
                self.http_server,
                self,
                prefix=self._http_prefix,
            )
    self.ip = get_address_host(self.address)
    if self.name is None:
        self.name = self.address
    await self.preloads.start()
    # Services listen on all addresses
    # Note Nanny is not a "real" service, just some metadata
    # passed in service_ports...
    self.start_services(self.ip)
    try:
        listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port)
    except Exception:
        # Listener without a port (e.g. inproc).
        listening_address = f"{self.listener.prefix}{self.ip}"
    logger.info(" Start worker at: %26s", self.address)
    logger.info(" Listening to: %26s", listening_address)
    if self.name != self.address_safe:
        # only if name was not None
        logger.info(" Worker name: %26s", self.name)
    for k, v in self.service_ports.items():
        logger.info(" {:>16} at: {:>26}".format(k, self.ip + ":" + str(v)))
    logger.info("Waiting to connect to: %26s", self.scheduler.address)
    logger.info("-" * 49)
    logger.info(" Threads: %26d", self.state.nthreads)
    if self.memory_manager.memory_limit:
        logger.info(
            " Memory: %26s",
            format_bytes(self.memory_manager.memory_limit),
        )
    logger.info(" Local Directory: %26s", self.local_directory)
    setproctitle("dask worker [%s]" % self.address)
    # Install plugins passed to __init__; collect failures instead of
    # aborting on the first one so that all errors get logged.
    plugins_msgs = await asyncio.gather(
        *(
            self.plugin_add(plugin=plugin, catch_errors=False)
            for plugin in self._pending_plugins
        ),
        return_exceptions=True,
    )
    plugins_exceptions = [msg for msg in plugins_msgs if isinstance(msg, Exception)]
    if len(plugins_exceptions) >= 1:
        if len(plugins_exceptions) > 1:
            logger.error(
                "Multiple plugin exceptions raised. All exceptions will be logged, the first is raised."
            )
        for exc in plugins_exceptions:
            logger.error(repr(exc))
        raise plugins_exceptions[0]
    self._pending_plugins = ()
    self.state.address = self.address
    await self._register_with_scheduler()
    self.start_periodic_callbacks()
    return self
@log_errors
async def close(  # type: ignore
    self,
    timeout: float = 30,
    executor_wait: bool = True,
    nanny: bool = True,
    reason: str = "worker-close",
) -> str | None:
    """Close the worker

    Close asynchronous operations running on the worker, stop all executors and
    comms. If requested, this also closes the nanny.

    Parameters
    ----------
    timeout
        Timeout in seconds for shutting down individual instructions
    executor_wait
        If True, shut down executors synchronously, otherwise asynchronously
    nanny
        If True, close the nanny
    reason
        Reason for closing the worker

    Returns
    -------
    str | None
        None if worker already in closing state or failed, "OK" otherwise
    """
    # FIXME: The worker should not be allowed to close the nanny. Ownership
    # is the other way round. If an external caller wants to close
    # nanny+worker, the nanny must be notified first. ==> Remove kwarg
    # nanny, see also Scheduler.retire_workers
    if self.status in (Status.closed, Status.closing, Status.failed):
        # Another close is already in flight or finished; just wait for it.
        logger.debug(
            "Attempted to close worker that is already %s. Reason: %s",
            self.status,
            reason,
        )
        await self.finished()
        return None
    if self.status == Status.init:
        # If the worker is still in startup/init and is started by a nanny,
        # this means the nanny itself is not up, yet. If the Nanny isn't up,
        # yet, it's server will not accept any incoming RPC requests and
        # will block until the startup is finished.
        # Therefore, this worker trying to communicate with the Nanny during
        # startup is not possible and we cannot close it.
        # In this case, the Nanny will automatically close after inspecting
        # the worker status
        nanny = False
    disable_gc_diagnosis()
    try:
        self.log_event(self.address, {"action": "closing-worker", "reason": reason})
    except Exception:
        # This can happen when the Server is not up yet
        logger.exception("Failed to log closing event")
    try:
        logger.info("Stopping worker at %s. Reason: %s", self.address, reason)
    except ValueError:  # address not available if already closed
        logger.info("Stopping worker. Reason: %s", reason)
    if self.status not in WORKER_ANY_RUNNING:
        logger.info("Closed worker has not yet started: %s", self.status)
    if not executor_wait:
        logger.info("Not waiting on executor to close")
    # This also informs the scheduler about the status update
    self.status = Status.closing
    setproctitle("dask worker [closing]")
    if nanny and self.nanny:
        with self.rpc(self.nanny) as r:
            await r.close_gracefully(reason=reason)
    # Cancel async instructions
    await BaseWorker.close(self, timeout=timeout)
    await asyncio.gather(*(self.plugin_remove(name) for name in self.plugins))
    # Extensions may define a sync or async close().
    for extension in self.extensions.values():
        if hasattr(extension, "close"):
            result = extension.close()
            if isawaitable(result):
                await result
    self.stop_services()
    await self.preloads.teardown()
    for pc in self.periodic_callbacks.values():
        pc.stop()
    if self._client:
        # If this worker is the last one alive, clean up the worker
        # initialized clients
        if not any(
            w
            for w in Worker._instances
            if w != self and w.status in WORKER_ANY_RUNNING
        ):
            for c in Worker._initialized_clients:
                # Regardless of what the client was initialized with
                # we'll require the result as a future. This is
                # necessary since the heuristics of asynchronous are not
                # reliable and we might deadlock here
                c._asynchronous = True
                if c.asynchronous:
                    await c.close()
                else:
                    # There is still the chance that even with us
                    # telling the client to be async, itself will decide
                    # otherwise
                    c.close()
    await self._stop_listeners()
    await self.rpc.close()
    # Give some time for a UCX scheduler to complete closing endpoints
    # before closing self.batched_stream, otherwise the local endpoint
    # may be closed too early and errors be raised on the scheduler when
    # trying to send closing message. Using startswith supports variations
    # of the protocols, e.g., `ucx` and `ucxx` which are both valid in
    # distributed-ucxx.
    if self._protocol.startswith("ucx"):  # pragma: no cover
        await asyncio.sleep(0.2)
    self.batched_send({"op": "close-stream"})
    if self.batched_stream:
        with suppress(TimeoutError):
            await self.batched_stream.close(timedelta(seconds=timeout))
    for executor in self.executors.values():
        if executor is utils._offload_executor:
            continue  # Never shutdown the offload executor

        def _close(executor, wait):
            # Drop queued-but-unstarted work, then shut the pool down.
            if isinstance(executor, ThreadPoolExecutor):
                executor._work_queue.queue.clear()
                executor.shutdown(wait=wait, timeout=timeout)
            else:
                executor.shutdown(wait=wait)

        # Waiting for the shutdown can block the event loop causing
        # weird deadlocks particularly if the task that is executing in
        # the thread is waiting for a server reply, e.g. when using
        # worker clients, semaphores, etc.
        # Are we shutting down the process?
        if self._is_finalizing() or not threading.main_thread().is_alive():
            # If we're shutting down there is no need to wait for daemon
            # threads to finish
            _close(executor=executor, wait=False)
        else:
            try:
                await asyncio.to_thread(
                    _close, executor=executor, wait=executor_wait
                )
            except RuntimeError:
                logger.error(
                    "Could not close executor %r by dispatching to thread. Trying synchronously.",
                    executor,
                    exc_info=True,
                )
                _close(
                    executor=executor, wait=executor_wait
                )  # Just run it directly
    self.stop()
    self.status = Status.closed
    setproctitle("dask worker [closed]")
    await ServerNode.close(self)
    self.__exit_stack.__exit__(None, None, None)
    return "OK"
async def close_gracefully(
    self, restart=None, reason: str = "worker-close-gracefully"
):
    """Gracefully shut down a worker

    This first informs the scheduler that we're shutting down, and asks it
    to move our data elsewhere. Afterwards, we close as normal

    Parameters
    ----------
    restart
        Whether the nanny should restart the worker afterwards;
        defaults to ``self.lifetime_restart``.
    reason
        Human-readable reason recorded in logs and events.
    """
    if self.status in (Status.closing, Status.closing_gracefully):
        # A close is already underway; just wait for it to finish.
        await self.finished()
    if self.status == Status.closed:
        return
    logger.info("Closing worker gracefully: %s. Reason: %s", self.address, reason)
    # Wait for all tasks to leave the worker and don't accept any new ones.
    # Scheduler.retire_workers will set the status to closing_gracefully and push it
    # back to this worker.
    await self.scheduler.retire_workers(
        workers=[self.address],
        close_workers=False,
        remove=True,
        stimulus_id=f"worker-close-gracefully-{time()}",
    )
    if restart is None:
        restart = self.lifetime_restart
    # Keep the nanny alive when it should restart this worker afterwards.
    await self.close(nanny=not restart, reason=reason)
async def wait_until_closed(self):
    """Deprecated alias of :meth:`finished`; block until the worker is closed.

    Emits a ``FutureWarning`` (matching the other deprecation shims on this
    class, e.g. ``data_needed``) instead of the implicit ``UserWarning``.
    """
    warnings.warn(
        "wait_until_closed has moved to finished()",
        FutureWarning,
    )
    await self.finished()
    assert self.status == Status.closed
################
# Worker Peers #
################
def send_to_worker(self, address, msg):
    """Send a fire-and-forget message to a peer worker over a batched stream.

    Lazily opens a dedicated Worker->Worker comm for ``address`` in a
    background task on first use; messages queue on the BatchedSend until
    the connection is established.
    """
    if address not in self.stream_comms:
        bcomm = BatchedSend(interval="1ms", loop=self.loop)
        self.stream_comms[address] = bcomm

        async def batched_send_connect():
            comm = await connect(
                address, **self.connection_args  # TODO, serialization
            )
            comm.name = "Worker->Worker"
            await comm.write({"op": "connection_stream"})
            # Flush anything queued while the connection was being set up.
            bcomm.start(comm)

        self._ongoing_background_tasks.call_soon(batched_send_connect)
    self.stream_comms[address].send(msg)
@context_meter_to_server_digest("get-data")
async def get_data(
    self,
    comm: Comm,
    keys: Collection[str],
    who: str | None = None,
    serializers: list[str] | None = None,
) -> GetDataBusy | Literal[Status.dont_reply]:
    """Serve locally-stored task data to a peer worker (or the scheduler).

    Replies ``{"status": "busy"}`` when too many outgoing transfers are
    already in flight; otherwise writes the requested data on ``comm``
    directly and returns ``Status.dont_reply`` so the RPC layer does not
    send a second reply.
    """
    max_connections = self.transfer_outgoing_count_limit
    # Allow same-host connections more liberally
    if get_address_host(comm.peer_address) == get_address_host(self.address):
        max_connections = max_connections * 2
    if self.status == Status.paused:
        max_connections = 1
        throttle_msg = (
            " Throttling outgoing data transfers because worker is paused."
        )
    else:
        throttle_msg = ""
    if (
        max_connections is not False
        and self.transfer_outgoing_count >= max_connections
    ):
        logger.debug(
            "Worker %s has too many open connections to respond to data request "
            "from %s (%d/%d).%s",
            self.address,
            who,
            self.transfer_outgoing_count,
            max_connections,
            throttle_msg,
        )
        return {"status": "busy"}
    self.transfer_outgoing_count += 1
    self.transfer_outgoing_count_total += 1
    # This may potentially take many seconds if it involves unspilling
    data = {k: self.data[k] for k in keys if k in self.data}
    if len(data) < len(keys):
        # Requested keys we don't hold as plain data may be actors; serve
        # those as Actor proxy handles instead.
        for k in set(keys) - data.keys():
            if k in self.state.actors:
                from distributed.actor import Actor

                data[k] = Actor(
                    type(self.state.actors[k]), self.address, k, worker=self
                )
    msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}}
    # Note: `if k in self.data` above guarantees that
    # k is in self.state.tasks too and that nbytes is non-None
    bytes_per_task = {k: self.state.tasks[k].nbytes or 0 for k in data}
    total_bytes = sum(bytes_per_task.values())
    self.transfer_outgoing_bytes += total_bytes
    self.transfer_outgoing_bytes_total += total_bytes
    try:
        with context_meter.meter("network", func=time) as m:
            compressed = await comm.write(msg, serializers=serializers)
            response = await comm.read(deserializers=serializers)
            assert response == "OK", response
    except OSError:
        logger.exception(
            "failed during get data with %s -> %s",
            self.address,
            who,
        )
        comm.abort()
        raise
    finally:
        # Undo the in-flight accounting whether the transfer succeeded or not.
        self.transfer_outgoing_bytes -= total_bytes
        self.transfer_outgoing_count -= 1
    # Not the same as m.delta, which doesn't include time spent
    # serializing/deserializing
    duration = max(0.001, m.stop - m.start)
    self.transfer_outgoing_log.append(
        {
            "start": m.start + self.scheduler_delay,
            "stop": m.stop + self.scheduler_delay,
            "middle": (m.start + m.stop) / 2,
            "duration": duration,
            "who": who,
            "keys": bytes_per_task,
            "total": total_bytes,
            "compressed": compressed,
            "bandwidth": total_bytes / duration,
        }
    )
    return Status.dont_reply
###################
# Local Execution #
###################
def update_data(
self,
data: dict[Key, object],
stimulus_id: str | None = None,
) -> dict[str, Any]:
if stimulus_id is None:
stimulus_id = f"update-data-{time()}"
self.handle_stimulus(UpdateDataEvent(data=data, stimulus_id=stimulus_id))
return {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"}
async def set_resources(self, **resources: float) -> None:
for r, quantity in resources.items():
if r in self.state.total_resources:
self.state.available_resources[r] += (
quantity - self.state.total_resources[r]
)
else:
self.state.available_resources[r] = quantity
self.state.total_resources[r] = quantity
await retry_operation(
self.scheduler.set_resources,
resources=self.state.total_resources,
worker=self.contact_address,
)
    @log_errors
    async def plugin_add(
        self,
        plugin: WorkerPlugin | bytes,
        name: str | None = None,
        catch_errors: bool = True,
    ) -> ErrorMessage | OKMessage:
        """Register a worker plugin and run its ``setup`` hook.

        Parameters
        ----------
        plugin:
            A ``WorkerPlugin`` instance, or its pickled bytes as received
            over the wire.
        name:
            Registration name; derived from the plugin if omitted. Any plugin
            already registered under the same name is removed first.
        catch_errors:
            If True (default), a failing ``setup`` is converted into an error
            message dict; otherwise the exception propagates.
        """
        if isinstance(plugin, bytes):
            # Deserialize plugins that were shipped over the comm
            plugin = pickle.loads(plugin)
        if not isinstance(plugin, WorkerPlugin):
            warnings.warn(
                "Registering duck-typed plugins has been deprecated. "
                "Please make sure your plugin subclasses `WorkerPlugin`.",
                DeprecationWarning,
                stacklevel=2,
            )
            plugin = cast(WorkerPlugin, plugin)
        if name is None:
            name = _get_plugin_name(plugin)
        assert name
        if name in self.plugins:
            # Re-registration replaces the old plugin, tearing it down first
            await self.plugin_remove(name=name)
        self.plugins[name] = plugin
        logger.info("Starting Worker plugin %s", name)
        if hasattr(plugin, "setup"):
            try:
                result = plugin.setup(worker=self)
                # setup may be sync or async; await only if needed
                if isawaitable(result):
                    await result
            except Exception as e:
                logger.exception("Worker plugin %s failed to setup", name)
                if not catch_errors:
                    raise
                return error_message(e)
        return {"status": "OK"}
@log_errors
async def plugin_remove(self, name: str) -> ErrorMessage | OKMessage:
logger.info(f"Removing Worker plugin {name}")
try:
plugin = self.plugins.pop(name)
if hasattr(plugin, "teardown"):
result = plugin.teardown(worker=self)
if isawaitable(result):
await result
except Exception as e:
logger.exception("Worker plugin %s failed to teardown", name)
return error_message(e)
return {"status": "OK"}
def handle_worker_status_change(self, status: str, stimulus_id: str) -> None:
new_status = Status.lookup[status] # type: ignore
if (
new_status == Status.closing_gracefully
and self._status not in WORKER_ANY_RUNNING
):
logger.error(
"Invalid Worker.status transition: %s -> %s", self._status, new_status
)
# Reiterate the current status to the scheduler to restore sync
self._send_worker_status_change(stimulus_id)
else:
# Update status and send confirmation to the Scheduler (see status.setter)
self.status = new_status
###################
# Task Management #
###################
def _handle_remote_stimulus(
self, cls: type[StateMachineEvent]
) -> Callable[..., None]:
def _(**kwargs):
event = cls(**kwargs)
self.handle_stimulus(event)
_.__name__ = f"_handle_remote_stimulus({cls.__name__})"
return _
@fail_hard
def handle_stimulus(self, *stims: StateMachineEvent) -> None:
"""Override BaseWorker method for added validation
See also
--------
distributed.worker_state_machine.BaseWorker.handle_stimulus
distributed.worker_state_machine.WorkerState.handle_stimulus
"""
try:
super().handle_stimulus(*stims)
except Exception as e:
if hasattr(e, "to_event"):
topic, msg = e.to_event()
self.log_event(topic, msg)
raise
def stateof(self, key: str) -> dict[str, Any]:
ts = self.state.tasks[key]
return {
"executing": ts.state == "executing",
"waiting_for_data": bool(ts.waiting_for_data),
"heap": ts in self.state.ready or ts in self.state.constrained,
"data": key in self.data,
}
async def get_story(self, keys_or_stimuli: Iterable[str]) -> list[tuple]:
return self.state.story(*keys_or_stimuli)
##########################
# Dependencies gathering #
##########################
def _get_cause(self, keys: Iterable[Key]) -> TaskState:
"""For diagnostics, we want to attach a transfer to a single task. This task is
typically the next to be executed but since we're fetching tasks for potentially
many dependents, an exact match is not possible. Additionally, if a key was
fetched through acquire-replicas, dependents may not be known at all.
Returns
-------
The task to attach startstops of this transfer to
"""
cause = None
for key in keys:
ts = self.state.tasks[key]
if ts.dependents:
return next(iter(ts.dependents))
cause = ts
assert cause # Always at least one key
return cause
    def _update_metrics_received_data(
        self,
        start: float,
        stop: float,
        data: dict[Key, object],
        cause: TaskState,
        worker: str,
    ) -> None:
        """Record diagnostics for a completed incoming transfer.

        Appends a ``transfer`` startstop entry to *cause*, logs the transfer in
        ``transfer_incoming_log``, updates the rolling bandwidth estimates
        (overall, per peer worker, and per value type) and emits digest
        metrics/counters.
        """
        total_bytes = sum(self.state.tasks[key].get_nbytes() for key in data)
        cause.startstops.append(
            {
                "action": "transfer",
                # Timestamps are shifted into the scheduler's clock
                "start": start + self.scheduler_delay,
                "stop": stop + self.scheduler_delay,
                "source": worker,
            }
        )
        # Clamp duration to avoid division by ~zero for very fast transfers
        duration = max(0.001, stop - start)
        bandwidth = total_bytes / duration
        self.transfer_incoming_log.append(
            {
                "start": start + self.scheduler_delay,
                "stop": stop + self.scheduler_delay,
                "middle": (start + stop) / 2.0 + self.scheduler_delay,
                "duration": duration,
                "keys": {key: self.state.tasks[key].nbytes for key in data},
                "total": total_bytes,
                "bandwidth": bandwidth,
                "who": worker,
            }
        )
        # Only fold sizeable transfers into the bandwidth estimates; small
        # transfers are dominated by latency rather than bandwidth
        if total_bytes > 1_000_000:
            self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05
            bw, cnt = self.bandwidth_workers[worker]
            self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1)
            types = set(map(type, data.values()))
            # Attribute bandwidth to a type only when the batch is homogeneous
            if len(types) == 1:
                [typ] = types
                bw, cnt = self.bandwidth_types[typ]
                self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1)
        self.digest_metric("transfer-bandwidth", total_bytes / duration)
        self.digest_metric("transfer-duration", duration)
        self.counters["transfer-count"].add(len(data))
    @fail_hard
    async def gather_dep(
        self,
        worker: str,
        to_gather: Collection[Key],
        total_nbytes: int,
        *,
        stimulus_id: str,
    ) -> StateMachineEvent:
        """Implements BaseWorker abstract method

        Fetch *to_gather* keys from the peer at *worker* and translate the
        outcome into one of ``GatherDepSuccessEvent``, ``GatherDepBusyEvent``,
        ``GatherDepNetworkFailureEvent`` or ``GatherDepFailureEvent``.

        See also
        --------
        distributed.worker_state_machine.BaseWorker.gather_dep
        """
        if self.status not in WORKER_ANY_RUNNING:
            # This is only for the sake of coherence of the WorkerState;
            # it should never actually reach the scheduler.
            return GatherDepFailureEvent.from_exception(
                RuntimeError("Worker is shutting down"),
                worker=worker,
                total_nbytes=total_nbytes,
                stimulus_id=f"worker-closing-{time()}",
            )
        self.state.log.append(("request-dep", worker, to_gather, stimulus_id, time()))
        logger.debug("Request %d keys from %s", len(to_gather), worker)
        try:
            with context_meter.meter("network", func=time) as m:
                response = await get_data_from_worker(
                    rpc=self.rpc, keys=to_gather, worker=worker, who=self.address
                )
            if response["status"] == "busy":
                # The peer refused because it has too many open transfers;
                # the state machine will retry later
                self.state.log.append(
                    ("gather-dep-busy", worker, to_gather, stimulus_id, time())
                )
                return GatherDepBusyEvent(
                    worker=worker,
                    total_nbytes=total_nbytes,
                    stimulus_id=f"gather-dep-busy-{time()}",
                )
            assert response["status"] == "OK"
            cause = self._get_cause(to_gather)
            self._update_metrics_received_data(
                start=m.start,
                stop=m.stop,
                data=response["data"],
                cause=cause,
                worker=worker,
            )
            self.state.log.append(
                ("receive-dep", worker, set(response["data"]), stimulus_id, time())
            )
            return GatherDepSuccessEvent(
                worker=worker,
                total_nbytes=total_nbytes,
                data=response["data"],
                stimulus_id=f"gather-dep-success-{time()}",
            )
        except OSError:
            logger.exception("Worker stream died during communication: %s", worker)
            self.state.log.append(
                ("gather-dep-failed", worker, to_gather, stimulus_id, time())
            )
            return GatherDepNetworkFailureEvent(
                worker=worker,
                total_nbytes=total_nbytes,
                stimulus_id=f"gather-dep-network-failure-{time()}",
            )
        except Exception as e:
            # e.g. data failed to deserialize
            # FIXME this will deadlock the cluster
            # https://github.com/dask/distributed/issues/6705
            logger.exception(e)
            self.state.log.append(
                ("gather-dep-failed", worker, to_gather, stimulus_id, time())
            )
            if self.batched_stream and LOG_PDB:
                import pdb
                pdb.set_trace()
            return GatherDepFailureEvent.from_exception(
                e,
                worker=worker,
                total_nbytes=total_nbytes,
                stimulus_id=f"gather-dep-failed-{time()}",
            )
async def retry_busy_worker_later(self, worker: str) -> StateMachineEvent:
"""Wait some time, then take a peer worker out of busy state.
Implements BaseWorker abstract method.
See Also
--------
distributed.worker_state_machine.BaseWorker.retry_busy_worker_later
"""
await asyncio.sleep(0.15)
return RetryBusyWorkerEvent(
worker=worker, stimulus_id=f"retry-busy-worker-{time()}"
)
def digest_metric(self, name: Hashable, value: float) -> None:
"""Implement BaseWorker.digest_metric by calling Server.digest_metric"""
ServerNode.digest_metric(self, name, value)
@log_errors
def find_missing(self) -> None:
self.handle_stimulus(FindMissingEvent(stimulus_id=f"find-missing-{time()}"))
# This is quite arbitrary but the heartbeat has scaling implemented
self.periodic_callbacks["find-missing"].callback_time = self.periodic_callbacks[
"heartbeat"
].callback_time
################
# Execute Task #
################
def run(self, comm, function, args=(), wait=True, kwargs=None):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
    async def actor_execute(
        self,
        actor=None,
        function=None,
        args=(),
        kwargs: dict | None = None,
    ) -> dict[str, Any]:
        """Invoke a method on a locally hosted actor.

        Depending on the target function and the ``separate_thread`` kwarg
        (popped from *kwargs*, default True), the call runs as a coroutine on
        the event loop, in the dedicated "actor" executor thread, or
        synchronously inline. Returns a serialized result or error dict.
        """
        kwargs = kwargs or {}
        separate_thread = kwargs.pop("separate_thread", True)
        # `actor` is actually the actor's task key; resolve the instance
        key = actor
        actor = self.state.actors[key]
        func = getattr(actor, function)
        name = key_split(key) + "." + function
        try:
            if iscoroutinefunction(func):
                # Make get_worker() work inside the coroutine
                token = _worker_cvar.set(self)
                try:
                    result = await func(*args, **kwargs)
                finally:
                    _worker_cvar.reset(token)
            elif separate_thread:
                result = await self.loop.run_in_executor(
                    self.executors["actor"],
                    _run_actor,
                    func,
                    args,
                    kwargs,
                    self.execution_state,
                    name,
                    self.active_threads,
                    self.active_threads_lock,
                )
            else:
                # Synchronous call directly on the event loop thread
                token = _worker_cvar.set(self)
                try:
                    result = func(*args, **kwargs)
                finally:
                    _worker_cvar.reset(token)
            return {"status": "OK", "result": to_serialize(result)}
        except Exception as ex:
            return {"status": "error", "exception": to_serialize(ex)}
def actor_attribute(self, actor=None, attribute=None) -> dict[str, Any]:
try:
value = getattr(self.state.actors[actor], attribute)
return {"status": "OK", "result": to_serialize(value)}
except Exception as ex:
return {"status": "error", "exception": to_serialize(ex)}
    @fail_hard
    async def execute(self, key: Key, *, stimulus_id: str) -> StateMachineEvent:
        """Execute a task. Implements BaseWorker abstract method.

        Resolves the task's dependencies from local storage (or wraps them as
        Actor proxies), picks the configured executor, runs the task as a
        coroutine / in a thread pool / in a generic executor, and converts the
        outcome into a state-machine event (success, reschedule, or failure).

        See also
        --------
        distributed.worker_state_machine.BaseWorker.execute
        """
        if self.status not in WORKER_ANY_RUNNING:
            # This is just for internal coherence of the WorkerState; the reschedule
            # message should not ever reach the Scheduler.
            # It is still OK if it does though.
            return RescheduleEvent(key=key, stimulus_id=f"worker-closing-{time()}")
        # The key *must* be in the worker state thanks to the cancelled state
        ts = self.state.tasks[key]
        run_id = ts.run_id
        try:
            if self.state.validate:
                assert not ts.waiting_for_data
                assert ts.state in ("executing", "cancelled", "resumed"), ts
                assert ts.run_spec is not None
            start = time()
            # Gather dependency values; actor dependencies become Actor proxies
            data: dict[Key, Any] = {}
            for dep in ts.dependencies:
                dkey = dep.key
                actors = self.state.actors
                if actors and dkey in actors:
                    from distributed.actor import Actor  # TODO: create local actor
                    data[dkey] = Actor(
                        type(actors[dkey]), self.address, dkey, worker=self
                    )
                else:
                    data[dkey] = self.data[dkey]
            stop = time()
            # Reading self.data can be slow (it may involve unspilling); only
            # record a startstop when the delay is measurable
            if stop - start > 0.005:
                ts.startstops.append(
                    {"action": "disk-read", "start": start, "stop": stop}
                )
            assert ts.annotations is not None
            executor = ts.annotations.get("executor", "default")
            try:
                e = self.executors[executor]
            except KeyError:
                raise ValueError(
                    f"Invalid executor {executor!r}; "
                    f"expected one of: {sorted(self.executors)}"
                )
            self.active_keys.add(key)
            # Propagate span (see distributed.spans). This is useful when spawning
            # more tasks using worker_client() and for logging.
            span_ctx = (
                dask.annotate(span=ts.annotations["span"])
                if "span" in ts.annotations
                else contextlib.nullcontext()
            )
            span_ctx.__enter__()
            run_spec = ts.run_spec
            try:
                ts.start_time = time()
                if ts.run_spec.is_coro:
                    # Run directly on the event loop; expose the worker via
                    # the context variable so get_worker() works
                    token = _worker_cvar.set(self)
                    try:
                        result = await _run_task_async(
                            ts.run_spec,
                            data,
                            self.scheduler_delay,
                        )
                    finally:
                        _worker_cvar.reset(token)
                elif "ThreadPoolExecutor" in str(type(e)):
                    # The 'executor' time metric should be almost zero most of the time,
                    # e.g. thread synchronization overhead only, since thread-noncpu and
                    # thread-cpu inside the thread detract from it. However, it may
                    # become substantial in case of misalignment between the size of the
                    # thread pool and the number of running tasks in the worker stater
                    # machine (e.g. https://github.com/dask/distributed/issues/5882)
                    with context_meter.meter("executor"):
                        result = await run_in_executor_with_context(
                            e,
                            _run_task,
                            ts.run_spec,
                            data,
                            self.execution_state,
                            key,
                            self.active_threads,
                            self.active_threads_lock,
                            self.scheduler_delay,
                        )
                else:
                    # Can't capture contextvars across processes. If this is a
                    # ProcessPoolExecutor, the 'executor' time metric will show the
                    # whole runtime inside the executor.
                    with context_meter.meter("executor"):
                        result = await self.loop.run_in_executor(
                            e,
                            _run_task_simple,
                            ts.run_spec,
                            data,
                            self.scheduler_delay,
                        )
            finally:
                self.active_keys.discard(key)
                span_ctx.__exit__(None, None, None)
            self.threads[key] = result["thread"]
            if result["op"] == "task-finished":
                if self.digests is not None:
                    duration = max(0, result["stop"] - result["start"])
                    self.digests["task-duration"].add(duration)
                return ExecuteSuccessEvent(
                    key=key,
                    run_id=run_id,
                    value=result["result"],
                    start=result["start"],
                    stop=result["stop"],
                    nbytes=result["nbytes"],
                    type=result["type"],
                    stimulus_id=f"task-finished-{time()}",
                )
            task_exc = result["actual_exception"]
            if isinstance(task_exc, Reschedule):
                return RescheduleEvent(key=ts.key, stimulus_id=f"reschedule-{time()}")
            if (
                self.status == Status.closing
                and isinstance(task_exc, asyncio.CancelledError)
                and run_spec.is_coro
            ):
                # `Worker.cancel` will cause async user tasks to raise `CancelledError`.
                # Since we cancelled those tasks, we shouldn't treat them as failures.
                # This is just a heuristic; it's _possible_ the task happened to
                # fail independently with `CancelledError`.
                logger.info(
                    f"Async task {key!r} cancelled during worker close; rescheduling."
                )
                return RescheduleEvent(
                    key=ts.key, stimulus_id=f"cancelled-by-worker-close-{time()}"
                )
            if ts.state in ("executing", "long-running", "resumed"):
                logger.error(
                    "Compute Failed\n"
                    "Key: %s\n"
                    "State: %s\n"
                    "Task: %s\n"
                    "Exception: %r\n"
                    "Traceback: %r\n",
                    key,
                    ts.state,
                    repr(run_spec)[:1000],
                    result["exception_text"],
                    result["traceback_text"],
                )
            return ExecuteFailureEvent.from_exception(
                result,
                key=key,
                run_id=run_id,
                start=result["start"],
                stop=result["stop"],
                stimulus_id=f"task-erred-{time()}",
            )
        except Exception as exc:
            # Some legitimate use cases that will make us reach this point:
            # - User specified an invalid executor;
            # - Task transitioned to cancelled or resumed(fetch) before the start of
            #   execute() and its dependencies were released. This caused
            #   _prepare_args_for_execution() to raise KeyError;
            # - A dependency was unspilled but failed to deserialize due to a bug in
            #   user-defined or third party classes.
            if ts.state in ("executing", "long-running"):
                logger.error(
                    f"Exception during execution of task {key!r}",
                    exc_info=True,
                )
            return ExecuteFailureEvent.from_exception(
                exc,
                key=key,
                run_id=run_id,
                stimulus_id=f"execute-unknown-error-{time()}",
            )
##################
# Administrative #
##################
def cycle_profile(self) -> None:
now = time() + self.scheduler_delay
prof, self.profile_recent = self.profile_recent, profile.create()
self.profile_history.append((now, prof))
self.profile_keys_history.append((now, dict(self.profile_keys)))
self.profile_keys.clear()
    def trigger_profile(self) -> None:
        """
        Get a frame from all actively computing threads

        Merge these frames into existing profile counts
        """
        if not self.active_threads:  # hope that this is thread-atomic?
            return
        start = time()
        # Copy under the lock so the mapping doesn't mutate while we read it
        with self.active_threads_lock:
            active_threads = self.active_threads.copy()
        frames = sys._current_frames()
        # Keep only frames of threads that are running tasks
        frames = {ident: frames[ident] for ident in active_threads}
        llframes = {}
        if self.low_level_profiler:
            llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads}
        for ident, frame in frames.items():
            if frame is not None:
                # Aggregate per task prefix as well as into the overall profile
                key = key_split(active_threads[ident])
                llframe = llframes.get(ident)
                state = profile.process(
                    frame, True, self.profile_recent, stop="distributed/worker.py"
                )
                profile.llprocess(llframe, None, state)
                profile.process(
                    frame, True, self.profile_keys[key], stop="distributed/worker.py"
                )
        stop = time()
        self.digest_metric("profile-duration", stop - start)
async def get_profile(
self,
start=None,
stop=None,
key=None,
server: bool = False,
):
now = time() + self.scheduler_delay
if server:
history = self.io_loop.profile # type: ignore[attr-defined]
elif key is None:
history = self.profile_history
else:
history = [(t, d[key]) for t, d in self.profile_keys_history if key in d]
if start is None:
istart = 0
else:
istart = bisect.bisect_left(history, (start,))
if stop is None:
istop = None
else:
istop = bisect.bisect_right(history, (stop,)) + 1
if istop >= len(history):
istop = None # include end
if istart == 0 and istop is None:
history = list(history)
else:
iistop = len(history) if istop is None else istop
history = [history[i] for i in range(istart, iistop)]
prof = profile.merge(*pluck(1, history))
if not history:
return profile.create()
if istop is None and (start is None or start < now):
if key is None:
recent = self.profile_recent
else:
recent = self.profile_keys[key]
prof = profile.merge(prof, recent)
return prof
async def get_profile_metadata(
self, start: float = 0, stop: float | None = None
) -> dict[str, Any]:
add_recent = stop is None
now = time() + self.scheduler_delay
stop = stop or now
result = {
"counts": [
(t, d["count"]) for t, d in self.profile_history if start < t < stop
],
"keys": [
(t, {k: d["count"] for k, d in v.items()})
for t, v in self.profile_keys_history
if start < t < stop
],
}
if add_recent:
result["counts"].append((now, self.profile_recent["count"]))
result["keys"].append(
(now, {k: v["count"] for k, v in self.profile_keys.items()})
)
return result
def get_call_stack(self, keys: Collection[Key] | None = None) -> dict[Key, Any]:
with self.active_threads_lock:
sys_frames = sys._current_frames()
frames = {key: sys_frames[tid] for tid, key in self.active_threads.items()}
if keys is not None:
frames = {key: frames[key] for key in keys if key in frames}
return {key: profile.call_stack(frame) for key, frame in frames.items()}
async def benchmark_disk(self) -> dict[str, float]:
return await self.loop.run_in_executor(
self.executor, benchmark_disk, self.local_directory
)
async def benchmark_memory(self) -> dict[str, float]:
return await self.loop.run_in_executor(self.executor, benchmark_memory)
async def benchmark_network(self, address: str) -> dict[str, float]:
return await benchmark_network(rpc=self.rpc, address=address)
#######################################
# Worker Clients (advanced workloads) #
#######################################
@property
def client(self) -> Client:
with self._lock:
if self._client:
return self._client
else:
return self._get_client()
    def _get_client(self, timeout: float | None = None) -> Client:
        """Get local client attached to this worker

        If no such client exists, create one

        Parameters
        ----------
        timeout:
            Connect timeout in seconds (or a parseable timedelta string);
            defaults to the ``distributed.comm.timeouts.connect`` config value.

        See Also
        --------
        get_client
        """
        if timeout is None:
            timeout = dask.config.get("distributed.comm.timeouts.connect")
        timeout = parse_timedelta(timeout, "s")
        try:
            from distributed.client import default_client
            client = default_client()
        except ValueError:  # no clients found, need to make a new one
            pass
        else:
            # must be lazy import otherwise cyclic import
            from distributed.deploy.cluster import Cluster
            if (
                client.scheduler
                and client.scheduler.address == self.scheduler.address
                # The below conditions should only happen in case a second
                # cluster is alive, e.g. if a submitted task spawned its own
                # LocalCluster, see gh4565
                or (
                    isinstance(client._start_arg, str)
                    and client._start_arg == self.scheduler.address
                    or isinstance(client._start_arg, Cluster)
                    and client._start_arg.scheduler_address == self.scheduler.address
                )
            ):
                # Reuse the default client only if it targets our scheduler
                self._client = client
        if not self._client:
            from distributed.client import Client
            asynchronous = in_async_call(self.loop)
            self._client = Client(
                self.scheduler,
                loop=self.loop,
                security=self.security,
                set_as_default=True,
                asynchronous=asynchronous,
                direct_to_workers=True,
                name="worker",
                timeout=timeout,
            )
            Worker._initialized_clients.add(self._client)
            if not asynchronous:
                assert self._client.status == "running"
        self.log_event(
            "worker-get-client",
            {
                "client": self._client.id,
                "timeout": timeout,
            },
        )
        return self._client
def get_current_task(self) -> Key:
"""Get the key of the task we are currently running
This only makes sense to run within a task
Examples
--------
>>> from dask.distributed import get_worker
>>> def f():
... return get_worker().get_current_task()
>>> future = client.submit(f) # doctest: +SKIP
>>> future.result() # doctest: +SKIP
'f-1234'
See Also
--------
get_worker
"""
return self.active_threads[threading.get_ident()]
def _handle_remove_worker(self, worker: str, stimulus_id: str) -> None:
self.rpc.remove(worker)
self.handle_stimulus(RemoveWorkerEvent(worker=worker, stimulus_id=stimulus_id))
def validate_state(self) -> None:
try:
self.state.validate_state()
except Exception as e:
logger.error("Validate state failed", exc_info=e)
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
if hasattr(e, "to_event"):
topic, msg = e.to_event()
self.log_event(topic, msg)
raise
@property
def incoming_transfer_log(self):
warnings.warn(
"The `Worker.incoming_transfer_log` attribute has been renamed to "
"`Worker.transfer_incoming_log`",
DeprecationWarning,
stacklevel=2,
)
return self.transfer_incoming_log
@property
def outgoing_count(self):
warnings.warn(
"The `Worker.outgoing_count` attribute has been renamed to "
"`Worker.transfer_outgoing_count_total`",
DeprecationWarning,
stacklevel=2,
)
return self.transfer_outgoing_count_total
@property
def outgoing_current_count(self):
warnings.warn(
"The `Worker.outgoing_current_count` attribute has been renamed to "
"`Worker.transfer_outgoing_count`",
DeprecationWarning,
stacklevel=2,
)
return self.transfer_outgoing_count
@property
def outgoing_transfer_log(self):
warnings.warn(
"The `Worker.outgoing_transfer_log` attribute has been renamed to "
"`Worker.transfer_outgoing_log`",
DeprecationWarning,
stacklevel=2,
)
return self.transfer_outgoing_log
@property
def total_in_connections(self):
warnings.warn(
"The `Worker.total_in_connections` attribute has been renamed to "
"`Worker.transfer_outgoing_count_limit`",
DeprecationWarning,
stacklevel=2,
)
return self.transfer_outgoing_count_limit
# Context variable pointing at the Worker executing the current task; set
# around task/actor execution and read back by ``get_worker()``.
_worker_cvar: contextvars.ContextVar[Worker] = contextvars.ContextVar("_worker_cvar")
def get_worker() -> Worker:
    """Get the worker currently running this task

    Examples
    --------
    >>> def f():
    ...     worker = get_worker()  # The worker on which this task is running
    ...     return worker.address

    >>> future = client.submit(f)  # doctest: +SKIP
    >>> future.result()  # doctest: +SKIP
    'tcp://127.0.0.1:47373'

    See Also
    --------
    get_client
    worker_client
    """
    try:
        current = _worker_cvar.get()
    except LookupError:
        # Context variable unset: we are not running inside a task
        raise ValueError("No worker found") from None
    return current
def get_client(address=None, timeout=None, resolve_address=True) -> Client:
    """Get a client while within a task.

    This client connects to the same scheduler to which the worker is connected

    Parameters
    ----------
    address : str, optional
        The address of the scheduler to connect to. Defaults to the scheduler
        the worker is connected to.
    timeout : int or str
        Timeout (in seconds) for getting the Client. Defaults to the
        ``distributed.comm.timeouts.connect`` configuration value.
    resolve_address : bool, default True
        Whether to resolve `address` to its canonical form.

    Returns
    -------
    Client

    Examples
    --------
    >>> def f():
    ...     client = get_client(timeout="10s")
    ...     futures = client.map(lambda x: x + 1, range(10))  # spawn many tasks
    ...     results = client.gather(futures)
    ...     return sum(results)

    >>> future = client.submit(f)  # doctest: +SKIP
    >>> future.result()  # doctest: +SKIP
    55

    See Also
    --------
    get_worker
    worker_client
    secede
    """
    if timeout is None:
        timeout = dask.config.get("distributed.comm.timeouts.connect")
    timeout = parse_timedelta(timeout, "s")
    if address and resolve_address:
        address = comm_resolve_address(address)
    # Resolution order: (1) the client of the worker running this task,
    # (2) the current default client, (3) a brand new client for `address`.
    try:
        worker = get_worker()
    except ValueError:  # could not find worker
        pass
    else:
        if not address or worker.scheduler.address == address:
            return worker._get_client(timeout=timeout)
    from distributed.client import Client
    try:
        client = Client.current()  # TODO: assumes the same scheduler
    except ValueError:
        client = None
    if client and (not address or client.scheduler.address == address):
        return client
    elif address:
        return Client(address, timeout=timeout)
    else:
        raise ValueError("No global client found and no address provided")
def secede():
    """
    Have this task secede from the worker's thread pool

    This opens up a new scheduling slot and a new thread for a new task. This
    enables the client to schedule tasks on this node, which is
    especially useful while waiting for other jobs to finish (e.g., with
    ``client.gather``).

    Examples
    --------
    >>> def mytask(x):
    ...     # do some work
    ...     client = get_client()
    ...     futures = client.map(...)  # do some remote work
    ...     secede()  # while that work happens, remove ourself from the pool
    ...     return client.gather(futures)  # return gathered results

    See Also
    --------
    get_client
    get_worker
    """
    worker = get_worker()
    tpe_secede()  # have this thread secede from the thread pool
    # Time spent computing so far, as seen by this thread
    duration = time() - thread_state.start_time
    # Notify the state machine from the event loop thread, not this one
    worker.loop.add_callback(
        worker.handle_stimulus,
        SecedeEvent(
            key=thread_state.key,
            compute_duration=duration,
            stimulus_id=f"secede-{time()}",
        ),
    )
async def get_data_from_worker(
    rpc: ConnectionPool,
    keys: Collection[Key],
    worker: str,
    *,
    who: str | None = None,
    serializers: list[str] | None = None,
    deserializers: list[str] | None = None,
) -> GetDataBusy | GetDataSuccess:
    """Get keys from worker

    The worker has a two step handshake to acknowledge when data has been fully
    delivered. This function implements that handshake.

    Parameters
    ----------
    rpc:
        Connection pool used to reach the peer worker.
    keys:
        Task keys to fetch.
    worker:
        Address of the peer worker holding the data.
    who:
        Address of the requesting worker, for the peer's diagnostics.
    serializers, deserializers:
        Optional overrides; default to the pool's configuration.

    See Also
    --------
    Worker.get_data
    Worker.gather_dep
    utils_comm.gather_data_from_workers
    """
    if serializers is None:
        serializers = rpc.serializers
    if deserializers is None:
        deserializers = rpc.deserializers
    comm = await rpc.connect(worker)
    comm.name = "Ephemeral Worker->Worker for gather"
    try:
        response = await send_recv(
            comm,
            serializers=serializers,
            deserializers=deserializers,
            op="get_data",
            keys=keys,
            who=who,
        )
        try:
            status = response["status"]
        except KeyError:  # pragma: no cover
            raise ValueError("Unexpected response", response)
        else:
            if status == "OK":
                # Second handshake step: acknowledge receipt to the sender
                await comm.write("OK")
        return response
    finally:
        # Always hand the connection back to the pool
        rpc.reuse(worker, comm)
# Bounded cache of pickled functions, guarded by _cache_lock
cache_dumps: LRU[Callable[..., Any], bytes] = LRU(maxsize=100)
_cache_lock = threading.Lock()
def dumps_function(func) -> bytes:
    """Dump a function to bytes, cache functions

    Hashable functions are looked up in (and, if reasonably small, added to)
    ``cache_dumps``; unhashable ones are pickled afresh on every call.
    """
    try:
        with _cache_lock:
            result = cache_dumps[func]
    except KeyError:
        result = pickle.dumps(func)
        # Only cache reasonably small payloads
        if len(result) < 100000:
            with _cache_lock:
                cache_dumps[func] = result
    except TypeError:  # Unhashable function
        result = pickle.dumps(func)
    return result
def _run_task(
    task: GraphNode,
    data: dict,
    execution_state: dict,
    key: Key,
    active_threads: dict,
    active_threads_lock: threading.Lock,
    time_delay: float,
) -> RunTaskSuccess | RunTaskFailure:
    """Run a function, collect information

    Wraps :func:`_run_task_simple` with per-thread bookkeeping: registers the
    running key in *active_threads*, publishes thread-local execution state,
    and points ``_worker_cvar`` at the owning worker for the duration.

    Returns
    -------
    msg: dictionary with status, result/error, timings, etc..
    """
    ident = threading.get_ident()
    with active_threads_lock:
        active_threads[ident] = key
    with set_thread_state(
        start_time=time(),
        execution_state=execution_state,
        key=key,
    ):
        token = _worker_cvar.set(execution_state["worker"])
        try:
            msg = _run_task_simple(task, data, time_delay)
        finally:
            _worker_cvar.reset(token)
    with active_threads_lock:
        del active_threads[ident]
    return msg
def _run_task_simple(
    task: GraphNode,
    data: dict,
    time_delay: float,
) -> RunTaskSuccess | RunTaskFailure:
    """Run a function, collect information

    Parameters
    ----------
    task:
        The task to execute against *data*.
    data:
        Mapping of dependency key to value.
    time_delay:
        Offset added to start/stop timestamps (scheduler clock delay).

    Returns
    -------
    msg: dictionary with status, result/error, timings, etc..
    """
    # meter("thread-cpu").delta
    #   Difference in thread_time() before and after function call, minus user calls
    #   to context_meter inside the function. Published to Server.digests as
    #   {("execute", <prefix>, "thread-cpu", "seconds"): <value>}
    # m.delta
    #   Difference in wall time before and after function call, minus thread-cpu,
    #   minus user calls to context_meter. Published to Server.digests as
    #   {("execute", <prefix>, "thread-noncpu", "seconds"): <value>}
    # m.stop - m.start
    #   Difference in wall time before and after function call, without subtracting
    #   anything. This is used in scheduler heuristics, e.g. task stealing.
    with (
        context_meter.meter("thread-noncpu", func=time) as m,
        context_meter.meter("thread-cpu", func=thread_time),
    ):
        try:
            result = task(data)
        except (SystemExit, KeyboardInterrupt):
            # Special-case these, just like asyncio does all over the place. They will
            # pass through `fail_hard` and `_handle_stimulus_from_task`, and eventually
            # be caught by special-case logic in asyncio:
            # https://github.com/python/cpython/blob/v3.9.4/Lib/asyncio/events.py#L81-L82
            # Any other `BaseException` types would ultimately be ignored by asyncio if
            # raised here, after messing up the worker state machine along their way.
            raise
        except BaseException as e:
            # Users _shouldn't_ use `BaseException`s, but if they do, we can assume they
            # aren't a reason to shut down the whole system (since we allow the
            # system-shutting-down `SystemExit` and `KeyboardInterrupt` to pass through)
            msg: RunTaskFailure = error_message(e)  # type: ignore
            msg["op"] = "task-erred"
            msg["actual_exception"] = e
        else:
            msg: RunTaskSuccess = {  # type: ignore
                "op": "task-finished",
                "status": "OK",
                "result": result,
                "nbytes": sizeof(result),
                "type": type(result) if result is not None else None,
            }
    # Shift timestamps into the scheduler's clock
    msg["start"] = m.start + time_delay
    msg["stop"] = m.stop + time_delay
    msg["thread"] = threading.get_ident()
    return msg
async def _run_task_async(
    task: GraphNode,
    data: dict,
    time_delay: float,
) -> RunTaskSuccess | RunTaskFailure:
    """Run a function, collect information

    Coroutine counterpart of :func:`_run_task_simple`; runs *task* directly on
    the event loop (no thread-cpu meter, since no dedicated thread is used).

    Returns
    -------
    msg: dictionary with status, result/error, timings, etc..
    """
    with context_meter.meter("thread-noncpu", func=time) as m:
        try:
            result = await task(data)
        except (SystemExit, KeyboardInterrupt):
            # Special-case these, just like asyncio does all over the place. They will
            # pass through `fail_hard` and `_handle_stimulus_from_task`, and eventually
            # be caught by special-case logic in asyncio:
            # https://github.com/python/cpython/blob/v3.9.4/Lib/asyncio/events.py#L81-L82
            # Any other `BaseException` types would ultimately be ignored by asyncio if
            # raised here, after messing up the worker state machine along their way.
            raise
        except BaseException as e:
            # NOTE: this includes `CancelledError`! Since it's a user task, that's _not_
            # a reason to shut down the worker.
            # Users _shouldn't_ use `BaseException`s, but if they do, we can assume they
            # aren't a reason to shut down the whole system (since we allow the
            # system-shutting-down `SystemExit` and `KeyboardInterrupt` to pass through)
            msg: RunTaskFailure = error_message(e)  # type: ignore
            msg["op"] = "task-erred"
            msg["actual_exception"] = e
        else:
            msg: RunTaskSuccess = {  # type: ignore
                "op": "task-finished",
                "status": "OK",
                "result": result,
                "nbytes": sizeof(result),
                "type": type(result) if result is not None else None,
            }
    # Shift timestamps into the scheduler's clock
    msg["start"] = m.start + time_delay
    msg["stop"] = m.stop + time_delay
    msg["thread"] = threading.get_ident()
    return msg
def _run_actor(
    func: Callable,
    args: tuple,
    kwargs: dict,
    execution_state: dict,
    key: Key,
    active_threads: dict,
    active_threads_lock: threading.Lock,
) -> Any:
    """Run an actor method in the actor executor thread.

    Performs the same thread bookkeeping as :func:`_run_task` (active-threads
    registration, thread state, ``_worker_cvar``) but returns the raw result
    and lets exceptions propagate to the caller.
    """
    ident = threading.get_ident()
    with active_threads_lock:
        active_threads[ident] = key
    with set_thread_state(
        start_time=time(),
        execution_state=execution_state,
        key=key,
        actor=True,
    ):
        token = _worker_cvar.set(execution_state["worker"])
        try:
            result = func(*args, **kwargs)
        finally:
            _worker_cvar.reset(token)
    with active_threads_lock:
        del active_threads[ident]
    return result
def get_msg_safe_str(msg):
"""Make a worker msg, which contains args and kwargs, safe to cast to str:
allowing for some arguments to raise exceptions during conversion and
ignoring them.
"""
class Repr:
def __init__(self, f, val):
self._f = f
self._val = val
def __repr__(self):
return self._f(self._val)
msg = msg.copy()
if "args" in msg:
msg["args"] = Repr(convert_args_to_str, msg["args"])
if "kwargs" in msg:
msg["kwargs"] = Repr(convert_kwargs_to_str, msg["kwargs"])
return msg
def convert_args_to_str(args, max_len: int | None = None) -> str:
"""Convert args to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(args))]
for i, arg in enumerate(args):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
strs[i] = sarg
length += len(sarg) + 2
if max_len is not None and length > max_len:
return "({}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "({})".format(", ".join(strs))
def convert_kwargs_to_str(kwargs: dict, max_len: int | None = None) -> str:
"""Convert kwargs to a string, allowing for some arguments to raise
exceptions during conversion and ignoring them.
"""
length = 0
strs = ["" for i in range(len(kwargs))]
for i, (argname, arg) in enumerate(kwargs.items()):
try:
sarg = repr(arg)
except Exception:
sarg = "< could not convert arg to str >"
skwarg = repr(argname) + ": " + sarg
strs[i] = skwarg
length += len(skwarg) + 2
if max_len is not None and length > max_len:
return "{{{}".format(", ".join(strs[: i + 1]))[:max_len]
else:
return "{{{}}}".format(", ".join(strs))
async def run(server, comm, function, args=(), kwargs=None, wait=True):
kwargs = kwargs or {}
function = pickle.loads(function)
is_coro = iscoroutinefunction(function)
assert wait or is_coro, "Combination not supported"
if args:
args = pickle.loads(args)
if kwargs:
kwargs = pickle.loads(kwargs)
if has_arg(function, "dask_worker"):
kwargs["dask_worker"] = server
if has_arg(function, "dask_scheduler"):
kwargs["dask_scheduler"] = server
logger.info("Run out-of-band function %r", funcname(function))
try:
if not is_coro:
result = function(*args, **kwargs)
else:
if wait:
result = await function(*args, **kwargs)
else:
server._ongoing_background_tasks.call_soon(function, *args, **kwargs)
result = None
except Exception as e:
logger.warning(
"Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n",
str(funcname(function))[:1000],
convert_args_to_str(args, max_len=1000),
convert_kwargs_to_str(kwargs, max_len=1000),
exc_info=True,
)
response = error_message(e)
else:
response = {"status": "OK", "result": to_serialize(result)}
return response
_global_workers = Worker._instances
def add_gpu_metrics():
async def gpu_metric(worker):
result = await offload(nvml.real_time)
return result
DEFAULT_METRICS["gpu"] = gpu_metric
def gpu_startup(worker):
return nvml.one_time()
DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup
try:
import rmm as _rmm
except Exception:
pass
else:
async def rmm_metric(worker):
result = await offload(rmm.real_time)
return result
DEFAULT_METRICS["rmm"] = rmm_metric
del _rmm
# avoid importing cuDF unless explicitly enabled
if dask.config.get("distributed.diagnostics.cudf"):
try:
import cudf as _cudf
except Exception:
pass
else:
from distributed.diagnostics import cudf
async def cudf_metric(worker):
result = await offload(cudf.real_time)
return result
DEFAULT_METRICS["cudf"] = cudf_metric
del _cudf
def print(
*args,
sep: str | None = " ",
end: str | None = "\n",
file: TextIO | None = None,
flush: bool = False,
) -> None:
"""
A drop-in replacement of the built-in ``print`` function for remote printing
from workers to clients. If called from outside a dask worker, its arguments
are passed directly to ``builtins.print()``. If called by code running on a
worker, then in addition to printing locally, any clients connected
(possibly remotely) to the scheduler managing this worker will receive an
event instructing them to print the same output to their own standard output
or standard error streams. For example, the user can perform simple
debugging of remote computations by including calls to this ``print``
function in the submitted code and inspecting the output in a local Jupyter
notebook or interpreter session.
All arguments behave the same as those of ``builtins.print()``, with the
exception that the ``file`` keyword argument, if specified, must either be
``sys.stdout`` or ``sys.stderr``; arbitrary file-like objects are not
allowed.
All non-keyword arguments are converted to strings using ``str()`` and
written to the stream, separated by ``sep`` and followed by ``end``. Both
``sep`` and ``end`` must be strings; they can also be ``None``, which means
to use the default values. If no objects are given, ``print()`` will just
write ``end``.
Parameters
----------
sep : str, optional
String inserted between values, default a space.
end : str, optional
String appended after the last value, default a newline.
file : ``sys.stdout`` or ``sys.stderr``, optional
Defaults to the current sys.stdout.
flush : bool, default False
Whether to forcibly flush the stream.
Examples
--------
>>> from dask.distributed import Client, print
>>> client = distributed.Client(...)
>>> def worker_function():
... print("Hello from worker!")
>>> client.submit(worker_function)
<Future: finished, type: NoneType, key: worker_function-...>
Hello from worker!
"""
try:
worker = get_worker()
except ValueError:
pass
else:
# We are in a worker: prepare all of the print args and kwargs to be
# serialized over the wire to the client.
msg = {
# According to the Python stdlib docs, builtin print() simply calls
# str() on each positional argument, so we do the same here.
"args": tuple(map(str, args)),
"sep": sep,
"end": end,
"flush": flush,
}
if file == sys.stdout:
msg["file"] = 1 # type: ignore
elif file == sys.stderr:
msg["file"] = 2 # type: ignore
elif file is not None:
raise TypeError(
f"Remote printing to arbitrary file objects is not supported. file "
f"kwarg must be one of None, sys.stdout, or sys.stderr; got: {file!r}"
)
worker.log_event("print", msg)
builtins.print(*args, sep=sep, end=end, file=file, flush=flush)
def warn(
message: str | Warning,
category: type[Warning] | None = UserWarning,
stacklevel: int = 1,
source: Any = None,
) -> None:
"""
A drop-in replacement of the built-in ``warnings.warn()`` function for
issuing warnings remotely from workers to clients.
If called from outside a dask worker, its arguments are passed directly to
``warnings.warn()``. If called by code running on a worker, then in addition
to emitting a warning locally, any clients connected (possibly remotely) to
the scheduler managing this worker will receive an event instructing them to
emit the same warning (subject to their own local filters, etc.). When
implementing computations that may run on a worker, the user can call this
``warn`` function to ensure that any remote client sessions will see their
warnings, for example in a Jupyter output cell.
While all of the arguments are respected by the locally emitted warning
(with same meanings as in ``warnings.warn()``), ``stacklevel`` and
``source`` are ignored by clients because they would not be meaningful in
the client's thread.
Examples
--------
>>> from dask.distributed import Client, warn
>>> client = Client()
>>> def do_warn():
... warn("A warning from a worker.")
>>> client.submit(do_warn).result()
/path/to/distributed/client.py:678: UserWarning: A warning from a worker.
"""
try:
worker = get_worker()
except ValueError: # pragma: no cover
pass
else:
# We are in a worker: log a warn event with args serialized to the
# client. We have to pickle message and category into bytes ourselves
# because msgpack cannot handle them. The expectations is that these are
# always small objects.
worker.log_event(
"warn",
{
"message": pickle.dumps(message),
"category": pickle.dumps(category),
# We ignore stacklevel because it will be meaningless in the
# client's thread/process.
# We ignore source because we don't want to serialize arbitrary
# objects.
},
)
# add 1 to stacklevel so that, at least in the worker's local stderr, we'll
# see the source line that called us
warnings.warn(message, category, stacklevel + 1, source)
def benchmark_disk(
rootdir: str | None = None,
sizes: Iterable[str] = ("1 kiB", "100 kiB", "1 MiB", "10 MiB", "100 MiB"),
duration="1 s",
) -> dict[str, float]:
"""
Benchmark disk bandwidth
Returns
-------
out: dict
Maps sizes of outputs to measured bandwidths
"""
duration = parse_timedelta(duration)
out = {}
for size_str in sizes:
with tmpdir(dir=rootdir) as dir:
dir = pathlib.Path(dir)
names = list(map(str, range(100)))
size = parse_bytes(size_str)
data = random.randbytes(size)
start = time()
total = 0
while time() < start + duration:
with open(dir / random.choice(names), mode="ab") as f:
f.write(data)
f.flush()
os.fsync(f.fileno())
total += size
out[size_str] = total / (time() - start)
return out
def benchmark_memory(
sizes: Iterable[str] = ("2 kiB", "10 kiB", "100 kiB", "1 MiB", "10 MiB"),
duration="200 ms",
) -> dict[str, float]:
"""
Benchmark memory bandwidth
Returns
-------
out: dict
Maps sizes of outputs to measured bandwidths
"""
duration = parse_timedelta(duration)
out = {}
for size_str in sizes:
size = parse_bytes(size_str)
data = random.randbytes(size)
start = time()
total = 0
while time() < start + duration:
_ = data[:-1]
del _
total += size
out[size_str] = total / (time() - start)
return out
async def benchmark_network(
address: str,
rpc: ConnectionPool | Callable[[str], RPCType],
sizes: Iterable[str] = ("1 kiB", "10 kiB", "100 kiB", "1 MiB", "10 MiB", "50 MiB"),
duration="1 s",
) -> dict[str, float]:
"""
Benchmark network communications to another worker
Returns
-------
out: dict
Maps sizes of outputs to measured bandwidths
"""
duration = parse_timedelta(duration)
out = {}
async with rpc(address) as r:
for size_str in sizes:
size = parse_bytes(size_str)
data = to_serialize(random.randbytes(size))
start = time()
total = 0
while time() < start + duration:
await r.echo(data=data)
total += size * 2
out[size_str] = total / (time() - start)
return out
|
Worker
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_pretty.py
|
{
"start": 3604,
"end": 3696
}
|
class ____:
_repr_pretty_ = None
def __repr__(self):
return "Dummy2()"
|
Dummy2
|
python
|
kamyu104__LeetCode-Solutions
|
Python/maximum-area-of-longest-diagonal-rectangle.py
|
{
"start": 37,
"end": 255
}
|
class ____(object):
def areaOfMaxDiagonal(self, dimensions):
"""
:type dimensions: List[List[int]]
:rtype: int
"""
return max((l**2+w**2, l*w) for l, w in dimensions)[1]
|
Solution
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/generated.py
|
{
"start": 36999,
"end": 37441
}
|
class ____(BaseModel):
model_config = ConfigDict(
extra="forbid",
)
action: Annotated[
Literal["create"], Field(description="The action to be performed on the entities.", title="Action")
]
entities: Annotated[
list[VariableBody], Field(description="A list of entities to be created.", title="Entities")
]
action_on_existence: BulkActionOnExistence | None = "fail"
|
BulkCreateActionVariableBody
|
python
|
huggingface__transformers
|
src/transformers/models/bamba/modeling_bamba.py
|
{
"start": 3731,
"end": 9082
}
|
class ____:
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shape for each tensor
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
is_compileable = False
def __init__(self, config: BambaConfig, batch_size, dtype=torch.float16, device=None):
self.layers_block_type = config.layers_block_type
self.has_previous_state = False # only used by mamba
conv_kernel_size = config.mamba_d_conv
ssm_state_size = config.mamba_d_state
self.conv_states = []
self.ssm_states = []
self.transformer_layers = []
for i in range(config.num_hidden_layers):
if self.layers_block_type[i] == "mamba":
self.conv_states += [
torch.zeros(
batch_size,
(config.mamba_expand * config.hidden_size + 2 * config.mamba_n_groups * ssm_state_size),
conv_kernel_size,
device=device,
dtype=dtype,
)
]
self.ssm_states += [
torch.zeros(
batch_size,
config.mamba_n_heads,
config.mamba_d_head,
ssm_state_size,
device=device,
dtype=dtype,
)
]
else:
self.conv_states += [torch.tensor([[]] * batch_size, device=device)]
self.ssm_states += [torch.tensor([[]] * batch_size, device=device)]
self.transformer_layers.append(i)
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
def __len__(self):
return len(self.key_cache)
def __getitem__(self, layer_idx):
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
# Update the cache
if self.key_cache[layer_idx].shape[-1] == 0:
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
if self.get_seq_length() > 0:
for layer_idx in range(len(self.key_cache)):
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.conv_states[layer_idx].device
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
device = self.ssm_states[layer_idx].device
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]:
"""Return the length and offset of the cache, used to generate the mask"""
kv_offset = 0
query_length = cache_position.shape[0]
kv_length = self.get_seq_length(layer_idx) + query_length
return kv_length, kv_offset
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
# take any layer that contains cache and not empty tensor
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].shape[-1] == 0:
return 0
return self.key_cache[layer_idx].shape[-2]
|
HybridMambaAttentionDynamicCache
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_group_subject.py
|
{
"start": 383,
"end": 3888
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str'
}
attribute_map = {
'name': 'name'
}
def __init__(self, name=None, local_vars_configuration=None): # noqa: E501
"""V1GroupSubject - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self.discriminator = None
self.name = name
@property
def name(self):
"""Gets the name of this V1GroupSubject. # noqa: E501
name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. # noqa: E501
:return: The name of this V1GroupSubject. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1GroupSubject.
name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required. # noqa: E501
:param name: The name of this V1GroupSubject. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1GroupSubject):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1GroupSubject):
return True
return self.to_dict() != other.to_dict()
|
V1GroupSubject
|
python
|
getsentry__sentry
|
src/sentry/search/eap/columns.py
|
{
"start": 6761,
"end": 7484
}
|
class ____(ResolvedFunction):
"""
A formula is a type of function that may accept a parameter, it divides an attribute, aggregate or formula by another.
The FormulaDefinition contains a method `resolve`, which takes in the argument passed into the function and returns the resolved formula.
For example if the user queries for `http_response_rate(5), the FormulaDefinition calles `resolve` with the argument `5` and returns the `ResolvedFormula`.
"""
formula: Column.BinaryFormula
@property
def proto_definition(self) -> Column.BinaryFormula:
"""The definition of this function as needed by the RPC"""
return self.formula
@dataclass(frozen=True, kw_only=True)
|
ResolvedFormula
|
python
|
justquick__django-activity-stream
|
runtests/testapp/models.py
|
{
"start": 257,
"end": 423
}
|
class ____(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class Meta:
abstract = True
|
Abstract
|
python
|
sphinx-doc__sphinx
|
sphinx/builders/texinfo.py
|
{
"start": 1079,
"end": 9908
}
|
class ____(Builder):
"""Builds Texinfo output to create Info documentation."""
name = 'texinfo'
format = 'texinfo'
epilog = __('The Texinfo files are in %(outdir)s.')
if os.name == 'posix':
epilog += __(
"\nRun 'make' in that directory to run these through "
'makeinfo\n'
"(use 'make info' here to do that automatically)."
)
supported_image_types = ['image/png', 'image/jpeg', 'image/gif']
default_translator_class = TexinfoTranslator
def init(self) -> None:
self.docnames: Iterable[str] = []
self.document_data: list[tuple[str, str, str, str, str, str, str, bool]] = []
def get_outdated_docs(self) -> str | list[str]:
return 'all documents' # for now
def get_target_uri(self, docname: str, typ: str | None = None) -> str:
if docname not in self.docnames:
raise NoUri(docname, typ)
return '%' + docname
def get_relative_uri(self, from_: str, to: str, typ: str | None = None) -> str:
# ignore source path
return self.get_target_uri(to, typ)
def prepare_writing(self, _docnames: Set[str]) -> None:
preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
if not preliminary_document_data:
logger.warning(
__(
'no "texinfo_documents" config value found; no documents '
'will be written'
)
)
return
# assign subdirs to titles
self.titles: list[tuple[str, str]] = []
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
logger.warning(
__(
'"texinfo_documents" config value references unknown '
'document %s'
),
docname,
)
continue
self.document_data.append(entry) # type: ignore[arg-type]
docname = docname.removesuffix(SEP + 'index')
self.titles.append((docname, entry[2]))
def write_documents(self, _docnames: Set[str]) -> None:
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
direntry = description = category = ''
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
if len(entry) > 7:
toctree_only = entry[7]
with progress_message(__('processing %s') % targetname, nonl=False):
appendices = self.config.texinfo_appendices or []
doctree = self.assemble_doctree(
docname, toctree_only, appendices=appendices
)
with progress_message(__('writing')):
self.post_process_images(doctree)
settings = _get_settings(
TexinfoWriter, defaults=self.env.settings, read_config_files=True
)
settings.author = author
settings.title = title
settings.texinfo_filename = targetname[:-5] + '.info'
settings.texinfo_elements = self.config.texinfo_elements
settings.texinfo_dir_entry = direntry or ''
settings.texinfo_dir_category = category or ''
settings.texinfo_dir_description = description or ''
settings.docname = docname
doctree.settings = settings
visitor: TexinfoTranslator = self.create_translator(doctree, self) # type: ignore[assignment]
doctree.walkabout(visitor)
visitor.finish()
(self.outdir / targetname).write_text(visitor.output, encoding='utf-8')
self.copy_image_files(targetname[:-5])
def assemble_doctree(
self,
indexfile: str,
toctree_only: bool,
appendices: list[str],
) -> nodes.document:
self.docnames = {indexfile, *appendices}
logger.info(darkgreen(indexfile))
tree = self.env.get_doctree(indexfile)
tree['docname'] = indexfile
if toctree_only:
# extract toctree nodes from the tree and put them in a
# fresh document
new_tree = new_document('<texinfo output>')
new_sect = nodes.section()
new_sect += nodes.title('<Set title in conf.py>', '<Set title in conf.py>')
new_tree += new_sect
for node in tree.findall(addnodes.toctree):
new_sect += node
tree = new_tree
largetree = inline_all_toctrees(
self, self.docnames, indexfile, tree, darkgreen, [indexfile]
)
largetree['docname'] = indexfile
for docname in appendices:
appendix = self.env.get_doctree(docname)
appendix['docname'] = docname
largetree.append(appendix)
logger.info('')
logger.info(__('resolving references...'))
self.env.resolve_references(largetree, indexfile, self)
# TODO: add support for external :ref:s
for pendingnode in largetree.findall(addnodes.pending_xref):
docname = pendingnode['refdocname']
sectname = pendingnode['refsectname']
newnodes: list[Node] = [nodes.emphasis(sectname, sectname)]
for subdir, title in self.titles:
if docname.startswith(subdir):
newnodes.extend((
nodes.Text(_(' (in ')),
nodes.emphasis(title, title),
nodes.Text(')'),
))
break
pendingnode.replace_self(newnodes)
return largetree
def copy_assets(self) -> None:
self.copy_support_files()
def copy_image_files(self, targetname: str) -> None:
if self.images:
stringify_func = ImageAdapter(self.env).get_original_image_uri
for src in status_iterator(
self.images,
__('copying images... '),
'brown',
len(self.images),
self.config.verbosity,
stringify_func=stringify_func,
):
dest = self.images[src]
try:
imagedir = self.outdir / f'{targetname}-figures'
ensuredir(imagedir)
copyfile(
self.srcdir / src,
imagedir / dest,
force=True,
)
except Exception as err:
logger.warning(
__('cannot copy image file %r: %s'),
self.srcdir / src,
err,
)
def copy_support_files(self) -> None:
try:
with progress_message(__('copying Texinfo support files')):
logger.info('Makefile ', nonl=True)
copyfile(
template_dir / 'Makefile',
self.outdir / 'Makefile',
force=True,
)
except OSError as err:
logger.warning(__('error writing file Makefile: %s'), err)
def default_texinfo_documents(
config: Config,
) -> list[tuple[str, str, str, str, str, str, str]]:
"""Better default texinfo_documents settings."""
filename = make_filename_from_project(config.project)
return [
(
config.root_doc,
filename,
config.project,
config.author,
filename,
'One line description of project',
'Miscellaneous',
)
]
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_builder(TexinfoBuilder)
app.add_config_value(
'texinfo_documents',
default_texinfo_documents,
'',
types=frozenset({list, tuple}),
)
app.add_config_value('texinfo_appendices', [], '', types=frozenset({list, tuple}))
app.add_config_value('texinfo_elements', {}, '', types=frozenset({dict}))
app.add_config_value(
'texinfo_domain_indices',
True,
'',
types=frozenset({frozenset, list, set, tuple}),
)
app.add_config_value('texinfo_show_urls', 'footnote', '', types=frozenset({str}))
app.add_config_value('texinfo_no_detailmenu', False, '', types=frozenset({bool}))
app.add_config_value('texinfo_cross_references', True, '', types=frozenset({bool}))
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
TexinfoBuilder
|
python
|
xlwings__xlwings
|
xlwings/_xlmac.py
|
{
"start": 44690,
"end": 45353
}
|
class ____(base_classes.Collection):
def __init__(self, parent):
self._parent = parent
self.xl = getattr(self.parent.xl, self._attr)
@property
def parent(self):
return self._parent
@property
def api(self):
return self.xl
def __call__(self, key):
if not self.xl[key].exists():
raise KeyError(key)
return self._wrap(self.parent, key)
def __len__(self):
return self.parent.xl.count(each=self._kw)
def __iter__(self):
for i in range(len(self)):
yield self(i + 1)
def __contains__(self, key):
return self.xl[key].exists()
|
Collection
|
python
|
plotly__plotly.py
|
plotly/graph_objs/volume/colorbar/_title.py
|
{
"start": 233,
"end": 3964
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume.colorbar"
_path_str = "volume.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.volume.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.volume.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.volume.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Title
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/api/test_variables.py
|
{
"start": 1409,
"end": 5836
}
|
class ____:
async def test_create_variable(
self,
client: AsyncClient,
):
variable = VariableCreate(
name="my_variable", value="my-value", tags=["123", "456"]
)
res = await client.post(
"/variables/",
json=variable.model_dump(mode="json"),
)
assert res
assert res.status_code == 201
res = res.json()
assert res["id"]
assert res["created"]
assert res["updated"]
assert res["name"] == variable.name
assert res["value"] == variable.value
assert res["tags"] == variable.tags
@pytest.mark.parametrize(
"value",
[
"string-value",
'"string-value"',
123,
12.3,
True,
False,
None,
{"key": "value"},
["value1", "value2"],
{"key": ["value1", "value2"]},
],
)
async def test_create_variable_json_types(
self,
client: AsyncClient,
value: Any,
):
response = await client.post(
"/variables/",
json={"name": "my_variable", "value": value},
)
assert response
assert response.status_code == 201
res = response.json()
assert res["id"]
assert res["created"]
assert res["updated"]
assert res["value"] == value
@pytest.mark.parametrize("variable_name", ["my-variable", "my_variable"])
async def test_variable_name_may_contain_dashes_or_underscores(
self,
client: AsyncClient,
variable_name: str,
):
response = await client.post(
"/variables/",
json={"name": variable_name, "value": "my-value"},
)
assert response
assert response.status_code == 201
res = response.json()
assert res["id"]
assert res["created"]
assert res["updated"]
assert res["name"] == variable_name
assert res["value"] == "my-value"
assert res["tags"] == []
@pytest.mark.parametrize("variable_name", ["MY_VARIABLE", "my variable", "!@#$%"])
async def test_name_constraints(
self,
client: AsyncClient,
variable_name: str,
):
res = await client.post(
"/variables/",
json={"name": variable_name, "value": "my-value"},
)
assert res
assert res.status_code == 422
assert (
"Variable name must only contain lowercase letters, numbers, and dashes or underscores."
in res.json()["exception_detail"][0]["msg"]
)
async def test_name_unique(
self,
client: AsyncClient,
variable,
):
same_name_variable = VariableCreate(name=variable.name, value="other-value")
res = await client.post(
"/variables/",
json=same_name_variable.model_dump(mode="json"),
)
assert res
assert res.status_code == 409
async def test_name_max_length(
self,
client: AsyncClient,
):
max_length = 255
res = await client.post(
"/variables/",
json={"name": "v" * max_length, "value": "value"},
)
assert res
assert res.status_code == 201
max_length_plus1 = max_length + 1
res = await client.post(
"/variables/",
json={"name": "v" * max_length_plus1, "value": "value"},
)
assert res
assert res.status_code == 422
assert "Value should have at most" in res.json()["exception_detail"][0]["msg"]
async def test_value_max_length(
self,
client: AsyncClient,
):
max_length = MAX_VARIABLE_VALUE_LENGTH - 2 # 2 characters for quotes
res = await client.post(
"/variables/",
json={"name": "name", "value": "v" * max_length},
)
assert res
assert res.status_code == 201
max_length_plus1 = max_length + 1
res = await client.post(
"/variables/",
json={"name": "name", "value": "v" * max_length_plus1},
)
assert res
assert res.status_code == 422
assert (
"Variable value must be less than"
in res.json()["exception_detail"][0]["msg"]
)
|
TestCreateVariable
|
python
|
coleifer__peewee
|
tests/mysql_ext.py
|
{
"start": 2842,
"end": 3668
}
|
class ____(ModelTestCase):
requires = [KJ]
def test_mysql_json_field(self):
values = (
0, 1.0, 2.3,
True, False,
'string',
['foo', 'bar', 'baz'],
{'k1': 'v1', 'k2': 'v2'},
{'k3': [0, 1.0, 2.3], 'k4': {'x1': 'y1', 'x2': 'y2'}})
for i, value in enumerate(values):
# Verify data can be written.
kj = KJ.create(key='k%s' % i, data=value)
# Verify value is deserialized correctly.
kj_db = KJ['k%s' % i]
self.assertEqual(kj_db.data, value)
kj = KJ.select().where(KJ.data.extract('$.k1') == 'v1').get()
self.assertEqual(kj.key, 'k7')
with self.assertRaises(IntegrityError):
KJ.create(key='kx', data=None)
@requires_mysql
|
TestMySQLJSONField
|
python
|
lazyprogrammer__machine_learning_examples
|
tensorflow/input_data.py
|
{
"start": 2478,
"end": 5870
}
|
class ____(object):
def __init__(self, images, labels, fake_data=False):
if fake_data:
self._num_examples = 10000
else:
assert images.shape[0] == labels.shape[0], (
"images.shape: %s labels.shape: %s" % (images.shape,
labels.shape))
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
assert images.shape[3] == 1
images = images.reshape(images.shape[0],
images.shape[1] * images.shape[2])
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(numpy.float32)
images = numpy.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1.0 for _ in xrange(784)]
fake_label = 0
return [fake_image for _ in xrange(batch_size)], [
fake_label for _ in xrange(batch_size)]
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._images = self._images[perm]
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False):
class DataSets(object):
pass
data_sets = DataSets()
if fake_data:
data_sets.train = DataSet([], [], fake_data=True)
data_sets.validation = DataSet([], [], fake_data=True)
data_sets.test = DataSet([], [], fake_data=True)
return data_sets
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
VALIDATION_SIZE = 5000
local_file = maybe_download(TRAIN_IMAGES, train_dir)
train_images = extract_images(local_file)
local_file = maybe_download(TRAIN_LABELS, train_dir)
train_labels = extract_labels(local_file, one_hot=one_hot)
local_file = maybe_download(TEST_IMAGES, train_dir)
test_images = extract_images(local_file)
local_file = maybe_download(TEST_LABELS, train_dir)
test_labels = extract_labels(local_file, one_hot=one_hot)
validation_images = train_images[:VALIDATION_SIZE]
validation_labels = train_labels[:VALIDATION_SIZE]
train_images = train_images[VALIDATION_SIZE:]
train_labels = train_labels[VALIDATION_SIZE:]
data_sets.train = DataSet(train_images, train_labels)
data_sets.validation = DataSet(validation_images, validation_labels)
data_sets.test = DataSet(test_images, test_labels)
return data_sets
|
DataSet
|
python
|
PrefectHQ__prefect
|
src/prefect/futures.py
|
{
"start": 7994,
"end": 12360
}
|
class ____(PrefectTaskRunFuture[R]):
"""
Represents the result of a computation happening anywhere.
This class is typically used to interact with the result of a task run
scheduled to run in a Prefect task worker but can be used to interact with
any task run scheduled in Prefect's API.
"""
done_callbacks: list[Callable[[PrefectFuture[R]], None]] = []
waiter = None
def wait(self, timeout: float | None = None) -> None:
return run_coro_as_sync(self.wait_async(timeout=timeout))
async def wait_async(self, timeout: float | None = None) -> None:
if self._final_state:
logger.debug(
"Final state already set for %s. Returning...", self.task_run_id
)
return
# Ask for the instance of TaskRunWaiter _now_ so that it's already running and
# can catch the completion event if it happens before we start listening for it.
TaskRunWaiter.instance()
# Read task run to see if it is still running
async with get_client() as client:
task_run = await client.read_task_run(task_run_id=self._task_run_id)
if task_run.state is None:
raise RuntimeError(
f"Task run {self.task_run_id} has no state which means it hasn't started yet."
)
if task_run.state.is_final():
logger.debug(
"Task run %s already finished. Returning...",
self.task_run_id,
)
self._final_state = task_run.state
return
# If still running, wait for a completed event from the server
logger.debug(
"Waiting for completed event for task run %s...",
self.task_run_id,
)
state_from_event = await TaskRunWaiter.wait_for_task_run(
self._task_run_id, timeout=timeout
)
if state_from_event:
# We got the final state directly from the event
self._final_state = state_from_event
logger.debug(
"Task run %s completed with state from event: %s",
self.task_run_id,
state_from_event.type,
)
return
def result(
self,
timeout: float | None = None,
raise_on_failure: bool = True,
) -> R:
return run_coro_as_sync(
self.result_async(timeout=timeout, raise_on_failure=raise_on_failure)
)
async def result_async(
self,
timeout: float | None = None,
raise_on_failure: bool = True,
) -> R:
if not self._final_state:
await self.wait_async(timeout=timeout)
if not self._final_state:
# If still no final state after wait, try reading it once more.
# This should rarely happen since wait_async() now gets state from events.
async with get_client() as client:
task_run = await client.read_task_run(task_run_id=self._task_run_id)
if task_run.state and task_run.state.is_final():
self._final_state = task_run.state
else:
raise TimeoutError(
f"Task run {self.task_run_id} did not complete within {timeout} seconds"
)
return await self._final_state.aresult(raise_on_failure=raise_on_failure)
def add_done_callback(self, fn: Callable[[PrefectFuture[R]], None]) -> None:
if self._final_state:
fn(self)
return
TaskRunWaiter.instance()
with get_client(sync_client=True) as client:
task_run = client.read_task_run(task_run_id=self._task_run_id)
if task_run.state.is_final():
self._final_state = task_run.state
fn(self)
return
TaskRunWaiter.add_done_callback(self._task_run_id, partial(fn, self))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, PrefectDistributedFuture):
return False
return self.task_run_id == other.task_run_id
def __hash__(self) -> int:
return hash(self.task_run_id)
|
PrefectDistributedFuture
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py
|
{
"start": 33673,
"end": 37614
}
|
class ____(GoogleCloudBaseOperator):
"""
Updates the metadata and configuration of a specific Redis instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreScaleInstanceOperator`
:param memory_size_gb: Redis memory size in GiB.
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
:param instance_id: The logical name of the Redis instance in the customer project.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"memory_size_gb",
"location",
"instance_id",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceDetailsLink(),)
def __init__(
self,
*,
memory_size_gb: int,
location: str | None = None,
instance_id: str | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.memory_size_gb = memory_size_gb
self.location = location
self.instance_id = instance_id
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
res = hook.update_instance(
update_mask={"paths": ["memory_size_gb"]},
instance={"memory_size_gb": self.memory_size_gb},
location=self.location,
instance_id=self.instance_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
# projects/PROJECT_NAME/locations/LOCATION/instances/INSTANCE
location_id, instance_id = res.name.split("/")[-3::2]
RedisInstanceDetailsLink.persist(
context=context,
instance_id=self.instance_id or instance_id,
location_id=self.location or location_id,
project_id=self.project_id or hook.project_id,
)
|
CloudMemorystoreScaleInstanceOperator
|
python
|
PrefectHQ__prefect
|
tests/server/api/test_logs.py
|
{
"start": 5968,
"end": 7589
}
|
class ____:
"""Test the API endpoint converts LogCreate to Log objects for messaging"""
async def test_post_logs_api_converts_logcreate_to_log_for_messaging(
self, client, flow_run_id
):
"""Test posting LogCreate to API results in Log objects passed to publish_logs"""
log_create_data = LogCreate(
name="test.api.logger",
level=20,
message="API test message",
timestamp=NOW,
flow_run_id=flow_run_id,
).model_dump(mode="json")
# Mock publish_logs to capture what gets passed to messaging
with patch("prefect.server.logs.messaging.publish_logs") as mock_publish:
response = await client.post(CREATE_LOGS_URL, json=[log_create_data])
# API should succeed
assert response.status_code == 201
# Verify publish_logs was called with Log objects that have IDs
mock_publish.assert_called_once()
published_logs = mock_publish.call_args[0][0]
assert len(published_logs) == 1
published_log = published_logs[0]
# This was the key issue: API accepts LogCreate but messaging needs Log with ID
assert isinstance(published_log, Log)
assert published_log.id is not None
# Core fields should match the original API input
assert published_log.name == "test.api.logger"
assert published_log.level == 20
assert published_log.message == "API test message"
assert published_log.flow_run_id == flow_run_id
|
TestLogSchemaConversionAPI
|
python
|
aio-libs__aiohttp
|
aiohttp/resolver.py
|
{
"start": 2607,
"end": 6044
}
|
class ____(AbstractResolver):
"""Use the `aiodns` package to make asynchronous DNS lookups"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
if aiodns is None:
raise RuntimeError("Resolver requires aiodns library")
self._loop = asyncio.get_running_loop()
self._manager: _DNSResolverManager | None = None
# If custom args are provided, create a dedicated resolver instance
# This means each AsyncResolver with custom args gets its own
# aiodns.DNSResolver instance
if args or kwargs:
self._resolver = aiodns.DNSResolver(*args, **kwargs)
return
# Use the shared resolver from the manager for default arguments
self._manager = _DNSResolverManager()
self._resolver = self._manager.get_resolver(self, self._loop)
async def resolve(
self, host: str, port: int = 0, family: socket.AddressFamily = socket.AF_INET
) -> list[ResolveResult]:
try:
resp = await self._resolver.getaddrinfo(
host,
port=port,
type=socket.SOCK_STREAM,
family=family,
flags=_AI_ADDRCONFIG,
)
except aiodns.error.DNSError as exc:
msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
raise OSError(None, msg) from exc
hosts: list[ResolveResult] = []
for node in resp.nodes:
address: tuple[bytes, int] | tuple[bytes, int, int, int] = node.addr
family = node.family
if family == socket.AF_INET6:
if len(address) > 3 and address[3]:
# This is essential for link-local IPv6 addresses.
# LL IPv6 is a VERY rare case. Strictly speaking, we should use
# getnameinfo() unconditionally, but performance makes sense.
result = await self._resolver.getnameinfo(
(address[0].decode("ascii"), *address[1:]),
_NAME_SOCKET_FLAGS,
)
resolved_host = result.node
else:
resolved_host = address[0].decode("ascii")
port = address[1]
else: # IPv4
assert family == socket.AF_INET
resolved_host = address[0].decode("ascii")
port = address[1]
hosts.append(
ResolveResult(
hostname=host,
host=resolved_host,
port=port,
family=family,
proto=0,
flags=_NUMERIC_SOCKET_FLAGS,
)
)
if not hosts:
raise OSError(None, "DNS lookup failed")
return hosts
async def close(self) -> None:
if self._manager:
# Release the resolver from the manager if using the shared resolver
self._manager.release_resolver(self, self._loop)
self._manager = None # Clear reference to manager
self._resolver = None # type: ignore[assignment] # Clear reference to resolver
return
# Otherwise cancel our dedicated resolver
if self._resolver is not None:
self._resolver.cancel()
self._resolver = None # type: ignore[assignment] # Clear reference
|
AsyncResolver
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/contrib/appengine.py
|
{
"start": 2264,
"end": 2316
}
|
class ____(HTTPError):
pass
|
AppEnginePlatformError
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_hyperlink02.py
|
{
"start": 315,
"end": 1168
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("hyperlink02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with hyperlinks."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url("A1", "http://www.perl.org/")
worksheet.write_url("D4", "http://www.perl.org/")
worksheet.write_url("A8", "http://www.perl.org/")
worksheet.write_url("B6", "http://www.cpan.org/")
worksheet.write_url("F12", "http://www.cpan.org/")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
openai__openai-python
|
src/openai/types/beta/chatkit/chat_session_expires_after_param.py
|
{
"start": 228,
"end": 519
}
|
class ____(TypedDict, total=False):
anchor: Required[Literal["created_at"]]
"""Base timestamp used to calculate expiration. Currently fixed to `created_at`."""
seconds: Required[int]
"""Number of seconds after the anchor when the session expires."""
|
ChatSessionExpiresAfterParam
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-special-path-ii.py
|
{
"start": 2105,
"end": 3270
}
|
class ____(object):
def longestSpecialPath(self, edges, nums):
"""
:type edges: List[List[int]]
:type nums: List[int]
:rtype: List[int]
"""
def dfs(u, p, d, left):
prev_d, lookup[nums[u]-1] = lookup[nums[u]-1], d
new_left = left[:]
curr = prev_d
for i in xrange(len(new_left)):
if curr > new_left[i]:
curr, new_left[i] = new_left[i], curr
result[0] = min(result[0], [-(prefix[(d-1)+1]-prefix[new_left[1]+1]), d-new_left[1]])
for v, l in adj[u]:
if v == p:
continue
prefix.append(prefix[-1]+l)
dfs(v, u, d+1, new_left)
prefix.pop()
lookup[nums[u]-1] = prev_d
adj = [[] for _ in xrange(len(nums))]
for u, v, l in edges:
adj[u].append((v, l))
adj[v].append((u, l))
lookup = collections.defaultdict(lambda: -1)
prefix = [0]
result = [[float("inf"), float("inf")]]
dfs(0, -1, 0, [-1]*2)
return [-result[0][0], result[0][1]]
|
Solution2
|
python
|
django-import-export__django-import-export
|
import_export/widgets.py
|
{
"start": 16542,
"end": 18040
}
|
class ____(Widget):
"""
Widget for a JSON object
(especially required for jsonb fields in PostgreSQL database.)
:param value: Defaults to JSON format.
The widget covers two cases: Proper JSON string with double quotes, else it
tries to use single quotes and then convert it to proper JSON.
"""
def clean(self, value, row=None, **kwargs):
"""
Parses the input value as JSON and returns the corresponding Python object.
Attempts to parse as valid JSON first, then falls back to single-quote format
by converting single quotes to double quotes before parsing.
:param value: A JSON string to be parsed.
:param row: The current row being processed.
:param **kwargs: Optional keyword arguments.
:returns: The parsed Python object (dict, list, etc.) or None if value is empty.
:raises JSONDecodeError: If the value cannot be parsed as JSON.
"""
val = super().clean(value)
if val:
try:
return json.loads(val)
except json.decoder.JSONDecodeError:
return json.loads(val.replace("'", '"'))
def render(self, value, obj=None, **kwargs):
"""
:return: A JSON formatted string derived from ``value``.
``coerce_to_string`` has no effect on the return value.
"""
self._obj_deprecation_warning(obj)
if value:
return json.dumps(value)
return None
|
JSONWidget
|
python
|
huggingface__transformers
|
src/transformers/models/dinov3_vit/modeling_dinov3_vit.py
|
{
"start": 10823,
"end": 13418
}
|
class ____(nn.Module):
"""
Multi-headed attention compatible with ALL_ATTENTION_FUNCTIONS.
"""
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.is_causal = False
self.scaling = self.head_dim**-0.5
self.is_causal = False
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.key_bias)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.value_bias)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.query_bias)
self.o_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.proj_bias)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, patches, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, patches, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
|
DINOv3ViTAttention
|
python
|
sympy__sympy
|
sympy/physics/quantum/fermion.py
|
{
"start": 3580,
"end": 4552
}
|
class ____(Ket):
"""Fock state ket for a fermionic mode.
Parameters
==========
n : Number
The Fock state number.
"""
def __new__(cls, n):
if n not in (0, 1):
raise ValueError("n must be 0 or 1")
return Ket.__new__(cls, n)
@property
def n(self):
return self.label[0]
@classmethod
def dual_class(self):
return FermionFockBra
@classmethod
def _eval_hilbert_space(cls, label):
return HilbertSpace()
def _eval_innerproduct_FermionFockBra(self, bra, **hints):
return KroneckerDelta(self.n, bra.n)
def _apply_from_right_to_FermionOp(self, op, **options):
if op.is_annihilation:
if self.n == 1:
return FermionFockKet(0)
else:
return S.Zero
else:
if self.n == 0:
return FermionFockKet(1)
else:
return S.Zero
|
FermionFockKet
|
python
|
neetcode-gh__leetcode
|
python/0516-longest-palindromic-subsequence.py
|
{
"start": 55,
"end": 1347
}
|
class ____:
def longestPalindromeSubseq(self, s: str) -> int:
# Dynamic Programming
dp = [ [0] * (len(s) + 1) for i in range(len(s) + 1)]
res = 0
for i in range(len(s)):
for j in range(len(s) - 1, i - 1, -1):
if s[i] == s[j]:
dp[i][j] = 1 if i == j else 2
if i - 1 >= 0:
dp[i][j] += dp[i - 1][j + 1]
else:
dp[i][j] = dp[i][j + 1]
if i - 1 >= 0:
dp[i][j] = max(dp[i][j], dp[i - 1][j])
res = max(res, dp[i][j])
return res
# Memoization
cache = {}
def dfs(i, j):
if i < 0 or j == len(s):
return 0
if (i, j) in cache:
return cache[(i, j)]
if s[i] == s[j]:
length = 1 if i == j else 2
cache[(i, j)] = length + dfs(i - 1, j + 1)
else:
cache[(i, j)] = max(dfs(i - 1, j), dfs(i, j + 1))
return cache[(i, j)]
for i in range(len(s)):
dfs(i, i) # odd length
dfs(i, i + 1) # even length
return max(cache.values())
# LCS Solution
|
Solution
|
python
|
ApeWorX__ape
|
src/ape/plugins/_utils.py
|
{
"start": 20724,
"end": 24077
}
|
class ____(BaseModel):
"""
A group of plugin metadata by type.
"""
plugin_type: PluginType
plugins: dict[str, PluginMetadata] = {}
def __bool__(self) -> bool:
return len(self.plugins) > 0
@log_instead_of_fail(default="<PluginGroup>")
def __repr__(self) -> str:
return f"<{self.name} Plugins Group>"
def __str__(self) -> str:
return self.to_str()
def __iter__(self) -> Iterator[str]: # type: ignore
yield from self.plugins
@field_validator("plugin_type", mode="before")
@classmethod
def validate_plugin_type(cls, value):
return PluginType(value) if isinstance(value, str) else value
@property
def plugin_type_str(self) -> str:
return getattr(self.plugin_type, "value", str(self.plugin_type))
@property
def name(self) -> str:
return self.plugin_type_str.capitalize()
@property
def plugin_names(self) -> list[str]:
return [x.name for x in self.plugins.values()]
def to_str(
self,
max_length: Optional[int] = None,
include_version: bool = True,
output_format: Optional[OutputFormat] = OutputFormat.DEFAULT,
) -> str:
output_format = output_format or OutputFormat.DEFAULT
if output_format in (OutputFormat.DEFAULT, OutputFormat.PREFIXED):
return self._get_default_formatted_str(
max_length=max_length,
include_version=include_version,
include_prefix=f"{output_format}" == OutputFormat.PREFIXED,
)
# Freeze format.
return self._get_freeze_formatted_str(
max_length=max_length, include_version=include_version
)
def _get_default_formatted_str(
self,
max_length: Optional[int] = None,
include_version: bool = True,
include_prefix: bool = False,
) -> str:
title = f"{self.name} Plugins"
if len(self.plugins) <= 0:
return title
lines = [title]
max_length = self.max_name_length if max_length is None else max_length
plugins_sorted = sorted(self.plugins.values(), key=lambda p: p.name)
for plugin in plugins_sorted:
line = plugin.package_name if include_prefix else plugin.name
if include_version:
version = plugin.version or get_package_version(plugin.package_name)
if version:
spacing = (max_length - len(line)) + 4
line = f"{line}{spacing * ' '}{version}"
lines.append(f" {line}") # Indent.
return "\n".join(lines)
def _get_freeze_formatted_str(
self,
max_length: Optional[int] = None,
include_version: bool = True,
include_prefix: bool = False,
) -> str:
lines = []
for plugin in sorted(self.plugins.values(), key=lambda p: p.name):
line = plugin.package_name
if include_version:
version = plugin.version or get_package_version(plugin.package_name)
line = f"{line}=={version}"
lines.append(line)
return "\n".join(lines)
@property
def max_name_length(self) -> int:
if not self.plugins:
return 0
return max(len(x) for x in self.plugin_names)
|
PluginGroup
|
python
|
scipy__scipy
|
benchmarks/benchmarks/go_benchmark_functions/go_funcs_B.py
|
{
"start": 103,
"end": 1194
}
|
class ____(Benchmark):
r"""
Bartels-Conn objective function.
The BartelsConn [1]_ global optimization problem is a multimodal
minimization problem defined as follows:
.. math::
f_{\text{BartelsConn}}(x) = \lvert {x_1^2 + x_2^2 + x_1x_2} \rvert +
\lvert {\sin(x_1)} \rvert + \lvert {\cos(x_2)} \rvert
with :math:`x_i \in [-500, 500]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 1` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-500.] * self.N, [500.] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 1.0
def fun(self, x, *args):
self.nfev += 1
return (abs(x[0] ** 2.0 + x[1] ** 2.0 + x[0] * x[1]) + abs(sin(x[0]))
+ abs(cos(x[1])))
|
BartelsConn
|
python
|
cython__cython
|
Cython/Compiler/ParseTreeTransforms.py
|
{
"start": 83832,
"end": 85631
}
|
class ____(CythonTransform, SkipDeclarations):
"""
Only part of the CythonUtilityCode pipeline. Must be run before
DecoratorTransform in case this is a decorator for a cdef class.
It filters out @cname('my_cname') decorators and rewrites them to
CnameDecoratorNodes.
"""
def handle_function(self, node):
if not getattr(node, 'decorators', None):
return self.visit_Node(node)
for i, decorator in enumerate(node.decorators):
decorator = decorator.decorator
if (isinstance(decorator, ExprNodes.CallNode) and
decorator.function.is_name and
decorator.function.name == 'cname'):
args, kwargs = decorator.explicit_args_kwds()
if kwargs:
raise AssertionError(
"cname decorator does not take keyword arguments")
if len(args) != 1:
raise AssertionError(
"cname decorator takes exactly one argument")
if not (args[0].is_literal and args[0].type is Builtin.unicode_type):
raise AssertionError(
"argument to cname decorator must be a string literal")
cname = args[0].compile_time_value(None)
del node.decorators[i]
node = Nodes.CnameDecoratorNode(pos=node.pos, node=node,
cname=cname)
break
return self.visit_Node(node)
visit_FuncDefNode = handle_function
visit_CClassDefNode = handle_function
visit_CEnumDefNode = handle_function
visit_CStructOrUnionDefNode = handle_function
visit_CVarDefNode = handle_function
|
CnameDirectivesTransform
|
python
|
pypa__hatch
|
tests/project/test_core.py
|
{
"start": 3777,
"end": 4604
}
|
class ____:
def test_missing(self, temp_dir):
project = Project(temp_dir)
project.find_project_root()
assert project.raw_config == {"project": {"name": temp_dir.name}}
def test_exists(self, temp_dir):
project_file = temp_dir / "pyproject.toml"
project_file.touch()
project = Project(temp_dir)
project.find_project_root()
config = {"project": {"name": "foo"}, "bar": "baz"}
project.save_config(config)
assert project.raw_config == config
def test_exists_without_project_table(self, temp_dir):
project_file = temp_dir / "pyproject.toml"
project_file.touch()
project = Project(temp_dir)
project.find_project_root()
assert project.raw_config == {"project": {"name": temp_dir.name}}
|
TestRawConfig
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/ext/mutable.py
|
{
"start": 12588,
"end": 14770
}
|
class ____ a ``weakref.WeakKeyDictionary`` available via the
:meth:`MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
@dataclasses.dataclass
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`MutableBase._parents` collection is restored to all ``Point`` objects.
""" # noqa: E501
from __future__ import annotations
from collections import defaultdict
from typing import AbstractSet
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import overload
from typing import Set
from typing import SupportsIndex
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import weakref
from weakref import WeakKeyDictionary
from .. import event
from .. import inspect
from .. import types
from ..orm import Mapper
from ..orm._typing import _ExternalEntityType
from ..orm._typing import _O
from ..orm._typing import _T
from ..orm.attributes import AttributeEventToken
from ..orm.attributes import flag_modified
from ..orm.attributes import InstrumentedAttribute
from ..orm.attributes import QueryableAttribute
from ..orm.context import QueryContext
from ..orm.decl_api import DeclarativeAttributeIntercept
from ..orm.state import InstanceState
from ..orm.unitofwork import UOWTransaction
from ..sql._typing import _TypeEngineArgument
from ..sql.base import SchemaEventTarget
from ..sql.schema import Column
from ..sql.type_api import TypeEngine
from ..util import memoized_property
_KT = TypeVar("_KT") # Key type.
_VT = TypeVar("_VT") # Value type.
|
uses
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/ops/readers.py
|
{
"start": 9908,
"end": 10694
}
|
class ____(dataset_ops.DatasetV1Adapter):
"""A `Dataset` comprising lines from one or more text files."""
def __init__(self,
filenames,
compression_type=None,
buffer_size=None,
num_parallel_reads=None,
name=None):
wrapped = TextLineDatasetV2(filenames, compression_type, buffer_size,
num_parallel_reads, name)
super(TextLineDatasetV1, self).__init__(wrapped)
__init__.__doc__ = TextLineDatasetV2.__init__.__doc__
@property
def _filenames(self):
return self._dataset._filenames # pylint: disable=protected-access
@_filenames.setter
def _filenames(self, value):
self._dataset._filenames = value # pylint: disable=protected-access
|
TextLineDatasetV1
|
python
|
apache__airflow
|
airflow-core/tests/unit/listeners/xcom_listener.py
|
{
"start": 864,
"end": 1634
}
|
class ____:
def __init__(self, path: str, task_id: str):
self.path = path
self.task_id = task_id
def write(self, line: str):
with open(self.path, "a") as f:
f.write(line + "\n")
@hookimpl
def on_task_instance_running(self, previous_state, task_instance):
task_instance.xcom_push(key="listener", value="listener")
task_instance.xcom_pull(task_ids=task_instance.task_id, key="listener")
self.write("on_task_instance_running")
@hookimpl
def on_task_instance_success(self, previous_state, task_instance):
read = task_instance.xcom_pull(task_ids=self.task_id, key="listener")
self.write("on_task_instance_success")
self.write(read)
def clear():
pass
|
XComListener
|
python
|
viewflow__viewflow
|
viewflow/workflow/flow/views/create.py
|
{
"start": 275,
"end": 1238
}
|
class ____(
FormLayoutMixin,
FormAjaxCompleteMixin,
FormDependentSelectMixin,
mixins.SuccessMessageMixin,
mixins.TaskSuccessUrlMixin,
mixins.TaskViewTemplateNames,
generic.UpdateView,
):
"""Default view to start a process"""
success_message = _("Process {process} has been started.")
template_filename = "start.html"
def get_object(self):
"""Return the process for the task activation."""
return self.request.activation.process
def form_valid(self, form):
"""If the form is valid, save the associated model and finish the task."""
self.object = form.save()
if "seed" in form.cleaned_data:
self.object.seed = form.cleaned_data["seed"]
if "artifact" in form.cleaned_data:
self.object.artifact = form.cleaned_data["artifact"]
self.request.activation.execute()
return HttpResponseRedirect(self.get_success_url())
|
CreateProcessView
|
python
|
readthedocs__readthedocs.org
|
readthedocs/rtd_tests/tests/test_api.py
|
{
"start": 28092,
"end": 68370
}
|
class ____(TestCase):
fixtures = ["eric.json", "test_data.json"]
def test_create_key_for_project_with_long_slug(self):
user = get(User)
project = get(Project, users=[user], slug="a" * 60)
build_api_key_obj, build_api_key = BuildAPIKey.objects.create_key(project)
self.assertTrue(BuildAPIKey.objects.is_valid(build_api_key))
self.assertEqual(build_api_key_obj.name, "a" * 50)
def test_revoke_build_api_key(self):
user = get(User)
project = get(Project, users=[user])
_, build_api_key = BuildAPIKey.objects.create_key(project)
client = APIClient()
revoke_url = "/api/v2/revoke/"
self.assertTrue(BuildAPIKey.objects.is_valid(build_api_key))
# Anonymous request.
client.logout()
resp = client.post(revoke_url)
self.assertEqual(resp.status_code, 403)
self.assertTrue(BuildAPIKey.objects.is_valid(build_api_key))
# Using user/password.
client.force_login(user)
resp = client.post(revoke_url)
self.assertEqual(resp.status_code, 403)
self.assertTrue(BuildAPIKey.objects.is_valid(build_api_key))
client.logout()
resp = client.post(revoke_url, HTTP_AUTHORIZATION=f"Token {build_api_key}")
self.assertEqual(resp.status_code, 204)
self.assertFalse(BuildAPIKey.objects.is_valid(build_api_key))
@override_settings(BUILD_TIME_LIMIT=600)
def test_expiricy_key(self):
project = get(Project)
build_api_key_obj, build_api_key = BuildAPIKey.objects.create_key(project)
expected = (build_api_key_obj.expiry_date - timezone.now()).seconds
self.assertAlmostEqual(expected, 86400, delta=5)
# Project with a custom containe time limit
project.container_time_limit = 1200
project.save()
build_api_key_obj, build_api_key = BuildAPIKey.objects.create_key(project)
expected = (build_api_key_obj.expiry_date - timezone.now()).seconds
self.assertAlmostEqual(expected, 86400, delta=5)
def test_user_doesnt_get_full_api_return(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project = get(
Project,
main_language_project=None,
readthedocs_yaml_path="bar",
)
client = APIClient()
for user in [user_normal, user_admin]:
client.force_authenticate(user=user)
resp = client.get("/api/v2/project/%s/" % (project.pk))
self.assertEqual(resp.status_code, 200)
self.assertNotIn("readthedocs_yaml_path", resp.data)
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
resp = client.get("/api/v2/project/%s/" % (project.pk))
self.assertEqual(resp.status_code, 200)
self.assertIn("readthedocs_yaml_path", resp.data)
self.assertEqual(resp.data["readthedocs_yaml_path"], "bar")
def test_project_read_only_endpoints_for_normal_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_normal)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/project/")
self.assertEqual(resp.status_code, 410)
# We don't allow creating projects.
resp = client.post("/api/v2/project/")
self.assertEqual(resp.status_code, 403)
projects = [
project_a,
project_b,
project_c,
]
for project in projects:
resp = client.get(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 200)
resp = client.delete(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 403)
resp = client.patch(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 403)
def test_project_read_and_write_endpoints_for_staff_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_admin)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/project/")
self.assertEqual(resp.status_code, 410)
# We don't allow creating projects.
resp = client.post("/api/v2/project/")
self.assertEqual(resp.status_code, 403)
projects = [
project_a,
project_b,
project_c,
]
for project in projects:
resp = client.get(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting projects.
resp = client.delete(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 403)
# We don't allow users to update projects.
resp = client.patch(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 403)
def test_project_read_and_write_endpoints_for_build_api_token(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project_a)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/project/")
self.assertEqual(resp.status_code, 410)
# We don't allow creating projects.
resp = client.post("/api/v2/project/")
self.assertEqual(resp.status_code, 405)
# The key grants access to project_a only.
resp = client.get(f"/api/v2/project/{project_a.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting projects.
resp = client.delete(f"/api/v2/project/{project_a.pk}/")
self.assertEqual(resp.status_code, 405)
# Update is fine.
resp = client.patch(f"/api/v2/project/{project_a.pk}/")
self.assertEqual(resp.status_code, 200)
disallowed_projects = [
project_b,
project_c,
]
for project in disallowed_projects:
resp = client.get(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 404)
resp = client.delete(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 405)
resp = client.patch(f"/api/v2/project/{project.pk}/")
self.assertEqual(resp.status_code, 404)
def test_build_read_only_endpoints_for_normal_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_normal)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/build/")
self.assertEqual(resp.status_code, 410)
# We don't allow creating builds for normal users.
resp = client.post("/api/v2/build/")
self.assertEqual(resp.status_code, 403)
Version.objects.all().update(privacy_level=PUBLIC)
builds = [
get(Build, project=project_a, version=project_a.versions.first()),
get(Build, project=project_b, version=project_b.versions.first()),
get(Build, project=project_c, version=project_c.versions.first()),
]
for build in builds:
resp = client.get(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting builds.
resp = client.delete(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 403)
# Neither update them.
resp = client.patch(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 403)
def test_build_read_and_write_endpoints_for_staff_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_admin)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/build/")
self.assertEqual(resp.status_code, 410)
# We don't allow to create builds.
resp = client.post("/api/v2/build/")
self.assertEqual(resp.status_code, 403)
Version.objects.all().update(privacy_level=PUBLIC)
builds = [
get(Build, project=project_a, version=project_a.versions.first()),
get(Build, project=project_b, version=project_b.versions.first()),
get(Build, project=project_c, version=project_c.versions.first()),
]
for build in builds:
resp = client.get(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting builds.
resp = client.delete(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 403)
# We don't allow users to update them.
resp = client.patch(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 403)
def test_build_read_and_write_endpoints_for_build_api_token(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project_a)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/build/")
self.assertEqual(resp.status_code, 410)
# We don't allow to create builds.
resp = client.post("/api/v2/build/")
self.assertEqual(resp.status_code, 405)
Version.objects.all().update(privacy_level=PUBLIC)
# The key grants access to builds form project_a only.
build = get(Build, project=project_a, version=project_a.versions.first())
resp = client.get(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting builds.
resp = client.delete(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 405)
# Update them is fine.
resp = client.patch(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 200)
disallowed_builds = [
get(Build, project=project_b, version=project_b.versions.first()),
get(Build, project=project_c, version=project_c.versions.first()),
]
for build in disallowed_builds:
resp = client.get(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 404)
resp = client.delete(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 405)
resp = client.patch(f"/api/v2/build/{build.pk}/")
self.assertEqual(resp.status_code, 404)
def test_build_commands_duplicated_command(self):
"""Sending the same request twice should only create one BuildCommandResult."""
project = get(
Project,
language="en",
)
version = project.versions.first()
build = Build.objects.create(project=project, version=version)
self.assertEqual(BuildCommandResult.objects.count(), 0)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
now = timezone.now()
start_time = now - datetime.timedelta(seconds=5)
end_time = now
data = {
"build": build.pk,
"command": "git status",
"description": "Git status",
"exit_code": 0,
"start_time": start_time,
"end_time": end_time,
}
response = client.post(
"/api/v2/command/",
data,
format="json",
)
self.assertEqual(response.status_code, 201)
response = client.post(
"/api/v2/command/",
data,
format="json",
)
self.assertEqual(response.status_code, 201)
self.assertEqual(BuildCommandResult.objects.count(), 1)
def test_build_commands_read_only_endpoints_for_normal_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_normal)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/build/")
self.assertEqual(resp.status_code, 410)
# We don't allow creating commands for normal users.
resp = client.post("/api/v2/command/")
self.assertEqual(resp.status_code, 403)
Version.objects.all().update(privacy_level=PUBLIC)
builds = [
get(Build, project=project_a, version=project_a.versions.first()),
get(Build, project=project_b, version=project_b.versions.first()),
get(Build, project=project_c, version=project_c.versions.first()),
]
build_commands = [get(BuildCommandResult, build=build) for build in builds]
for command in build_commands:
resp = client.get(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting builds.
resp = client.delete(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 403)
# Neither update them.
resp = client.patch(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 403)
def test_build_commands_read_and_write_endpoints_for_staff_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_admin)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/command/")
self.assertEqual(resp.status_code, 410)
Version.objects.all().update(privacy_level=PUBLIC)
builds = [
get(Build, project=project_a, version=project_a.versions.first()),
get(Build, project=project_b, version=project_b.versions.first()),
get(Build, project=project_c, version=project_c.versions.first()),
]
build_commands = [get(BuildCommandResult, build=build) for build in builds]
# We don't allow write operations to users.
resp = client.post(
"/api/v2/command/",
{
"build": builds[0].pk,
"command": "test",
"output": "test",
"exit_code": 0,
"start_time": datetime.datetime.utcnow(),
"end_time": datetime.datetime.utcnow(),
},
)
self.assertEqual(resp.status_code, 403)
for command in build_commands:
resp = client.get(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting commands.
resp = client.delete(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 403)
# Neither updating them.
resp = client.patch(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 403)
def test_build_commands_read_and_write_endpoints_for_build_api_token(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project_a)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/command/")
self.assertEqual(resp.status_code, 410)
Version.objects.all().update(privacy_level=PUBLIC)
build = get(Build, project=project_a, version=project_a.versions.first())
command = get(BuildCommandResult, build=build)
# We allow creating build commands.
resp = client.post(
"/api/v2/command/",
{
"build": build.pk,
"command": "test",
"output": "test",
"exit_code": 0,
"start_time": datetime.datetime.utcnow(),
"end_time": datetime.datetime.utcnow(),
},
)
self.assertEqual(resp.status_code, 201)
resp = client.get(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 200)
# And updating them.
resp = client.patch(
f"/api/v2/command/{command.pk}/",
{
"command": "test2",
"exit_code": 1,
"output": "test2",
"end_time": None,
"start_time": None,
},
)
assert resp.status_code == 200
command.refresh_from_db()
assert command.command == "test2"
assert command.exit_code == 1
assert command.output == "test2"
assert command.start_time is None
assert command.end_time is None
# Isn't possible to update the build the command belongs to.
another_build = get(
Build, project=project_b, version=project_b.versions.first()
)
resp = client.patch(
f"/api/v2/command/{command.pk}/",
{
"build": another_build.pk,
},
)
assert resp.status_code == 200
command.refresh_from_db()
assert command.build == build
# We don't allow deleting commands.
resp = client.delete(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 405)
disallowed_builds = [
get(Build, project=project_b, version=project_b.versions.first()),
get(Build, project=project_c, version=project_c.versions.first()),
]
disallowed_build_commands = [
get(BuildCommandResult, build=build) for build in disallowed_builds
]
for command in disallowed_build_commands:
resp = client.post(
"/api/v2/command/",
{
"build": command.build.pk,
"command": "test",
"output": "test",
"exit_code": 0,
"start_time": datetime.datetime.utcnow(),
"end_time": datetime.datetime.utcnow(),
},
)
self.assertEqual(resp.status_code, 403)
resp = client.get(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 404)
resp = client.delete(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 405)
resp = client.patch(f"/api/v2/command/{command.pk}/")
self.assertEqual(resp.status_code, 404)
def test_versions_read_only_endpoints_for_normal_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
Version.objects.all().update(privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_normal)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/version/")
self.assertEqual(resp.status_code, 410)
# We don't allow creating versions.
resp = client.post("/api/v2/version/")
self.assertEqual(resp.status_code, 403)
versions = [
project_a.versions.first(),
project_b.versions.first(),
project_c.versions.first(),
]
for version in versions:
resp = client.get(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting versions.
resp = client.delete(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 403)
# Neither update them.
resp = client.patch(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 403)
def test_versions_read_and_write_endpoints_for_staff_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
Version.objects.all().update(privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_admin)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/version/")
self.assertEqual(resp.status_code, 410)
# We don't allow to create versions.
resp = client.post("/api/v2/version/")
self.assertEqual(resp.status_code, 403)
versions = [
project_a.versions.first(),
project_b.versions.first(),
project_c.versions.first(),
]
for version in versions:
resp = client.get(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow users to delete versions.
resp = client.delete(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 403)
# We don't allow users to update versions.
resp = client.patch(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 403)
def test_versions_read_and_write_endpoints_for_build_api_token(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
Version.objects.all().update(privacy_level=PUBLIC)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project_a)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/version/")
self.assertEqual(resp.status_code, 410)
# We don't allow to create versions.
resp = client.post("/api/v2/version/")
self.assertEqual(resp.status_code, 405)
version = project_a.versions.first()
resp = client.get(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting versions.
resp = client.delete(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 405)
# Update them is fine.
resp = client.patch(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 200)
disallowed_versions = [
project_b.versions.first(),
project_c.versions.first(),
]
for version in disallowed_versions:
resp = client.get(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 404)
# We don't allow deleting versions.
resp = client.delete(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 405)
# Update them is fine.
resp = client.patch(f"/api/v2/version/{version.pk}/")
self.assertEqual(resp.status_code, 404)
def test_domains_read_only_endpoints_for_normal_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
Version.objects.all().update(privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_normal)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/domain/")
self.assertEqual(resp.status_code, 410)
# We don't allow creating domains.
resp = client.post("/api/v2/domain/")
self.assertEqual(resp.status_code, 403)
domains = [
get(Domain, project=project_a),
get(Domain, project=project_b),
get(Domain, project=project_c),
]
for domain in domains:
resp = client.get(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting domains.
resp = client.delete(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 403)
# Neither update them.
resp = client.patch(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 403)
def test_domains_read_and_write_endpoints_for_staff_user(self):
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
Version.objects.all().update(privacy_level=PUBLIC)
client = APIClient()
client.force_authenticate(user=user_admin)
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/domain/")
self.assertEqual(resp.status_code, 410)
# We don't allow to create domains.
resp = client.post("/api/v2/domain/")
self.assertEqual(resp.status_code, 403)
domains = [
get(Domain, project=project_a),
get(Domain, project=project_b),
get(Domain, project=project_c),
]
for domain in domains:
resp = client.get(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting domains.
resp = client.delete(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 403)
# Neither update them.
resp = client.patch(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 403)
def test_domains_read_and_write_endpoints_for_build_api_token(self):
# Build API tokens don't grant access to the domain endpoints.
user_normal = get(User, is_staff=False)
user_admin = get(User, is_staff=True)
project_a = get(Project, users=[user_normal], privacy_level=PUBLIC)
project_b = get(Project, users=[user_admin], privacy_level=PUBLIC)
project_c = get(Project, privacy_level=PUBLIC)
Version.objects.all().update(privacy_level=PUBLIC)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project_a)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
# List operations without a filter aren't allowed.
resp = client.get("/api/v2/domain/")
self.assertEqual(resp.status_code, 410)
# We don't allow to create domains.
resp = client.post("/api/v2/domain/")
self.assertEqual(resp.status_code, 403)
domains = [
get(Domain, project=project_a),
get(Domain, project=project_b),
get(Domain, project=project_c),
]
for domain in domains:
resp = client.get(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 200)
# We don't allow deleting domains.
resp = client.delete(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 403)
# Neither update them.
resp = client.patch(f"/api/v2/domain/{domain.pk}/")
self.assertEqual(resp.status_code, 403)
def test_project_features(self):
project = get(Project, main_language_project=None)
# One explicit, one implicit feature
feature1 = get(Feature, projects=[project])
feature2 = get(Feature, projects=[], default_true=True)
get(Feature, projects=[], default_true=False)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
resp = client.get("/api/v2/project/%s/" % (project.pk))
self.assertEqual(resp.status_code, 200)
self.assertIn("features", resp.data)
self.assertCountEqual(
resp.data["features"],
[feature1.feature_id, feature2.feature_id],
)
def test_project_features_multiple_projects(self):
project1 = get(Project, main_language_project=None)
project2 = get(Project, main_language_project=None)
feature = get(Feature, projects=[project1, project2], default_true=True)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project1)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
resp = client.get("/api/v2/project/%s/" % (project1.pk))
self.assertEqual(resp.status_code, 200)
self.assertIn("features", resp.data)
self.assertEqual(resp.data["features"], [feature.feature_id])
@mock.patch.object(GitHubAppService, "get_clone_token")
def test_project_clone_token(self, get_clone_token):
clone_token = "token:1234"
get_clone_token.return_value = clone_token
project = get(Project)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
# No remote repository, no token.
assert project.remote_repository is None
resp = client.get(f"/api/v2/project/{project.pk}/")
assert resp.status_code == 200
assert resp.data["clone_token"] == None
get_clone_token.assert_not_called()
# Project has a GitHubApp remote repository, but it's public.
github_app_installation = get(GitHubAppInstallation, installation_id=1234, target_id=1234, target_type=GitHubAccountType.USER)
remote_repository = get(RemoteRepository, vcs_provider=GitHubAppProvider.id, github_app_installation=github_app_installation, private=False)
project.remote_repository = remote_repository
project.save()
resp = client.get(f"/api/v2/project/{project.pk}/")
assert resp.status_code == 200
assert resp.data["clone_token"] == None
get_clone_token.assert_not_called()
# Project has a GitHubApp remote repository, and it's private.
remote_repository.private = True
remote_repository.save()
resp = client.get(f"/api/v2/project/{project.pk}/")
assert resp.status_code == 200
assert resp.data["clone_token"] == clone_token
get_clone_token.assert_called_once_with(project)
def test_remote_repository_pagination(self):
account = get(SocialAccount, provider="github")
user = get(User)
for _ in range(20):
repo = get(RemoteRepository)
get(
RemoteRepositoryRelation,
remote_repository=repo,
user=user,
account=account,
)
client = APIClient()
client.force_authenticate(user=user)
resp = client.get("/api/v2/remote/repo/")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data["results"]), 15) # page_size
self.assertIn("?page=2", resp.data["next"])
def test_remote_organization_pagination(self):
account = get(SocialAccount, provider="github")
user = get(User)
for _ in range(30):
org = get(RemoteOrganization)
get(
RemoteOrganizationRelation,
remote_organization=org,
user=user,
account=account,
)
client = APIClient()
client.force_authenticate(user=user)
resp = client.get("/api/v2/remote/org/")
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(resp.data["results"]), 25) # page_size
self.assertIn("?page=2", resp.data["next"])
def test_project_environment_variables(self):
project = get(Project, main_language_project=None)
get(
EnvironmentVariable,
name="TOKEN",
value="a1b2c3",
project=project,
)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
resp = client.get("/api/v2/project/%s/" % (project.pk))
self.assertEqual(resp.status_code, 200)
self.assertIn("environment_variables", resp.data)
self.assertEqual(
resp.data["environment_variables"],
{"TOKEN": dict(value="a1b2c3", public=False)},
)
def test_init_api_project(self):
project_data = {
"name": "Test Project",
"slug": "test-project",
"show_advertising": True,
}
api_project = APIProject(**project_data)
self.assertEqual(api_project.slug, "test-project")
self.assertEqual(api_project.features, [])
self.assertFalse(api_project.ad_free)
self.assertTrue(api_project.show_advertising)
self.assertEqual(api_project.environment_variables(public_only=False), {})
self.assertEqual(api_project.environment_variables(public_only=True), {})
project_data["features"] = ["test-feature"]
project_data["show_advertising"] = False
project_data["environment_variables"] = {
"TOKEN": dict(value="a1b2c3", public=False),
"RELEASE": dict(value="prod", public=True),
}
api_project = APIProject(**project_data)
self.assertEqual(api_project.features, ["test-feature"])
self.assertTrue(api_project.ad_free)
self.assertFalse(api_project.show_advertising)
self.assertEqual(
api_project.environment_variables(public_only=False),
{"TOKEN": "a1b2c3", "RELEASE": "prod"},
)
self.assertEqual(
api_project.environment_variables(public_only=True),
{"RELEASE": "prod"},
)
def test_invalid_attributes_api_project(self):
invalid_attribute = "invalid_attribute"
project_data = {
"name": "Test Project",
"slug": "test-project",
"show_advertising": True,
invalid_attribute: "nope",
}
api_project = APIProject(**project_data)
self.assertFalse(hasattr(api_project, invalid_attribute))
def test_invalid_attributes_api_version(self):
invalid_attribute = "invalid_attribute"
version_data = {
"type": "branch",
"identifier": "main",
"verbose_name": "main",
"slug": "v2",
invalid_attribute: "nope",
}
api_version = APIVersion(**version_data)
self.assertFalse(hasattr(api_version, invalid_attribute))
@override_settings(
RTD_DEFAULT_FEATURES=dict(
[RTDProductFeature(type=TYPE_CONCURRENT_BUILDS, value=4).to_item()]
),
)
def test_concurrent_builds(self):
expected = {
"limit_reached": False,
"concurrent": 2,
"max_concurrent": 4,
}
project = get(
Project,
max_concurrent_builds=None,
main_language_project=None,
)
for state in ("triggered", "building", "cloning", "finished", "cancelled"):
get(
Build,
project=project,
state=state,
)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
resp = client.get(
f"/api/v2/build/concurrent/", data={"project__slug": project.slug}
)
self.assertEqual(resp.status_code, 200)
self.assertDictEqual(expected, resp.data)
def test_add_notification_deduplicated(self):
project = get(Project)
build = get(Build, project=project)
data = {
"attached_to": f"build/{build.pk}",
"message_id": BuildMaxConcurrencyError.LIMIT_REACHED,
"state": READ,
"dismissable": False,
"news": False,
"format_values": {"limit": 10},
}
url = "/api/v2/notifications/"
self.client.logout()
self.assertEqual(Notification.objects.count(), 0)
client = APIClient()
_, build_api_key = BuildAPIKey.objects.create_key(project)
client.credentials(HTTP_AUTHORIZATION=f"Token {build_api_key}")
response = client.post(url, data=data)
self.assertEqual(response.status_code, 201)
self.assertEqual(Notification.objects.count(), 1)
n1 = Notification.objects.first()
# Adding the same notification, de-duplicates it
response = client.post(url, data=data)
self.assertEqual(response.status_code, 201)
self.assertEqual(Notification.objects.count(), 1)
n2 = Notification.objects.first()
self.assertEqual(n1.pk, n2.pk)
self.assertEqual(n1.state, READ)
self.assertEqual(n2.state, UNREAD)
self.assertNotEqual(n1.modified, n2.modified)
|
APITests
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/select/tutorial003_py310.py
|
{
"start": 71,
"end": 1106
}
|
class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str
secret_name: str
age: int | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero)
results = session.exec(statement)
heroes = results.all()
print(heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
django__django
|
django/core/validators.py
|
{
"start": 14174,
"end": 15655
}
|
class ____(BaseValidator):
message = _("Ensure this value is a multiple of step size %(limit_value)s.")
code = "step_size"
def __init__(self, limit_value, message=None, offset=None):
super().__init__(limit_value, message)
if offset is not None:
self.message = _(
"Ensure this value is a multiple of step size %(limit_value)s, "
"starting from %(offset)s, e.g. %(offset)s, %(valid_value1)s, "
"%(valid_value2)s, and so on."
)
self.offset = offset
def __call__(self, value):
if self.offset is None:
super().__call__(value)
else:
cleaned = self.clean(value)
limit_value = (
self.limit_value() if callable(self.limit_value) else self.limit_value
)
if self.compare(cleaned, limit_value):
offset = cleaned.__class__(self.offset)
params = {
"limit_value": limit_value,
"offset": offset,
"valid_value1": offset + limit_value,
"valid_value2": offset + 2 * limit_value,
}
raise ValidationError(self.message, code=self.code, params=params)
def compare(self, a, b):
offset = 0 if self.offset is None else self.offset
return not math.isclose(math.remainder(a - offset, b), 0, abs_tol=1e-9)
@deconstructible
|
StepValueValidator
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/model_query_type_annotation.py
|
{
"start": 1340,
"end": 1489
}
|
class ____:
taint_1: Annotated[str, "foo"] = ""
taint_2: Annotated[int, "bar"] = 0
no_taint_1: List[int] = []
no_taint_2: int = 0
|
Test9_C
|
python
|
redis__redis-py
|
redis/commands/search/hybrid_query.py
|
{
"start": 6469,
"end": 6555
}
|
class ____(Enum):
RRF = "RRF"
LINEAR = "LINEAR"
@experimental
|
CombinationMethods
|
python
|
getsentry__sentry
|
tests/sentry/debug_files/test_artifact_bundles.py
|
{
"start": 2862,
"end": 6082
}
|
class ____(TestCase):
def clear_cache(self):
redis_client = get_redis_cluster_for_artifact_bundles()
redis_client.flushall()
def test_indexing_artifacts(self) -> None:
self.clear_cache()
bundle = make_compressed_zip_file(
{
"path/in/zip/foo": {
"url": "~/path/to/app.js",
"content": b"app_idx1",
},
"path/in/zip/bar": {
"url": "~/path/to/other1.js",
"content": b"other1_idx1",
},
"path/in/zip/baz": {
"url": "~/path/to/only1.js",
"content": b"only1_idx1",
},
}
)
with self.tasks():
upload_bundle(bundle, self.project, "1.0.0")
# the first upload will not index anything
bundles = get_artifact_bundles(self.project, "1.0.0")
assert len(bundles) == 1
indexed = get_indexed_files(self.project, "1.0.0")
assert len(indexed) == 0
bundle = make_compressed_zip_file(
{
"path/in/zip/foo": {
"url": "~/path/to/app.js",
"content": b"app_idx1",
},
"path/in/zip/bar": {
"url": "~/path/to/other1.js",
"content": b"other1_idx1",
},
}
)
with self.tasks():
upload_bundle(bundle, self.project, "1.0.0")
# Uploading the same bundle a second time which internally still creates two artifact bundles, which both
# cover the same set of files.
bundles = get_artifact_bundles(self.project, "1.0.0")
assert len(bundles) == 2
indexed = get_indexed_files(self.project, "1.0.0", distinct=True)
assert len(indexed) == 0
bundle = make_compressed_zip_file(
{
"path/in/zip/foo": {
"url": "~/path/to/app.js",
"content": b"app_idx2",
},
"path/in/zip/bar": {
"url": "~/path/to/other2.js",
"content": b"other2_idx1",
},
}
)
with self.tasks():
upload_bundle(bundle, self.project, "1.0.0")
# the second upload will backfill everything that needs indexing
bundles = get_artifact_bundles(self.project, "1.0.0")
assert len(bundles) == 3
indexed = get_indexed_files(self.project, "1.0.0", distinct=True)
assert len(indexed) == 4
# here, we use the more recent bundle for the shared file,
# all other files are disjoint in this example
assert indexed[0].url == "~/path/to/app.js"
assert indexed[0].artifact_bundle == bundles[2]
assert indexed[1].url == "~/path/to/only1.js"
assert indexed[1].artifact_bundle == bundles[0]
assert indexed[2].url == "~/path/to/other1.js"
assert indexed[2].artifact_bundle == bundles[1]
assert indexed[3].url == "~/path/to/other2.js"
assert indexed[3].artifact_bundle == bundles[2]
|
ArtifactLookupTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/profiler/tfprof_logger_test.py
|
{
"start": 922,
"end": 2863
}
|
class ____(test.TestCase):
def _BuildSmallPlaceholderlModel(self):
a = array_ops.placeholder(dtypes.int32, [2, 2])
b = array_ops.placeholder(dtypes.int32, [2, 2])
y = math_ops.matmul(a, b)
return a, b, y
def _BuildSmallModel(self):
a = constant_op.constant([[1, 2], [3, 4]])
b = constant_op.constant([[1, 2], [3, 4]])
return math_ops.matmul(a, b)
# pylint: disable=pointless-string-statement
"""# TODO(xpan): This out of core so it doesn't depend on contrib.
def testFillMissingShape(self):
a, b, y = self._BuildSmallPlaceholderlModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y,
options=run_options,
run_metadata=run_metadata,
feed_dict={a: [[1, 2], [2, 3]],
b: [[1, 2], [2, 3]]})
graph2 = ops.Graph()
# Use copy_op_to_graph to remove shape information.
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEqual('<unknown>', str(y2.get_shape()))
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEqual('(2, 2)', str(y2.get_shape()))
def testFailedFillMissingShape(self):
y = self._BuildSmallModel()
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess = session.Session()
sess.run(y, options=run_options, run_metadata=run_metadata)
graph2 = ops.Graph()
y2 = copy_elements.copy_op_to_graph(y, graph2, [])
self.assertEqual('<unknown>', str(y2.get_shape()))
# run_metadata has special name for MatMul, hence failed to fill shape.
tfprof_logger._fill_missing_graph_shape(graph2, run_metadata)
self.assertEqual('<unknown>', str(y2.get_shape()))
"""
if __name__ == '__main__':
test.main()
|
TFProfLoggerTest
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/src/hypothesis/strategies/_internal/strategies.py
|
{
"start": 39286,
"end": 43626
}
|
class ____(SearchStrategy[MappedTo], Generic[MappedFrom, MappedTo]):
"""A strategy which is defined purely by conversion to and from another
strategy.
Its parameter and distribution come from that other strategy.
"""
def __init__(
self,
strategy: SearchStrategy[MappedFrom],
pack: Callable[[MappedFrom], MappedTo],
) -> None:
super().__init__()
self.mapped_strategy = strategy
self.pack = pack
def calc_is_empty(self, recur: RecurT) -> bool:
return recur(self.mapped_strategy)
def calc_is_cacheable(self, recur: RecurT) -> bool:
return recur(self.mapped_strategy)
def __repr__(self) -> str:
if not hasattr(self, "_cached_repr"):
self._cached_repr = f"{self.mapped_strategy!r}.map({get_pretty_function_description(self.pack)})"
return self._cached_repr
def do_validate(self) -> None:
self.mapped_strategy.validate()
def do_draw(self, data: ConjectureData) -> MappedTo:
with warnings.catch_warnings():
if isinstance(self.pack, type) and issubclass(
self.pack, (abc.Mapping, abc.Set)
):
warnings.simplefilter("ignore", BytesWarning)
for _ in range(3):
try:
data.start_span(MAPPED_SEARCH_STRATEGY_DO_DRAW_LABEL)
x = data.draw(self.mapped_strategy)
result = self.pack(x)
data.stop_span()
current_build_context().record_call(
result, self.pack, args=[x], kwargs={}
)
return result
except UnsatisfiedAssumption:
data.stop_span(discard=True)
raise UnsatisfiedAssumption
@property
def branches(self) -> Sequence[SearchStrategy[MappedTo]]:
return [
MappedStrategy(strategy, pack=self.pack)
for strategy in self.mapped_strategy.branches
]
def filter(
self, condition: Callable[[MappedTo], Any]
) -> "SearchStrategy[MappedTo]":
# Includes a special case so that we can rewrite filters on collection
# lengths, when most collections are `st.lists(...).map(the_type)`.
ListStrategy = _list_strategy_type()
if not isinstance(self.mapped_strategy, ListStrategy) or not (
(isinstance(self.pack, type) and issubclass(self.pack, abc.Collection))
or self.pack in _collection_ish_functions()
):
return super().filter(condition)
# Check whether our inner list strategy can rewrite this filter condition.
# If not, discard the result and _only_ apply a new outer filter.
new = ListStrategy.filter(self.mapped_strategy, condition)
if getattr(new, "filtered_strategy", None) is self.mapped_strategy:
return super().filter(condition) # didn't rewrite
# Apply a new outer filter even though we rewrote the inner strategy,
# because some collections can change the list length (dict, set, etc).
return FilteredStrategy(type(self)(new, self.pack), conditions=(condition,))
@lru_cache
def _list_strategy_type() -> Any:
from hypothesis.strategies._internal.collections import ListStrategy
return ListStrategy
def _collection_ish_functions() -> Sequence[Any]:
funcs = [sorted]
if np := sys.modules.get("numpy"):
# c.f. https://numpy.org/doc/stable/reference/routines.array-creation.html
# Probably only `np.array` and `np.asarray` will be used in practice,
# but why should that stop us when we've already gone this far?
funcs += [
np.empty_like,
np.eye,
np.identity,
np.ones_like,
np.zeros_like,
np.array,
np.asarray,
np.asanyarray,
np.ascontiguousarray,
np.asmatrix,
np.copy,
np.rec.array,
np.rec.fromarrays,
np.rec.fromrecords,
np.diag,
# bonus undocumented functions from tab-completion:
np.asarray_chkfinite,
np.asfortranarray,
]
return funcs
filter_not_satisfied = UniqueIdentifier("filter not satisfied")
|
MappedStrategy
|
python
|
pdm-project__pdm
|
src/pdm/models/cached_package.py
|
{
"start": 205,
"end": 3023
}
|
class ____:
"""A package cached in the central package store.
The directory name is similar to wheel's filename:
$PACKAGE_ROOT/<checksum[:2]>/<dist_name>-<version>-<impl>-<abi>-<plat>/
The checksum is stored in a file named `.checksum` under the directory.
Under the directory there could be a text file named `.referrers`.
Each line of the file is a distribution path that refers to this package.
*Only wheel installations will be cached*
"""
cache_files: ClassVar[tuple[str, ...]] = (".lock", ".checksum", ".referrers")
"""List of files storing cache metadata and not being part of the package"""
def __init__(self, path: str | Path, original_wheel: Path | None = None) -> None:
self.path = Path(os.path.normcase(os.path.expanduser(path))).resolve()
self.original_wheel = original_wheel
self._referrers: set[str] | None = None
def lock(self) -> ContextManager[Any]:
import filelock
return filelock.FileLock(self.path / ".lock")
@cached_property
def checksum(self) -> str:
"""The checksum of the path"""
return self.path.joinpath(".checksum").read_text().strip()
@cached_property
def dist_info(self) -> Path:
"""The dist-info directory of the wheel"""
from installer.exceptions import InvalidWheelSource
try:
return next(self.path.glob("*.dist-info"))
except StopIteration:
raise InvalidWheelSource(f"The wheel doesn't contain metadata {self.path!r}") from None
@property
def referrers(self) -> set[str]:
"""A set of entries in referrers file"""
if self._referrers is None:
filepath = self.path / ".referrers"
if not filepath.is_file():
return set()
self._referrers = {
line.strip()
for line in filepath.read_text("utf8").splitlines()
if line.strip() and os.path.exists(line.strip())
}
return self._referrers
def add_referrer(self, path: str) -> None:
"""Add a new referrer"""
path = os.path.normcase(os.path.expanduser(os.path.abspath(path)))
referrers = self.referrers | {path}
(self.path / ".referrers").write_text("\n".join(sorted(referrers)) + "\n", "utf8")
self._referrers = None
def remove_referrer(self, path: str) -> None:
"""Remove a referrer"""
path = os.path.normcase(os.path.expanduser(os.path.abspath(path)))
referrers = self.referrers - {path}
(self.path / ".referrers").write_text("\n".join(referrers) + "\n", "utf8")
self._referrers = None
def cleanup(self) -> None:
logger.info("Clean up cached package %s", self.path)
shutil.rmtree(self.path)
|
CachedPackage
|
python
|
getsentry__sentry
|
src/sentry/search/snuba/backend.py
|
{
"start": 10851,
"end": 11140
}
|
class ____:
"""\
Adds a single filter to a ``QuerySet`` object. Used with
``QuerySetBuilder``.
"""
def apply(
self, queryset: BaseQuerySet[Group, Group], search_filter: SearchFilter
) -> BaseQuerySet[Group, Group]:
raise NotImplementedError
|
Condition
|
python
|
celery__celery
|
t/unit/utils/test_serialization.py
|
{
"start": 1283,
"end": 1484
}
|
class ____:
def test_init(self):
x = UnpickleableExceptionWrapper('foo', 'Bar', [10, lambda x: x])
assert x.exc_args
assert len(x.exc_args) == 2
|
test_UnpickleExceptionWrapper
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_deadline.py
|
{
"start": 22236,
"end": 25929
}
|
class ____:
class MyCustomRef(ReferenceModels.BaseDeadlineReference):
def _evaluate_with(self, *, session: Session, **kwargs) -> datetime:
return timezone.datetime(DEFAULT_DATE)
class MyInvalidCustomRef:
pass
class MyCustomRefWithKwargs(ReferenceModels.BaseDeadlineReference):
required_kwargs = {"custom_id"}
def _evaluate_with(self, *, session: Session, **kwargs) -> datetime:
return timezone.datetime(DEFAULT_DATE)
def setup_method(self):
self.original_dagrun_created = DeadlineReference.TYPES.DAGRUN_CREATED
self.original_dagrun_queued = DeadlineReference.TYPES.DAGRUN_QUEUED
self.original_dagrun = DeadlineReference.TYPES.DAGRUN
self.original_attrs = set(dir(ReferenceModels))
self.original_deadline_attrs = set(dir(DeadlineReference))
def teardown_method(self):
DeadlineReference.TYPES.DAGRUN_CREATED = self.original_dagrun_created
DeadlineReference.TYPES.DAGRUN_QUEUED = self.original_dagrun_queued
DeadlineReference.TYPES.DAGRUN = self.original_dagrun
for attr in set(dir(ReferenceModels)):
if attr not in self.original_attrs:
delattr(ReferenceModels, attr)
for attr in set(dir(DeadlineReference)):
if attr not in self.original_deadline_attrs:
delattr(DeadlineReference, attr)
@pytest.mark.parametrize(
"reference",
[
pytest.param(MyCustomRef, id="basic_custom_reference"),
pytest.param(MyCustomRefWithKwargs, id="custom_reference_with_kwargs"),
],
)
@pytest.mark.parametrize(
"timing",
[
pytest.param(None, id="default_timing"),
pytest.param(DeadlineReference.TYPES.DAGRUN_CREATED, id="dagrun_created"),
pytest.param(DeadlineReference.TYPES.DAGRUN_QUEUED, id="dagrun_queued"),
],
)
def test_register_custom_reference(self, timing, reference):
if timing is None:
result = DeadlineReference.register_custom_reference(reference)
expected_timing = DeadlineReference.TYPES.DAGRUN_CREATED
else:
result = DeadlineReference.register_custom_reference(reference, timing)
expected_timing = timing
assert result is reference
assert getattr(ReferenceModels, reference.__name__) is reference
assert getattr(DeadlineReference, reference.__name__).__class__ is reference
assert_correct_timing(reference, expected_timing)
assert_builtin_types_unchanged(
DeadlineReference.TYPES.DAGRUN_QUEUED, DeadlineReference.TYPES.DAGRUN_CREATED
)
def test_register_custom_reference_invalid_inheritance(self):
with pytest.raises(ValueError, match="must inherit from BaseDeadlineReference"):
DeadlineReference.register_custom_reference(self.MyInvalidCustomRef)
def test_register_custom_reference_invalid_timing(self):
invalid_timing = ("not", "a", "valid", "timing")
with pytest.raises(
ValueError,
match=re.escape(
f"Invalid deadline reference type {invalid_timing}; "
f"must be a valid DeadlineReference.TYPES option."
),
):
DeadlineReference.register_custom_reference(self.MyCustomRef, invalid_timing)
def test_custom_reference_discoverable_by_get_reference_class(self):
DeadlineReference.register_custom_reference(self.MyCustomRef)
found_class = ReferenceModels.get_reference_class(self.MyCustomRef.__name__)
assert found_class is self.MyCustomRef
|
TestCustomDeadlineReference
|
python
|
google__jax
|
tests/state_test.py
|
{
"start": 41309,
"end": 42045
}
|
class ____(NamedTuple):
vmap_index_param: VmappableIndexParam
bat_ref: np.ndarray
bat_idxs: tuple[np.ndarray, ...]
@hps.composite
def get_vmap_params(draw):
vmap_index_param: VmappableIndexParam = draw(
vmappable_index_params(op_type="get"))
bat_ref = draw(hnp.arrays(np.float32, vmap_index_param.bat_ref_shape))
bat_idx_shapes_ = iter(vmap_index_param.bat_non_slice_idx_shapes)
bat_idxs = tuple(
draw(index_arrays(size, next(bat_idx_shapes_)))
for size, indexed in zip(
vmap_index_param.index_param.ref_shape,
vmap_index_param.index_param.indexed_dims)
if indexed)
assert next(bat_idx_shapes_, None) is None
return GetVmapParams(vmap_index_param, bat_ref, bat_idxs)
|
GetVmapParams
|
python
|
coleifer__peewee
|
tests/regressions.py
|
{
"start": 51431,
"end": 52276
}
|
class ____(ModelTestCase):
requires = [CharPK, CharFK]
def test_model_conversion_regression(self):
cpks = [CharPK.create(id=str(i), name='u%s' % i) for i in range(3)]
query = CharPK.select().where(CharPK.id << cpks)
self.assertEqual(sorted([c.id for c in query]), ['0', '1', '2'])
query = CharPK.select().where(CharPK.id.in_(list(CharPK.select())))
self.assertEqual(sorted([c.id for c in query]), ['0', '1', '2'])
def test_model_conversion_fk_retained(self):
cpks = [CharPK.create(id=str(i), name='u%s' % i) for i in range(3)]
cfks = [CharFK.create(id=i + 1, cpk='u%s' % i) for i in range(3)]
c0, c1, c2 = cpks
query = CharFK.select().where(CharFK.cpk << [c0, c2])
self.assertEqual(sorted([f.id for f in query]), [1, 3])
|
TestModelConversionRegression
|
python
|
numba__numba
|
numba/tests/test_dyn_array.py
|
{
"start": 35779,
"end": 38092
}
|
class ____(TestCase):
def setUp(self):
v = np.array([1, 2, 3])
hv = np.array([[1, 2, 3]])
vv = np.transpose(hv)
self.vectors = [v, hv, vv]
a3x4 = np.arange(12).reshape(3, 4)
a4x3 = np.arange(12).reshape(4, 3)
self.matricies = [a3x4, a4x3]
def func(q):
return np.diag(q)
self.py = func
self.jit = nrtjit(func)
def func_kwarg(q, k=0):
return np.diag(q, k=k)
self.py_kw = func_kwarg
self.jit_kw = nrtjit(func_kwarg)
def check_diag(self, pyfunc, nrtfunc, *args, **kwargs):
expected = pyfunc(*args, **kwargs)
computed = nrtfunc(*args, **kwargs)
self.assertEqual(computed.size, expected.size)
self.assertEqual(computed.dtype, expected.dtype)
# NOTE: stride not tested as np returns a RO view, nb returns new data
np.testing.assert_equal(expected, computed)
# create a diag matrix from a vector
def test_diag_vect_create(self):
for d in self.vectors:
self.check_diag(self.py, self.jit, d)
# create a diag matrix from a vector at a given offset
def test_diag_vect_create_kwarg(self):
for k in range(-10, 10):
for d in self.vectors:
self.check_diag(self.py_kw, self.jit_kw, d, k=k)
# extract the diagonal
def test_diag_extract(self):
for d in self.matricies:
self.check_diag(self.py, self.jit, d)
# extract a diagonal at a given offset
def test_diag_extract_kwarg(self):
for k in range(-4, 4):
for d in self.matricies:
self.check_diag(self.py_kw, self.jit_kw, d, k=k)
# check error handling
def test_error_handling(self):
d = np.array([[[1.]]])
cfunc = nrtjit(self.py)
# missing arg
with self.assertRaises(TypeError):
cfunc()
# > 2d
with self.assertRaises(TypingError):
cfunc(d)
with self.assertRaises(TypingError):
dfunc = nrtjit(self.py_kw)
dfunc(d, k=3)
def test_bad_shape(self):
cfunc = nrtjit(self.py)
msg = '.*The argument "v" must be array-like.*'
with self.assertRaisesRegex(TypingError, msg) as raises:
cfunc(None)
|
TestNdDiag
|
python
|
ray-project__ray
|
python/ray/data/tests/test_pandas_block.py
|
{
"start": 3326,
"end": 7967
}
|
class ____:
@pytest.fixture
def all_null_series(self):
return pd.Series([None] * 3, dtype=np.float64)
def test_count_all_null(self, all_null_series):
accessor = PandasBlockColumnAccessor(all_null_series)
# When ignoring nulls, count should be 0; otherwise, count returns length.
assert accessor.count(ignore_nulls=True, as_py=True) == 0
assert accessor.count(ignore_nulls=False, as_py=True) == len(all_null_series)
@pytest.mark.parametrize("ignore_nulls", [True, False])
def test_sum_all_null(self, all_null_series, ignore_nulls):
accessor = PandasBlockColumnAccessor(all_null_series)
result = accessor.sum(ignore_nulls=ignore_nulls)
assert is_null(result)
@pytest.mark.parametrize("ignore_nulls", [True, False])
def test_min_all_null(self, all_null_series, ignore_nulls):
accessor = PandasBlockColumnAccessor(all_null_series)
result = accessor.min(ignore_nulls=ignore_nulls, as_py=True)
assert is_null(result)
@pytest.mark.parametrize("ignore_nulls", [True, False])
def test_max_all_null(self, all_null_series, ignore_nulls):
accessor = PandasBlockColumnAccessor(all_null_series)
result = accessor.max(ignore_nulls=ignore_nulls)
assert is_null(result)
@pytest.mark.parametrize("ignore_nulls", [True, False])
def test_mean_all_null(self, all_null_series, ignore_nulls):
accessor = PandasBlockColumnAccessor(all_null_series)
result = accessor.mean(ignore_nulls=ignore_nulls)
assert is_null(result)
@pytest.mark.parametrize("ignore_nulls", [True, False])
def test_sum_of_squared_diffs_all_null(self, all_null_series, ignore_nulls):
accessor = PandasBlockColumnAccessor(all_null_series)
result = accessor.sum_of_squared_diffs_from_mean(
ignore_nulls=ignore_nulls, mean=None
)
assert is_null(result)
@pytest.mark.parametrize(
"input_block, fill_column_name, fill_value, expected_output_block",
[
(
pd.DataFrame({"a": [0, 1]}),
"b",
2,
pd.DataFrame({"a": [0, 1], "b": [2, 2]}),
),
],
)
def test_fill_column(input_block, fill_column_name, fill_value, expected_output_block):
block_accessor = PandasBlockAccessor.for_block(input_block)
actual_output_block = block_accessor.fill_column(fill_column_name, fill_value)
assert actual_output_block.equals(expected_output_block)
def test_pandas_block_timestamp_ns(ray_start_regular_shared):
# Input data with nanosecond precision timestamps
data_rows = [
{"col1": 1, "col2": pd.Timestamp("2023-01-01T00:00:00.123456789")},
{"col1": 2, "col2": pd.Timestamp("2023-01-01T01:15:30.987654321")},
{"col1": 3, "col2": pd.Timestamp("2023-01-01T02:30:15.111111111")},
{"col1": 4, "col2": pd.Timestamp("2023-01-01T03:45:45.222222222")},
{"col1": 5, "col2": pd.Timestamp("2023-01-01T05:00:00.333333333")},
]
# Initialize PandasBlockBuilder
pandas_builder = PandasBlockBuilder()
for row in data_rows:
pandas_builder.add(row)
pandas_block = pandas_builder.build()
assert pd.api.types.is_datetime64_ns_dtype(pandas_block["col2"])
for original_row, result_row in zip(
data_rows, pandas_block.to_dict(orient="records")
):
assert (
original_row["col2"] == result_row["col2"]
), "Timestamp mismatch in PandasBlockBuilder output"
@pytest.mark.skipif(
_object_extension_type_allowed(), reason="Objects can be put into Arrow"
)
def test_dict_fallback_to_pandas_block(ray_start_regular_shared):
# If the UDF returns a column with dict, this throws
# an error during block construction because we cannot cast dicts
# to a supported arrow type. This test checks that the block
# construction falls back to pandas and still succeeds.
def fn(batch):
batch["data_dict"] = [{"data": 0} for _ in range(len(batch["id"]))]
return batch
ds = ray.data.range(10).map_batches(fn)
ds = ds.materialize()
block = ray.get(ds.get_internal_block_refs()[0])
# TODO: Once we support converting dict to a supported arrow type,
# the block type should be Arrow.
assert isinstance(block, pd.DataFrame)
def fn2(batch):
batch["data_none"] = [None for _ in range(len(batch["id"]))]
return batch
ds2 = ray.data.range(10).map_batches(fn2)
ds2 = ds2.materialize()
block = ray.get(ds2.get_internal_block_refs()[0])
assert isinstance(block, pd.DataFrame)
|
TestPandasBlockColumnAccessorAllNullSeries
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 843784,
"end": 844532
}
|
class ____(sgqlc.types.relay.Connection):
"""The connection type for ProjectV2Actor."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2ActorEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2Actor"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
|
ProjectV2ActorConnection
|
python
|
cython__cython
|
Cython/Compiler/Code.py
|
{
"start": 139443,
"end": 139857
}
|
class ____:
# f file output file
# level int indentation level
def __init__(self, outfile_name):
self.f = Utils.open_new_file(outfile_name)
self.level = 0
def putln(self, code):
self.f.write("%s%s\n" % (" " * self.level, code))
def indent(self):
self.level += 1
def dedent(self):
self.level -= 1
|
PyrexCodeWriter
|
python
|
bokeh__bokeh
|
src/bokeh/application/handlers/server_lifecycle.py
|
{
"start": 1748,
"end": 5083
}
|
class ____(LifecycleHandler):
''' Load a script which contains server lifecycle callbacks.
'''
def __init__(self, *, filename: PathLike, argv: list[str] = [], package: ModuleType | None = None) -> None:
'''
Keyword Args:
filename (str) : path to a module to load lifecycle callbacks from
argv (list[str], optional) : a list of string arguments to use as
``sys.argv`` when the callback code is executed. (default: [])
'''
super().__init__()
with open(filename, encoding='utf-8') as f:
source = f.read()
self._runner = CodeRunner(source, filename, argv, package=package)
if not self._runner.failed:
# unlike ScriptHandler, we only load the module one time
self._module = self._runner.new_module()
def extract_callbacks() -> None:
contents = self._module.__dict__
if 'on_server_loaded' in contents:
self._on_server_loaded = contents['on_server_loaded']
if 'on_server_unloaded' in contents:
self._on_server_unloaded = contents['on_server_unloaded']
if 'on_session_created' in contents:
self._on_session_created = contents['on_session_created']
if 'on_session_destroyed' in contents:
self._on_session_destroyed = contents['on_session_destroyed']
_check_callback(self._on_server_loaded, ('server_context',), what="on_server_loaded")
_check_callback(self._on_server_unloaded, ('server_context',), what="on_server_unloaded")
_check_callback(self._on_session_created, ('session_context',), what="on_session_created")
_check_callback(self._on_session_destroyed, ('session_context',), what="on_session_destroyed")
self._runner.run(self._module, extract_callbacks)
# Properties --------------------------------------------------------------
@property
def error(self) -> str | None:
''' If the handler fails, may contain a related error message.
'''
return self._runner.error
@property
def error_detail(self) -> str | None:
''' If the handler fails, may contain a traceback or other details.
'''
return self._runner.error_detail
@property
def failed(self) -> bool:
''' ``True`` if the lifecycle callbacks failed to execute
'''
return self._runner.failed
# Public methods ----------------------------------------------------------
def url_path(self) -> str | None:
''' The last path component for the basename of the path to the
callback module.
'''
if self.failed:
return None
else:
# TODO should fix invalid URL characters
return '/' + os.path.splitext(os.path.basename(self._runner.path))[0]
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
ServerLifecycleHandler
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 30441,
"end": 30702
}
|
class ____(BaseModel, extra="forbid"):
target: "VectorInput" = Field(..., description="")
context: Union[List["ContextPair"], "ContextPair"] = Field(
..., description="Search space will be constrained by these pairs of vectors"
)
|
DiscoverInput
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_source.py
|
{
"start": 1550,
"end": 7412
}
|
class ____:
def test_check_connection_ok(self, config, logger_mock, fb_marketing):
ok, error_msg = fb_marketing.check_connection(logger_mock, config=config)
assert ok
assert not error_msg
def test_check_connection_find_account_was_called(self, api_find_account, config, logger_mock, fb_marketing):
"""Check if _find_account was called to validate credentials"""
ok, error_msg = fb_marketing.check_connection(logger_mock, config=config)
api_find_account.assert_called_once_with(config["account_ids"][0])
logger_mock.info.assert_has_calls(
[
call("Attempting to retrieve information for account with ID: 123"),
call("Successfully retrieved account information for account: 1234"),
]
)
assert ok
assert not error_msg
def test_check_connection_future_date_range(self, api, config, logger_mock, fb_marketing):
config["start_date"] = "2219-10-10T00:00:00"
config["end_date"] = "2219-10-11T00:00:00"
assert fb_marketing.check_connection(logger_mock, config=config) == (
False,
"Date range can not be in the future.",
)
def test_check_connection_end_date_before_start_date(self, api, config, logger_mock, fb_marketing):
config["start_date"] = "2019-10-10T00:00:00"
config["end_date"] = "2019-10-09T00:00:00"
assert fb_marketing.check_connection(logger_mock, config=config) == (
False,
"End date must be equal or after start date.",
)
def test_check_connection_empty_config(self, api, logger_mock, fb_marketing):
config = {}
ok, error_msg = fb_marketing.check_connection(logger_mock, config=config)
assert not ok
assert error_msg
def test_check_connection_config_no_start_date(self, api, config, logger_mock, fb_marketing):
config.pop("start_date")
ok, error_msg = fb_marketing.check_connection(logger_mock, config=config)
assert ok
assert not error_msg
def test_check_connection_exception(self, api, config, logger_mock, fb_marketing):
api.side_effect = RuntimeError("Something went wrong!")
ok, error_msg = fb_marketing.check_connection(logger_mock, config=config)
assert not ok
assert error_msg == "Unexpected error: RuntimeError('Something went wrong!')"
def test_streams(self, config, api, fb_marketing):
streams = fb_marketing.streams(config)
assert len(streams) == 30
def test_spec(self, fb_marketing):
spec = fb_marketing.spec()
assert isinstance(spec, ConnectorSpecification)
def test_get_custom_insights_streams(self, api, config, fb_marketing):
config["custom_insights"] = [
{
"name": "test",
"fields": ["account_id"],
"breakdowns": ["ad_format_asset"],
"action_breakdowns": ["action_device"],
},
]
config = ConnectorConfig.parse_obj(config)
assert fb_marketing.get_custom_insights_streams(api, config)
def test_get_custom_insights_action_breakdowns_allow_empty(self, api, config, fb_marketing):
config["custom_insights"] = [
{
"name": "test",
"fields": ["account_id"],
"breakdowns": ["ad_format_asset"],
"action_breakdowns": [],
},
]
config["action_breakdowns_allow_empty"] = False
streams = fb_marketing.get_custom_insights_streams(api, ConnectorConfig.parse_obj(config))
assert len(streams) == 1
assert streams[0].breakdowns == ["ad_format_asset"]
assert streams[0].action_breakdowns == [
"action_type",
"action_target_id",
"action_destination",
]
config["action_breakdowns_allow_empty"] = True
streams = fb_marketing.get_custom_insights_streams(api, ConnectorConfig.parse_obj(config))
assert len(streams) == 1
assert streams[0].breakdowns == ["ad_format_asset"]
assert streams[0].action_breakdowns == []
def test_read_missing_stream(self, config, api, logger_mock, fb_marketing):
catalog = ConfiguredAirbyteCatalog(
streams=[
ConfiguredAirbyteStream(
stream=AirbyteStream(
name="fake_stream",
json_schema={},
supported_sync_modes=[SyncMode.full_refresh],
),
sync_mode=SyncMode.full_refresh,
destination_sync_mode=DestinationSyncMode.overwrite,
)
]
)
with pytest.raises(AirbyteTracedException):
list(fb_marketing.read(logger_mock, config=config, catalog=catalog))
def test_check_config(config_gen, requests_mock, fb_marketing):
requests_mock.register_uri("GET", FacebookSession.GRAPH + f"/{FacebookAdsApi.API_VERSION}/act_123/", {})
assert command_check(fb_marketing, config_gen()) == AirbyteConnectionStatus(status=Status.SUCCEEDED, message=None)
status = command_check(fb_marketing, config_gen(start_date="2019-99-10T00:00:00Z"))
assert status.status == Status.FAILED
status = command_check(fb_marketing, config_gen(end_date="2019-99-10T00:00:00Z"))
assert status.status == Status.FAILED
status = command_check(fb_marketing, config_gen(start_date=...))
assert status.status == Status.SUCCEEDED
assert command_check(fb_marketing, config_gen(end_date=...)) == AirbyteConnectionStatus(status=Status.SUCCEEDED, message=None)
assert command_check(fb_marketing, config_gen(end_date="")) == AirbyteConnectionStatus(status=Status.SUCCEEDED, message=None)
|
TestSourceFacebookMarketing
|
python
|
PyCQA__pylint
|
tests/functional/m/match_class_pattern.py
|
{
"start": 494,
"end": 2442
}
|
class ____(NamedTuple):
# inherits from tuple -> match self
x: int
y: int
def f1(x):
"""Check too many positional sub-patterns"""
match x:
case A(1): ...
case A(1, 2): ... # [too-many-positional-sub-patterns]
case B(1, 2): ...
case B(1, 2, 3): ... # [too-many-positional-sub-patterns]
case int(1): ...
case int(1, 2): ... # [too-many-positional-sub-patterns]
case tuple(1): ...
case tuple(1, 2): ... # [too-many-positional-sub-patterns]
case tuple((1, 2)): ...
case Result(1, 2): ...
def f2(x):
"""Check multiple sub-patterns for attribute"""
match x:
case A(1, x=1): ... # [multiple-class-sub-patterns]
case A(1, y=1): ...
case A(x=1, x=2, x=3): ... # [multiple-class-sub-patterns]
# with invalid __match_args__ we can't detect duplicates with positional patterns
case D(1, x=1): ...
# If class name is undefined, we can't get __match_args__
case NotDefined(1, x=1): ... # [undefined-variable]
def f3(x):
"""Check class pattern with name binding to self."""
match x:
case int(y): ... # [match-class-bind-self]
case int() as y: ...
case int(2 as y): ...
case str(y): ... # [match-class-bind-self]
case str() as y: ...
case str("Hello" as y): ...
case tuple(y, 2): ... # pylint: disable=too-many-positional-sub-patterns
case tuple((y, 2)): ...
def f4(x):
"""Check for positional attributes if keywords could be used."""
# pylint: enable=match-class-positional-attributes
match x:
case int(2): ...
case bool(True): ...
case A(1): ... # [match-class-positional-attributes]
case A(x=1): ...
case B(1, 2): ... # [match-class-positional-attributes]
case B(x=1, y=2): ...
case Result(1, 2): ...
case Result(x=1, y=2): ...
|
Result
|
python
|
getsentry__sentry
|
src/sentry/interfaces/contexts.py
|
{
"start": 5889,
"end": 6017
}
|
class ____(ContextType):
type = "app"
context_to_tag_mapping = {"device": "{device_app_hash}"}
@contexttype
|
AppContextType
|
python
|
gwtw__py-sorting
|
test/bubble_sort_test.py
|
{
"start": 406,
"end": 737
}
|
class ____(unittest.TestCase,
BaseCustomComparisonSortTest,
BasePositiveIntegerSortTest,
BaseNegativeIntegerSortTest,
BaseStringSortTest):
def setUp(self):
self.sort = bubble_sort.sort
if __name__ == '__main__':
unittest.main()
|
BubbleSortTest
|
python
|
ansible__ansible
|
lib/ansible/plugins/action/assemble.py
|
{
"start": 1195,
"end": 6153
}
|
class ____(ActionBase):
TRANSFERS_FILES = True
def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, decrypt=True):
""" assemble a file from a directory of fragments """
tmpfd, temp_path = tempfile.mkstemp(dir=C.DEFAULT_LOCAL_TMP)
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in (to_text(p, errors='surrogate_or_strict') for p in sorted(os.listdir(src_path))):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = u"%s/%s" % (src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
with open(self._loader.get_real_file(fragment, decrypt=decrypt), 'rb') as fragment_fh:
fragment_content = fragment_fh.read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b'\n')
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
if delimiter[-1] != b'\n':
tmp.write(b'\n')
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b'\n'):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if task_vars is None:
task_vars = dict()
src = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
delimiter = self._task.args.get('delimiter', None)
remote_src = self._task.args.get('remote_src', 'yes')
regexp = self._task.args.get('regexp', None)
follow = self._task.args.get('follow', False)
ignore_hidden = self._task.args.get('ignore_hidden', False)
decrypt = self._task.args.pop('decrypt', True)
try:
if src is None or dest is None:
raise AnsibleActionFail("src and dest are required")
if boolean(remote_src, strict=False):
# call assemble via ansible.legacy to allow library/ overrides of the module without collection search
return self._execute_module(module_name='ansible.legacy.assemble', task_vars=task_vars)
src = self._find_needle('files', src)
if not os.path.isdir(src):
raise AnsibleActionFail(u"Source (%s) is not a directory" % src)
_re = None
if regexp is not None:
_re = re.compile(regexp)
# Does all work assembling the file
path = self._assemble_from_fragments(src, delimiter, _re, ignore_hidden, decrypt)
path_checksum = checksum_s(path)
dest = self._remote_expand_user(dest)
dest_stat = self._execute_remote_stat(dest, all_vars=task_vars, follow=follow)
diff = {}
# setup args for running modules
new_module_args = self._task.args.copy()
# clean assemble specific options
for opt in ['remote_src', 'regexp', 'delimiter', 'ignore_hidden', 'decrypt']:
if opt in new_module_args:
del new_module_args[opt]
new_module_args['dest'] = dest
if path_checksum != dest_stat['checksum']:
if self._task.diff:
diff = self._get_diff_data(dest, path, task_vars)
remote_path = self._connection._shell.join_path(self._connection._shell.tmpdir, 'src')
xfered = self._transfer_file(path, remote_path)
# fix file permissions when the copy is done as a different user
self._fixup_perms2((self._connection._shell.tmpdir, remote_path))
new_module_args.update(dict(src=xfered,))
res = self._execute_module(module_name='ansible.legacy.copy', module_args=new_module_args, task_vars=task_vars)
if diff:
res['diff'] = diff
return res
else:
return self._execute_module(module_name='ansible.legacy.file', module_args=new_module_args, task_vars=task_vars)
finally:
self._remove_tmp_path(self._connection._shell.tmpdir)
|
ActionModule
|
python
|
django__django
|
tests/middleware_exceptions/middleware.py
|
{
"start": 3109,
"end": 3254
}
|
class ____(BaseMiddleware):
async def process_template_response(self, request, response):
return None
|
AsyncNoTemplateResponseMiddleware
|
python
|
scipy__scipy
|
scipy/optimize/_differentialevolution.py
|
{
"start": 25680,
"end": 85674
}
|
class ____:
"""This class implements the differential evolution solver
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function. The number of parameters, N, is equal
to ``len(x)``.
bounds : sequence or `Bounds`
Bounds for variables. There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. ``(min, max)`` pairs for each element in ``x``, defining the
finite lower and upper bounds for the optimizing argument of
`func`.
The total number of bounds is used to determine the number of
parameters, N. If there are parameters whose bounds are equal the total
number of free parameters is ``N - N_equal``.
args : tuple, optional
Any additional fixed parameters needed to
completely specify the objective function.
strategy : {str, callable}, optional
The differential evolution strategy to use. Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1bin'
- 'rand1exp'
- 'rand2bin'
- 'rand2exp'
- 'randtobest1bin'
- 'randtobest1exp'
- 'currenttobest1bin'
- 'currenttobest1exp'
- 'best2exp'
- 'best2bin'
The default is 'best1bin'. Strategies that may be
implemented are outlined in 'Notes'.
Alternatively the differential evolution strategy can be customized
by providing a callable that constructs a trial vector. The callable
must have the form
``strategy(candidate: int, population: np.ndarray, rng=None)``,
where ``candidate`` is an integer specifying which entry of the
population is being evolved, ``population`` is an array of shape
``(S, N)`` containing all the population members (where S is the
total population size), and ``rng`` is the random number generator
being used within the solver.
``candidate`` will be in the range ``[0, S)``.
``strategy`` must return a trial vector with shape ``(N,)``. The
fitness of this trial vector is compared against the fitness of
``population[candidate]``.
maxiter : int, optional
The maximum number of generations over which the entire population is
evolved. The maximum number of function evaluations (with no polishing)
is: ``(maxiter + 1) * popsize * (N - N_equal)``
popsize : int, optional
A multiplier for setting the total population size. The population has
``popsize * (N - N_equal)`` individuals. This keyword is overridden if
an initial population is supplied via the `init` keyword. When using
``init='sobol'`` the population size is calculated as the next power
of 2 after ``popsize * (N - N_equal)``.
tol : float, optional
Relative tolerance for convergence, the solving stops when
``np.std(population_energies) <= atol + tol * np.abs(np.mean(population_energies))``,
where and `atol` and `tol` are the absolute and relative tolerance
respectively.
mutation : float or tuple(float, float), optional
The mutation constant. In the literature this is also known as
differential weight, being denoted by F.
If specified as a float it should be in the range [0, 2].
If specified as a tuple ``(min, max)`` dithering is employed. Dithering
randomly changes the mutation constant on a generation by generation
basis. The mutation constant for that generation is taken from
U[min, max). Dithering can help speed convergence significantly.
Increasing the mutation constant increases the search radius, but will
slow down convergence.
recombination : float, optional
The recombination constant, should be in the range [0, 1]. In the
literature this is also known as the crossover probability, being
denoted by CR. Increasing this value allows a larger number of mutants
to progress into the next generation, but at the risk of population
stability.
rng : {None, int, `numpy.random.Generator`}, optional
..versionchanged:: 1.15.0
As part of the `SPEC-007 <https://scientific-python.org/specs/spec-0007/>`_
transition from use of `numpy.random.RandomState` to
`numpy.random.Generator` this keyword was changed from `seed` to `rng`.
For an interim period both keywords will continue to work (only specify
one of them). After the interim period using the `seed` keyword will emit
warnings. The behavior of the `seed` and `rng` keywords is outlined below.
If `rng` is passed by keyword, types other than `numpy.random.Generator` are
passed to `numpy.random.default_rng` to instantiate a `Generator`.
If `rng` is already a `Generator` instance, then the provided instance is
used.
If this argument is passed by position or `seed` is passed by keyword, the
behavior is:
- If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
- If `seed` is an int, a new `RandomState` instance is used,
seeded with `seed`.
- If `seed` is already a `Generator` or `RandomState` instance then
that instance is used.
Specify `seed`/`rng` for repeatable minimizations.
disp : bool, optional
Prints the evaluated `func` at every iteration.
callback : callable, optional
A callable called after each iteration. Has the signature:
``callback(intermediate_result: OptimizeResult)``
where ``intermediate_result`` is a keyword parameter containing an
`OptimizeResult` with attributes ``x`` and ``fun``, the best solution
found so far and the objective function. Note that the name
of the parameter must be ``intermediate_result`` for the callback
to be passed an `OptimizeResult`.
The callback also supports a signature like:
``callback(x, convergence: float=val)``
``val`` represents the fractional value of the population convergence.
When ``val`` is greater than ``1.0``, the function halts.
Introspection is used to determine which of the signatures is invoked.
Global minimization will halt if the callback raises ``StopIteration``
or returns ``True``; any polishing is still carried out.
.. versionchanged:: 1.12.0
callback accepts the ``intermediate_result`` keyword.
polish : {bool, callable}, optional
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B`
method is used to polish the best population member at the end, which
can improve the minimization slightly. If a constrained problem is
being studied then the `trust-constr` method is used instead. For large
problems with many constraints, polishing can take a long time due to
the Jacobian computations.
Alternatively supply a callable that has a `minimize`-like signature,
``polish_func(func, x0, **kwds)`` and returns an `OptimizeResult`. This
allows the user to have fine control over how the polishing occurs.
`bounds` and `constraints` will be present in ``kwds``. Extra keywords
could be supplied to `polish_func` using `functools.partial`. It is the
user's responsibility to ensure that the polishing function obeys
bounds, any constraints (including integrality constraints), and that
appropriate attributes are set in the `OptimizeResult`, such as ``fun``,
```x``, ``nfev``, ``jac``.
maxfun : int, optional
Set the maximum number of function evaluations. However, it probably
makes more sense to set `maxiter` instead.
init : str or array-like, optional
Specify which type of population initialization is performed. Should be
one of:
- 'latinhypercube'
- 'sobol'
- 'halton'
- 'random'
- array specifying the initial population. The array should have
shape ``(S, N)``, where S is the total population size and
N is the number of parameters.
`init` is clipped to `bounds` before use.
The default is 'latinhypercube'. Latin Hypercube sampling tries to
maximize coverage of the available parameter space.
'sobol' and 'halton' are superior alternatives and maximize even more
the parameter space. 'sobol' will enforce an initial population
size which is calculated as the next power of 2 after
``popsize * (N - N_equal)``. 'halton' has no requirements but is a bit
less efficient. See `scipy.stats.qmc` for more details.
'random' initializes the population randomly - this has the drawback
that clustering can occur, preventing the whole of parameter space
being covered. Use of an array to specify a population could be used,
for example, to create a tight bunch of initial guesses in an location
where the solution is known to exist, thereby reducing time for
convergence.
atol : float, optional
Absolute tolerance for convergence, the solving stops when
``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
where and `atol` and `tol` are the absolute and relative tolerance
respectively.
updating : {'immediate', 'deferred'}, optional
If ``'immediate'``, the best solution vector is continuously updated
within a single generation [4]_. This can lead to faster convergence as
trial vectors can take advantage of continuous improvements in the best
solution.
With ``'deferred'``, the best solution vector is updated once per
generation. Only ``'deferred'`` is compatible with parallelization or
vectorization, and the `workers` and `vectorized` keywords can
over-ride this option.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel
(uses `multiprocessing.Pool <multiprocessing>`).
Supply `-1` to use all cores available to the Process.
Alternatively supply a map-like callable, such as
`multiprocessing.Pool.map` for evaluating the population in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
This option will override the `updating` keyword to
`updating='deferred'` if `workers != 1`.
Requires that `func` be pickleable.
constraints : {NonLinearConstraint, LinearConstraint, Bounds}
Constraints on the solver, over and above those applied by the `bounds`
kwd. Uses the approach by Lampinen.
x0 : None or array-like, optional
Provides an initial guess to the minimization. Once the population has
been initialized this vector replaces the first (best) member. This
replacement is done even if `init` is given an initial population.
``x0.shape == (N,)``.
integrality : 1-D array, optional
For each decision variable, a boolean value indicating whether the
decision variable is constrained to integer values. The array is
broadcast to ``(N,)``.
If any decision variables are constrained to be integral, they will not
be changed during polishing.
Only integer values lying between the lower and upper bounds are used.
If there are no integer values lying between the bounds then a
`ValueError` is raised.
vectorized : bool, optional
If ``vectorized is True``, `func` is sent an `x` array with
``x.shape == (N, S)``, and is expected to return an array of shape
``(S,)``, where `S` is the number of solution vectors to be calculated.
If constraints are applied, each of the functions used to construct
a `Constraint` object should accept an `x` array with
``x.shape == (N, S)``, and return an array of shape ``(M, S)``, where
`M` is the number of constraint components.
This option is an alternative to the parallelization offered by
`workers`, and may help in optimization speed. This keyword is
ignored if ``workers != 1``.
This option will override the `updating` keyword to
``updating='deferred'``.
""" # noqa: E501
# Dispatch of mutation strategy method (binomial or exponential).
_binomial = {'best1bin': '_best1',
'randtobest1bin': '_randtobest1',
'currenttobest1bin': '_currenttobest1',
'best2bin': '_best2',
'rand2bin': '_rand2',
'rand1bin': '_rand1'}
_exponential = {'best1exp': '_best1',
'rand1exp': '_rand1',
'randtobest1exp': '_randtobest1',
'currenttobest1exp': '_currenttobest1',
'best2exp': '_best2',
'rand2exp': '_rand2'}
__combined = _binomial | _exponential
__init_error_msg = ("The population initialization method must be one of "
"'latinhypercube' or 'random', or an array of shape "
"(S, N) where N is the number of parameters and S>5")
def __init__(self, func, bounds, args=(),
strategy='best1bin', maxiter=1000, popsize=15,
tol=0.01, mutation=(0.5, 1), recombination=0.7, rng=None,
maxfun=np.inf, callback=None, disp=False, polish=True,
init='latinhypercube', atol=0, updating='immediate',
workers=1, constraints=(), x0=None, *, integrality=None,
vectorized=False):
if callable(strategy):
# a callable strategy is going to be stored in self.strategy anyway
pass
elif strategy not in self.__combined:
raise ValueError("Please select a valid mutation strategy")
self.strategy = strategy
self.callback = _wrap_callback(callback, "differential_evolution")
self.polish = polish
# set the updating / parallelisation options
if updating in ['immediate', 'deferred']:
self._updating = updating
self.vectorized = vectorized
# want to use parallelisation, but updating is immediate
if workers != 1 and updating == 'immediate':
warnings.warn("differential_evolution: the 'workers' keyword has"
" overridden updating='immediate' to"
" updating='deferred'", UserWarning, stacklevel=2)
self._updating = 'deferred'
if vectorized and workers != 1:
warnings.warn("differential_evolution: the 'workers' keyword"
" overrides the 'vectorized' keyword", stacklevel=2)
self.vectorized = vectorized = False
if vectorized and updating == 'immediate':
warnings.warn("differential_evolution: the 'vectorized' keyword"
" has overridden updating='immediate' to updating"
"='deferred'", UserWarning, stacklevel=2)
self._updating = 'deferred'
# an object with a map method.
if vectorized:
def maplike_for_vectorized_func(func, x):
# send an array (N, S) to the user func,
# expect to receive (S,). Transposition is required because
# internally the population is held as (S, N)
return np.atleast_1d(func(x.T))
workers = maplike_for_vectorized_func
self._mapwrapper = MapWrapper(workers)
# relative and absolute tolerances for convergence
self.tol, self.atol = tol, atol
# Mutation constant should be in [0, 2). If specified as a sequence
# then dithering is performed.
self.scale = mutation
if (not np.all(np.isfinite(mutation)) or
np.any(np.array(mutation) >= 2) or
np.any(np.array(mutation) < 0)):
raise ValueError('The mutation constant must be a float in '
'U[0, 2), or specified as a tuple(min, max)'
' where min < max and min, max are in U[0, 2).')
self.dither = None
if hasattr(mutation, '__iter__') and len(mutation) > 1:
self.dither = [mutation[0], mutation[1]]
self.dither.sort()
self.cross_over_probability = recombination
# we create a wrapped function to allow the use of map (and Pool.map
# in the future)
self.original_func = func
self.func = _FunctionWrapper(func, args)
self.args = args
# convert tuple of lower and upper bounds to limits
# [(low_0, high_0), ..., (low_n, high_n]
# -> [[low_0, ..., low_n], [high_0, ..., high_n]]
if isinstance(bounds, Bounds):
self.limits = np.array(new_bounds_to_old(bounds.lb,
bounds.ub,
len(bounds.lb)),
dtype=float).T
else:
self.limits = np.array(bounds, dtype='float').T
if (np.size(self.limits, 0) != 2 or not
np.all(np.isfinite(self.limits))):
raise ValueError('bounds should be a sequence containing finite '
'real valued (min, max) pairs for each value'
' in x')
if maxiter is None: # the default used to be None
maxiter = 1000
self.maxiter = maxiter
if maxfun is None: # the default used to be None
maxfun = np.inf
self.maxfun = maxfun
# population is scaled to between [0, 1].
# We have to scale between parameter <-> population
# save these arguments for _scale_parameter and
# _unscale_parameter. This is an optimization
self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
with np.errstate(divide='ignore'):
# if lb == ub then the following line will be 1/0, which is why
# we ignore the divide by zero warning. The result from 1/0 is
# inf, so replace those values by 0.
self.__recip_scale_arg2 = 1 / self.__scale_arg2
self.__recip_scale_arg2[~np.isfinite(self.__recip_scale_arg2)] = 0
self.parameter_count = np.size(self.limits, 1)
self.random_number_generator = check_random_state(rng)
# Which parameters are going to be integers?
if np.any(integrality):
# # user has provided a truth value for integer constraints
integrality = np.broadcast_to(
integrality,
self.parameter_count
)
integrality = np.asarray(integrality, bool)
# For integrality parameters change the limits to only allow
# integer values lying between the limits.
lb, ub = np.copy(self.limits)
lb = np.ceil(lb)
ub = np.floor(ub)
if not (lb[integrality] <= ub[integrality]).all():
# there's a parameter that doesn't have an integer value
# lying between the limits
raise ValueError("One of the integrality constraints does not"
" have any possible integer values between"
" the lower/upper bounds.")
nlb = np.nextafter(lb[integrality] - 0.5, np.inf)
nub = np.nextafter(ub[integrality] + 0.5, -np.inf)
self.integrality = integrality
self.limits[0, self.integrality] = nlb
self.limits[1, self.integrality] = nub
else:
self.integrality = False
# check for equal bounds
eb = self.limits[0] == self.limits[1]
eb_count = np.count_nonzero(eb)
# default population initialization is a latin hypercube design, but
# there are other population initializations possible.
# the minimum is 5 because 'best2bin' requires a population that's at
# least 5 long
# 202301 - reduced population size to account for parameters with
# equal bounds. If there are no varying parameters set N to at least 1
self.num_population_members = max(
5,
popsize * max(1, self.parameter_count - eb_count)
)
self.population_shape = (self.num_population_members,
self.parameter_count)
self._nfev = 0
# check first str otherwise will fail to compare str with array
if isinstance(init, str):
if init == 'latinhypercube':
self.init_population_lhs()
elif init == 'sobol':
# must be Ns = 2**m for Sobol'
n_s = int(2 ** np.ceil(np.log2(self.num_population_members)))
self.num_population_members = n_s
self.population_shape = (self.num_population_members,
self.parameter_count)
self.init_population_qmc(qmc_engine='sobol')
elif init == 'halton':
self.init_population_qmc(qmc_engine='halton')
elif init == 'random':
self.init_population_random()
else:
raise ValueError(self.__init_error_msg)
else:
self.init_population_array(init)
if x0 is not None:
# scale to within unit interval and
# ensure parameters are within bounds.
x0_scaled = self._unscale_parameters(np.asarray(x0))
if ((x0_scaled > 1.0) | (x0_scaled < 0.0)).any():
raise ValueError(
"Some entries in x0 lay outside the specified bounds"
)
self.population[0] = x0_scaled
# infrastructure for constraints
self.constraints = constraints
self._wrapped_constraints = []
if hasattr(constraints, '__len__'):
# sequence of constraints, this will also deal with default
# keyword parameter
for c in constraints:
self._wrapped_constraints.append(
_ConstraintWrapper(c, self.x)
)
else:
self._wrapped_constraints = [
_ConstraintWrapper(constraints, self.x)
]
self.total_constraints = np.sum(
[c.num_constr for c in self._wrapped_constraints]
)
self.constraint_violation = np.zeros((self.num_population_members, 1))
self.feasible = np.ones(self.num_population_members, bool)
# an array to shuffle when selecting candidates. Create it here
# rather than repeatedly creating it in _select_samples.
self._random_population_index = np.arange(self.num_population_members)
self.disp = disp
@property
def mutation_func(self):
return getattr(self, self.__combined[self.strategy])
def init_population_lhs(self):
"""
Initializes the population with Latin Hypercube Sampling.
Latin Hypercube Sampling ensures that each parameter is uniformly
sampled over its range.
"""
rng = self.random_number_generator
# Each parameter range needs to be sampled uniformly. The scaled
# parameter range ([0, 1)) needs to be split into
# `self.num_population_members` segments, each of which has the following
# size:
segsize = 1.0 / self.num_population_members
# Within each segment we sample from a uniform random distribution.
# We need to do this sampling for each parameter.
samples = (segsize * rng.uniform(size=self.population_shape)
# Offset each segment to cover the entire parameter range [0, 1)
+ np.linspace(0., 1., self.num_population_members,
endpoint=False)[:, np.newaxis])
# Create an array for population of candidate solutions.
self.population = np.zeros_like(samples)
# Initialize population of candidate solutions by permutation of the
# random samples.
for j in range(self.parameter_count):
order = rng.permutation(range(self.num_population_members))
self.population[:, j] = samples[order, j]
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_qmc(self, qmc_engine):
"""Initializes the population with a QMC method.
QMC methods ensures that each parameter is uniformly
sampled over its range.
Parameters
----------
qmc_engine : str
The QMC method to use for initialization. Can be one of
``latinhypercube``, ``sobol`` or ``halton``.
"""
from scipy.stats import qmc
rng = self.random_number_generator
# Create an array for population of candidate solutions.
if qmc_engine == 'latinhypercube':
sampler = qmc.LatinHypercube(d=self.parameter_count, seed=rng)
elif qmc_engine == 'sobol':
sampler = qmc.Sobol(d=self.parameter_count, seed=rng)
elif qmc_engine == 'halton':
sampler = qmc.Halton(d=self.parameter_count, seed=rng)
else:
raise ValueError(self.__init_error_msg)
self.population = sampler.random(n=self.num_population_members)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_random(self):
"""
Initializes the population at random. This type of initialization
can possess clustering, Latin Hypercube sampling is generally better.
"""
rng = self.random_number_generator
self.population = rng.uniform(size=self.population_shape)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
def init_population_array(self, init):
"""
Initializes the population with a user specified population.
Parameters
----------
init : np.ndarray
Array specifying subset of the initial population. The array should
have shape (S, N), where N is the number of parameters.
The population is clipped to the lower and upper bounds.
"""
# make sure you're using a float array
popn = np.asarray(init, dtype=np.float64)
if (np.size(popn, 0) < 5 or
popn.shape[1] != self.parameter_count or
len(popn.shape) != 2):
raise ValueError("The population supplied needs to have shape"
" (S, len(x)), where S > 4.")
# scale values and clip to bounds, assigning to population
self.population = np.clip(self._unscale_parameters(popn), 0, 1)
self.num_population_members = np.size(self.population, 0)
self.population_shape = (self.num_population_members,
self.parameter_count)
# reset population energies
self.population_energies = np.full(self.num_population_members,
np.inf)
# reset number of function evaluations counter
self._nfev = 0
    @property
    def x(self):
        """
        The best solution from the solver
        """
        # Slot 0 always holds the lowest-energy member (maintained by
        # _promote_lowest_energy); map it from [0, 1) to parameter space.
        return self._scale_parameters(self.population[0])
    @property
    def convergence(self):
        """
        The standard deviation of the population energies divided by their
        mean.
        """
        # Infinite energies mean some members are still unevaluated (or
        # infeasible), so convergence cannot be measured yet.
        if np.any(np.isinf(self.population_energies)):
            return np.inf
        # _MACHEPS guards against division by zero when the mean energy is 0.
        return (np.std(self.population_energies) /
                (np.abs(np.mean(self.population_energies)) + _MACHEPS))
def converged(self):
"""
Return True if the solver has converged.
"""
if np.any(np.isinf(self.population_energies)):
return False
return (np.std(self.population_energies) <=
self.atol +
self.tol * np.abs(np.mean(self.population_energies)))
    def solve(self):
        """
        Runs the DifferentialEvolutionSolver.

        Returns
        -------
        res : OptimizeResult
            The optimization result represented as a `OptimizeResult` object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully,
            ``message`` which describes the cause of the termination,
            ``population`` the solution vectors present in the population, and
            ``population_energies`` the value of the objective function for
            each entry in ``population``.
            See `OptimizeResult` for a description of other attributes. If
            `polish` was employed, and a lower minimum was obtained by the
            polishing, then OptimizeResult also contains the ``jac`` attribute.
            If the eventual solution does not satisfy the applied constraints
            ``success`` will be `False`.
        """
        nit, warning_flag = 0, False
        status_message = _status_message['success']
        # The population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies.
        # Although this is also done in the evolve generator it's possible
        # that someone can set maxiter=0, at which point we still want the
        # initial energies to be calculated (the following loop isn't run).
        if np.all(np.isinf(self.population_energies)):
            self.feasible, self.constraint_violation = (
                self._calculate_population_feasibilities(self.population))
            # only work out population energies for feasible solutions
            self.population_energies[self.feasible] = (
                self._calculate_population_energies(
                    self.population[self.feasible]))
            self._promote_lowest_energy()
        # do the optimization.
        for nit in range(1, self.maxiter + 1):
            # evolve the population by a generation
            try:
                next(self)
            except StopIteration:
                warning_flag = True
                if self._nfev > self.maxfun:
                    status_message = _status_message['maxfev']
                elif self._nfev == self.maxfun:
                    status_message = ('Maximum number of function evaluations'
                                      ' has been reached.')
                break
            if self.disp:
                print(f"differential_evolution step {nit}: f(x)="
                      f" {self.population_energies[0]}"
                      )
            if self.callback:
                # c is a relative measure of progress handed to the callback.
                c = self.tol / (self.convergence + _MACHEPS)
                res = self._result(nit=nit, message="in progress")
                res.convergence = c
                try:
                    warning_flag = bool(self.callback(res))
                except StopIteration:
                    warning_flag = True
                if warning_flag:
                    status_message = 'callback function requested stop early'
            # should the solver terminate?
            if warning_flag or self.converged():
                break
        else:
            # for/else: the loop ran to completion without break -> maxiter.
            status_message = _status_message['maxiter']
            warning_flag = True
        DE_result = self._result(
            nit=nit, message=status_message, warning_flag=warning_flag
        )
        if self.polish and not np.all(self.integrality):
            # can't polish if all the parameters are integers
            if np.any(self.integrality):
                # set the lower/upper bounds equal so that any integrality
                # constraints work.
                limits, integrality = self.limits, self.integrality
                limits[0, integrality] = DE_result.x[integrality]
                limits[1, integrality] = DE_result.x[integrality]
            polish_method = 'L-BFGS-B'
            if self._wrapped_constraints:
                # constrained problems need a constraint-aware polisher
                polish_method = 'trust-constr'
                constr_violation = self._constraint_violation_fn(DE_result.x)
                if np.any(constr_violation > 0.):
                    warnings.warn("differential evolution didn't find a "
                                  "solution satisfying the constraints, "
                                  "attempting to polish from the least "
                                  "infeasible solution",
                                  UserWarning, stacklevel=2)
            # polish may be True (use scipy.optimize.minimize) or a
            # user-supplied callable used directly as the polisher.
            pf = self.polish
            _f = self.original_func
            if not callable(pf):
                pf = partial(minimize, method=polish_method)

                def _f(x):
                    return list(self._mapwrapper(self.func, np.atleast_2d(x)))[0]
            if self.disp:
                print(f"Polishing solution with '{polish_method}'")
            result = pf(
                _f,
                np.copy(DE_result.x),
                bounds=Bounds(lb=self.limits[0], ub=self.limits[1]),
                constraints=self.constraints
            )
            if not isinstance(result, OptimizeResult):
                raise ValueError(
                    "The result from a user defined polishing function "
                    "should return an OptimizeResult."
                )
            self._nfev += result.get("nfev", 0)
            DE_result.nfev = self._nfev
            # Polishing solution is only accepted if there is an improvement in
            # cost function, the polishing was successful and the solution lies
            # within the bounds.
            if (result.fun < DE_result.fun and
                    result.success and
                    np.all(result.x <= self.limits[1]) and
                    np.all(self.limits[0] <= result.x)):
                DE_result.fun = result.fun
                DE_result.x = result.x
                DE_result.jac = result.get("jac", None)
                # to keep internal state consistent
                self.population_energies[0] = result.fun
                self.population[0] = self._unscale_parameters(result.x)
        if self._wrapped_constraints:
            DE_result.constr = [c.violation(DE_result.x) for
                                c in self._wrapped_constraints]
            DE_result.constr_violation = np.max(
                np.concatenate(DE_result.constr))
            DE_result.maxcv = DE_result.constr_violation
            if DE_result.maxcv > 0:
                # if the result is infeasible then success must be False
                DE_result.success = False
                DE_result.message = ("The solution does not satisfy the "
                                     f"constraints, MAXCV = {DE_result.maxcv}")
        return DE_result
def _result(self, **kwds):
# form an intermediate OptimizeResult
nit = kwds.get('nit', None)
message = kwds.get('message', None)
warning_flag = kwds.get('warning_flag', False)
result = OptimizeResult(
x=self.x,
fun=self.population_energies[0],
nfev=self._nfev,
nit=nit,
message=message,
success=(warning_flag is not True),
population=self._scale_parameters(self.population),
population_energies=self.population_energies
)
if self._wrapped_constraints:
result.constr = [c.violation(result.x)
for c in self._wrapped_constraints]
result.constr_violation = np.max(np.concatenate(result.constr))
result.maxcv = result.constr_violation
if result.maxcv > 0:
result.success = False
return result
    def _calculate_population_energies(self, population):
        """
        Calculate the energies of a population.

        Parameters
        ----------
        population : ndarray
            An array of parameter vectors normalised to [0, 1] using lower
            and upper limits. Has shape ``(np.size(population, 0), N)``.

        Returns
        -------
        energies : ndarray
            An array of energies corresponding to each population member. If
            maxfun will be exceeded during this call, then the number of
            function evaluations will be reduced and energies will be
            right-padded with np.inf. Has shape ``(np.size(population, 0),)``
        """
        num_members = np.size(population, 0)
        # S is the number of function evals left to stay under the
        # maxfun budget
        S = min(num_members, self.maxfun - self._nfev)
        # pre-fill with inf; members beyond the budget keep inf energy
        energies = np.full(num_members, np.inf)
        parameters_pop = self._scale_parameters(population)
        try:
            calc_energies = list(
                self._mapwrapper(self.func, parameters_pop[0:S])
            )
            calc_energies = np.squeeze(calc_energies)
        except (TypeError, ValueError) as e:
            # wrong number of arguments for _mapwrapper
            # or wrong length returned from the mapper
            raise RuntimeError(
                "The map-like callable must be of the form f(func, iterable), "
                "returning a sequence of numbers the same length as 'iterable'"
            ) from e
        if calc_energies.size != S:
            if self.vectorized:
                raise RuntimeError("The vectorized function must return an"
                                   " array of shape (S,) when given an array"
                                   " of shape (len(x), S)")
            raise RuntimeError("func(x, *args) must return a scalar value")
        energies[0:S] = calc_energies
        if self.vectorized:
            # a vectorized call evaluates the whole batch as one "evaluation"
            self._nfev += 1
        else:
            self._nfev += S
        return energies
def _promote_lowest_energy(self):
# swaps 'best solution' into first population entry
idx = np.arange(self.num_population_members)
feasible_solutions = idx[self.feasible]
if feasible_solutions.size:
# find the best feasible solution
idx_t = np.argmin(self.population_energies[feasible_solutions])
l = feasible_solutions[idx_t]
else:
# no solution was feasible, use 'best' infeasible solution, which
# will violate constraints the least
l = np.argmin(np.sum(self.constraint_violation, axis=1))
self.population_energies[[0, l]] = self.population_energies[[l, 0]]
self.population[[0, l], :] = self.population[[l, 0], :]
self.feasible[[0, l]] = self.feasible[[l, 0]]
self.constraint_violation[[0, l], :] = (
self.constraint_violation[[l, 0], :])
    def _constraint_violation_fn(self, x):
        """
        Calculates total constraint violation for all the constraints, for a
        set of solutions.

        Parameters
        ----------
        x : ndarray
            Solution vector(s). Has shape (S, N), or (N,), where S is the
            number of solutions to investigate and N is the number of
            parameters.

        Returns
        -------
        cv : ndarray
            Total violation of constraints. Has shape ``(S, M)``, where M is
            the total number of constraint components (which is not necessarily
            equal to len(self._wrapped_constraints)).
        """
        # how many solution vectors you're calculating constraint violations
        # for
        S = np.size(x) // self.parameter_count
        _out = np.zeros((S, self.total_constraints))
        offset = 0
        for con in self._wrapped_constraints:
            # the input/output of the (vectorized) constraint function is
            # {(N, S), (N,)} --> (M, S)
            # The input to _constraint_violation_fn is (S, N) or (N,), so
            # transpose to pass it to the constraint. The output is transposed
            # from (M, S) to (S, M) for further use.
            c = con.violation(x.T).T
            # The shape of c should be (M,), (1, M), or (S, M). Check for
            # those shapes, as an incorrect shape indicates that the
            # user constraint function didn't return the right thing, and
            # the reshape operation will fail. Intercept the wrong shape
            # to give a reasonable error message. I'm not sure what failure
            # modes an inventive user will come up with.
            if c.shape[-1] != con.num_constr or (S > 1 and c.shape[0] != S):
                raise RuntimeError("An array returned from a Constraint has"
                                   " the wrong shape. If `vectorized is False`"
                                   " the Constraint should return an array of"
                                   " shape (M,). If `vectorized is True` then"
                                   " the Constraint must return an array of"
                                   " shape (M, S), where S is the number of"
                                   " solution vectors and M is the number of"
                                   " constraint components in a given"
                                   " Constraint object.")
            # the violation function may return a 1D array, but is it a
            # sequence of constraints for one solution (S=1, M>=1), or the
            # value of a single constraint for a sequence of solutions
            # (S>=1, M=1)
            c = np.reshape(c, (S, con.num_constr))
            # each constraint occupies a contiguous column slice of _out
            _out[:, offset:offset + con.num_constr] = c
            offset += con.num_constr
        return _out
    def _calculate_population_feasibilities(self, population):
        """
        Calculate the feasibilities of a population.

        Parameters
        ----------
        population : ndarray
            An array of parameter vectors normalised to [0, 1] using lower
            and upper limits. Has shape ``(np.size(population, 0), N)``.

        Returns
        -------
        feasible, constraint_violation : ndarray, ndarray
            Boolean array of feasibility for each population member, and an
            array of the constraint violation for each population member.
            constraint_violation has shape ``(np.size(population, 0), M)``,
            where M is the number of constraints.
        """
        num_members = np.size(population, 0)
        if not self._wrapped_constraints:
            # shortcut for no constraints
            return np.ones(num_members, bool), np.zeros((num_members, 1))
        # (S, N)
        parameters_pop = self._scale_parameters(population)
        if self.vectorized:
            # (S, M)
            constraint_violation = np.array(
                self._constraint_violation_fn(parameters_pop)
            )
        else:
            # (S, 1, M)
            constraint_violation = np.array([self._constraint_violation_fn(x)
                                             for x in parameters_pop])
            # if you use the list comprehension in the line above it will
            # create an array of shape (S, 1, M), because each iteration
            # generates an array of (1, M). In comparison the vectorized
            # version returns (S, M). It's therefore necessary to remove axis 1
            constraint_violation = constraint_violation[:, 0]
        # a member is feasible iff no constraint component is violated
        feasible = ~(np.sum(constraint_violation, axis=1) > 0)
        return feasible, constraint_violation
    def __iter__(self):
        # The solver is its own iterator; each __next__ call advances the
        # population by one generation.
        return self
    def __enter__(self):
        # Context-manager support, paired with __exit__ which releases the
        # parallel map resources.
        return self
    def __exit__(self, *args):
        # Delegate cleanup to the map wrapper (e.g. closing a worker pool).
        return self._mapwrapper.__exit__(*args)
def _accept_trial(self, energy_trial, feasible_trial, cv_trial,
energy_orig, feasible_orig, cv_orig):
"""
Trial is accepted if:
* it satisfies all constraints and provides a lower or equal objective
function value, while both the compared solutions are feasible
- or -
* it is feasible while the original solution is infeasible,
- or -
* it is infeasible, but provides a lower or equal constraint violation
for all constraint functions.
This test corresponds to section III of Lampinen [1]_.
Parameters
----------
energy_trial : float
Energy of the trial solution
feasible_trial : float
Feasibility of trial solution
cv_trial : array-like
Excess constraint violation for the trial solution
energy_orig : float
Energy of the original solution
feasible_orig : float
Feasibility of original solution
cv_orig : array-like
Excess constraint violation for the original solution
Returns
-------
accepted : bool
"""
if feasible_orig and feasible_trial:
return energy_trial <= energy_orig
elif feasible_trial and not feasible_orig:
return True
elif not feasible_trial and (cv_trial <= cv_orig).all():
# cv_trial < cv_orig would imply that both trial and orig are not
# feasible
return True
return False
    def __next__(self):
        """
        Evolve the population by a single generation

        Returns
        -------
        x : ndarray
            The best solution from the solver.
        fun : float
            Value of objective function obtained from the best solution.

        Raises
        ------
        StopIteration
            When the function-evaluation budget (maxfun) is exhausted.
        """
        # the population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies
        if np.all(np.isinf(self.population_energies)):
            self.feasible, self.constraint_violation = (
                self._calculate_population_feasibilities(self.population))
            # only need to work out population energies for those that are
            # feasible
            self.population_energies[self.feasible] = (
                self._calculate_population_energies(
                    self.population[self.feasible]))
            self._promote_lowest_energy()
        if self.dither is not None:
            # dither: resample the mutation constant each generation from
            # the supplied (min, max) interval
            self.scale = self.random_number_generator.uniform(self.dither[0],
                                                              self.dither[1])
        if self._updating == 'immediate':
            # update best solution immediately
            for candidate in range(self.num_population_members):
                if self._nfev > self.maxfun:
                    raise StopIteration
                # create a trial solution
                trial = self._mutate(candidate)
                # ensuring that it's in the range [0, 1)
                self._ensure_constraint(trial)
                # scale from [0, 1) to the actual parameter value
                parameters = self._scale_parameters(trial)
                # determine the energy of the objective function
                if self._wrapped_constraints:
                    cv = self._constraint_violation_fn(parameters)
                    feasible = False
                    energy = np.inf
                    if not np.sum(cv) > 0:
                        # solution is feasible
                        feasible = True
                        energy = self.func(parameters)
                        self._nfev += 1
                else:
                    feasible = True
                    cv = np.atleast_2d([0.])
                    energy = self.func(parameters)
                    self._nfev += 1
                # compare trial and population member
                if self._accept_trial(energy, feasible, cv,
                                      self.population_energies[candidate],
                                      self.feasible[candidate],
                                      self.constraint_violation[candidate]):
                    self.population[candidate] = trial
                    self.population_energies[candidate] = np.squeeze(energy)
                    self.feasible[candidate] = feasible
                    self.constraint_violation[candidate] = cv
                    # if the trial candidate is also better than the best
                    # solution then promote it.
                    if self._accept_trial(energy, feasible, cv,
                                          self.population_energies[0],
                                          self.feasible[0],
                                          self.constraint_violation[0]):
                        self._promote_lowest_energy()
        elif self._updating == 'deferred':
            # update best solution once per generation
            if self._nfev >= self.maxfun:
                raise StopIteration
            # 'deferred' approach, vectorised form.
            # create trial solutions
            trial_pop = self._mutate_many(
                np.arange(self.num_population_members)
            )
            # enforce bounds
            self._ensure_constraint(trial_pop)
            # determine the energies of the objective function, but only for
            # feasible trials
            feasible, cv = self._calculate_population_feasibilities(trial_pop)
            trial_energies = np.full(self.num_population_members, np.inf)
            # only calculate for feasible entries
            trial_energies[feasible] = self._calculate_population_energies(
                trial_pop[feasible])
            # which solutions are 'improved'?
            loc = [self._accept_trial(*val) for val in
                   zip(trial_energies, feasible, cv, self.population_energies,
                       self.feasible, self.constraint_violation)]
            loc = np.array(loc)
            # vectorised accept: where loc is True take the trial state,
            # otherwise keep the existing population member
            self.population = np.where(loc[:, np.newaxis],
                                       trial_pop,
                                       self.population)
            self.population_energies = np.where(loc,
                                                trial_energies,
                                                self.population_energies)
            self.feasible = np.where(loc,
                                     feasible,
                                     self.feasible)
            self.constraint_violation = np.where(loc[:, np.newaxis],
                                                 cv,
                                                 self.constraint_violation)
            # make sure the best solution is updated if updating='deferred'.
            # put the lowest energy into the best solution position.
            self._promote_lowest_energy()
        return self.x, self.population_energies[0]
    def _scale_parameters(self, trial):
        """Scale from a number between 0 and 1 to parameters."""
        # trial either has shape (N, ) or (L, N), where L is the number of
        # solutions being scaled
        scaled = self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
        if np.count_nonzero(self.integrality):
            # round integer-constrained parameters in place; broadcasting
            # lets one integrality mask cover either (N,) or (L, N) input
            i = np.broadcast_to(self.integrality, scaled.shape)
            scaled[i] = np.round(scaled[i])
        return scaled
    def _unscale_parameters(self, parameters):
        """Scale from parameters to a number between 0 and 1."""
        # inverse of _scale_parameters; a precomputed reciprocal avoids a
        # division per call
        return (parameters - self.__scale_arg1) * self.__recip_scale_arg2 + 0.5
def _ensure_constraint(self, trial):
"""Make sure the parameters lie between the limits."""
mask = np.bitwise_or(trial > 1, trial < 0)
if oob := np.count_nonzero(mask):
trial[mask] = self.random_number_generator.uniform(size=oob)
def _mutate_custom(self, candidate):
rng = self.random_number_generator
msg = (
"strategy must have signature"
" f(candidate: int, population: np.ndarray, rng=None) returning an"
" array of shape (N,)"
)
_population = self._scale_parameters(self.population)
if not len(np.shape(candidate)):
# single entry in population
trial = self.strategy(candidate, _population, rng=rng)
if trial.shape != (self.parameter_count,):
raise RuntimeError(msg)
else:
S = candidate.shape[0]
trial = np.array(
[self.strategy(c, _population, rng=rng) for c in candidate],
dtype=float
)
if trial.shape != (S, self.parameter_count):
raise RuntimeError(msg)
return self._unscale_parameters(trial)
    def _mutate_many(self, candidates):
        """Create trial vectors based on a mutation strategy.

        Vectorised counterpart of _mutate: builds one trial per candidate
        index in `candidates` and returns an (S, N) array.
        """
        rng = self.random_number_generator
        S = len(candidates)
        if callable(self.strategy):
            # a user-supplied strategy handles its own trial construction
            return self._mutate_custom(candidates)
        trial = np.copy(self.population[candidates])
        samples = np.array([self._select_samples(c, 5) for c in candidates])
        if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
            # these strategies also need the candidate's own index
            bprime = self.mutation_func(candidates, samples)
        else:
            bprime = self.mutation_func(samples)
        fill_point = rng_integers(rng, self.parameter_count, size=S)
        crossovers = rng.uniform(size=(S, self.parameter_count))
        crossovers = crossovers < self.cross_over_probability
        if self.strategy in self._binomial:
            # A randomly selected parameter is always from the bprime vector
            # for binomial crossover. The fill_point ensures at least one
            # parameter comes from bprime, preventing the possibility of no
            # mutation influence in the trial vector.
            i = np.arange(S)
            crossovers[i, fill_point[i]] = True
            trial = np.where(crossovers, bprime, trial)
            return trial
        elif self.strategy in self._exponential:
            # For exponential crossover, fill_point determines the starting
            # index for a consecutive sequence of parameters from bprime. The
            # sequence continues until a crossover probability check fails.
            # The starting index is always from the bprime vector ensuring at
            # least one parameter comes from bprime.
            crossovers[..., 0] = True
            for j in range(S):
                i = 0
                init_fill = fill_point[j]
                while (i < self.parameter_count and crossovers[j, i]):
                    trial[j, init_fill] = bprime[j, init_fill]
                    init_fill = (init_fill + 1) % self.parameter_count
                    i += 1
            return trial
    def _mutate(self, candidate):
        """Create a trial vector based on a mutation strategy.

        Returns a single (N,) trial vector for the population member at
        index `candidate`.
        """
        rng = self.random_number_generator
        if callable(self.strategy):
            # a user-supplied strategy handles its own trial construction
            return self._mutate_custom(candidate)
        fill_point = rng_integers(rng, self.parameter_count)
        samples = self._select_samples(candidate, 5)
        trial = np.copy(self.population[candidate])
        if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
            # these strategies also need the candidate's own index
            bprime = self.mutation_func(candidate, samples)
        else:
            bprime = self.mutation_func(samples)
        crossovers = rng.uniform(size=self.parameter_count)
        crossovers = crossovers < self.cross_over_probability
        if self.strategy in self._binomial:
            # A randomly selected parameter is always from the bprime vector
            # for binomial crossover. The fill_point ensures at least one
            # parameter comes from bprime, preventing the possibility of no
            # mutation influence in the trial vector.
            crossovers[fill_point] = True
            trial = np.where(crossovers, bprime, trial)
            return trial
        elif self.strategy in self._exponential:
            # For exponential crossover, fill_point determines the starting
            # index for a consecutive sequence of parameters from bprime. The
            # sequence continues until a crossover probability check fails.
            # The starting index is always from the bprime vector ensuring at
            # least one parameter comes from bprime.
            i = 0
            crossovers[0] = True
            while i < self.parameter_count and crossovers[i]:
                trial[fill_point] = bprime[fill_point]
                fill_point = (fill_point + 1) % self.parameter_count
                i += 1
            return trial
def _best1(self, samples):
"""best1bin, best1exp"""
# samples.shape == (S, 5)
# or
# samples.shape(5,)
r0, r1 = samples[..., :2].T
return (self.population[0] + self.scale *
(self.population[r0] - self.population[r1]))
def _rand1(self, samples):
"""rand1bin, rand1exp"""
r0, r1, r2 = samples[..., :3].T
return (self.population[r0] + self.scale *
(self.population[r1] - self.population[r2]))
def _randtobest1(self, samples):
"""randtobest1bin, randtobest1exp"""
r0, r1, r2 = samples[..., :3].T
bprime = np.copy(self.population[r0])
bprime += self.scale * (self.population[0] - bprime)
bprime += self.scale * (self.population[r1] -
self.population[r2])
return bprime
def _currenttobest1(self, candidate, samples):
"""currenttobest1bin, currenttobest1exp"""
r0, r1 = samples[..., :2].T
bprime = (self.population[candidate] + self.scale *
(self.population[0] - self.population[candidate] +
self.population[r0] - self.population[r1]))
return bprime
def _best2(self, samples):
"""best2bin, best2exp"""
r0, r1, r2, r3 = samples[..., :4].T
bprime = (self.population[0] + self.scale *
(self.population[r0] + self.population[r1] -
self.population[r2] - self.population[r3]))
return bprime
def _rand2(self, samples):
"""rand2bin, rand2exp"""
r0, r1, r2, r3, r4 = samples[..., :5].T
bprime = (self.population[r0] + self.scale *
(self.population[r1] + self.population[r2] -
self.population[r3] - self.population[r4]))
return bprime
def _select_samples(self, candidate, number_samples):
"""
obtain random integers from range(self.num_population_members),
without replacement. You can't have the original candidate either.
"""
self.random_number_generator.shuffle(self._random_population_index)
idxs = self._random_population_index[:number_samples + 1]
return idxs[idxs != candidate][:number_samples]
|
DifferentialEvolutionSolver
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_tools_config_param.py
|
{
"start": 2488,
"end": 4708
}
|
class ____(TypedDict, total=False):
    """Configuration for a remote MCP tool server entry.

    ``server_label`` identifies the server in tool calls; one of
    ``server_url`` or ``connector_id`` must be provided to locate it.
    """

    server_label: Required[str]
    """A label for this MCP server, used to identify it in tool calls."""

    type: Required[Literal["mcp"]]
    """The type of the MCP tool. Always `mcp`."""

    allowed_tools: Optional[McpAllowedTools]
    """List of allowed tool names or a filter object."""

    authorization: str
    """
    An OAuth access token that can be used with a remote MCP server, either with a
    custom MCP server URL or a service connector. Your application must handle the
    OAuth authorization flow and provide the token here.
    """

    connector_id: Literal[
        "connector_dropbox",
        "connector_gmail",
        "connector_googlecalendar",
        "connector_googledrive",
        "connector_microsoftteams",
        "connector_outlookcalendar",
        "connector_outlookemail",
        "connector_sharepoint",
    ]
    """Identifier for service connectors, like those available in ChatGPT.

    One of `server_url` or `connector_id` must be provided. Learn more about service
    connectors
    [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).

    Currently supported `connector_id` values are:

    - Dropbox: `connector_dropbox`
    - Gmail: `connector_gmail`
    - Google Calendar: `connector_googlecalendar`
    - Google Drive: `connector_googledrive`
    - Microsoft Teams: `connector_microsoftteams`
    - Outlook Calendar: `connector_outlookcalendar`
    - Outlook Email: `connector_outlookemail`
    - SharePoint: `connector_sharepoint`
    """

    headers: Optional[Dict[str, str]]
    """Optional HTTP headers to send to the MCP server.

    Use for authentication or other purposes.
    """

    require_approval: Optional[McpRequireApproval]
    """Specify which of the MCP server's tools require approval."""

    server_description: str
    """Optional description of the MCP server, used to provide more context."""

    server_url: str
    """The URL for the MCP server.

    One of `server_url` or `connector_id` must be provided.
    """
RealtimeToolsConfigUnionParam: TypeAlias = Union[RealtimeFunctionToolParam, Mcp]
RealtimeToolsConfigParam: TypeAlias = List[RealtimeToolsConfigUnionParam]
|
Mcp
|
python
|
huggingface__transformers
|
src/transformers/models/blip/configuration_blip.py
|
{
"start": 10444,
"end": 14630
}
|
class ____(PreTrainedConfig):
    r"""
    [`BlipConfig`] is the configuration class to store the configuration of a [`BlipModel`]. It is used to instantiate
    a BLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
    a configuration with the defaults will yield a similar configuration to that of the BLIP-base
    [Salesforce/blip-vqa-base](https://huggingface.co/Salesforce/blip-vqa-base) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipTextConfig`].
        vision_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`BlipVisionConfig`].
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and vision projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original BLIP implementation.
        image_text_hidden_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden state of the image-text fusion layer.
        label_smoothing (float, optional, *optional*, defaults to 0.0):
            A float in [0.0, 1.0]. Specifies the amount of smoothing when computing the loss, where 0.0 means no smoothing. The targets
            become a mixture of the original ground truth and a uniform distribution as described in
            `Rethinking the Inception Architecture for Computer Vision <https://huggingface.co/papers/1512.00567>`__. Default: :math:`0.0`.
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import BlipConfig, BlipModel

    >>> # Initializing a BlipConfig with Salesforce/blip-vqa-base style configuration
    >>> configuration = BlipConfig()

    >>> # Initializing a BlipPModel (with random weights) from the Salesforce/blip-vqa-base style configuration
    >>> model = BlipModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config

    >>> # We can also initialize a BlipConfig from a BlipTextConfig and a BlipVisionConfig

    >>> # Initializing a BLIPText and BLIPVision configuration
    >>> config_text = BlipTextConfig()
    >>> config_vision = BlipVisionConfig()

    >>> config = BlipConfig(text_config=config_text, vision_config=config_vision)
    ```"""

    model_type = "blip"
    sub_configs = {"text_config": BlipTextConfig, "vision_config": BlipVisionConfig}

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        image_text_hidden_size=256,
        label_smoothing=0.0,
        **kwargs,
    ):
        # Each sub-config may arrive as None (use defaults) or as a plain
        # dict (convert); a ready-made config object is used as-is.
        if text_config is None:
            text_config = BlipTextConfig()
            logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values.")
        elif isinstance(text_config, dict):
            text_config = BlipTextConfig(**text_config)
        if vision_config is None:
            vision_config = BlipVisionConfig()
            logger.info("`vision_config` is `None`. initializing the `BlipVisionConfig` with default values.")
        elif isinstance(vision_config, dict):
            vision_config = BlipVisionConfig(**vision_config)
        self.text_config = text_config
        self.vision_config = vision_config
        # The text encoder cross-attends over vision features, so its
        # encoder width must track the vision tower's hidden size.
        self.text_config.encoder_hidden_size = self.vision_config.hidden_size
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        # Fixed constants used by BLIP's weight-initialization scheme.
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
        self.image_text_hidden_size = image_text_hidden_size
        self.label_smoothing = label_smoothing
        super().__init__(**kwargs)
__all__ = ["BlipConfig", "BlipTextConfig", "BlipVisionConfig"]
|
BlipConfig
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/network/linux.py
|
{
"start": 893,
"end": 18392
}
|
class ____(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
INTERFACE_TYPE = {
'1': 'ether',
'32': 'infiniband',
'512': 'ppp',
'772': 'loopback',
'65534': 'tunnel',
}
    def populate(self, collected_facts=None):
        """Collect Linux network facts using the ``ip`` utility.

        Returns an empty dict when ``ip`` is not available on the target.
        """
        network_facts = {}
        ip_path = self.module.get_bin_path('ip')
        if ip_path is None:
            # no iproute2 on the target: nothing we can discover
            return network_facts
        default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path,
                                                                 collected_facts=collected_facts)
        interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
        # NOTE(review): this stores a dict *view*, not a list — presumably
        # serialized downstream; confirm before changing.
        network_facts['interfaces'] = interfaces.keys()
        for iface in interfaces:
            # one top-level fact per interface name
            network_facts[iface] = interfaces[iface]
        network_facts['default_ipv4'] = default_ipv4
        network_facts['default_ipv6'] = default_ipv6
        network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
        network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
        network_facts['locally_reachable_ips'] = self.get_locally_reachable_ips(ip_path)
        return network_facts
# List all `scope host` routes/addresses.
# They belong to routes, but it means the whole prefix is reachable
# locally, regardless of specific IP addresses.
# E.g.: 192.168.0.0/24, any IP address is reachable from this range
# if assigned as scope host.
def get_locally_reachable_ips(self, ip_path):
locally_reachable_ips = dict(
ipv4=[],
ipv6=[],
)
def parse_locally_reachable_ips(output):
for line in output.splitlines():
if not line:
continue
words = line.split()
if words[0] != 'local':
continue
address = words[1]
if ":" in address:
if address not in locally_reachable_ips['ipv6']:
locally_reachable_ips['ipv6'].append(address)
else:
if address not in locally_reachable_ips['ipv4']:
locally_reachable_ips['ipv4'].append(address)
args = [ip_path, '-4', 'route', 'show', 'table', 'local']
rc, routes, dummy = self.module.run_command(args)
if rc == 0:
parse_locally_reachable_ips(routes)
args = [ip_path, '-6', 'route', 'show', 'table', 'local']
rc, routes, dummy = self.module.run_command(args)
if rc == 0:
parse_locally_reachable_ips(routes)
return locally_reachable_ips
def get_default_interfaces(self, ip_path, collected_facts=None):
collected_facts = collected_facts or {}
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4=[ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6=[ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4={}, v6={})
for v in 'v4', 'v6':
if (v == 'v6' and collected_facts.get('ansible_os_family') == 'RedHat' and
collected_facts.get('ansible_distribution_version', '').startswith('4.')):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v], errors='surrogate_then_replace')
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.splitlines()[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i + 1]
elif words[i] == 'src':
interface[v]['address'] = words[i + 1]
elif words[i] == 'via' and words[i + 1] != command[v][-1]:
interface[v]['gateway'] = words[i + 1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses=[],
all_ipv6_addresses=[],
)
# FIXME: maybe split into smaller methods?
# FIXME: this is pretty much a constructor
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = {'device': device}
if os.path.exists(os.path.join(path, 'address')):
macaddress = get_file_content(os.path.join(path, 'address'), default='')
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
if os.path.exists(os.path.join(path, 'device', 'driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
_type = get_file_content(os.path.join(path, 'type'))
interfaces[device]['type'] = self.INTERFACE_TYPE.get(_type, 'unknown')
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*'))]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
if primary:
interfaces[device]['primary'] = primary
path = os.path.join(path, 'bonding', 'all_slaves_active')
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
if os.path.exists(os.path.join(path, 'bonding_slave')):
interfaces[device]['perm_macaddress'] = get_file_content(os.path.join(path, 'bonding_slave', 'perm_hwaddr'), default='')
if os.path.exists(os.path.join(path, 'device')):
interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path, 'device')))
if os.path.exists(os.path.join(path, 'speed')):
speed = get_file_content(os.path.join(path, 'speed'))
if speed is not None:
interfaces[device]['speed'] = int(speed)
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path, 'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
data = int(get_file_content(os.path.join(path, 'flags')), 16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
# TODO: determine if this needs to be in a nested scope/closure
def parse_ip_output(output, secondary=False):
for line in output.splitlines():
if not line:
continue
words = line.split()
broadcast = ''
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
if len(words) > 3:
if words[2] == 'brd':
broadcast = words[3]
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
iface = words[-1]
# NOTE: device is ref to outside scope
# NOTE: interfaces is also ref to outside scope
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
'prefix': netmask_length,
}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
'prefix': netmask_length,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
if device != iface:
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'broadcast': broadcast,
'netmask': netmask,
'network': network,
'prefix': netmask_length,
})
# NOTE: default_ipv4 is ref to outside scope
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['broadcast'] = broadcast
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['prefix'] = netmask_length
# NOTE: macaddress is ref from outside scope
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
if 'peer' == words[2]:
address = words[1]
dummy, prefix = words[3].split('/')
scope = words[5]
else:
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address': address,
'prefix': prefix,
'scope': scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
args = [ip_path, 'addr', 'show', 'primary', 'dev', device]
rc, primary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
parse_ip_output(primary_data)
else:
# possibly busybox, fallback to running without the "primary" arg
# https://github.com/ansible/ansible/issues/50871
args = [ip_path, 'addr', 'show', 'dev', device]
rc, data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
parse_ip_output(data)
args = [ip_path, 'addr', 'show', 'secondary', 'dev', device]
rc, secondary_data, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
parse_ip_output(secondary_data, secondary=True)
interfaces[device].update(self.get_ethtool_data(device))
# replace : by _ in interface name since they are hard to use in template
new_interfaces = {}
# i is a dict key (string) not an index int
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':', '_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
def get_ethtool_data(self, device):
data = {}
ethtool_path = self.module.get_bin_path("ethtool")
# FIXME: exit early on falsey ethtool_path and un-indent
if ethtool_path:
args = [ethtool_path, '-k', device]
rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
# FIXME: exit early on falsey if we can
if rc == 0:
features = {}
for line in stdout.strip().splitlines():
if not line or line.endswith(":"):
continue
key, value = line.split(": ")
if not value:
continue
features[key.strip().replace('-', '_')] = value.strip()
data['features'] = features
args = [ethtool_path, '-T', device]
rc, stdout, stderr = self.module.run_command(args, errors='surrogate_then_replace')
if rc == 0:
data['timestamping'] = [m.lower() for m in re.findall(r'SOF_TIMESTAMPING_(\w+)', stdout)]
data['hw_timestamp_filters'] = [m.lower() for m in re.findall(r'HWTSTAMP_FILTER_(\w+)', stdout)]
m = re.search(r'PTP Hardware Clock: (\d+)', stdout)
if m:
data['phc_index'] = int(m.groups()[0])
return data
|
LinuxNetwork
|
python
|
numpy__numpy
|
numpy/linalg/tests/test_linalg.py
|
{
"start": 12594,
"end": 12923
}
|
class ____(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
|
HermitianTestCase
|
python
|
django__django
|
tests/admin_inlines/admin.py
|
{
"start": 7117,
"end": 7219
}
|
class ____(admin.StackedInline):
model = ChildModel2
# admin for #19425 and #18388
|
ChildModel2Inline
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 637656,
"end": 639137
}
|
class ____(Gradient):
"""
LinearGradient schema wrapper.
Parameters
----------
gradient : Literal['linear']
The type of gradient. Use ``"linear"`` for a linear gradient.
stops : Sequence[dict, :class:`GradientStop`]
An array of gradient stops defining the gradient color sequence.
id : str
x1 : float
The starting x-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``0``
x2 : float
The ending x-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``1``
y1 : float
The starting y-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``0``
y2 : float
The ending y-coordinate, in normalized [0, 1] coordinates, of the linear gradient.
**Default value:** ``0``
"""
_schema = {"$ref": "#/definitions/LinearGradient"}
def __init__(
self,
gradient: Optional[Literal["linear"]] = Undefined,
stops: Optional[Sequence[SchemaBase | Map]] = Undefined,
id: Optional[str] = Undefined,
x1: Optional[float] = Undefined,
x2: Optional[float] = Undefined,
y1: Optional[float] = Undefined,
y2: Optional[float] = Undefined,
**kwds,
):
super().__init__(
gradient=gradient, stops=stops, id=id, x1=x1, x2=x2, y1=y1, y2=y2, **kwds
)
|
LinearGradient
|
python
|
django__django
|
django/contrib/gis/forms/fields.py
|
{
"start": 4255,
"end": 4324
}
|
class ____(GeometryField):
geom_type = "MULTIPOINT"
|
MultiPointField
|
python
|
ApeWorX__ape
|
src/ape_ethereum/provider.py
|
{
"start": 59373,
"end": 68024
}
|
class ____(Web3Provider, ABC):
# optimal values for geth
block_page_size: int = 5000
concurrency: int = 16
name: str = "node"
# NOTE: Appends user-agent to base User-Agent string.
request_header: dict = {"User-Agent": f"EthereumNodeProvider/web3.py/{web3_version}"}
@property
def connection_str(self) -> str:
return self.uri or f"{self.ipc_path}"
@property
def connection_id(self) -> Optional[str]:
return f"{self.network_choice}:{self.uri}"
@property
def _clean_uri(self) -> str:
uri = f"{self.uri}"
return sanitize_url(uri) if _is_http_url(uri) or _is_ws_url(uri) else uri
@property
def data_dir(self) -> Path:
if self.settings.data_dir:
return self.settings.data_dir.expanduser()
return _get_default_data_dir()
@property
def ipc_path(self) -> Optional[Path]:
if path := super().ipc_path:
return path
# Default (used by geth-process).
return self.data_dir / "geth.ipc"
@cached_property
def has_poa_history(self) -> bool:
"""
``True`` if detected any PoA history. If the chain was _ever_ PoA, the special
middleware is needed for web3.py. Provider plugins use this property when
creating Web3 instances.
"""
findings = False
for option in ("earliest", "latest"):
try:
block = self.web3.eth.get_block(option) # type: ignore[arg-type]
except ExtraDataLengthError:
findings = True
break
except Exception:
# Some chains are "light" and we may not be able to detect
# if it needs PoA middleware.
continue
else:
findings = (
"proofOfAuthorityData" in block
or len(block.get("extraData", "")) > MAX_EXTRADATA_LENGTH
)
if findings:
break
return findings
@cached_property
def _ots_api_level(self) -> Optional[int]:
# NOTE: Returns None when OTS namespace is not enabled.
try:
result = self.make_request("ots_getApiLevel")
except (NotImplementedError, ApeException, ValueError):
return None
if isinstance(result, int):
return result
elif isinstance(result, str) and result.isnumeric():
return int(result)
return None
def _set_web3(self):
# Clear cached version when connecting to another URI.
self._client_version = None
headers = self.network_manager.get_request_headers(
self.network.ecosystem.name, self.network.name, self.name
)
self._web3 = _create_web3(
http_uri=self.http_uri,
ipc_path=self.ipc_path,
ws_uri=self.ws_uri,
request_kwargs={"headers": headers},
)
def _complete_connect(self):
client_version = self.client_version.lower()
if "geth" in client_version:
self._log_connection("Geth")
elif "reth" in client_version:
self._log_connection("Reth")
elif "erigon" in client_version:
self._log_connection("Erigon")
self.concurrency = 8
self.block_page_size = 40_000
elif "nethermind" in client_version:
self._log_connection("Nethermind")
self.concurrency = 32
self.block_page_size = 50_000
else:
client_name = client_version.partition("/")[0]
logger.info(f"Connecting to a '{client_name}' node.")
if not self.network.is_dev:
self.web3.eth.set_gas_price_strategy(rpc_gas_price_strategy)
if self.has_poa_history and ExtraDataToPOAMiddleware not in self.web3.middleware_onion:
self.web3.middleware_onion.inject(ExtraDataToPOAMiddleware, layer=0)
chain_id = self.chain_id
self.network.verify_chain_id(chain_id)
# Correct network name, if using custom-URL approach.
if self.network.name == "custom":
for ecosystem_name, network in PUBLIC_CHAIN_META.items():
for network_name, meta in network.items():
if "chainId" not in meta or meta["chainId"] != chain_id:
continue
# Network found.
self.network.name = network_name
self.network.ecosystem.name = ecosystem_name
break
def disconnect(self):
self._call_trace_approach = None
self._web3 = None
self._client_version = None
def _log_connection(self, client_name: str):
msg = f"Connecting to existing {client_name.strip()} node at"
suffix = (
self.ipc_path.as_posix().replace(Path.home().as_posix(), "$HOME")
if self.ipc_path is not None and self.ipc_path.exists()
else self._clean_uri
)
logger.info(f"{msg} {suffix}.")
def ots_get_contract_creator(self, address: "AddressType") -> Optional[dict]:
if self._ots_api_level is None:
return None
result = self.make_request("ots_getContractCreator", [address])
if result is None:
# NOTE: Skip the explorer part of the error message via `has_explorer=True`.
raise ContractNotFoundError(address, True, self.network_choice)
return result
def _get_contract_creation_receipt(self, address: "AddressType") -> Optional[ReceiptAPI]:
if result := self.ots_get_contract_creator(address):
tx_hash = result["hash"]
return self.get_receipt(tx_hash)
return None
def connect(self):
self._set_web3()
if not self.is_connected:
uri = self._clean_uri
message = f"No (supported) node found on '{uri}'."
raise ProviderError(message)
self._complete_connect()
def simulate_transaction_bundle(
self, bundle: Bundle, sim_overrides: Optional[dict] = None
) -> SimulationReport:
"""
Submit a bundle and get the simulation result.
Args:
bundle (:class:`~ape.types.private_mempool.Bundle`) A bundle of transactions to send to the matchmaker.
sim_overrides (dict | None) Optional fields to override simulation state.
Returns:
:class:`~ape.types.private_mempool.SimulationReport`
"""
bundle_request = {"bundle": bundle.model_dump(), "simOverrides": sim_overrides or {}}
result = self.provider.make_request("mev_simBundle", bundle_request)
return SimulationReport.model_validate(result)
def _create_web3(
http_uri: Optional[str] = None,
ipc_path: Optional[Path] = None,
ws_uri: Optional[str] = None,
request_kwargs: Optional[dict] = None,
):
# NOTE: This list is ordered by try-attempt.
# Try ENV, then IPC, and then HTTP last.
providers: list = [load_provider_from_environment]
if ipc := ipc_path:
providers.append(lambda: IPCProvider(ipc_path=ipc))
if http := http_uri:
request_kwargs = request_kwargs or {}
if "timeout" not in request_kwargs:
request_kwargs["timeout"] = 30 * 60
providers.append(lambda: HTTPProvider(endpoint_uri=http, request_kwargs=request_kwargs))
if ws := ws_uri:
providers.append(lambda: WebsocketProvider(endpoint_uri=ws))
provider = AutoProvider(potential_providers=providers)
return Web3(provider, middleware=[])
def _get_default_data_dir() -> Path:
# Modified from web3.py package to always return IPC even when none exist.
if sys.platform == "darwin":
return Path.home() / "Library" / "Ethereum"
elif sys.platform.startswith("linux") or sys.platform.startswith("freebsd"):
return Path.home() / "ethereum"
elif sys.platform == "win32":
return Path(os.path.join("\\\\", ".", "pipe"))
else:
raise ValueError(
f"Unsupported platform '{sys.platform}'. Only darwin/linux/win32/"
"freebsd are supported. You must specify the data_dir."
)
def _is_uri(val: str) -> bool:
return _is_http_url(val) or _is_ws_url(val) or _is_ipc_path(val)
def _is_http_url(val: str) -> bool:
return val.startswith("https://") or val.startswith("http://")
def _is_ws_url(val: str) -> bool:
return val.startswith("wss://") or val.startswith("ws://")
def _is_ipc_path(val: Union[str, Path]) -> bool:
return f"{val}".endswith(".ipc")
|
EthereumNodeProvider
|
python
|
mwaskom__seaborn
|
seaborn/_statistics.py
|
{
"start": 7169,
"end": 14259
}
|
class ____:
"""Univariate and bivariate histogram estimator."""
def __init__(
self,
stat="count",
bins="auto",
binwidth=None,
binrange=None,
discrete=False,
cumulative=False,
):
"""Initialize the estimator with its parameters.
Parameters
----------
stat : str
Aggregate statistic to compute in each bin.
- `count`: show the number of observations in each bin
- `frequency`: show the number of observations divided by the bin width
- `probability` or `proportion`: normalize such that bar heights sum to 1
- `percent`: normalize such that bar heights sum to 100
- `density`: normalize such that the total area of the histogram equals 1
bins : str, number, vector, or a pair of such values
Generic bin parameter that can be the name of a reference rule,
the number of bins, or the breaks of the bins.
Passed to :func:`numpy.histogram_bin_edges`.
binwidth : number or pair of numbers
Width of each bin, overrides ``bins`` but can be used with
``binrange``.
binrange : pair of numbers or a pair of pairs
Lowest and highest value for bin edges; can be used either
with ``bins`` or ``binwidth``. Defaults to data extremes.
discrete : bool or pair of bools
If True, set ``binwidth`` and ``binrange`` such that bin
edges cover integer values in the dataset.
cumulative : bool
If True, return the cumulative statistic.
"""
stat_choices = [
"count", "frequency", "density", "probability", "proportion", "percent",
]
_check_argument("stat", stat_choices, stat)
self.stat = stat
self.bins = bins
self.binwidth = binwidth
self.binrange = binrange
self.discrete = discrete
self.cumulative = cumulative
self.bin_kws = None
def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):
"""Inner function that takes bin parameters as arguments."""
if binrange is None:
start, stop = x.min(), x.max()
else:
start, stop = binrange
if discrete:
bin_edges = np.arange(start - .5, stop + 1.5)
elif binwidth is not None:
step = binwidth
bin_edges = np.arange(start, stop + step, step)
# Handle roundoff error (maybe there is a less clumsy way?)
if bin_edges.max() < stop or len(bin_edges) < 2:
bin_edges = np.append(bin_edges, bin_edges.max() + step)
else:
bin_edges = np.histogram_bin_edges(
x, bins, binrange, weights,
)
return bin_edges
def define_bin_params(self, x1, x2=None, weights=None, cache=True):
"""Given data, return numpy.histogram parameters to define bins."""
if x2 is None:
bin_edges = self._define_bin_edges(
x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,
)
if isinstance(self.bins, (str, Number)):
n_bins = len(bin_edges) - 1
bin_range = bin_edges.min(), bin_edges.max()
bin_kws = dict(bins=n_bins, range=bin_range)
else:
bin_kws = dict(bins=bin_edges)
else:
bin_edges = []
for i, x in enumerate([x1, x2]):
# Resolve out whether bin parameters are shared
# or specific to each variable
bins = self.bins
if not bins or isinstance(bins, (str, Number)):
pass
elif isinstance(bins[i], str):
bins = bins[i]
elif len(bins) == 2:
bins = bins[i]
binwidth = self.binwidth
if binwidth is None:
pass
elif not isinstance(binwidth, Number):
binwidth = binwidth[i]
binrange = self.binrange
if binrange is None:
pass
elif not isinstance(binrange[0], Number):
binrange = binrange[i]
discrete = self.discrete
if not isinstance(discrete, bool):
discrete = discrete[i]
# Define the bins for this variable
bin_edges.append(self._define_bin_edges(
x, weights, bins, binwidth, binrange, discrete,
))
bin_kws = dict(bins=tuple(bin_edges))
if cache:
self.bin_kws = bin_kws
return bin_kws
def _eval_bivariate(self, x1, x2, weights):
"""Inner function for histogram of two variables."""
bin_kws = self.bin_kws
if bin_kws is None:
bin_kws = self.define_bin_params(x1, x2, cache=False)
density = self.stat == "density"
hist, *bin_edges = np.histogram2d(
x1, x2, **bin_kws, weights=weights, density=density
)
area = np.outer(
np.diff(bin_edges[0]),
np.diff(bin_edges[1]),
)
if self.stat == "probability" or self.stat == "proportion":
hist = hist.astype(float) / hist.sum()
elif self.stat == "percent":
hist = hist.astype(float) / hist.sum() * 100
elif self.stat == "frequency":
hist = hist.astype(float) / area
if self.cumulative:
if self.stat in ["density", "frequency"]:
hist = (hist * area).cumsum(axis=0).cumsum(axis=1)
else:
hist = hist.cumsum(axis=0).cumsum(axis=1)
return hist, bin_edges
def _eval_univariate(self, x, weights):
"""Inner function for histogram of one variable."""
bin_kws = self.bin_kws
if bin_kws is None:
bin_kws = self.define_bin_params(x, weights=weights, cache=False)
density = self.stat == "density"
hist, bin_edges = np.histogram(
x, **bin_kws, weights=weights, density=density,
)
if self.stat == "probability" or self.stat == "proportion":
hist = hist.astype(float) / hist.sum()
elif self.stat == "percent":
hist = hist.astype(float) / hist.sum() * 100
elif self.stat == "frequency":
hist = hist.astype(float) / np.diff(bin_edges)
if self.cumulative:
if self.stat in ["density", "frequency"]:
hist = (hist * np.diff(bin_edges)).cumsum()
else:
hist = hist.cumsum()
return hist, bin_edges
def __call__(self, x1, x2=None, weights=None):
"""Count the occurrences in each bin, maybe normalize."""
if x2 is None:
return self._eval_univariate(x1, weights)
else:
return self._eval_bivariate(x1, x2, weights)
|
Histogram
|
python
|
PyCQA__pylint
|
pylint/utils/pragma_parser.py
|
{
"start": 2993,
"end": 3110
}
|
class ____(PragmaParserError):
"""Thrown in case the of a valid but unrecognized option."""
|
UnRecognizedOptionError
|
python
|
huggingface__transformers
|
src/transformers/models/flex_olmo/modeling_flex_olmo.py
|
{
"start": 5824,
"end": 9862
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
q_type, k_type = q.dtype, k.dtype
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed.to(q_type), k_embed.to(k_type)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
|
FlexOlmoMLP
|
python
|
walkccc__LeetCode
|
solutions/3547. Maximum Sum of Edge Values in a Graph/3547.py
|
{
"start": 0,
"end": 1546
}
|
class ____:
def maxScore(self, n: int, edges: list[list[int]]) -> int:
ans = 0
graph = [[] for _ in range(n)]
cycleSizes = [] # components where all nodes have degree 2
pathSizes = [] # components that are not cycleSizes
seen = set()
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
for i in range(n):
if i in seen:
continue
component = self._getComponent(graph, i, seen)
if all(len(graph[u]) == 2 for u in component):
cycleSizes.append(len(component))
elif len(component) > 1:
pathSizes.append(len(component))
for cycleSize in cycleSizes:
ans += self._calculateScore(n - cycleSize + 1, n, True)
n -= cycleSize
for pathSize in sorted(pathSizes, reverse=True):
ans += self._calculateScore(n - pathSize + 1, n, False)
n -= pathSize
return ans
def _getComponent(
self,
graph: list[list[int]],
start: int,
seen: set[int],
) -> list[int]:
component = [start]
seen.add(start)
for u in component:
for v in graph[u]:
if v in seen:
continue
component.append(v)
seen.add(v)
return component
def _calculateScore(self, left: int, right: int, isCycle: bool) -> int:
window = collections.deque([right, right])
score = 0
for value in range(right - 1, left - 1, -1):
windowValue = window.popleft()
score += windowValue * value
window.append(value)
return score + window[0] * window[1] * isCycle
|
Solution
|
python
|
PrefectHQ__prefect
|
tests/server/models/test_flow_runs.py
|
{
"start": 49030,
"end": 52683
}
|
class ____:
async def test_delete_flow_run(self, flow, session):
# create a flow run to delete
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(flow_id=flow.id),
)
assert await models.flow_runs.delete_flow_run(
session=session, flow_run_id=flow_run.id
)
# make sure the flow run is deleted
result = await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run.id
)
assert result is None
async def test_delete_flow_run_returns_false_if_does_not_exist(self, session):
result = await models.flow_runs.delete_flow_run(
session=session, flow_run_id=uuid4()
)
assert result is False
async def test_delete_flow_run_with_data(self, flow, session, db):
state_id = uuid4()
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.State(
id=state_id,
type="COMPLETED",
name="My Running State",
data={"hello": "world"},
),
),
)
assert flow_run.flow_id == flow.id
assert flow_run.state.id == state_id
# make sure the flow run exists
assert await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run.id
)
assert await models.flow_runs.delete_flow_run(
session=session, flow_run_id=flow_run.id
)
# make sure the flow run is deleted
assert (
await models.flow_runs.read_flow_run(
session=session, flow_run_id=flow_run.id
)
is None
)
@pytest.mark.parametrize(
"state_type,expected_slots",
[
("PENDING", 0),
("RUNNING", 0),
("CANCELLING", 0),
*[
(type, 1)
for type in schemas.states.StateType
if type not in ("PENDING", "RUNNING", "CANCELLING")
],
],
)
async def test_delete_flow_run_with_deployment_concurrency_limit(
self,
session,
flow,
deployment_with_concurrency_limit,
state_type,
expected_slots,
):
flow_run = await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
deployment_id=deployment_with_concurrency_limit.id,
state=schemas.states.State(
type=state_type,
),
),
)
# Take one active slot
await models.concurrency_limits_v2.bulk_increment_active_slots(
session=session,
concurrency_limit_ids=[
deployment_with_concurrency_limit.concurrency_limit_id
],
slots=1,
)
await session.commit()
concurrency_limit = await models.concurrency_limits_v2.read_concurrency_limit(
session=session,
concurrency_limit_id=deployment_with_concurrency_limit.concurrency_limit_id,
)
assert concurrency_limit.active_slots == 0
assert await models.flow_runs.delete_flow_run(
session=session, flow_run_id=flow_run.id
)
await session.refresh(concurrency_limit)
assert concurrency_limit.active_slots == expected_slots
|
TestDeleteFlowRun
|
python
|
mlflow__mlflow
|
mlflow/tracing/export/mlflow_v3.py
|
{
"start": 1264,
"end": 10803
}
|
class ____(SpanExporter):
"""
An exporter implementation that logs the traces to MLflow Tracking Server
using the V3 trace schema and API.
"""
def __init__(self, tracking_uri: str | None = None) -> None:
self._client = TracingClient(tracking_uri)
self._is_async_enabled = self._should_enable_async_logging()
if self._is_async_enabled:
self._async_queue = AsyncTraceExportQueue()
# Display handler is no-op when running outside of notebooks.
self._display_handler = get_display_handler()
# A flag to cache the failure of exporting spans so that the client will not try to export
# spans again and trigger excessive server side errors. Default to True (optimistically
# assume the store supports span-level logging).
self._should_export_spans_incrementally = True
def export(self, spans: Sequence[ReadableSpan]) -> None:
"""
Export the spans to the destination.
Args:
spans: A sequence of OpenTelemetry ReadableSpan objects passed from
a span processor. All spans (root and non-root) are exported.
"""
if self._should_export_spans_incrementally:
self._export_spans_incrementally(spans)
self._export_traces(spans)
def _export_spans_incrementally(self, spans: Sequence[ReadableSpan]) -> None:
"""
Export spans incrementally as they complete.
Args:
spans: Sequence of ReadableSpan objects to export.
manager: The trace manager instance.
"""
if is_databricks_uri(self._client.tracking_uri):
_logger.debug(
"Databricks tracking server only supports logging spans to UC table, "
"skipping span exporting."
)
return
mlflow_spans_by_experiment = self._collect_mlflow_spans_for_export(spans)
for experiment_id, spans_to_log in mlflow_spans_by_experiment.items():
if self._should_log_async():
self._async_queue.put(
task=Task(
handler=self._log_spans,
args=(experiment_id, spans_to_log),
error_msg="Failed to log spans to the trace server.",
)
)
else:
self._log_spans(experiment_id, spans_to_log)
def _collect_mlflow_spans_for_export(
self, spans: Sequence[ReadableSpan]
) -> dict[str, list[Span]]:
"""
Collect MLflow spans from ReadableSpans for export, grouped by experiment ID.
Args:
spans: Sequence of ReadableSpan objects.
Returns:
Dictionary mapping experiment_id to list of MLflow Span objects.
"""
manager = InMemoryTraceManager.get_instance()
spans_by_experiment = defaultdict(list)
for span in spans:
mlflow_trace_id = manager.get_mlflow_trace_id_from_otel_id(span.context.trace_id)
experiment_id = get_experiment_id_for_trace(span)
span_id = encode_span_id(span.context.span_id)
# we need to fetch the mlflow span from the trace manager because the span
# may be updated in processor before exporting (e.g. deduplication).
if mlflow_span := manager.get_span_from_id(mlflow_trace_id, span_id):
spans_by_experiment[experiment_id].append(mlflow_span)
return spans_by_experiment
def _export_traces(self, spans: Sequence[ReadableSpan]) -> None:
"""
Export full traces for root spans.
Args:
spans: Sequence of ReadableSpan objects.
"""
manager = InMemoryTraceManager.get_instance()
for span in spans:
if span._parent is not None:
continue
manager_trace = manager.pop_trace(span.context.trace_id)
if manager_trace is None:
_logger.debug(f"Trace for root span {span} not found. Skipping full export.")
continue
trace = manager_trace.trace
_set_last_active_trace_id(trace.info.request_id)
# Store mapping from eval request ID to trace ID so that the evaluation
# harness can access to the trace using mlflow.get_trace(eval_request_id)
if eval_request_id := trace.info.tags.get(TraceTagKey.EVAL_REQUEST_ID):
_EVAL_REQUEST_ID_TO_TRACE_ID[eval_request_id] = trace.info.trace_id
if not maybe_get_request_id(is_evaluate=True):
self._display_handler.display_traces([trace])
if self._should_log_async():
self._async_queue.put(
task=Task(
handler=self._log_trace,
args=(trace, manager_trace.prompts),
error_msg="Failed to log trace to the trace server.",
)
)
else:
self._log_trace(trace, prompts=manager_trace.prompts)
def _log_spans(self, experiment_id: str, spans: list[Span]) -> None:
"""
Helper method to log spans with error handling.
Args:
experiment_id: The experiment ID to log spans to.
spans: List of spans to log.
"""
try:
self._client.log_spans(experiment_id, spans)
except NotImplementedError:
# Silently skip if the store doesn't support log_spans. This is expected for stores that
# don't implement span-level logging, and we don't want to spam warnings for every span.
self._should_export_spans_incrementally = False
except RestException as e:
# When the FileStore is behind the tracking server, it returns 501 exception.
# However, the OTLP endpoint returns general HTTP error, not MlflowException, which does
# not include error_code in the body and handled as a general server side error. Hence,
# we need to check the message to handle this case.
if "REST OTLP span logging is not supported" in e.message:
self._should_export_spans_incrementally = False
else:
_logger.debug(f"Failed to log span to MLflow backend: {e}")
except Exception as e:
_logger.debug(f"Failed to log span to MLflow backend: {e}")
def _log_trace(self, trace: Trace, prompts: Sequence[PromptVersion]) -> None:
"""
Handles exporting a trace to MLflow using the V3 API and blob storage.
Steps:
1. Create the trace in MLflow
2. Upload the trace data to blob storage using the returned trace info.
"""
try:
if trace:
add_size_stats_to_trace_metadata(trace)
returned_trace_info = self._client.start_trace(trace.info)
if self._should_log_spans_to_artifacts(returned_trace_info):
self._client._upload_trace_data(returned_trace_info, trace.data)
else:
_logger.warning("No trace or trace info provided, unable to export")
except Exception as e:
_logger.warning(f"Failed to send trace to MLflow backend: {e}")
try:
# Always run prompt linking asynchronously since (1) prompt linking API calls
# would otherwise add latency to the export procedure and (2) prompt linking is not
# critical for trace export (if the prompt fails to link, the user's workflow is
# minorly affected), so we don't have to await successful linking
try_link_prompts_to_trace(
client=self._client,
trace_id=trace.info.trace_id,
prompts=prompts,
synchronous=False,
)
except Exception as e:
_logger.warning(f"Failed to link prompts to trace: {e}")
def _should_enable_async_logging(self) -> bool:
if (
is_in_databricks_notebook()
# NB: Not defaulting OSS backend to async logging for now to reduce blast radius.
or not is_databricks_uri(self._client.tracking_uri)
):
# NB: We don't turn on async logging in Databricks notebook by default
# until we are confident that the async logging is working on the
# offline workload on Databricks, to derisk the inclusion to the
# standard image. When it is enabled explicitly via the env var, we
# will respect that.
return (
MLFLOW_ENABLE_ASYNC_TRACE_LOGGING.get()
if MLFLOW_ENABLE_ASYNC_TRACE_LOGGING.is_set()
else False
)
return MLFLOW_ENABLE_ASYNC_TRACE_LOGGING.get()
def _should_log_async(self) -> bool:
# During evaluate, the eval harness relies on the generated trace objects,
# so we should not log traces asynchronously.
if maybe_get_request_id(is_evaluate=True):
return False
return self._is_async_enabled
def _should_log_spans_to_artifacts(self, trace_info: TraceInfo) -> bool:
"""
Whether to log spans to artifacts. Overridden by UC table exporter to False.
"""
# We only log traces to artifacts when the tracking store doesn't support span logging
return trace_info.tags.get(TraceTagKey.SPANS_LOCATION) != SpansLocation.TRACKING_STORE.value
|
MlflowV3SpanExporter
|
python
|
langchain-ai__langchain
|
libs/partners/openai/tests/unit_tests/embeddings/test_base_standard.py
|
{
"start": 210,
"end": 916
}
|
class ____(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return OpenAIEmbeddings
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"OPENAI_API_KEY": "api_key",
"OPENAI_ORG_ID": "org_id",
"OPENAI_API_BASE": "api_base",
"OPENAI_PROXY": "https://proxy.com",
},
{},
{
"openai_api_key": "api_key",
"openai_organization": "org_id",
"openai_api_base": "api_base",
"openai_proxy": "https://proxy.com",
},
)
|
TestOpenAIStandard
|
python
|
nryoung__algorithms
|
tests/test_math.py
|
{
"start": 813,
"end": 1517
}
|
class ____(unittest.TestCase):
def test_extended_gcd(self):
# Find extended_gcd of 35 and 77
(a, b) = extended_gcd(35, 77)
print(a, b)
self.assertIs(35 * a + 77 * b, 7)
# Find extended_gcd of 15 and 19
(a, b) = extended_gcd(15, 19)
self.assertIs(15 * a + 19 * b, 1)
# Find extended_gcd of 18 and 9
(a, b) = extended_gcd(18, 9)
self.assertIs(18 * a + 9 * b, 9)
# Find extended_gcd of 99 and 81
(a, b) = extended_gcd(99, 81)
self.assertIs(99 * a + 81 * b, 9)
# Find extended_gcd of 50 and 15
(a, b) = extended_gcd(50, 15)
self.assertIs(50 * a + 15 * b, 5)
|
TestExtendedGCD
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.