language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster/dagster/components/resolved/metadata.py | {
"start": 299,
"end": 3023
} | class ____(FieldInfo): # pyright: ignore[reportGeneralTypeIssues]
"""Wrapper class that stores additional resolution metadata within a pydantic FieldInfo object.
Examples:
```python
class MyModel(ComponentSchema):
resolvable_obj: Annotated[str, ResolvableFieldInfo(required_scope={"some_field"})]
```
"""
def __init__(
self,
*,
required_scope: Optional[Set[str]] = None,
):
super().__init__(
json_schema_extra={JSON_SCHEMA_EXTRA_REQUIRED_SCOPE_KEY: list(required_scope or [])},
)
def _subschemas_on_path(
valpath: Sequence[Union[str, int]], json_schema: Mapping[str, Any], subschema: Mapping[str, Any]
) -> Iterator[Mapping[str, Any]]:
"""Given a valpath and the json schema of a given target type, returns the subschemas at each step of the path."""
# List[ComplexType] (e.g.) will contain a reference to the complex type schema in the
# top-level $defs, so we dereference it here.
if "$ref" in subschema:
# depending on the pydantic version, the extras may be stored with the reference or not
extras = {k: v for k, v in subschema.items() if k != "$ref"}
subschema = {**json_schema["$defs"].get(subschema["$ref"][len(REF_BASE) :]), **extras}
yield subschema
if len(valpath) == 0:
return
# Optional[ComplexType] (e.g.) will contain multiple schemas in the "anyOf" field
if "anyOf" in subschema:
for inner in subschema["anyOf"]:
yield from _subschemas_on_path(valpath, json_schema, inner)
el = valpath[0]
if isinstance(el, str):
# valpath: ['field']
# field: X
inner = subschema.get("properties", {}).get(el)
elif isinstance(el, int):
# valpath: ['field', 0]
# field: List[X]
inner = subschema.get("items")
else:
check.failed(f"Unexpected valpath element: {el}")
# the path wasn't valid, or unspecified
if not inner:
return
_, *rest = valpath
yield from _subschemas_on_path(rest, json_schema, inner)
def _get_additional_required_scope(subschema: Mapping[str, Any]) -> Set[str]:
raw = check.opt_inst(subschema.get(JSON_SCHEMA_EXTRA_REQUIRED_SCOPE_KEY), list)
return set(raw) if raw else set()
def get_required_scope(
valpath: Sequence[Union[str, int]], json_schema: Mapping[str, Any]
) -> Set[str]:
"""Given a valpath and the json schema of a given target type, determines the available rendering scope."""
required_scope = set()
for subschema in _subschemas_on_path(valpath, json_schema, json_schema):
required_scope |= _get_additional_required_scope(subschema)
return required_scope
| ResolvableFieldInfo |
python | readthedocs__readthedocs.org | readthedocs/integrations/models.py | {
"start": 11859,
"end": 11979
} | class ____:
installation_id: int
repository_id: int
repository_full_name: str
| GitHubAppIntegrationProviderData |
python | catalyst-team__catalyst | catalyst/loggers/tensorboard.py | {
"start": 596,
"end": 5270
} | class ____(ILogger):
"""Tensorboard logger for parameters, metrics, images and other artifacts.
Args:
logdir: path to logdir for tensorboard.
use_logdir_postfix: boolean flag
to use extra ``tensorboard`` prefix in the logdir.
log_batch_metrics: boolean flag to log batch metrics
(default: SETTINGS.log_batch_metrics or False).
log_epoch_metrics: boolean flag to log epoch metrics
(default: SETTINGS.log_epoch_metrics or True).
.. note::
This logger is used by default by ``dl.Runner`` and ``dl.SupervisedRunner``
in case of specified logdir during ``runner.train(..., logdir=/path/to/logdir)``.
Examples:
.. code-block:: python
from catalyst import dl
runner = dl.SupervisedRunner()
runner.train(
...,
loggers={"tensorboard": dl.TensorboardLogger(logdir="./logdir/tensorboard"}
)
.. code-block:: python
from catalyst import dl
class CustomRunner(dl.IRunner):
# ...
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"tensorboard": dl.TensorboardLogger(logdir="./logdir/tensorboard")
}
# ...
runner = CustomRunner().run()
"""
def __init__(
self,
logdir: str,
use_logdir_postfix: bool = False,
log_batch_metrics: bool = SETTINGS.log_batch_metrics,
log_epoch_metrics: bool = SETTINGS.log_epoch_metrics,
):
"""Init."""
super().__init__(
log_batch_metrics=log_batch_metrics, log_epoch_metrics=log_epoch_metrics
)
if use_logdir_postfix:
logdir = os.path.join(logdir, "tensorboard")
self.logdir = logdir
self.loggers = {}
os.makedirs(self.logdir, exist_ok=True)
@property
def logger(self):
"""Internal logger/experiment/etc. from the monitoring system."""
return self.loggers
def _check_loader_key(self, loader_key: str):
if loader_key not in self.loggers.keys():
logdir = os.path.join(self.logdir, f"{loader_key}")
self.loggers[loader_key] = SummaryWriter(logdir)
def _log_metrics(
self, metrics: Dict[str, float], step: int, loader_key: str, suffix=""
):
for key, value in metrics.items():
self.loggers[loader_key].add_scalar(f"{key}{suffix}", float(value), step)
def log_image(
self,
tag: str,
image: np.ndarray,
runner: "IRunner",
scope: str = None,
) -> None:
"""Logs image to Tensorboard for current scope on current step."""
assert runner.loader_key is not None
self._check_loader_key(loader_key=runner.loader_key)
tensor = _image_to_tensor(image)
self.loggers[runner.loader_key].add_image(
f"{tag}", tensor, global_step=runner.epoch_step
)
def log_metrics(
self,
metrics: Dict[str, float],
scope: str,
runner: "IRunner",
) -> None:
"""Logs batch and epoch metrics to Tensorboard."""
if scope == "batch" and self.log_batch_metrics:
self._check_loader_key(loader_key=runner.loader_key)
# metrics = {k: float(v) for k, v in metrics.items()}
self._log_metrics(
metrics=metrics,
step=runner.sample_step,
loader_key=runner.loader_key,
suffix="/batch",
)
elif scope == "loader" and self.log_epoch_metrics:
self._check_loader_key(loader_key=runner.loader_key)
self._log_metrics(
metrics=metrics,
step=runner.epoch_step,
loader_key=runner.loader_key,
suffix="/epoch",
)
elif scope == "epoch" and self.log_epoch_metrics:
# @TODO: remove naming magic
loader_key = "_epoch_"
per_loader_metrics = metrics[loader_key]
self._check_loader_key(loader_key=loader_key)
self._log_metrics(
metrics=per_loader_metrics,
step=runner.epoch_step,
loader_key=loader_key,
suffix="/epoch",
)
def flush_log(self) -> None:
"""Flushes the loggers."""
for logger in self.loggers.values():
logger.flush()
def close_log(self) -> None:
"""Closes the loggers."""
for logger in self.loggers.values():
logger.close()
__all__ = ["TensorboardLogger"]
| TensorboardLogger |
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 4410,
"end": 4644
} | class ____(graphene.ObjectType):
name = graphene.String()
digest = graphene.String()
source_type = graphene.String()
source = graphene.String()
schema = graphene.String()
profile = graphene.String()
| MlflowDataset |
python | RaRe-Technologies__gensim | gensim/parsing/porter.py | {
"start": 897,
"end": 15535
} | class ____:
"""Class contains implementation of Porter stemming algorithm.
Attributes
--------
b : str
Buffer holding a word to be stemmed. The letters are in b[0], b[1] ... ending at b[`k`].
k : int
Readjusted downwards as the stemming progresses.
j : int
Word length.
"""
def __init__(self):
self.b = "" # buffer for word to be stemmed
self.k = 0
self.j = 0 # j is a general offset into the string
def _cons(self, i):
"""Check if b[i] is a consonant letter.
Parameters
----------
i : int
Index for `b`.
Returns
-------
bool
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.b = "hi"
>>> p._cons(1)
False
>>> p.b = "meow"
>>> p._cons(3)
True
"""
ch = self.b[i]
if ch in "aeiou":
return False
if ch == 'y':
return i == 0 or not self._cons(i - 1)
return True
def _m(self):
"""Calculate the number of consonant sequences between 0 and j.
If c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
<c><v> gives 0
<c>vc<v> gives 1
<c>vcvc<v> gives 2
<c>vcvcvc<v> gives 3
Returns
-------
int
The number of consonant sequences between 0 and j.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.b = "<bm>aobm<ao>"
>>> p.j = 11
>>> p._m()
2
"""
i = 0
while True:
if i > self.j:
return 0
if not self._cons(i):
break
i += 1
i += 1
n = 0
while True:
while True:
if i > self.j:
return n
if self._cons(i):
break
i += 1
i += 1
n += 1
while 1:
if i > self.j:
return n
if not self._cons(i):
break
i += 1
i += 1
def _vowelinstem(self):
"""Check if b[0: j + 1] contains a vowel letter.
Returns
-------
bool
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.b = "gnsm"
>>> p.j = 3
>>> p._vowelinstem()
False
>>> p.b = "gensim"
>>> p.j = 5
>>> p._vowelinstem()
True
"""
return not all(self._cons(i) for i in range(self.j + 1))
def _doublec(self, j):
"""Check if b[j - 1: j + 1] contain a double consonant letter.
Parameters
----------
j : int
Index for `b`
Returns
-------
bool
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.b = "real"
>>> p.j = 3
>>> p._doublec(3)
False
>>> p.b = "really"
>>> p.j = 5
>>> p._doublec(4)
True
"""
return j > 0 and self.b[j] == self.b[j - 1] and self._cons(j)
def _cvc(self, i):
"""Check if b[j - 2: j + 1] makes the (consonant, vowel, consonant) pattern and also
if the second 'c' is not 'w', 'x' or 'y'. This is used when trying to restore an 'e' at the end of a short word,
e.g. cav(e), lov(e), hop(e), crim(e), but snow, box, tray.
Parameters
----------
i : int
Index for `b`
Returns
-------
bool
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.b = "lib"
>>> p.j = 2
>>> p._cvc(2)
True
>>> p.b = "dll"
>>> p.j = 2
>>> p._cvc(2)
False
>>> p.b = "wow"
>>> p.j = 2
>>> p._cvc(2)
False
"""
if i < 2 or not self._cons(i) or self._cons(i - 1) or not self._cons(i - 2):
return False
return self.b[i] not in "wxy"
def _ends(self, s):
"""Check if b[: k + 1] ends with `s`.
Parameters
----------
s : str
Returns
-------
bool
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.b = "cowboy"
>>> p.j = 5
>>> p.k = 2
>>> p._ends("cow")
True
"""
if s[-1] != self.b[self.k]: # tiny speed-up
return False
length = len(s)
if length > (self.k + 1):
return False
if self.b[self.k - length + 1:self.k + 1] != s:
return False
self.j = self.k - length
return True
def _setto(self, s):
"""Append `s` to `b`, adjusting `k`.
Parameters
----------
s : str
"""
self.b = self.b[:self.j + 1] + s
self.k = len(self.b) - 1
def _r(self, s):
if self._m() > 0:
self._setto(s)
def _step1ab(self):
"""Get rid of plurals and -ed or -ing.
caresses -> caress
ponies -> poni
ties -> ti
caress -> caress
cats -> cat
feed -> feed
agreed -> agree
disabled -> disable
matting -> mat
mating -> mate
meeting -> meet
milling -> mill
messing -> mess
meetings -> meet
"""
if self.b[self.k] == 's':
if self._ends("sses"):
self.k -= 2
elif self._ends("ies"):
self._setto("i")
elif self.b[self.k - 1] != 's':
self.k -= 1
if self._ends("eed"):
if self._m() > 0:
self.k -= 1
elif (self._ends("ed") or self._ends("ing")) and self._vowelinstem():
self.k = self.j
if self._ends("at"):
self._setto("ate")
elif self._ends("bl"):
self._setto("ble")
elif self._ends("iz"):
self._setto("ize")
elif self._doublec(self.k):
if self.b[self.k - 1] not in "lsz":
self.k -= 1
elif self._m() == 1 and self._cvc(self.k):
self._setto("e")
def _step1c(self):
"""Turn terminal 'y' to 'i' when there is another vowel in the stem."""
if self._ends("y") and self._vowelinstem():
self.b = self.b[:self.k] + 'i'
def _step2(self):
"""Map double suffices to single ones.
So, -ization ( = -ize plus -ation) maps to -ize etc. Note that the
string before the suffix must give _m() > 0.
"""
ch = self.b[self.k - 1]
if ch == 'a':
if self._ends("ational"):
self._r("ate")
elif self._ends("tional"):
self._r("tion")
elif ch == 'c':
if self._ends("enci"):
self._r("ence")
elif self._ends("anci"):
self._r("ance")
elif ch == 'e':
if self._ends("izer"):
self._r("ize")
elif ch == 'l':
if self._ends("bli"):
self._r("ble") # --DEPARTURE--
# To match the published algorithm, replace this phrase with
# if self._ends("abli"): self._r("able")
elif self._ends("alli"):
self._r("al")
elif self._ends("entli"):
self._r("ent")
elif self._ends("eli"):
self._r("e")
elif self._ends("ousli"):
self._r("ous")
elif ch == 'o':
if self._ends("ization"):
self._r("ize")
elif self._ends("ation"):
self._r("ate")
elif self._ends("ator"):
self._r("ate")
elif ch == 's':
if self._ends("alism"):
self._r("al")
elif self._ends("iveness"):
self._r("ive")
elif self._ends("fulness"):
self._r("ful")
elif self._ends("ousness"):
self._r("ous")
elif ch == 't':
if self._ends("aliti"):
self._r("al")
elif self._ends("iviti"):
self._r("ive")
elif self._ends("biliti"):
self._r("ble")
elif ch == 'g': # --DEPARTURE--
if self._ends("logi"):
self._r("log")
# To match the published algorithm, delete this phrase
def _step3(self):
"""Deal with -ic-, -full, -ness etc. Similar strategy to _step2."""
ch = self.b[self.k]
if ch == 'e':
if self._ends("icate"):
self._r("ic")
elif self._ends("ative"):
self._r("")
elif self._ends("alize"):
self._r("al")
elif ch == 'i':
if self._ends("iciti"):
self._r("ic")
elif ch == 'l':
if self._ends("ical"):
self._r("ic")
elif self._ends("ful"):
self._r("")
elif ch == 's':
if self._ends("ness"):
self._r("")
def _step4(self):
"""Takes off -ant, -ence etc., in context <c>vcvc<v>."""
ch = self.b[self.k - 1]
if ch == 'a':
if not self._ends("al"):
return
elif ch == 'c':
if not self._ends("ance") and not self._ends("ence"):
return
elif ch == 'e':
if not self._ends("er"):
return
elif ch == 'i':
if not self._ends("ic"):
return
elif ch == 'l':
if not self._ends("able") and not self._ends("ible"):
return
elif ch == 'n':
if self._ends("ant"):
pass
elif self._ends("ement"):
pass
elif self._ends("ment"):
pass
elif self._ends("ent"):
pass
else:
return
elif ch == 'o':
if self._ends("ion") and self.b[self.j] in "st":
pass
elif self._ends("ou"):
pass
# takes care of -ous
else:
return
elif ch == 's':
if not self._ends("ism"):
return
elif ch == 't':
if not self._ends("ate") and not self._ends("iti"):
return
elif ch == 'u':
if not self._ends("ous"):
return
elif ch == 'v':
if not self._ends("ive"):
return
elif ch == 'z':
if not self._ends("ize"):
return
else:
return
if self._m() > 1:
self.k = self.j
def _step5(self):
"""Remove a final -e if _m() > 1, and change -ll to -l if m() > 1."""
k = self.j = self.k
if self.b[k] == 'e':
a = self._m()
if a > 1 or (a == 1 and not self._cvc(k - 1)):
self.k -= 1
if self.b[self.k] == 'l' and self._doublec(self.k) and self._m() > 1:
self.k -= 1
def stem(self, w):
"""Stem the word `w`.
Parameters
----------
w : str
Returns
-------
str
Stemmed version of `w`.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.stem("ponies")
'poni'
"""
w = w.lower()
k = len(w) - 1
if k <= 1:
return w # --DEPARTURE--
# With this line, strings of length 1 or 2 don't go through the
# stemming process, although no mention is made of this in the
# published algorithm. Remove the line to match the published
# algorithm.
self.b = w
self.k = k
self._step1ab()
self._step1c()
self._step2()
self._step3()
self._step4()
self._step5()
return self.b[:self.k + 1]
def stem_sentence(self, txt):
"""Stem the sentence `txt`.
Parameters
----------
txt : str
Input sentence.
Returns
-------
str
Stemmed sentence.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.stem_sentence("Wow very nice woman with apple")
'wow veri nice woman with appl'
"""
return " ".join(self.stem(x) for x in txt.split())
def stem_documents(self, docs):
"""Stem documents.
Parameters
----------
docs : list of str
Input documents
Returns
-------
list of str
Stemmed documents.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.parsing.porter import PorterStemmer
>>> p = PorterStemmer()
>>> p.stem_documents(["Have a very nice weekend", "Have a very nice weekend"])
['have a veri nice weekend', 'have a veri nice weekend']
"""
return [self.stem_sentence(x) for x in docs]
if __name__ == '__main__':
import sys
p = PorterStemmer()
for f in sys.argv[1:]:
with open(f) as infile:
for line in infile:
print(p.stem_sentence(line))
| PorterStemmer |
python | django__django | tests/forms_tests/tests/tests.py | {
"start": 487,
"end": 599
} | class ____(ModelForm):
class Meta:
model = ChoiceFieldModel
fields = "__all__"
| ChoiceFieldForm |
python | django__django | tests/migrations/test_migrations_conflict_long_name/0002_conflicting_second_migration_with_long_name.py | {
"start": 43,
"end": 321
} | class ____(migrations.Migration):
dependencies = [("migrations", "0001_initial")]
operations = [
migrations.CreateModel(
"SomethingElse",
[
("id", models.AutoField(primary_key=True)),
],
),
]
| Migration |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 193712,
"end": 198100
} | class ____(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
| UnbufferedFileObjectClassTestCase |
python | astropy__astropy | astropy/units/quantity.py | {
"start": 5456,
"end": 8019
} | class ____(QuantityInfoBase):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
_represent_as_dict_attrs = ("value", "unit")
_construct_from_dict_args = ["value"]
_represent_as_dict_primary_data = "value"
def new_like(self, cols, length, metadata_conflicts="warn", name=None):
"""
Return a new Quantity instance which is consistent with the
input ``cols`` and has ``length`` rows.
This is intended for creating an empty column object whose elements can
be set in-place for table operations like join or vstack.
Parameters
----------
cols : list
List of input columns
length : int
Length of the output column object
metadata_conflicts : str ('warn'|'error'|'silent')
How to handle metadata conflicts
name : str or None
Output column name
Returns
-------
col : `~astropy.units.Quantity` (or subclass)
Empty instance of this class consistent with ``cols``
"""
# Get merged info attributes like shape, dtype, format, description, etc.
attrs = self.merge_cols_attributes(
cols, metadata_conflicts, name, ("meta", "format", "description")
)
# Make an empty quantity using the unit of the last one.
shape = (length,) + attrs.pop("shape")
dtype = attrs.pop("dtype")
# Use zeros so we do not get problems for Quantity subclasses such
# as Longitude and Latitude, which cannot take arbitrary values.
data = np.zeros(shape=shape, dtype=dtype)
# Get arguments needed to reconstruct class
map = {
key: (data if key == "value" else getattr(cols[-1], key))
for key in self._represent_as_dict_attrs
}
map["copy"] = COPY_IF_NEEDED
out = self._construct_from_dict(map)
# Set remaining info attributes
for attr, value in attrs.items():
setattr(out.info, attr, value)
return out
def get_sortable_arrays(self):
"""
Return a list of arrays which can be lexically sorted to represent
the order of the parent column.
For Quantity this is just the quantity itself.
Returns
-------
arrays : list of ndarray
"""
return [self._parent]
| QuantityInfo |
python | psf__black | tests/test_black.py | {
"start": 118053,
"end": 119288
} | class ____:
"""Test that certain symbols that are commonly used externally keep working.
We don't (yet) formally expose an API (see issue #779), but we should endeavor to
keep certain functions that external users commonly rely on working.
"""
def test_format_str(self) -> None:
# format_str and Mode should keep working
assert (
black.format_str("print('hello')", mode=black.Mode()) == 'print("hello")\n'
)
# you can pass line length
assert (
black.format_str("print('hello')", mode=black.Mode(line_length=42))
== 'print("hello")\n'
)
# invalid input raises InvalidInput
with pytest.raises(black.InvalidInput):
black.format_str("syntax error", mode=black.Mode())
def test_format_file_contents(self) -> None:
# You probably should be using format_str() instead, but let's keep
# this one around since people do use it
assert (
black.format_file_contents("x=1", fast=True, mode=black.Mode()) == "x = 1\n"
)
with pytest.raises(black.NothingChanged):
black.format_file_contents("x = 1\n", fast=True, mode=black.Mode())
| TestDeFactoAPI |
python | PyCQA__pylint | tests/test_func.py | {
"start": 3465,
"end": 6356
} | class ____(LintTestUsingModule):
def _check_result(self, got: str) -> None:
if not self._has_output():
return
try:
expected = self._get_expected()
except OSError:
expected = ""
if got != expected:
with open(self.output or "", "w", encoding="utf-8") as f:
f.write(got)
def gen_tests(
filter_rgx: str | re.Pattern[str] | None,
) -> list[tuple[str, str, list[tuple[str, str]]]]:
if filter_rgx:
is_to_run = re.compile(filter_rgx).search
else:
is_to_run = ( # noqa: E731 we're going to throw all this anyway
lambda x: 1 # type: ignore[assignment] # pylint: disable=unnecessary-lambda-assignment
)
tests: list[tuple[str, str, list[tuple[str, str]]]] = []
for module_file, messages_file in _get_tests_info(INPUT_DIR, MSG_DIR, "func_", ""):
if not is_to_run(module_file) or module_file.endswith((".pyc", "$py.class")):
continue
base = module_file.replace(".py", "").split("_")[1]
dependencies = _get_tests_info(INPUT_DIR, MSG_DIR, base, ".py")
tests.append((module_file, messages_file, dependencies))
if UPDATE_FILE.exists():
return tests
assert len(tests) < 13, "Please do not add new test cases here." + "\n".join(
str(k) for k in tests if not k[2]
)
return tests
TEST_WITH_EXPECTED_DEPRECATION = ["func_excess_escapes.py"]
@pytest.mark.parametrize(
"module_file,messages_file,dependencies",
gen_tests(FILTER_RGX),
ids=[o[0] for o in gen_tests(FILTER_RGX)],
)
def test_functionality(
module_file: str,
messages_file: str,
dependencies: list[tuple[str, str]],
recwarn: pytest.WarningsRecorder,
) -> None:
__test_functionality(module_file, messages_file, dependencies)
if recwarn.list:
if module_file in TEST_WITH_EXPECTED_DEPRECATION and sys.version_info.minor > 5:
assert any(
"invalid escape sequence" in str(i.message)
for i in recwarn.list
if issubclass(i.category, DeprecationWarning)
)
def __test_functionality(
module_file: str, messages_file: str, dependencies: list[tuple[str, str]]
) -> None:
lint_test = LintTestUpdate() if UPDATE_FILE.exists() else LintTestUsingModule()
lint_test.module = module_file.replace(".py", "")
lint_test.output = messages_file
lint_test.depends = dependencies or None
lint_test.INPUT_DIR = INPUT_DIR
lint_test._test_functionality()
if __name__ == "__main__":
if UPDATE_OPTION in sys.argv:
UPDATE_FILE.touch()
sys.argv.remove(UPDATE_OPTION)
if len(sys.argv) > 1:
FILTER_RGX = sys.argv[1]
del sys.argv[1]
try:
pytest.main(sys.argv)
finally:
if UPDATE_FILE.exists():
UPDATE_FILE.unlink()
| LintTestUpdate |
python | sympy__sympy | sympy/core/relational.py | {
"start": 38879,
"end": 39171
} | class ____(_Greater):
__doc__ = GreaterThan.__doc__
__slots__ = ()
rel_op = '>'
@classmethod
def _eval_fuzzy_relation(cls, lhs, rhs):
return is_gt(lhs, rhs)
@property
def weak(self):
return Ge(*self.args)
Gt = StrictGreaterThan
| StrictGreaterThan |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 92192,
"end": 92266
} | class ____(Binop):
operation = operator.xor
_operator_repr = "^"
| XOr |
python | redis__redis-py | redis/commands/search/querystring.py | {
"start": 6590,
"end": 6784
} | class ____(DisjunctNode):
"""
This node is true if *all* of its children are false. This is equivalent to
```
disjunct(union(...))
```
"""
JOINSTR = "|"
| DistjunctUnion |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 380172,
"end": 380625
} | class ____(sgqlc.types.Input):
"""Ordering options for user status connections."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(UserStatusOrderField), graphql_name="field")
"""The field to order user statuses by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| UserStatusOrder |
python | django__django | tests/postgres_tests/test_operations.py | {
"start": 9261,
"end": 9362
} | class ____:
def allow_migrate(self, db, app_label, **hints):
return False
| NoMigrationRouter |
python | joblib__joblib | joblib/externals/loky/backend/queues.py | {
"start": 6145,
"end": 7322
} | class ____(mp_SimpleQueue):
def __init__(self, reducers=None, ctx=None):
super().__init__(ctx=ctx)
# Add possiblity to use custom reducers
self._reducers = reducers
def close(self):
self._reader.close()
self._writer.close()
# Use custom queue set/get state to be able to reduce the custom reducers
def __getstate__(self):
assert_spawning(self)
return (
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
)
def __setstate__(self, state):
(
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
) = state
# Overload put to use our customizable reducer
def put(self, obj):
# serialize the data before acquiring the lock
obj = dumps(obj, reducers=self._reducers)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj)
| SimpleQueue |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/rnn_cell_wrapper_v2.py | {
"start": 1386,
"end": 3731
} | class ____(recurrent.AbstractRNNCell):
"""Base class for cells wrappers V2 compatibility.
This class along with `rnn_cell_impl._RNNCellWrapperV1` allows to define
wrappers that are compatible with V1 and V2, and defines helper methods for
this purpose.
"""
def __init__(self, cell, *args, **kwargs):
super(_RNNCellWrapperV2, self).__init__(*args, **kwargs)
self.cell = cell
cell_call_spec = tf_inspect.getfullargspec(cell.call)
self._expects_training_arg = ("training" in cell_call_spec.args) or (
cell_call_spec.varkw is not None
)
def call(self, inputs, state, **kwargs):
"""Runs the RNN cell step computation.
When `call` is being used, we assume that the wrapper object has been built,
and therefore the wrapped cells has been built via its `build` method and
its `call` method can be used directly.
This allows to use the wrapped cell and the non-wrapped cell equivalently
when using `call` and `build`.
Args:
inputs: A tensor with wrapped cell's input.
state: A tensor or tuple of tensors with wrapped cell's state.
**kwargs: Additional arguments passed to the wrapped cell's `call`.
Returns:
A pair containing:
- Output: A tensor with cell's output.
- New state: A tensor or tuple of tensors with new wrapped cell's state.
"""
return self._call_wrapped_cell(
inputs, state, cell_call_fn=self.cell.call, **kwargs)
def build(self, inputs_shape):
"""Builds the wrapped cell."""
self.cell.build(inputs_shape)
self.built = True
def get_config(self):
config = {
"cell": {
"class_name": self.cell.__class__.__name__,
"config": self.cell.get_config()
},
}
base_config = super(_RNNCellWrapperV2, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
config = config.copy()
from tensorflow.python.keras.layers.serialization import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cell = deserialize_layer(config.pop("cell"), custom_objects=custom_objects)
return cls(cell, **config)
@deprecated(None, "Please use tf.keras.layers.RNN instead.")
@tf_export("nn.RNNCellDropoutWrapper", v1=[])
| _RNNCellWrapperV2 |
python | sqlalchemy__sqlalchemy | test/sql/test_returning.py | {
"start": 21524,
"end": 22397
} | class ____(fixtures.TestBase):
__requires__ = ("insert_returning",)
__sparse_driver_backend__ = True
@testing.provide_metadata
def test_select_doesnt_pollute_result(self, connection):
class MyType(TypeDecorator):
impl = Integer
cache_ok = True
def process_result_value(self, value, dialect):
raise Exception("I have not been selected")
t1 = Table("t1", self.metadata, Column("x", MyType()))
t2 = Table("t2", self.metadata, Column("x", Integer))
self.metadata.create_all(connection)
connection.execute(t1.insert().values(x=5))
stmt = (
t2.insert()
.values(x=select(t1.c.x).scalar_subquery())
.returning(t2.c.x)
)
result = connection.execute(stmt)
eq_(result.scalar(), 5)
| CompositeStatementTest |
python | walkccc__LeetCode | solutions/1228. Missing Number In Arithmetic Progression/1228.py | {
"start": 0,
"end": 298
} | class ____:
def missingNumber(self, arr: list[int]) -> int:
n = len(arr)
delta = (arr[-1] - arr[0]) // n
l = 0
r = n - 1
while l < r:
m = (l + r) // 2
if arr[m] == arr[0] + m * delta:
l = m + 1
else:
r = m
return arr[0] + l * delta
| Solution |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks.py | {
"start": 18980,
"end": 25284
} | class ____(session_run_hook.SessionRunHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None,
save_graph_def=True):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
save_graph_def: Whether to save the GraphDef and MetaGraphDef to
`checkpoint_dir`. The GraphDef is saved after the session is created as
`graph.pbtxt`. MetaGraphDefs are saved out for every checkpoint as
`model.ckpt-*.meta`.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create CheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
# Set sufficiently high default that it never skips checking the actual
# global step counter -- unless the user overrides it with the right value
# for the steps_per_run.
self._steps_per_run = 1000000
self._save_graph_def = save_graph_def
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._save_graph_def:
# We do write graph and saver_def at the first call of before_run.
# We cannot do this in begin, since we let other hooks to change graph and
# add variables in begin. Graph is finalized after all begin calls.
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
stale_global_step = run_values.results
if self._timer.should_trigger_for_step(stale_global_step +
self._steps_per_run):
# get the real value after train op.
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
last_step = session.run(self._global_step_tensor)
if last_step != self._timer.last_triggered_step():
self._save(session, last_step)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step):
"""Saves the latest checkpoint, returns should_stop."""
logging.info("Calling checkpoint listeners before saving checkpoint %d...",
step)
for l in self._listeners:
l.before_save(session, step)
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
self._get_saver().save(session, self._save_path, global_step=step,
write_meta_graph=self._save_graph_def)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
logging.info("Calling checkpoint listeners after saving checkpoint %d...",
step)
should_stop = False
for l in self._listeners:
if l.after_save(session, step):
logging.info(
"A CheckpointSaverListener requested that training be stopped. "
"listener: {}".format(l))
should_stop = True
return should_stop
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
@tf_export(v1=["train.StepCounterHook"])
| CheckpointSaverHook |
python | pytorch__pytorch | test/quantization/core/experimental/test_floatx.py | {
"start": 13956,
"end": 15120
} | class ____(TestCase):
# TODO(#146647): make the testing generic for shell dtypes
def test_float4_e2m1fn_x2(self, device):
# can create a tensor of dtype float4
x1 = torch.empty(4096, 4096, device=device, dtype=torch.float4_e2m1fn_x2)
# can create a string (so printing will work)
str(x1)
# can view float4_e2m1fn_x2 as uint8
x2 = x1.view(torch.uint8)
# can view uint8 as float4_e2m1fn_x2
x2.view(torch.float4_e2m1fn_x2)
def test_f4_save_load(self, device):
x1 = torch.randint(0, 10, (4, 4), device=device, dtype=torch.uint8).view(
torch.float4_e2m1fn_x2
)
with TemporaryFileName() as fname:
torch.save(x1, fname)
x1_save_load = torch.load(fname)
# TODO(#146647): make this and all other shell dtypes support equality
# comparison
torch.testing.assert_close(
x1.view(torch.uint8), x1_save_load.view(torch.uint8), atol=0, rtol=0
)
instantiate_device_type_tests(TestFloat8Dtype, globals())
instantiate_device_type_tests(TestFloat4Dtype, globals())
| TestFloat4Dtype |
python | automl__auto-sklearn | autosklearn/pipeline/components/classification/lda.py | {
"start": 574,
"end": 3282
} | class ____(AutoSklearnClassificationAlgorithm):
def __init__(self, shrinkage, tol, shrinkage_factor=0.5, random_state=None):
self.shrinkage = shrinkage
self.tol = tol
self.shrinkage_factor = shrinkage_factor
self.estimator = None
def fit(self, X, Y):
import sklearn.discriminant_analysis
import sklearn.multiclass
if check_none(self.shrinkage):
self.shrinkage_ = None
solver = "svd"
elif self.shrinkage == "auto":
self.shrinkage_ = "auto"
solver = "lsqr"
elif self.shrinkage == "manual":
self.shrinkage_ = float(self.shrinkage_factor)
solver = "lsqr"
else:
raise ValueError(self.shrinkage)
self.tol = float(self.tol)
estimator = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(
shrinkage=self.shrinkage_, tol=self.tol, solver=solver
)
if len(Y.shape) == 2 and Y.shape[1] > 1:
self.estimator = sklearn.multiclass.OneVsRestClassifier(estimator, n_jobs=1)
else:
self.estimator = estimator
self.estimator.fit(X, Y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
df = self.estimator.predict_proba(X)
return softmax(df)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "LDA",
"name": "Linear Discriminant Analysis",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
shrinkage = CategoricalHyperparameter(
"shrinkage", ["None", "auto", "manual"], default_value="None"
)
shrinkage_factor = UniformFloatHyperparameter("shrinkage_factor", 0.0, 1.0, 0.5)
tol = UniformFloatHyperparameter(
"tol", 1e-5, 1e-1, default_value=1e-4, log=True
)
cs.add_hyperparameters([shrinkage, shrinkage_factor, tol])
cs.add_condition(EqualsCondition(shrinkage_factor, shrinkage, "manual"))
return cs
| LDA |
python | kamyu104__LeetCode-Solutions | Python/longest-balanced-subarray-i.py | {
"start": 75,
"end": 3018
} | class ____(object):
def longestBalanced(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
class SegmentTree(object):
def __init__(self, N):
self.min = [0]*(1<<((N-1).bit_length()+1))
self.max = [0]*(1<<((N-1).bit_length()+1))
self.base = len(self.min)>>1
self.lazy = [0]*self.base
def __apply(self, x, val):
self.min[x] += val
self.max[x] += val
if x < self.base:
self.lazy[x] += val
def __push(self, x):
for h in reversed(xrange(1, x.bit_length())):
y = x>>h
if self.lazy[y]:
self.__apply(y<<1, self.lazy[y])
self.__apply((y<<1)|1, self.lazy[y])
self.lazy[y] = 0
def update(self, L, R, h): # Time: O(logN), Space: O(N)
def pull(x):
while x > 1:
x >>= 1
self.min[x] = self.min[x<<1] if self.min[x<<1] < self.min[(x<<1)|1] else self.min[(x<<1)|1]
self.max[x] = self.max[x<<1] if self.max[x<<1] > self.max[(x<<1)|1] else self.max[(x<<1)|1]
if self.lazy[x]:
self.min[x] += self.lazy[x]
self.max[x] += self.lazy[x]
L += self.base
R += self.base
L0, R0 = L, R
while L <= R:
if L & 1: # is right child
self.__apply(L, h)
L += 1
if R & 1 == 0: # is left child
self.__apply(R, h)
R -= 1
L >>= 1
R >>= 1
pull(L0)
pull(R0)
def binary_search(self, x):
i = 1
while not i >= self.base:
if self.lazy[i]:
self.__apply(i<<1, self.lazy[i])
self.__apply((i<<1)|1, self.lazy[i])
self.lazy[i] = 0
i <<= 1
if not self.min[i] <= x <= self.max[i]:
i |= 1
return i-self.base
n = len(nums)+1
st = SegmentTree(n)
result = curr = 0
lookup = {}
for i, x in enumerate(nums, 1):
d = +1 if x&1 else -1
if x in lookup:
st.update(lookup[x], n-1, -d)
curr -= d
curr += d
lookup[x] = i
st.update(lookup[x], n-1, +d)
l = i-st.binary_search(curr)
if l > result:
result = l
return result
# Time: O(n^2)
# Space: O(n)
# brute force
| Solution |
python | FactoryBoy__factory_boy | examples/flask_alchemy/demoapp_factories.py | {
"start": 211,
"end": 370
} | class ____(BaseFactory):
class Meta:
model = demoapp.User
username = factory.fuzzy.FuzzyText()
email = factory.fuzzy.FuzzyText()
| UserFactory |
python | pytorch__pytorch | torch/_inductor/select_algorithm.py | {
"start": 7573,
"end": 8721
} | class ____:
body: IndentedBuffer
template_mask: Optional[str] = None
template_out_shape: Optional[Union[str, tuple[str]]] = None
compute: IndentedBuffer = dataclasses.field(default_factory=IndentedBuffer)
indexing_code: IndentedBuffer = dataclasses.field(default_factory=IndentedBuffer)
loads: IndentedBuffer = dataclasses.field(default_factory=IndentedBuffer)
stores: IndentedBuffer = dataclasses.field(default_factory=IndentedBuffer)
ops_handler: Optional[V.WrapperHandler] = None # type: ignore[name-defined]
cse: Optional["CSE[Any]"] = None
# only copied over if not None
range_trees: Optional[list["IterationRangesRoot"]] = None
range_tree_nodes: Optional[dict[sympy.Symbol, "IterationRangesEntry"]] = None
numels: Optional[dict[str, sympy.Expr]] = None
def __post_init__(self):
self.only_copy_if_non_none_fields = (
"range_trees",
"range_tree_nodes",
"numels",
"cse",
)
def to_dict(self):
return {
field.name: getattr(self, field.name) for field in dataclasses.fields(self)
}
| SubgraphInfo |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/data.py | {
"start": 12897,
"end": 13233
} | class ____(SpanProperty):
def __init__(self, spans: "Spans") -> None:
super().__init__(spans)
self.result = IntList.of_length(len(self.spans))
def start_span(self, i: int, label_index: int) -> None:
self.result[i] = len(self.span_stack)
def finish(self) -> IntList:
return self.result
| _depths |
python | huggingface__transformers | src/transformers/models/yoso/modeling_yoso.py | {
"start": 27827,
"end": 30694
} | class ____(YosoPreTrainedModel):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "yoso.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
self.yoso = YosoModel(config)
self.cls = YosoOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.yoso(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| YosoForMaskedLM |
python | django-haystack__django-haystack | haystack/backends/whoosh_backend.py | {
"start": 2151,
"end": 2455
} | class ____(HtmlFormatter):
"""
This is a HtmlFormatter simpler than the whoosh.HtmlFormatter.
We use it to have consistent results across backends. Specifically,
Solr, Xapian and Elasticsearch are using this formatting.
"""
template = "<%(tag)s>%(t)s</%(tag)s>"
| WhooshHtmlFormatter |
python | PrefectHQ__prefect | tests/server/models/test_variables.py | {
"start": 6667,
"end": 8108
} | class ____:
async def test_update_name(
self,
session,
variable,
):
new_name = "another_name"
updated = await update_variable(
session,
variable.id,
VariableUpdate(name=new_name), # type: ignore
)
assert updated
updated_variable = await read_variable(session, variable.id) # type: ignore
assert updated_variable
assert updated_variable.name == new_name
async def test_update_value(
self,
session,
variable,
):
new_value = "another_name"
updated = await update_variable(
session,
variable.id,
VariableUpdate(value=new_value), # type: ignore
)
assert updated
updated_variable = await read_variable(session, variable.id) # type: ignore
assert updated_variable
assert updated_variable.value == new_value
async def test_update_tags(
self,
session,
variable,
):
new_tags = ["new-tag-123"]
updated = await update_variable(
session,
variable.id,
VariableUpdate(tags=new_tags), # type: ignore
)
assert updated
updated_variable = await read_variable(session, variable.id) # type: ignore
assert updated_variable
assert updated_variable.tags == new_tags
| TestUpdateVariable |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 5810,
"end": 6936
} | class ____(Benchmark):
r"""
Mishra 1 objective function.
This class defines the Mishra 1 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Mishra01}}(x) = (1 + x_n)^{x_n}
where
.. math::
x_n = n - \sum_{i=1}^{n-1} x_i
with :math:`x_i \in [0, 1]` for :math:`i =1, ..., n`.
*Global optimum*: :math:`f(x) = 2` for :math:`x_i = 1` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([0.0] * self.N,
[1.0 + 1e-9] * self.N))
self.global_optimum = [[1.0 for _ in range(self.N)]]
self.fglob = 2.0
def fun(self, x, *args):
self.nfev += 1
xn = self.N - sum(x[0:-1])
return (1 + xn) ** xn
| Mishra01 |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 29955,
"end": 30395
} | class ____(VOTableSpecWarning):
"""
Bit values do not support masking. This warning is raised upon
setting masked data in a bit column.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:datatypes>`__,
`1.2
<http://www.ivoa.net/documents/VOTable/20091130/REC-VOTable-1.2.html#sec:datatypes>`__
"""
message_template = "Bit values can not be masked"
| W39 |
python | gevent__gevent | src/gevent/libuv/watcher.py | {
"start": 20685,
"end": 21187
} | class ____(_SimulatedWithAsyncMixin,
_base.ForkMixin,
watcher):
# We'll have to implement this one completely manually.
_watcher_skip_ffi = False
def _register_loop_callback(self):
self.loop._fork_watchers.add(self)
def _unregister_loop_callback(self):
try:
# stop() should be idempotent
self.loop._fork_watchers.remove(self)
except KeyError:
pass
def _on_fork(self):
self._async.send()
| fork |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_union.py | {
"start": 3363,
"end": 3815
} | class ____(ModelA):
pass
@pytest.mark.parametrize('input_value', [ModelA(b'bite', 2.3456), SubclassA(b'bite', 2.3456)])
def test_model_a(model_serializer: SchemaSerializer, input_value):
assert model_serializer.to_python(input_value) == {'a': b'bite', 'b': '2.3'}
assert model_serializer.to_python(input_value, mode='json') == {'a': 'bite', 'b': '2.3'}
assert model_serializer.to_json(input_value) == b'{"a":"bite","b":"2.3"}'
| SubclassA |
python | cython__cython | Cython/Build/Tests/TestStripLiterals.py | {
"start": 131,
"end": 5285
} | class ____(unittest.TestCase):
maxDiff = None
@staticmethod
def _rebuild_string(stripped, literals):
def lookup(match):
return literals[match.group()]
return re.sub("__Pyx_L[0-9]+_", lookup, stripped)
def test_strip_string_literals(self):
def strip_equals(s, expected):
stripped, literals = strip_string_literals(s)
self.assertEqual(expected, stripped)
recovered = self._rebuild_string(stripped, literals)
self.assertEqual(s, recovered)
unchanged = [
"",
"""abc""",
"""123""",
"""func(123)""",
""" '' """,
""" '''''''''''' """,
""" '''''''''''''' """,
]
tests = [(code, code) for code in unchanged] + [
# strings and quotes
('"x"',
'"__Pyx_L1_"'),
("'x'",
"'__Pyx_L1_'"),
(""" '"' "'" """,
""" '__Pyx_L1_' "__Pyx_L2_" """),
(""" '''' ''' """,
""" '''__Pyx_L1_''' """),
(''' """" """ ''',
''' """__Pyx_L1_""" '''),
(" '''a\n''' ",
" '''__Pyx_L1_''' "),
# escapes
(r"'a\'b'",
"'__Pyx_L1_'"),
(r"'a\\'",
"'__Pyx_L1_'"),
(r"'a\\\'b'",
"'__Pyx_L1_'"),
# string prefixes
("u'abc'",
"u'__Pyx_L1_'"),
(r"r'abc\\'",
"r'__Pyx_L1_'"),
(r"ru'abc\\'",
"ru'__Pyx_L1_'"),
# comments
("abc # foo",
"abc #__Pyx_L1_"),
("abc # 'x'",
"abc #__Pyx_L1_"),
("'abc#'",
"'__Pyx_L1_'"),
# special commands
("include 'a.pxi' # something here",
"include '__Pyx_L1_' #__Pyx_L2_"),
("cdef extern from 'a.h': # comment",
"cdef extern from '__Pyx_L1_': #__Pyx_L2_"),
# mixed strings
(""" func('xyz') + " " + "" '' # '' | "" "123" 'xyz' "' """,
""" func('__Pyx_L1_') + "__Pyx_L2_" + "" '' #__Pyx_L3_"""),
(""" f'f' """,
""" f'__Pyx_L1_' """),
(""" f'a{123}b' """,
""" f'__Pyx_L1_{123}__Pyx_L2_' """),
(""" f'{1}{f'xyz'}' """,
""" f'{1}{f'__Pyx_L1_'}' """),
(""" f'{f'''xyz{f\"""abc\"""}'''}' """,
""" f'{f'''__Pyx_L1_{f\"""__Pyx_L2_\"""}'''}' """),
(""" f'{{{{{"abc"}}}}}{{}}{{' == '{{abc}}{}{' """,
""" f'__Pyx_L1_{"__Pyx_L2_"}__Pyx_L3_' == '__Pyx_L4_' """),
("f'" + ('{x} ' * 250) + "{x:{width}} '",
"f'" + ''.join([f'{{x}}__Pyx_L{n}_' for n in range(1, 251)]) + "{x:{width}}__Pyx_L251_'")
]
for code, expected in tests:
with self.subTest(code=code):
strip_equals(code, expected) # plain
code = code.strip()
expected = expected.strip()
with self.subTest(code=code):
strip_equals(code, expected) # stripped
code += "\n"
expected += "\n"
with self.subTest(code=code):
strip_equals(code, expected) # +EOL
# GH-5977: unclosed string literal
strip_equals(
""" print("Say something: %s' % something) """,
""" print("__Pyx_L1_"""
)
def _test_all_files(self, base_dir, file_paths):
_find_leftover_string = re.compile(r"""[^_'"}](['"]+)[^_'"{]""").search
for file_path in sorted(file_paths):
with self.subTest(file=str(file_path.relative_to(base_dir))):
with open_source_file(str(file_path)) as f:
code = f.read()
stripped, literals = strip_string_literals(code)
match = _find_leftover_string(stripped)
if match and len(match.group(1)) != 2:
match_pos = match.start() + 1
self.fail(f"Leftover string found: {stripped[match_pos - 12 : match_pos + 12]!r}")
recovered = self._rebuild_string(stripped, literals)
self.assertEqual(code, recovered)
def test_strip_string_literals_py_files(self):
# process all .py files in the Cython package
package_dir = pathlib.Path(__file__).absolute().parents[2]
assert package_dir.name == 'Cython'
base_dir = package_dir.parent
self._test_all_files(base_dir, package_dir.rglob("*.py"))
def test_strip_string_literals_test_files(self):
# process all .py[x] files in the tests package
base_dir = pathlib.Path(__file__).absolute().parents[3]
tests_dir = base_dir / 'tests'
test_files = []
for test_subdir in tests_dir.iterdir():
if test_subdir.is_dir() and test_subdir.name != 'errors':
test_files.extend(test_subdir.rglob("*.py"))
test_files.extend(test_subdir.rglob("*.pyx"))
self._test_all_files(base_dir, test_files)
| TestStripLiterals |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 43993,
"end": 44207
} | class ____(TypedDict, total=False):
"""
:class:`altair.AxisResolveMap` ``TypedDict`` wrapper.
Parameters
----------
x
y
"""
x: ResolveMode_T
y: ResolveMode_T
| AxisResolveMapKwds |
python | davidhalter__jedi | test/completion/precedence.py | {
"start": 705,
"end": 1304
} | class ____():
foo = 2
#? int()
(X.foo ** 3)
# -----------------
# assignments
# -----------------
x = [1, 'a', 1.0]
i = 0
i += 1
i += 1
#? float()
x[i]
i = 1
i += 1
i -= 3
i += 1
#? int()
x[i]
# -----------------
# in
# -----------------
if 'X' in 'Y':
a = 3
else:
a = ''
# For now don't really check for truth values. So in should return both
# results.
#? str() int()
a
if 'X' not in 'Y':
b = 3
else:
b = ''
# For now don't really check for truth values. So in should return both
# results.
#? str() int()
b
# -----------------
# for flow assignments
# -----------------
| X |
python | great-expectations__great_expectations | docs/docusaurus/docs/core/trigger_actions_based_on_results/_examples/create_a_custom_action.py | {
"start": 709,
"end": 2261
} | class ____(ValidationAction):
# </snippet>
# 2. Set the `type` attribute to a unique string that identifies the Action.
# <snippet name="docs/docusaurus/docs/core/trigger_actions_based_on_results/_examples/create_a_custom_action.py - set type">
type: Literal["my_custom_action"] = "my_custom_action"
# </snippet>
# 3. Optional. Add any additional fields your Action requires at runtime.
# <snippet name="docs/docusaurus/docs/core/trigger_actions_based_on_results/_examples/create_a_custom_action.py - add custom fields">
my_custom_str_field: str
# </snippet>
# 4. Override the `run()` method to perform the desired task.
# <snippet name="docs/docusaurus/docs/core/trigger_actions_based_on_results/_examples/create_a_custom_action.py - override run">
@override
def run(
self,
checkpoint_result: CheckpointResult,
action_context: Union[
ActionContext, None
], # Contains results from prior Actions in the same Checkpoint run.
) -> dict:
# Domain-specific logic
self._do_my_custom_action(checkpoint_result)
# Optional. Access custom fields you provide the Action at runtime.
extra_context = self.my_custom_str_field
# Return information about the Action
return {"some": "info", "extra_context": extra_context}
def _do_my_custom_action(self, checkpoint_result: CheckpointResult):
# Perform custom logic based on the validation results.
...
# </snippet>
# </snippet>
| MyCustomAction |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/event_log/sql_event_log.py | {
"start": 4542,
"end": 138788
} | class ____(EventLogStorage):
"""Base class for SQL backed event log storages.
Distinguishes between run-based connections and index connections in order to support run-level
sharding, while maintaining the ability to do cross-run queries
"""
@abstractmethod
def run_connection(self, run_id: Optional[str]) -> ContextManager[Connection]:
"""Context manager yielding a connection to access the event logs for a specific run.
Args:
run_id (Optional[str]): Enables those storages which shard based on run_id, e.g.,
SqliteEventLogStorage, to connect appropriately.
"""
@abstractmethod
def index_connection(self) -> ContextManager[Connection]:
"""Context manager yielding a connection to access cross-run indexed tables."""
@contextmanager
def index_transaction(self) -> Iterator[Connection]:
"""Context manager yielding a connection to the index shard that has begun a transaction."""
with self.index_connection() as conn:
if conn.in_transaction():
yield conn
else:
with conn.begin():
yield conn
@abstractmethod
def upgrade(self) -> None:
"""This method should perform any schema migrations necessary to bring an
out-of-date instance of the storage up to date.
"""
@abstractmethod
def has_table(self, table_name: str) -> bool:
"""This method checks if a table exists in the database."""
def prepare_insert_event(self, event: EventLogEntry) -> Any:
"""Helper method for preparing the event log SQL insertion statement. Abstracted away to
have a single place for the logical table representation of the event, while having a way
for SQL backends to implement different execution implementations for `store_event`. See
the `dagster-postgres` implementation which overrides the generic SQL implementation of
`store_event`.
"""
# https://stackoverflow.com/a/54386260/324449
return SqlEventLogStorageTable.insert().values(**self._event_to_row(event))
def prepare_insert_event_batch(self, events: Sequence[EventLogEntry]) -> Any:
# https://stackoverflow.com/a/54386260/324449
return SqlEventLogStorageTable.insert().values(
[self._event_to_row(event) for event in events]
)
def _event_to_row(self, event: EventLogEntry) -> dict[str, Any]:
dagster_event_type = None
asset_key_str = None
partition = None
step_key = event.step_key
if event.is_dagster_event:
dagster_event = event.get_dagster_event()
dagster_event_type = dagster_event.event_type_value
step_key = dagster_event.step_key
if dagster_event.asset_key:
check.inst_param(dagster_event.asset_key, "asset_key", AssetKey)
asset_key_str = dagster_event.asset_key.to_string()
if dagster_event.partition:
partition = dagster_event.partition
return {
"run_id": event.run_id,
"event": serialize_value(event),
"dagster_event_type": dagster_event_type,
"timestamp": self._event_insert_timestamp(event),
"step_key": step_key,
"asset_key": asset_key_str,
"partition": partition,
}
def has_asset_key_col(self, column_name: str) -> bool:
with self.index_connection() as conn:
column_names = [x.get("name") for x in db.inspect(conn).get_columns(AssetKeyTable.name)]
return column_name in column_names
def has_asset_key_index_cols(self) -> bool:
return self.has_asset_key_col("last_materialization_timestamp")
def store_asset_event(self, event: EventLogEntry, event_id: int):
check.inst_param(event, "event", EventLogEntry)
if not (event.dagster_event and event.dagster_event.asset_key):
return
# We switched to storing the entire event record of the last materialization instead of just
# the AssetMaterialization object, so that we have access to metadata like timestamp,
# pipeline, run_id, etc.
#
# This should make certain asset queries way more performant, without having to do extra
# queries against the event log.
#
# This should be accompanied by a schema change in 0.12.0, renaming `last_materialization`
# to `last_materialization_event`, for clarity. For now, we should do some back-compat.
#
# https://github.com/dagster-io/dagster/issues/3945
values = self._get_asset_entry_values(event, event_id, self.has_asset_key_index_cols())
if not values:
return
insert_statement = AssetKeyTable.insert().values(
asset_key=event.dagster_event.asset_key.to_string(), **values
)
update_statement = (
AssetKeyTable.update()
.values(**values)
.where(
AssetKeyTable.c.asset_key == event.dagster_event.asset_key.to_string(),
)
)
with self.index_connection() as conn:
try:
conn.execute(insert_statement)
except db_exc.IntegrityError:
conn.execute(update_statement)
    def _get_asset_entry_values(
        self, event: EventLogEntry, event_id: int, has_asset_key_index_cols: bool
    ) -> dict[str, Any]:
        """Compute the AssetKeyTable column values to upsert for a single asset event.

        Returns an empty dict when the event type contributes nothing, which causes the
        caller (`store_asset_event`) to skip the write entirely.
        """
        # The AssetKeyTable contains a `last_materialization_timestamp` column that is exclusively
        # used to determine if an asset exists (last materialization timestamp > wipe timestamp).
        # This column is used nowhere else, and as of AssetObservation/AssetMaterializationPlanned
        # event creation, we want to extend this functionality to ensure that assets with any event
        # (observation, materialization, or materialization planned) yielded with timestamp
        # > wipe timestamp display in the Dagster UI.

        # As of the following PRs, we update last_materialization_timestamp to store the timestamp
        # of the latest asset observation, materialization, or materialization_planned that has occurred.
        # https://github.com/dagster-io/dagster/pull/6885
        # https://github.com/dagster-io/dagster/pull/7319

        entry_values: dict[str, Any] = {}
        dagster_event = check.not_none(event.dagster_event)
        if dagster_event.is_step_materialization:
            # Store the full event record so consumers get run_id/timestamp metadata
            # without a second event-log query.
            entry_values.update(
                {
                    "last_materialization": serialize_value(
                        EventLogRecord(
                            storage_id=event_id,
                            event_log_entry=event,
                        )
                    ),
                    "last_run_id": event.run_id,
                }
            )
            if has_asset_key_index_cols:
                entry_values.update(
                    {
                        "last_materialization_timestamp": datetime_from_timestamp(event.timestamp),
                    }
                )
        elif dagster_event.is_asset_materialization_planned:
            # The AssetKeyTable also contains a `last_run_id` column that is updated upon asset
            # materialization. This column was not being used until the below PR. This new change
            # writes to the column upon `ASSET_MATERIALIZATION_PLANNED` events to fetch the last
            # run id for a set of assets in one roundtrip call to event log storage.
            # https://github.com/dagster-io/dagster/pull/7319
            entry_values.update({"last_run_id": event.run_id})
            if has_asset_key_index_cols:
                entry_values.update(
                    {
                        "last_materialization_timestamp": datetime_from_timestamp(event.timestamp),
                    }
                )
        elif dagster_event.is_asset_observation:
            # Observations only bump the existence timestamp; they do not change
            # last_materialization or last_run_id.
            if has_asset_key_index_cols:
                entry_values.update(
                    {
                        "last_materialization_timestamp": datetime_from_timestamp(event.timestamp),
                    }
                )

        return entry_values
    def store_asset_event_tags(
        self, events: Sequence[EventLogEntry], event_ids: Sequence[int]
    ) -> None:
        """Insert indexed tag rows for a batch of asset events.

        `events` and `event_ids` are parallel sequences; only tags selected by
        `_tags_for_asset_event` (i.e. tags configured for indexing) are written.
        """
        check.sequence_param(events, "events", EventLogEntry)
        check.sequence_param(event_ids, "event_ids", int)

        all_values = [
            dict(
                event_id=event_id,
                asset_key=check.not_none(event.get_dagster_event().asset_key).to_string(),
                key=key,
                value=value,
                event_timestamp=self._event_insert_timestamp(event),
            )
            for event_id, event in zip(event_ids, events)
            for key, value in self._tags_for_asset_event(event).items()
        ]

        # Only execute if tags table exists. This is to support OSS users who have not yet run the
        # migration to create the table. On read, we will throw an error if the table does not
        # exist.
        if len(all_values) > 0 and self.has_table(AssetEventTagsTable.name):
            with self.index_connection() as conn:
                conn.execute(AssetEventTagsTable.insert(), all_values)
def _tags_for_asset_event(self, event: EventLogEntry) -> Mapping[str, str]:
tags = {}
if event.dagster_event and event.dagster_event.asset_key:
if event.dagster_event.is_step_materialization:
tags = (
event.get_dagster_event().step_materialization_data.materialization.tags or {}
)
elif event.dagster_event.is_asset_observation:
tags = event.get_dagster_event().asset_observation_data.asset_observation.tags
keys_to_index = self.get_asset_tags_to_index(set(tags.keys()))
return {k: v for k, v in tags.items() if k in keys_to_index}
    def store_event(self, event: EventLogEntry) -> None:
        """Store an event corresponding to a pipeline run.

        Writes the raw event row on the run connection, then (for asset-related events)
        updates the asset summary row, tag index, and asset-check tables.

        Args:
            event (EventLogEntry): The event to store.
        """
        check.inst_param(event, "event", EventLogEntry)
        insert_event_statement = self.prepare_insert_event(event)
        run_id = event.run_id

        event_id = None

        with self.run_connection(run_id) as conn:
            result = conn.execute(insert_event_statement)
            event_id = result.inserted_primary_key[0]

        if (
            event.is_dagster_event
            and event.dagster_event_type in ASSET_EVENTS
            and event.dagster_event.asset_key  # type: ignore
        ):
            # NOTE(review): event_id is passed to store_asset_event before the None check
            # below — presumably inserted_primary_key is always populated here; confirm.
            self.store_asset_event(event, event_id)

            if event_id is None:
                raise DagsterInvariantViolationError(
                    "Cannot store asset event tags for null event id."
                )

            self.store_asset_event_tags([event], [event_id])

        if event.is_dagster_event and event.dagster_event_type in ASSET_CHECK_EVENTS:
            self.store_asset_check_event(event, event_id)
    def get_records_for_run(
        self,
        run_id,
        cursor: Optional[str] = None,
        of_type: Optional[Union[DagsterEventType, set[DagsterEventType]]] = None,
        limit: Optional[int] = None,
        ascending: bool = True,
    ) -> EventLogConnection:
        """Get all of the logs corresponding to a run.

        Args:
            run_id (str): The id of the run for which to fetch logs.
            cursor (Optional[str]): Serialized EventLogCursor; logs strictly after (or
                before, when descending) the cursor position are returned.
            of_type (Optional[DagsterEventType]): the dagster event type to filter the logs.
            limit (Optional[int]): the maximum number of events to fetch
            ascending (bool): Whether to return records in ascending storage-id order.

        Returns:
            EventLogConnection: records plus a cursor for fetching the next page.

        Raises:
            DagsterEventLogInvalidForRun: if any stored event fails to deserialize.
        """
        check.str_param(run_id, "run_id")
        check.opt_str_param(cursor, "cursor")

        check.invariant(not of_type or isinstance(of_type, (DagsterEventType, frozenset, set)))

        dagster_event_types = (
            {of_type}
            if isinstance(of_type, DagsterEventType)
            else check.opt_set_param(of_type, "dagster_event_type", of_type=DagsterEventType)
        )

        query = (
            db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])
            .where(SqlEventLogStorageTable.c.run_id == run_id)
            .order_by(
                SqlEventLogStorageTable.c.id.asc()
                if ascending
                else SqlEventLogStorageTable.c.id.desc()
            )
        )
        if dagster_event_types:
            query = query.where(
                SqlEventLogStorageTable.c.dagster_event_type.in_(
                    [dagster_event_type.value for dagster_event_type in dagster_event_types]
                )
            )

        # adjust 0 based index cursor to SQL offset
        if cursor is not None:
            cursor_obj = EventLogCursor.parse(cursor)
            if cursor_obj.is_offset_cursor():
                query = query.offset(cursor_obj.offset())
            elif cursor_obj.is_id_cursor():
                # Storage-id cursors filter by id rather than offsetting, so they remain
                # stable even when earlier rows are deleted.
                if ascending:
                    query = query.where(SqlEventLogStorageTable.c.id > cursor_obj.storage_id())
                else:
                    query = query.where(SqlEventLogStorageTable.c.id < cursor_obj.storage_id())

        if limit:
            query = query.limit(limit)

        with self.run_connection(run_id) as conn:
            results = conn.execute(query).fetchall()

        last_record_id = None
        try:
            records = []
            for (
                record_id,
                json_str,
            ) in results:
                records.append(
                    EventLogRecord(
                        storage_id=record_id,
                        event_log_entry=deserialize_value(json_str, EventLogEntry),
                    )
                )
                last_record_id = record_id
        except (seven.JSONDecodeError, DeserializationError) as err:
            raise DagsterEventLogInvalidForRun(run_id=run_id) from err

        if last_record_id is not None:
            next_cursor = EventLogCursor.from_storage_id(last_record_id).to_string()
        elif cursor:
            # record fetch returned no new logs, return the same cursor
            next_cursor = cursor
        else:
            # rely on the fact that all storage ids will be positive integers
            next_cursor = EventLogCursor.from_storage_id(-1).to_string()

        return EventLogConnection(
            records=records,
            cursor=next_cursor,
            has_more=bool(limit and len(results) == limit),
        )
    def get_stats_for_run(self, run_id: str) -> DagsterRunStatsSnapshot:
        """Aggregate run-level stats (step counts, lifecycle timestamps) from the event log.

        Counts and latest timestamps are computed in SQL, grouped by event type, so no
        event payloads need to be deserialized.

        Raises:
            DagsterEventLogInvalidForRun: if stats assembly fails on bad stored data.
        """
        check.str_param(run_id, "run_id")

        query = (
            db_select(
                [
                    SqlEventLogStorageTable.c.dagster_event_type,
                    db.func.count().label("n_events_of_type"),
                    db.func.max(SqlEventLogStorageTable.c.timestamp).label("last_event_timestamp"),
                ]
            )
            .where(
                db.and_(
                    SqlEventLogStorageTable.c.run_id == run_id,
                    SqlEventLogStorageTable.c.dagster_event_type.in_(
                        [event_type.value for event_type in RUN_STATS_EVENT_TYPES]
                    ),
                )
            )
            .group_by("dagster_event_type")
        )

        with self.run_connection(run_id) as conn:
            results = conn.execute(query).fetchall()

        try:
            counts = {}
            times = {}
            for result in results:
                (dagster_event_type, n_events_of_type, last_event_timestamp) = result
                check.invariant(dagster_event_type is not None)
                counts[dagster_event_type] = n_events_of_type
                times[dagster_event_type] = last_event_timestamp

            enqueued_time = times.get(DagsterEventType.PIPELINE_ENQUEUED.value, None)
            launch_time = times.get(DagsterEventType.PIPELINE_STARTING.value, None)
            start_time = times.get(DagsterEventType.PIPELINE_START.value, None)
            # end_time is the terminal event's timestamp, whichever terminal state occurred:
            # success, else failure, else canceled.
            end_time = times.get(
                DagsterEventType.PIPELINE_SUCCESS.value,
                times.get(
                    DagsterEventType.PIPELINE_FAILURE.value,
                    times.get(DagsterEventType.PIPELINE_CANCELED.value, None),
                ),
            )

            return DagsterRunStatsSnapshot(
                run_id=run_id,
                steps_succeeded=counts.get(DagsterEventType.STEP_SUCCESS.value, 0),
                steps_failed=counts.get(DagsterEventType.STEP_FAILURE.value, 0),
                materializations=counts.get(DagsterEventType.ASSET_MATERIALIZATION.value, 0),
                expectations=counts.get(DagsterEventType.STEP_EXPECTATION_RESULT.value, 0),
                enqueued_time=(
                    utc_datetime_from_naive(enqueued_time).timestamp() if enqueued_time else None
                ),
                launch_time=(
                    utc_datetime_from_naive(launch_time).timestamp() if launch_time else None
                ),
                start_time=(
                    utc_datetime_from_naive(start_time).timestamp() if start_time else None
                ),
                end_time=(utc_datetime_from_naive(end_time).timestamp() if end_time else None),
            )
        except (seven.JSONDecodeError, DeserializationError) as err:
            raise DagsterEventLogInvalidForRun(run_id=run_id) from err
    def get_step_stats_for_run(
        self, run_id: str, step_keys: Optional[Sequence[str]] = None
    ) -> Sequence[RunStepKeyStatsSnapshot]:
        """Compute per-step stats for a run by fetching raw step events and aggregating in Python.

        Args:
            run_id: The run to compute stats for.
            step_keys: Optional subset of step keys to restrict the computation to.

        Raises:
            DagsterEventLogInvalidForRun: if any stored event fails to deserialize.
        """
        check.str_param(run_id, "run_id")
        check.opt_list_param(step_keys, "step_keys", of_type=str)

        # Originally, this was two different queries:
        # 1) one query which aggregated top-level step stats by grouping by event type / step_key in
        #    a single query, using pure SQL (e.g. start_time, end_time, status, attempt counts).
        # 2) one query which fetched all the raw events for a specific event type and then inspected
        #    the deserialized event object to aggregate stats derived from sequences of events.
        #    (e.g. marker events, materializations, expectations resuls, attempts timing, etc.)
        #
        # For simplicity, we now just do the second type of query and derive the stats in Python
        # from the raw events.  This has the benefit of being easier to read and also the benefit of
        # being able to share code with the in-memory event log storage implementation.  We may
        # choose to revisit this in the future, especially if we are able to do JSON-column queries
        # in SQL as a way of bypassing the serdes layer in all cases.
        raw_event_query = (
            db_select([SqlEventLogStorageTable.c.event])
            .where(SqlEventLogStorageTable.c.run_id == run_id)
            .where(SqlEventLogStorageTable.c.step_key != None)  # noqa: E711
            .where(
                SqlEventLogStorageTable.c.dagster_event_type.in_(
                    [event_type.value for event_type in STEP_STATS_EVENT_TYPES]
                )
            )
            .order_by(SqlEventLogStorageTable.c.id.asc())
        )
        if step_keys:
            raw_event_query = raw_event_query.where(
                SqlEventLogStorageTable.c.step_key.in_(step_keys)
            )

        with self.run_connection(run_id) as conn:
            results = conn.execute(raw_event_query).fetchall()

        try:
            records = deserialize_values((json_str for (json_str,) in results), EventLogEntry)
            return build_run_step_stats_from_events(run_id, records)
        except (seven.JSONDecodeError, DeserializationError) as err:
            raise DagsterEventLogInvalidForRun(run_id=run_id) from err
def _apply_migration(self, migration_name, migration_fn, print_fn, force):
if self.has_secondary_index(migration_name):
if not force:
if print_fn:
print_fn(f"Skipping already applied data migration: {migration_name}")
return
if print_fn:
print_fn(f"Starting data migration: {migration_name}")
migration_fn()(self, print_fn)
self.enable_secondary_index(migration_name)
if print_fn:
print_fn(f"Finished data migration: {migration_name}")
def reindex_events(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:
"""Call this method to run any data migrations across the event_log table."""
for migration_name, migration_fn in EVENT_LOG_DATA_MIGRATIONS.items():
self._apply_migration(migration_name, migration_fn, print_fn, force)
def reindex_assets(self, print_fn: Optional[PrintFn] = None, force: bool = False) -> None:
"""Call this method to run any data migrations across the asset_keys table."""
for migration_name, migration_fn in ASSET_DATA_MIGRATIONS.items():
self._apply_migration(migration_name, migration_fn, print_fn, force)
    def wipe(self) -> None:
        """Clears the event log storage.

        Deletes all rows from the event log and every derived table (tags, partitions,
        concurrency, asset checks) on the run connection, then does the same on the
        index connection via `_wipe_index`.
        """
        # Should be overridden by SqliteEventLogStorage and other storages that shard based on
        # run_id

        # https://stackoverflow.com/a/54386260/324449
        with self.run_connection(run_id=None) as conn:
            conn.execute(SqlEventLogStorageTable.delete())
            conn.execute(AssetKeyTable.delete())

            # Optional tables are guarded by existence checks to support deployments
            # that have not yet run the migrations that create them.
            if self.has_table("asset_event_tags"):
                conn.execute(AssetEventTagsTable.delete())

            if self.has_table("dynamic_partitions"):
                conn.execute(DynamicPartitionsTable.delete())

            if self.has_table("concurrency_limits"):
                conn.execute(ConcurrencyLimitsTable.delete())

            if self.has_table("concurrency_slots"):
                conn.execute(ConcurrencySlotsTable.delete())

            if self.has_table("pending_steps"):
                conn.execute(PendingStepsTable.delete())

            if self.has_table("asset_check_executions"):
                conn.execute(AssetCheckExecutionsTable.delete())

        self._wipe_index()
    def _wipe_index(self):
        """Clear event log and derived tables on the index connection.

        Mirrors the deletions in `wipe`, minus the concurrency_limits table.
        NOTE(review): unlike `wipe`, this does not delete from `concurrency_limits` —
        confirm whether that asymmetry is intentional.
        """
        with self.index_connection() as conn:
            conn.execute(SqlEventLogStorageTable.delete())
            conn.execute(AssetKeyTable.delete())

            # Optional tables are guarded by existence checks to support deployments
            # that have not yet run the migrations that create them.
            if self.has_table("asset_event_tags"):
                conn.execute(AssetEventTagsTable.delete())

            if self.has_table("dynamic_partitions"):
                conn.execute(DynamicPartitionsTable.delete())

            if self.has_table("concurrency_slots"):
                conn.execute(ConcurrencySlotsTable.delete())

            if self.has_table("pending_steps"):
                conn.execute(PendingStepsTable.delete())

            if self.has_table("asset_check_executions"):
                conn.execute(AssetCheckExecutionsTable.delete())
def delete_events(self, run_id: str) -> None:
with self.run_connection(run_id) as conn:
self.delete_events_for_run(conn, run_id)
with self.index_connection() as conn:
self.delete_events_for_run(conn, run_id)
if self.supports_global_concurrency_limits:
self.free_concurrency_slots_for_run(run_id)
    def delete_events_for_run(self, conn: Connection, run_id: str) -> None:
        """Delete a run's event rows and any asset-event tag rows that reference them.

        Asset-event ids are collected before the event rows are deleted so the tag
        rows can still be located afterward.
        """
        check.str_param(run_id, "run_id")

        # Collect ids of materialization/observation events first; their tag rows
        # must be removed from the tags table as well.
        records = conn.execute(
            db_select([SqlEventLogStorageTable.c.id]).where(
                db.and_(
                    SqlEventLogStorageTable.c.run_id == run_id,
                    db.or_(
                        SqlEventLogStorageTable.c.dagster_event_type
                        == DagsterEventType.ASSET_MATERIALIZATION.value,
                        SqlEventLogStorageTable.c.dagster_event_type
                        == DagsterEventType.ASSET_OBSERVATION.value,
                    ),
                )
            )
        ).fetchall()
        asset_event_ids = [record[0] for record in records]
        conn.execute(
            SqlEventLogStorageTable.delete().where(SqlEventLogStorageTable.c.run_id == run_id)
        )
        if asset_event_ids:
            conn.execute(
                AssetEventTagsTable.delete().where(
                    AssetEventTagsTable.c.event_id.in_(asset_event_ids)
                )
            )
    @property
    def is_persistent(self) -> bool:
        """SQL-backed event log storage persists across process restarts."""
        return True
    def update_event_log_record(self, record_id: int, event: EventLogEntry) -> None:
        """Utility method for migration scripts to update SQL representation of event records.

        Rewrites the serialized event plus the denormalized columns (event type,
        timestamp, step key, asset key) for an existing row.
        """
        check.int_param(record_id, "record_id")
        check.inst_param(event, "event", EventLogEntry)
        dagster_event_type = None
        asset_key_str = None
        if event.is_dagster_event:
            dagster_event_type = event.dagster_event.event_type_value  # type: ignore
            if event.dagster_event.asset_key:  # type: ignore
                check.inst_param(event.dagster_event.asset_key, "asset_key", AssetKey)  # type: ignore
                asset_key_str = event.dagster_event.asset_key.to_string()  # type: ignore

        with self.run_connection(run_id=event.run_id) as conn:
            conn.execute(
                SqlEventLogStorageTable.update()
                .where(SqlEventLogStorageTable.c.id == record_id)
                .values(
                    event=serialize_value(event),
                    dagster_event_type=dagster_event_type,
                    timestamp=self._event_insert_timestamp(event),
                    step_key=event.step_key,
                    asset_key=asset_key_str,
                )
            )
def get_event_log_table_data(self, run_id: str, record_id: int) -> Optional[SqlAlchemyRow]:
"""Utility method to test representation of the record in the SQL table. Returns all of
the columns stored in the event log storage (as opposed to the deserialized `EventLogEntry`).
This allows checking that certain fields are extracted to support performant lookups (e.g.
extracting `step_key` for fast filtering).
"""
with self.run_connection(run_id=run_id) as conn:
query = (
db_select([SqlEventLogStorageTable])
.where(SqlEventLogStorageTable.c.id == record_id)
.order_by(SqlEventLogStorageTable.c.id.asc())
)
return conn.execute(query).fetchone()
def has_secondary_index(self, name: str) -> bool:
"""This method uses a checkpoint migration table to see if summary data has been constructed
in a secondary index table. Can be used to checkpoint event_log data migrations.
"""
query = (
db_select([1])
.where(SecondaryIndexMigrationTable.c.name == name)
.where(SecondaryIndexMigrationTable.c.migration_completed != None) # noqa: E711
.limit(1)
)
with self.index_connection() as conn:
results = conn.execute(query).fetchall()
return len(results) > 0
    def enable_secondary_index(self, name: str) -> None:
        """This method marks an event_log data migration as complete, to indicate that a summary
        data migration is complete.

        Uses insert-then-update-on-conflict so re-running a migration refreshes the
        completion timestamp instead of failing.
        """
        query = SecondaryIndexMigrationTable.insert().values(
            name=name,
            migration_completed=datetime.now(),
        )
        with self.index_connection() as conn:
            try:
                conn.execute(query)
            except db_exc.IntegrityError:
                conn.execute(
                    SecondaryIndexMigrationTable.update()
                    .where(SecondaryIndexMigrationTable.c.name == name)
                    .values(migration_completed=datetime.now())
                )
    def _apply_filter_to_query(
        self,
        query: SqlAlchemyQuery,
        event_records_filter: EventRecordsFilter,
        asset_details: Optional[AssetDetails] = None,
        apply_cursor_filters: bool = True,
    ) -> SqlAlchemyQuery:
        """Apply the WHERE clauses implied by an EventRecordsFilter to an event-log query.

        Args:
            query: The base select over the event log table.
            event_records_filter: Filter holding event type, asset key/partitions,
                cursors, timestamps, and storage ids.
            asset_details: When set with a wipe timestamp, restricts results to
                post-wipe events.
            apply_cursor_filters: When False, before/after cursor clauses are skipped
                (used by run-sharded sqlite storage, whose cursors are not globally unique).
        """
        query = query.where(
            SqlEventLogStorageTable.c.dagster_event_type == event_records_filter.event_type.value
        )

        if event_records_filter.asset_key:
            query = query.where(
                SqlEventLogStorageTable.c.asset_key == event_records_filter.asset_key.to_string(),
            )

        if event_records_filter.asset_partitions:
            query = query.where(
                SqlEventLogStorageTable.c.partition.in_(event_records_filter.asset_partitions)
            )

        if asset_details and asset_details.last_wipe_timestamp:
            # Timestamps are stored as naive UTC; convert the wipe timestamp to match.
            query = query.where(
                SqlEventLogStorageTable.c.timestamp
                > datetime.fromtimestamp(asset_details.last_wipe_timestamp, timezone.utc).replace(
                    tzinfo=None
                )
            )

        if apply_cursor_filters:
            # allow the run-sharded sqlite implementation to disable this cursor filtering so that
            # it can implement its own custom cursor logic, as cursor ids are not unique across run
            # shards
            if event_records_filter.before_cursor is not None:
                before_cursor_id = (
                    event_records_filter.before_cursor.id
                    if isinstance(event_records_filter.before_cursor, RunShardedEventsCursor)
                    else event_records_filter.before_cursor
                )
                query = query.where(SqlEventLogStorageTable.c.id < before_cursor_id)

            if event_records_filter.after_cursor is not None:
                after_cursor_id = (
                    event_records_filter.after_cursor.id
                    if isinstance(event_records_filter.after_cursor, RunShardedEventsCursor)
                    else event_records_filter.after_cursor
                )
                query = query.where(SqlEventLogStorageTable.c.id > after_cursor_id)

        if event_records_filter.before_timestamp:
            query = query.where(
                SqlEventLogStorageTable.c.timestamp
                < datetime.fromtimestamp(
                    event_records_filter.before_timestamp, timezone.utc
                ).replace(tzinfo=None)
            )

        if event_records_filter.after_timestamp:
            query = query.where(
                SqlEventLogStorageTable.c.timestamp
                > datetime.fromtimestamp(
                    event_records_filter.after_timestamp, timezone.utc
                ).replace(tzinfo=None)
            )

        if event_records_filter.storage_ids:
            query = query.where(SqlEventLogStorageTable.c.id.in_(event_records_filter.storage_ids))

        return query
def _apply_tags_table_joins(
self,
table: db.Table,
tags: Mapping[str, Union[str, Sequence[str]]],
asset_key: Optional[AssetKey],
) -> db.Table:
event_id_col = table.c.id if table == SqlEventLogStorageTable else table.c.event_id
i = 0
for key, value in tags.items():
i += 1
tags_table = db_subquery(
db_select([AssetEventTagsTable]), f"asset_event_tags_subquery_{i}"
)
table = table.join(
tags_table,
db.and_(
event_id_col == tags_table.c.event_id,
not asset_key or tags_table.c.asset_key == asset_key.to_string(),
tags_table.c.key == key,
(
tags_table.c.value == value
if isinstance(value, str)
else tags_table.c.value.in_(value)
),
),
)
return table
def get_event_records(
self,
event_records_filter: EventRecordsFilter,
limit: Optional[int] = None,
ascending: bool = False,
) -> Sequence[EventLogRecord]:
return self._get_event_records(
event_records_filter=event_records_filter, limit=limit, ascending=ascending
)
def _get_event_records(
self,
event_records_filter: EventRecordsFilter,
limit: Optional[int] = None,
ascending: bool = False,
) -> Sequence[EventLogRecord]:
"""Returns a list of (record_id, record)."""
check.inst_param(event_records_filter, "event_records_filter", EventRecordsFilter)
check.opt_int_param(limit, "limit")
check.bool_param(ascending, "ascending")
if event_records_filter.asset_key:
asset_details = next(iter(self._get_assets_details([event_records_filter.asset_key])))
else:
asset_details = None
query = db_select(
[SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event]
).select_from(SqlEventLogStorageTable)
query = self._apply_filter_to_query(
query=query,
event_records_filter=event_records_filter,
asset_details=asset_details,
)
if limit:
query = query.limit(limit)
if ascending:
query = query.order_by(SqlEventLogStorageTable.c.id.asc())
else:
query = query.order_by(SqlEventLogStorageTable.c.id.desc())
with self.index_connection() as conn:
results = conn.execute(query).fetchall()
event_records = []
for row_id, json_str in results:
try:
event_record = deserialize_value(json_str, NamedTuple)
if not isinstance(event_record, EventLogEntry):
logging.warning(
"Could not resolve event record as EventLogEntry for id `%s`.", row_id
)
continue
event_records.append(
EventLogRecord(storage_id=row_id, event_log_entry=event_record)
)
except seven.JSONDecodeError:
logging.warning("Could not parse event record id `%s`.", row_id)
return event_records
def _get_event_records_result(
self,
event_records_filter: EventRecordsFilter,
limit: int,
cursor: Optional[str],
ascending: bool,
):
records = self._get_event_records(
event_records_filter=event_records_filter,
limit=limit,
ascending=ascending,
)
if records:
new_cursor = EventLogCursor.from_storage_id(records[-1].storage_id).to_string()
elif cursor:
new_cursor = cursor
else:
new_cursor = EventLogCursor.from_storage_id(-1).to_string()
has_more = len(records) == limit
return EventRecordsResult(records, cursor=new_cursor, has_more=has_more)
    def fetch_materializations(
        self,
        records_filter: Union[AssetKey, AssetRecordsFilter],
        limit: int,
        cursor: Optional[str] = None,
        ascending: bool = False,
    ) -> EventRecordsResult:
        """Fetch a page of ASSET_MATERIALIZATION event records.

        Accepts either a bare AssetKey (all materializations for that asset) or an
        AssetRecordsFilter with finer-grained constraints.
        """
        enforce_max_records_limit(limit)

        if isinstance(records_filter, AssetRecordsFilter):
            event_records_filter = records_filter.to_event_records_filter(
                event_type=DagsterEventType.ASSET_MATERIALIZATION,
                cursor=cursor,
                ascending=ascending,
            )
        else:
            # Bare AssetKey: translate the pagination cursor into before/after bounds.
            before_cursor, after_cursor = EventRecordsFilter.get_cursor_params(cursor, ascending)
            asset_key = records_filter
            event_records_filter = EventRecordsFilter(
                event_type=DagsterEventType.ASSET_MATERIALIZATION,
                asset_key=asset_key,
                before_cursor=before_cursor,
                after_cursor=after_cursor,
            )
        return self._get_event_records_result(event_records_filter, limit, cursor, ascending)
    def fetch_failed_materializations(
        self,
        records_filter: Union[AssetKey, AssetRecordsFilter],
        limit: int,
        cursor: Optional[str] = None,
        ascending: bool = False,
    ) -> EventRecordsResult:
        """Base implementation: always returns an empty result set.

        Presumably storages that track failed materializations override this —
        TODO confirm against subclasses.
        """
        return EventRecordsResult(records=[], cursor=cursor or "", has_more=False)
    def fetch_observations(
        self,
        records_filter: Union[AssetKey, AssetRecordsFilter],
        limit: int,
        cursor: Optional[str] = None,
        ascending: bool = False,
    ) -> EventRecordsResult:
        """Fetch a page of ASSET_OBSERVATION event records.

        Mirrors `fetch_materializations`, differing only in the event type queried.
        """
        enforce_max_records_limit(limit)

        if isinstance(records_filter, AssetRecordsFilter):
            event_records_filter = records_filter.to_event_records_filter(
                event_type=DagsterEventType.ASSET_OBSERVATION,
                cursor=cursor,
                ascending=ascending,
            )
        else:
            # Bare AssetKey: translate the pagination cursor into before/after bounds.
            before_cursor, after_cursor = EventRecordsFilter.get_cursor_params(cursor, ascending)
            asset_key = records_filter
            event_records_filter = EventRecordsFilter(
                event_type=DagsterEventType.ASSET_OBSERVATION,
                asset_key=asset_key,
                before_cursor=before_cursor,
                after_cursor=after_cursor,
            )
        return self._get_event_records_result(event_records_filter, limit, cursor, ascending)
    def fetch_run_status_changes(
        self,
        records_filter: Union[DagsterEventType, RunStatusChangeRecordsFilter],
        limit: int,
        cursor: Optional[str] = None,
        ascending: bool = False,
    ) -> EventRecordsResult:
        """Fetch a page of run-status-change event records for a single event type.

        Raises (via check.failed) when the event type is not a run-status event, or
        when a job-name filter is requested but unsupported by this storage.
        """
        enforce_max_records_limit(limit)

        event_type = (
            records_filter
            if isinstance(records_filter, DagsterEventType)
            else records_filter.event_type
        )
        if event_type not in EVENT_TYPE_TO_PIPELINE_RUN_STATUS:
            expected = ", ".join(EVENT_TYPE_TO_PIPELINE_RUN_STATUS.keys())
            check.failed(f"Expected one of {expected}, received {event_type.value}")

        before_cursor, after_cursor = EventRecordsFilter.get_cursor_params(cursor, ascending)
        event_records_filter = (
            records_filter.to_event_records_filter_without_job_names(cursor, ascending)
            if isinstance(records_filter, RunStatusChangeRecordsFilter)
            else EventRecordsFilter(
                event_type, before_cursor=before_cursor, after_cursor=after_cursor
            )
        )
        # Job-name filtering requires storage support (e.g. a job_name column).
        has_job_name_filter = (
            isinstance(records_filter, RunStatusChangeRecordsFilter) and records_filter.job_names
        )
        if has_job_name_filter and not self.supports_run_status_change_job_name_filter:
            check.failed(
                "Called fetch_run_status_changes with selectors, which are not supported with this storage."
            )
        return self._get_event_records_result(event_records_filter, limit, cursor, ascending)
    def get_logs_for_all_runs_by_log_id(
        self,
        after_cursor: int = -1,
        dagster_event_type: Optional[Union[DagsterEventType, set[DagsterEventType]]] = None,
        limit: Optional[int] = None,
    ) -> Mapping[int, EventLogEntry]:
        """Fetch events across all runs, keyed by storage id, starting after a cursor.

        Args:
            after_cursor: Only events with storage id strictly greater are returned.
            dagster_event_type: Optional event type (or set of types) to filter by.
                Passing multiple types is deprecated.
            limit: Optional maximum number of events to return.
        """
        check.int_param(after_cursor, "after_cursor")
        check.invariant(
            after_cursor >= -1,
            f"Don't know what to do with negative cursor {after_cursor}",
        )
        if isinstance(dagster_event_type, set) and len(dagster_event_type) > 1:
            deprecation_warning(
                "Support for multiple event types to get_logs_for_all_runs_by_log_id",
                "1.8.0",
                "You should break up your query into multiple calls, one for each event type.",
            )

        dagster_event_types = (
            {dagster_event_type}
            if isinstance(dagster_event_type, DagsterEventType)
            else check.opt_set_param(
                dagster_event_type, "dagster_event_type", of_type=DagsterEventType
            )
        )

        query = (
            db_select([SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])
            .where(SqlEventLogStorageTable.c.id > after_cursor)
            .order_by(SqlEventLogStorageTable.c.id.asc())
        )
        if dagster_event_types:
            query = query.where(
                SqlEventLogStorageTable.c.dagster_event_type.in_(
                    [dagster_event_type.value for dagster_event_type in dagster_event_types]
                )
            )
        if limit:
            query = query.limit(limit)

        with self.index_connection() as conn:
            results = conn.execute(query).fetchall()

        events = {}
        record_id = None
        try:
            for (
                record_id,
                json_str,
            ) in results:
                events[record_id] = deserialize_value(json_str, EventLogEntry)
        except (seven.JSONDecodeError, DeserializationError):
            # A bad row stops further accumulation; events gathered so far are returned.
            logging.warning("Could not parse event record id `%s`.", record_id)

        return events
def get_maximum_record_id(self) -> Optional[int]:
with self.index_connection() as conn:
result = conn.execute(db_select([db.func.max(SqlEventLogStorageTable.c.id)])).fetchone()
return result[0] # type: ignore
def _construct_asset_record_from_row(
self,
row,
last_materialization_record: Optional[EventLogRecord],
can_read_asset_status_cache: bool,
) -> AssetRecord:
from dagster._core.storage.partition_status_cache import AssetStatusCacheValue
asset_key = AssetKey.from_db_string(row["asset_key"])
if asset_key:
return AssetRecord(
storage_id=row["id"],
asset_entry=AssetEntry(
asset_key=asset_key,
last_materialization_record=last_materialization_record,
last_run_id=row["last_run_id"],
asset_details=AssetDetails.from_db_string(row["asset_details"]),
cached_status=(
AssetStatusCacheValue.from_db_string(row["cached_status_data"])
if can_read_asset_status_cache
else None
),
last_planned_materialization_storage_id=None,
),
)
else:
check.failed("Row did not contain asset key.")
def _get_latest_materialization_records(
self, raw_asset_rows
) -> Mapping[AssetKey, Optional[EventLogRecord]]:
# Given a list of raw asset rows, returns a mapping of asset key to latest asset materialization
# event log entry. Fetches backcompat EventLogEntry records when the last_materialization
# in the raw asset row is an AssetMaterialization.
to_backcompat_fetch = set()
results: dict[AssetKey, Optional[EventLogRecord]] = {}
for row in raw_asset_rows:
asset_key = AssetKey.from_db_string(row["asset_key"])
if not asset_key:
continue
event_or_materialization = (
deserialize_value(row["last_materialization"], NamedTuple)
if row["last_materialization"]
else None
)
if isinstance(event_or_materialization, EventLogRecord):
results[asset_key] = event_or_materialization
else:
to_backcompat_fetch.add(asset_key)
latest_event_subquery = db_subquery(
db_select(
[
SqlEventLogStorageTable.c.asset_key,
db.func.max(SqlEventLogStorageTable.c.id).label("id"),
]
)
.where(
db.and_(
SqlEventLogStorageTable.c.asset_key.in_(
[asset_key.to_string() for asset_key in to_backcompat_fetch]
),
SqlEventLogStorageTable.c.dagster_event_type
== DagsterEventType.ASSET_MATERIALIZATION.value,
)
)
.group_by(SqlEventLogStorageTable.c.asset_key),
"latest_event_subquery",
)
backcompat_query = db_select(
[
SqlEventLogStorageTable.c.asset_key,
SqlEventLogStorageTable.c.id,
SqlEventLogStorageTable.c.event,
]
).select_from(
latest_event_subquery.join(
SqlEventLogStorageTable,
db.and_(
SqlEventLogStorageTable.c.asset_key == latest_event_subquery.c.asset_key,
SqlEventLogStorageTable.c.id == latest_event_subquery.c.id,
),
)
)
with self.index_connection() as conn:
event_rows = db_fetch_mappings(conn, backcompat_query)
for row in event_rows:
asset_key = AssetKey.from_db_string(cast("Optional[str]", row["asset_key"]))
if asset_key:
results[asset_key] = EventLogRecord(
storage_id=cast("int", row["id"]),
event_log_entry=deserialize_value(cast("str", row["event"]), EventLogEntry),
)
return results
    def can_read_asset_status_cache(self) -> bool:
        """Return True if the schema has the `cached_status_data` column for reading."""
        return self.has_asset_key_col("cached_status_data")
    def can_write_asset_status_cache(self) -> bool:
        """Return True if the schema has the `cached_status_data` column for writing."""
        return self.has_asset_key_col("cached_status_data")
def wipe_asset_cached_status(self, asset_key: AssetKey) -> None:
if self.can_read_asset_status_cache():
check.inst_param(asset_key, "asset_key", AssetKey)
with self.index_connection() as conn:
conn.execute(
AssetKeyTable.update()
.values(dict(cached_status_data=None))
.where(
AssetKeyTable.c.asset_key == asset_key.to_string(),
)
)
def get_asset_records(
self, asset_keys: Optional[Sequence[AssetKey]] = None
) -> Sequence[AssetRecord]:
rows = self._fetch_asset_rows(asset_keys=asset_keys)
latest_materialization_records = self._get_latest_materialization_records(rows)
can_read_asset_status_cache = self.can_read_asset_status_cache()
asset_records: list[AssetRecord] = []
for row in rows:
asset_key = AssetKey.from_db_string(row["asset_key"])
if asset_key:
asset_records.append(
self._construct_asset_record_from_row(
row,
latest_materialization_records.get(asset_key),
can_read_asset_status_cache,
)
)
return asset_records
    def get_freshness_state_records(
        self, keys: Sequence[AssetKey]
    ) -> Mapping[AssetKey, FreshnessStateRecord]:
        """Return the latest FRESHNESS_STATE_CHANGE record per requested asset key.

        Keys with no such event are absent from the result.
        NOTE(review): an empty `keys` still issues a query with an empty IN clause —
        callers presumably pass non-empty keys; confirm.
        """
        # Find the max event id per asset key, then join back to fetch those events.
        latest_event_id_subquery = db_subquery(
            db_select(
                [
                    SqlEventLogStorageTable.c.asset_key,
                    db.func.max(SqlEventLogStorageTable.c.id).label("id"),
                ]
            )
            .where(
                db.and_(
                    SqlEventLogStorageTable.c.asset_key.in_(
                        [asset_key.to_string() for asset_key in keys]
                    ),
                    SqlEventLogStorageTable.c.dagster_event_type
                    == DagsterEventType.FRESHNESS_STATE_CHANGE.value,
                )
            )
            .group_by(SqlEventLogStorageTable.c.asset_key)
        )
        latest_event_query = db_select([SqlEventLogStorageTable.c.event]).select_from(
            latest_event_id_subquery.join(
                SqlEventLogStorageTable,
                db.and_(
                    SqlEventLogStorageTable.c.asset_key == latest_event_id_subquery.c.asset_key,
                    SqlEventLogStorageTable.c.id == latest_event_id_subquery.c.id,
                ),
            )
        )
        with self.index_connection() as conn:
            rows = db_fetch_mappings(conn, latest_event_query)
        entries = [deserialize_value(row["event"], EventLogEntry) for row in rows]
        records = [FreshnessStateRecord.from_event_log_entry(entry) for entry in entries]
        return {record.entity_key: record for record in records}
def get_asset_check_summary_records(
    self, asset_check_keys: Sequence[AssetCheckKey]
) -> Mapping[AssetCheckKey, AssetCheckSummaryRecord]:
    """Build a summary record for each asset check key from its execution history."""
    summaries = {}
    for asset_check_key in asset_check_keys:
        last_execution_record = self.get_asset_check_execution_history(asset_check_key, limit=1)
        if (
            not last_execution_record
            or last_execution_record[0].status
            in COMPLETED_ASSET_CHECK_EXECUTION_RECORD_STATUSES
        ):
            # the check has never been executed, or the latest record is already a
            # completed record; avoid refetching the last completed record
            last_completed_execution_record = last_execution_record
        else:
            last_completed_execution_record = self.get_asset_check_execution_history(
                asset_check_key, limit=1, status=COMPLETED_ASSET_CHECK_EXECUTION_RECORD_STATUSES
            )
        latest = last_execution_record[0] if last_execution_record else None
        summaries[asset_check_key] = AssetCheckSummaryRecord(
            asset_check_key=asset_check_key,
            last_check_execution_record=latest,
            last_run_id=latest.run_id if latest else None,
            last_completed_check_execution_record=(
                last_completed_execution_record[0] if last_completed_execution_record else None
            ),
        )
    return summaries
def has_asset_key(self, asset_key: AssetKey) -> bool:
    """Return True if a (non-wiped) row exists for the given asset key."""
    check.inst_param(asset_key, "asset_key", AssetKey)
    return len(self._fetch_asset_rows(asset_keys=[asset_key])) > 0
def all_asset_keys(self):
    """Return all (non-wiped) asset keys, sorted by serialized key string."""
    rows = self._fetch_asset_rows()
    parsed = (
        AssetKey.from_db_string(row["asset_key"])
        for row in sorted(rows, key=lambda r: r["asset_key"])
    )
    # drop rows whose stored key fails to parse
    return [key for key in parsed if key]
def get_asset_keys(
    self,
    prefix: Optional[Sequence[str]] = None,
    limit: Optional[int] = None,
    cursor: Optional[str] = None,
) -> Sequence[AssetKey]:
    """Return asset keys matching the prefix/cursor/limit filters, sorted by serialized key."""
    rows = self._fetch_asset_rows(prefix=prefix, limit=limit, cursor=cursor)
    parsed = (
        AssetKey.from_db_string(row["asset_key"])
        for row in sorted(rows, key=lambda r: r["asset_key"])
    )
    # drop rows whose stored key fails to parse
    return [key for key in parsed if key]
def get_latest_materialization_events(
    self, asset_keys: Iterable[AssetKey]
) -> Mapping[AssetKey, Optional[EventLogEntry]]:
    """Map each asset key to the event log entry of its latest materialization (or None)."""
    check.iterable_param(asset_keys, "asset_keys", AssetKey)
    rows = self._fetch_asset_rows(asset_keys=asset_keys)
    result: dict[AssetKey, Optional[EventLogEntry]] = {}
    for asset_key, record in self._get_latest_materialization_records(rows).items():
        result[asset_key] = record.event_log_entry if record is not None else None
    return result
def _fetch_asset_rows(
    self,
    asset_keys=None,
    prefix: Optional[Sequence[str]] = None,
    limit: Optional[int] = None,
    cursor: Optional[str] = None,
) -> Sequence[SqlAlchemyRow]:
    """Fetch rows of (asset_key, last_materialization, asset_details), applying filters.

    Differs from _fetch_raw_asset_rows in that it loops through to make sure
    enough rows are returned to satisfy the limit, since a raw fetch may drop
    rows for wiped assets after the fact.
    """
    should_query = True
    current_cursor = cursor
    if self.has_secondary_index(ASSET_KEY_INDEX_COLS):
        # if we have migrated, we can limit using SQL
        fetch_limit = limit
    else:
        # if we haven't migrated, overfetch in case the first N results are wiped
        fetch_limit = max(limit, MIN_ASSET_ROWS) if limit else None
    result = []
    while should_query:
        rows, has_more, current_cursor = self._fetch_raw_asset_rows(
            asset_keys=asset_keys, prefix=prefix, limit=fetch_limit, cursor=current_cursor
        )
        result.extend(rows)
        # keep paging only while a limit was requested, it is unmet, and the raw
        # fetch indicates more rows may exist
        should_query = bool(has_more) and bool(limit) and len(result) < cast("int", limit)
    is_partial_query = asset_keys is not None or bool(prefix) or bool(limit) or bool(cursor)
    # a full (unfiltered) scan whose final chunk is fully backfilled lets us flip
    # the secondary index flag so future fetches can filter wiped assets in SQL
    if not is_partial_query and self._can_mark_assets_as_migrated(rows):  # pyright: ignore[reportPossiblyUnboundVariable]
        self.enable_secondary_index(ASSET_KEY_INDEX_COLS)
    return result[:limit] if limit else result
def _fetch_raw_asset_rows(
    self,
    asset_keys: Optional[Sequence[AssetKey]] = None,
    prefix: Optional[Sequence[str]] = None,
    limit: Optional[int] = None,
    cursor=None,
) -> tuple[Iterable[SqlAlchemyRow], bool, Optional[str]]:
    """Fetch one chunk of asset rows (asset_key, last_materialization, asset_details).

    Applies the filters specified in the arguments. Does not guarantee that the
    number of rows returned matches the limit, since fetched rows may correspond
    to wiped assets and be filtered out here after the query runs.

    Returns:
        A tuple of (rows, has_more, cursor).
    """
    columns = [
        AssetKeyTable.c.id,
        AssetKeyTable.c.asset_key,
        AssetKeyTable.c.last_materialization,
        AssetKeyTable.c.last_run_id,
        AssetKeyTable.c.asset_details,
    ]
    if self.can_read_asset_status_cache():
        columns.extend([AssetKeyTable.c.cached_status_data])
    is_partial_query = asset_keys is not None or bool(prefix) or bool(limit) or bool(cursor)
    if self.has_asset_key_index_cols() and not is_partial_query:
        # if the schema has been migrated, fetch the last_materialization_timestamp to see if
        # we can lazily migrate the data table
        columns.append(AssetKeyTable.c.last_materialization_timestamp)
        columns.append(AssetKeyTable.c.wipe_timestamp)
    query = db_select(columns).order_by(AssetKeyTable.c.asset_key.asc())
    query = self._apply_asset_filter_to_query(query, asset_keys, prefix, limit, cursor)
    if self.has_secondary_index(ASSET_KEY_INDEX_COLS):
        # migrated schema: wiped assets can be excluded entirely in SQL, so the
        # rows can be returned directly with no back-compat pass below
        query = query.where(
            db.or_(
                AssetKeyTable.c.wipe_timestamp.is_(None),
                AssetKeyTable.c.last_materialization_timestamp > AssetKeyTable.c.wipe_timestamp,
            )
        )
        with self.index_connection() as conn:
            rows = db_fetch_mappings(conn, query)
        return rows, False, None
    with self.index_connection() as conn:
        rows = db_fetch_mappings(conn, query)
    wiped_timestamps_by_asset_key: dict[AssetKey, float] = {}
    row_by_asset_key: dict[AssetKey, SqlAlchemyRow] = OrderedDict()
    for row in rows:
        asset_key = AssetKey.from_db_string(cast("str", row["asset_key"]))
        if not asset_key:
            continue
        asset_details = AssetDetails.from_db_string(row["asset_details"])
        if not asset_details or not asset_details.last_wipe_timestamp:
            # asset has never been wiped; keep the row as-is
            row_by_asset_key[asset_key] = row
            continue
        materialization_or_event_or_record = (
            deserialize_value(cast("str", row["last_materialization"]), NamedTuple)
            if row["last_materialization"]
            else None
        )
        if isinstance(materialization_or_event_or_record, (EventLogRecord, EventLogEntry)):
            if isinstance(materialization_or_event_or_record, EventLogRecord):
                event_timestamp = materialization_or_event_or_record.event_log_entry.timestamp
            else:
                event_timestamp = materialization_or_event_or_record.timestamp
            if asset_details.last_wipe_timestamp > event_timestamp:
                # this asset has not been materialized since being wiped, skip
                continue
            else:
                # add the key
                row_by_asset_key[asset_key] = row
        else:
            # legacy row stores a bare materialization with no usable timestamp,
            # so the wipe comparison must be resolved against the raw event log
            row_by_asset_key[asset_key] = row
            wiped_timestamps_by_asset_key[asset_key] = asset_details.last_wipe_timestamp
    if wiped_timestamps_by_asset_key:
        materialization_times = self._fetch_backcompat_materialization_times(
            wiped_timestamps_by_asset_key.keys()  # type: ignore
        )
        for asset_key, wiped_timestamp in wiped_timestamps_by_asset_key.items():
            materialization_time = materialization_times.get(asset_key)
            if not materialization_time or utc_datetime_from_naive(
                materialization_time
            ) < datetime_from_timestamp(wiped_timestamp):
                # remove rows that have not been materialized since being wiped
                row_by_asset_key.pop(asset_key)
    has_more = limit and len(rows) == limit
    new_cursor = rows[-1]["id"] if rows else None
    return row_by_asset_key.values(), has_more, new_cursor  # type: ignore
def update_asset_cached_status_data(
    self, asset_key: AssetKey, cache_values: "AssetStatusCacheValue"
) -> None:
    """Persist serialized status-cache values for the given asset key."""
    if not self.can_read_asset_status_cache():
        # schema does not support the cache column; nothing to write
        return
    with self.index_connection() as conn:
        conn.execute(
            AssetKeyTable.update()
            .values(cached_status_data=serialize_value(cache_values))
            .where(AssetKeyTable.c.asset_key == asset_key.to_string())
        )
def _fetch_backcompat_materialization_times(
    self, asset_keys: Sequence[AssetKey]
) -> Mapping[AssetKey, datetime]:
    """Fetch the latest materialization timestamp for each of the given asset keys.

    Uses the (slower) raw event log table; only needed for legacy asset rows
    whose last_materialization column carries no usable timestamp.
    """
    backcompat_query = (
        db_select(
            [
                SqlEventLogStorageTable.c.asset_key,
                db.func.max(SqlEventLogStorageTable.c.timestamp).label("timestamp"),
            ]
        )
        .where(
            SqlEventLogStorageTable.c.asset_key.in_(
                [asset_key.to_string() for asset_key in asset_keys]
            )
        )
        .group_by(SqlEventLogStorageTable.c.asset_key)
        .order_by(db.func.max(SqlEventLogStorageTable.c.timestamp).asc())
    )
    with self.index_connection() as conn:
        backcompat_rows = db_fetch_mappings(conn, backcompat_query)
    return {
        AssetKey.from_db_string(row["asset_key"]): row["timestamp"] for row in backcompat_rows
    }  # type: ignore
def _can_mark_assets_as_migrated(self, rows):
    """Return True if every fetched row already has the migrated index columns
    populated, so the ASSET_KEY_INDEX_COLS secondary index can be enabled.
    """
    if not self.has_asset_key_index_cols():
        return False
    if self.has_secondary_index(ASSET_KEY_INDEX_COLS):
        # we have already migrated
        return False
    # every row needs a materialization timestamp, and wiped rows additionally
    # need the wipe timestamp backfilled
    return all(
        _get_from_row(row, "last_materialization_timestamp")
        and (not _get_from_row(row, "asset_details") or _get_from_row(row, "wipe_timestamp"))
        for row in rows
    )
def _apply_asset_filter_to_query(
    self,
    query: SqlAlchemyQuery,
    asset_keys: Optional[Sequence[AssetKey]] = None,
    prefix=None,
    limit: Optional[int] = None,
    cursor: Optional[str] = None,
) -> SqlAlchemyQuery:
    """Narrow an asset-key-table query by key set, key prefix, cursor, and limit."""
    if asset_keys is not None:
        serialized_keys = [key.to_string() for key in asset_keys]
        query = query.where(AssetKeyTable.c.asset_key.in_(serialized_keys))
    if prefix:
        # drop the trailing "]" so the serialized prefix matches longer key lists
        prefix_str = seven.dumps(prefix)[:-1]
        query = query.where(AssetKeyTable.c.asset_key.startswith(prefix_str))
    if cursor:
        query = query.where(AssetKeyTable.c.asset_key > cursor)
    if limit:
        query = query.limit(limit)
    return query
def _get_assets_details(
    self, asset_keys: Sequence[AssetKey]
) -> Sequence[Optional[AssetDetails]]:
    """Fetch the AssetDetails (wipe metadata) for each of the given asset keys.

    Returns a list aligned with ``asset_keys``; entries are None when no
    details are stored for that key.
    """
    check.sequence_param(asset_keys, "asset_key", AssetKey)
    # fix: removed a dead `rows = None` pre-initialization; rows is always
    # assigned inside the connection context before use
    with self.index_connection() as conn:
        rows = db_fetch_mappings(
            conn,
            db_select([AssetKeyTable.c.asset_key, AssetKeyTable.c.asset_details]).where(
                AssetKeyTable.c.asset_key.in_(
                    [asset_key.to_string() for asset_key in asset_keys]
                ),
            ),
        )
        asset_key_to_details = {
            cast("str", row["asset_key"]): (
                deserialize_value(cast("str", row["asset_details"]), AssetDetails)
                if row["asset_details"]
                else None
            )
            for row in rows
        }
        # returns a list of the corresponding asset_details to provided asset_keys
        return [
            asset_key_to_details.get(asset_key.to_string(), None) for asset_key in asset_keys
        ]
def _add_assets_wipe_filter_to_query(
    self,
    query: SqlAlchemyQuery,
    assets_details: Sequence[Optional[AssetDetails]],
    asset_keys: Sequence[AssetKey],
) -> SqlAlchemyQuery:
    """Exclude event rows for wiped assets that predate the wipe timestamp.

    ``assets_details`` must be positionally aligned with ``asset_keys``.
    """
    check.invariant(
        len(assets_details) == len(asset_keys),
        "asset_details and asset_keys must be the same length",
    )
    # idiom: iterate the aligned sequences with zip instead of indexing via
    # range(len(...)) — same pairs, same order
    for asset_key, asset_details in zip(asset_keys, assets_details):
        if asset_details and asset_details.last_wipe_timestamp:
            asset_key_in_row = SqlEventLogStorageTable.c.asset_key == asset_key.to_string()
            # If asset key is in row, keep the row if the timestamp > wipe timestamp, else remove the row.
            # If asset key is not in row, keep the row.
            # (event timestamps are stored naive-UTC, hence the tzinfo strip)
            query = query.where(
                db.or_(
                    db.and_(
                        asset_key_in_row,
                        SqlEventLogStorageTable.c.timestamp
                        > datetime.fromtimestamp(
                            asset_details.last_wipe_timestamp, timezone.utc
                        ).replace(tzinfo=None),
                    ),
                    db.not_(asset_key_in_row),
                )
            )
    return query
def get_event_tags_for_asset(
    self,
    asset_key: AssetKey,
    filter_tags: Optional[Mapping[str, str]] = None,
    filter_event_id: Optional[int] = None,
) -> Sequence[Mapping[str, str]]:
    """Fetches asset event tags for the given asset key.

    If filter_tags is provided, searches for events containing all of the filter tags. Then,
    returns all tags for those events. This enables searching for multipartitioned asset
    partition tags with a fixed dimension value, e.g. all of the tags for events where
    "country" == "US".

    If filter_event_id is provided, fetches only tags applied to the given event.

    Returns a list of dicts, where each dict is a mapping of tag key to tag value for a
    single event.
    """
    asset_key = check.inst_param(asset_key, "asset_key", AssetKey)
    filter_tags = check.opt_mapping_param(
        filter_tags, "filter_tags", key_type=str, value_type=str
    )
    filter_event_id = check.opt_int_param(filter_event_id, "filter_event_id")
    if not self.has_table(AssetEventTagsTable.name):
        raise DagsterInvalidInvocationError(
            "In order to search for asset event tags, you must run "
            "`dagster instance migrate` to create the AssetEventTags table."
        )
    asset_details = self._get_assets_details([asset_key])[0]
    if not filter_tags:
        # no tag filter: select all tag rows for the asset key directly
        tags_query = db_select(
            [
                AssetEventTagsTable.c.key,
                AssetEventTagsTable.c.value,
                AssetEventTagsTable.c.event_id,
            ]
        ).where(AssetEventTagsTable.c.asset_key == asset_key.to_string())
        if asset_details and asset_details.last_wipe_timestamp:
            # exclude tags attached to events that predate the asset's last wipe
            tags_query = tags_query.where(
                AssetEventTagsTable.c.event_timestamp
                > datetime.fromtimestamp(
                    asset_details.last_wipe_timestamp, timezone.utc
                ).replace(tzinfo=None)
            )
    else:
        # self-join the tags table once per filter tag so only events carrying
        # ALL of the filter tags are selected
        table = self._apply_tags_table_joins(AssetEventTagsTable, filter_tags, asset_key)
        tags_query = db_select(
            [
                AssetEventTagsTable.c.key,
                AssetEventTagsTable.c.value,
                AssetEventTagsTable.c.event_id,
            ]
        ).select_from(table)
        if asset_details and asset_details.last_wipe_timestamp:
            # exclude tags attached to events that predate the asset's last wipe
            tags_query = tags_query.where(
                AssetEventTagsTable.c.event_timestamp
                > datetime.fromtimestamp(
                    asset_details.last_wipe_timestamp, timezone.utc
                ).replace(tzinfo=None)
            )
    if filter_event_id is not None:
        tags_query = tags_query.where(AssetEventTagsTable.c.event_id == filter_event_id)
    with self.index_connection() as conn:
        results = conn.execute(tags_query).fetchall()
    # regroup the flat (key, value, event_id) rows into one tag dict per event
    tags_by_event_id: dict[int, dict[str, str]] = defaultdict(dict)
    for row in results:
        key, value, event_id = row
        tags_by_event_id[event_id][key] = value
    return list(tags_by_event_id.values())
def _asset_materialization_from_json_column(
    self, json_str: str
) -> Optional[AssetMaterialization]:
    """Extract an AssetMaterialization from the serialized last_materialization column.

    The column historically stored a bare AssetMaterialization; newer rows store
    the full event record so that metadata like timestamp, pipeline, and run_id
    is available without extra event-log queries. Both formats are handled here
    for back-compat (https://github.com/dagster-io/dagster/issues/3945).
    """
    if not json_str:
        return None
    value = deserialize_value(json_str, NamedTuple)
    if isinstance(value, AssetMaterialization):
        # legacy format: the materialization object was stored directly
        return value
    if (
        isinstance(value, EventLogEntry)
        and value.is_dagster_event
        and value.dagster_event.asset_key  # type: ignore
    ):
        return value.dagster_event.step_materialization_data.materialization  # type: ignore
    return None
def _get_asset_key_values_on_wipe(self) -> Mapping[str, Any]:
    """Build the column values written to the asset key table when wiping an asset."""
    wipe_timestamp = get_current_timestamp()
    values: dict[str, Any] = {
        "asset_details": serialize_value(AssetDetails(last_wipe_timestamp=wipe_timestamp)),
        "last_run_id": None,
    }
    if self.has_asset_key_index_cols():
        # the migrated schema also records the wipe time in a dedicated column
        values["wipe_timestamp"] = datetime_from_timestamp(wipe_timestamp)
    if self.can_read_asset_status_cache():
        # invalidate any cached status so it is recomputed after the wipe
        values["cached_status_data"] = None
    return values
def wipe_asset(self, asset_key: AssetKey) -> None:
    """Mark an asset as wiped and delete its asset-check execution history."""
    check.inst_param(asset_key, "asset_key", AssetKey)
    wiped_values = self._get_asset_key_values_on_wipe()
    serialized_key = asset_key.to_string()
    with self.index_connection() as conn:
        conn.execute(
            AssetKeyTable.update()
            .where(AssetKeyTable.c.asset_key == serialized_key)
            .values(**wiped_values)
        )
        if self.has_table("asset_check_executions"):
            # check executions are hard-deleted rather than soft-wiped
            conn.execute(
                AssetCheckExecutionsTable.delete().where(
                    AssetCheckExecutionsTable.c.asset_key == serialized_key
                )
            )
def wipe_asset_partitions(self, asset_key: AssetKey, partition_keys: Sequence[str]) -> None:
    """Remove asset index history from event log for given asset partitions."""
    # the base SQL implementation does not support partition-scoped wipes;
    # subclasses that do must override this method
    raise NotImplementedError(
        "Partitioned asset wipe is not supported yet for this event log storage."
    )
def get_materialized_partitions(
    self,
    asset_key: AssetKey,
    before_cursor: Optional[int] = None,
    after_cursor: Optional[int] = None,
) -> set[str]:
    """Return the set of partition keys with at least one (post-wipe)
    materialization for the given asset, optionally bounded by storage-id cursors.
    """
    query = (
        db_select(
            [
                SqlEventLogStorageTable.c.partition,
                db.func.max(SqlEventLogStorageTable.c.id),
            ]
        )
        .where(
            db.and_(
                SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),
                SqlEventLogStorageTable.c.partition != None,  # noqa: E711
                SqlEventLogStorageTable.c.dagster_event_type
                == DagsterEventType.ASSET_MATERIALIZATION.value,
            )
        )
        .group_by(SqlEventLogStorageTable.c.partition)
    )
    assets_details = self._get_assets_details([asset_key])
    # exclude materializations that predate an asset wipe
    query = self._add_assets_wipe_filter_to_query(query, assets_details, [asset_key])
    if after_cursor:
        query = query.where(SqlEventLogStorageTable.c.id > after_cursor)
    if before_cursor:
        query = query.where(SqlEventLogStorageTable.c.id < before_cursor)
    with self.index_connection() as conn:
        results = conn.execute(query).fetchall()
    # idiom: set comprehension instead of set([...]) — avoids a throwaway list
    return {cast("str", row[0]) for row in results}
def _latest_event_ids_by_partition_subquery(
    self,
    asset_key: AssetKey,
    event_types: Sequence[DagsterEventType],
    asset_partitions: Optional[Sequence[str]] = None,
    before_cursor: Optional[int] = None,
    after_cursor: Optional[int] = None,
):
    """Subquery for locating the latest event ids by partition for a given asset key and set
    of event types.

    One max-id row is produced per (event_type, partition) pair; rows that
    predate an asset wipe are filtered out.
    """
    query = db_select(
        [
            SqlEventLogStorageTable.c.dagster_event_type,
            SqlEventLogStorageTable.c.partition,
            db.func.max(SqlEventLogStorageTable.c.id).label("id"),
        ]
    ).where(
        db.and_(
            SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),
            SqlEventLogStorageTable.c.partition != None,  # noqa: E711
            SqlEventLogStorageTable.c.dagster_event_type.in_(
                [event_type.value for event_type in event_types]
            ),
        )
    )
    if asset_partitions is not None:
        query = query.where(SqlEventLogStorageTable.c.partition.in_(asset_partitions))
    if before_cursor is not None:
        query = query.where(SqlEventLogStorageTable.c.id < before_cursor)
    if after_cursor is not None:
        query = query.where(SqlEventLogStorageTable.c.id > after_cursor)
    latest_event_ids_subquery = query.group_by(
        SqlEventLogStorageTable.c.dagster_event_type, SqlEventLogStorageTable.c.partition
    )
    # drop event ids that predate an asset wipe so wiped partitions are excluded
    assets_details = self._get_assets_details([asset_key])
    return db_subquery(
        self._add_assets_wipe_filter_to_query(
            latest_event_ids_subquery, assets_details, [asset_key]
        ),
        "latest_event_ids_by_partition_subquery",
    )
def get_latest_storage_id_by_partition(
    self,
    asset_key: AssetKey,
    event_type: DagsterEventType,
    partitions: Optional[set[str]] = None,
) -> Mapping[str, int]:
    """Fetch the latest materialization storage id for each partition for a given asset key.

    Returns a mapping of partition to storage id.
    """
    check.inst_param(asset_key, "asset_key", AssetKey)
    latest_event_ids_by_partition_subquery = self._latest_event_ids_by_partition_subquery(
        asset_key, [event_type], asset_partitions=list(partitions) if partitions else None
    )
    latest_event_ids_by_partition = db_select(
        [
            latest_event_ids_by_partition_subquery.c.partition,
            latest_event_ids_by_partition_subquery.c.id,
        ]
    )
    with self.index_connection() as conn:
        rows = conn.execute(latest_event_ids_by_partition).fetchall()
    # idiom: dict comprehension instead of a manual fill loop; partitions cannot
    # collide because the subquery groups by partition
    return {cast("str", row[0]): cast("int", row[1]) for row in rows}
def get_latest_tags_by_partition(
    self,
    asset_key: AssetKey,
    event_type: DagsterEventType,
    tag_keys: Sequence[str],
    asset_partitions: Optional[Sequence[str]] = None,
    before_cursor: Optional[int] = None,
    after_cursor: Optional[int] = None,
) -> Mapping[str, Mapping[str, str]]:
    """Fetch the indexed tag values on the latest event of the given type per partition.

    Returns a mapping of partition key to {tag key: tag value}. Only whitelisted
    (indexed) tag keys may be queried.
    """
    check.inst_param(asset_key, "asset_key", AssetKey)
    check.inst_param(event_type, "event_type", DagsterEventType)
    check.sequence_param(tag_keys, "tag_keys", of_type=str)
    check.opt_nullable_sequence_param(asset_partitions, "asset_partitions", of_type=str)
    check.opt_int_param(before_cursor, "before_cursor")
    check.opt_int_param(after_cursor, "after_cursor")
    # reject any tag key that is not in the indexed whitelist
    if not tag_keys or len(tag_keys) != len(self.get_asset_tags_to_index(set(tag_keys))):
        check.failed(
            "Only a limited set of tag keys are whitelisted for querying the latest tag values by partition."
        )
    latest_event_ids_subquery = self._latest_event_ids_by_partition_subquery(
        asset_key=asset_key,
        event_types=[event_type],
        asset_partitions=asset_partitions,
        before_cursor=before_cursor,
        after_cursor=after_cursor,
    )
    # join the tags table against the latest event id of each partition
    latest_tags_by_partition_query = (
        db_select(
            [
                latest_event_ids_subquery.c.partition,
                AssetEventTagsTable.c.key,
                AssetEventTagsTable.c.value,
            ]
        )
        .select_from(
            latest_event_ids_subquery.join(
                AssetEventTagsTable,
                AssetEventTagsTable.c.event_id == latest_event_ids_subquery.c.id,
            )
        )
        .where(AssetEventTagsTable.c.key.in_(tag_keys))
    )
    latest_tags_by_partition: dict[str, dict[str, str]] = defaultdict(dict)
    with self.index_connection() as conn:
        rows = conn.execute(latest_tags_by_partition_query).fetchall()
    for row in rows:
        latest_tags_by_partition[cast("str", row[0])][cast("str", row[1])] = cast("str", row[2])
    # convert defaultdict to dict
    return dict(latest_tags_by_partition)
def get_latest_asset_partition_materialization_attempts_without_materializations(
    self, asset_key: AssetKey, after_storage_id: Optional[int] = None
) -> Mapping[str, tuple[str, int]]:
    """Fetch the latest materialization and materialization planned events for each partition of the given asset.

    Return the partitions that have a materialization planned event but no matching (same run)
    materialization event. These materializations could be in progress, or they could have
    failed. A separate query checking the run status is required to know.

    Returns a mapping of partition to [run id, event id].
    """
    check.inst_param(asset_key, "asset_key", AssetKey)
    # one max-id row per (event type, partition) for the two event types
    latest_event_ids_subquery = self._latest_event_ids_by_partition_subquery(
        asset_key,
        [
            DagsterEventType.ASSET_MATERIALIZATION,
            DagsterEventType.ASSET_MATERIALIZATION_PLANNED,
        ],
        after_cursor=after_storage_id,
    )
    # rejoin to the event table to recover run_id for those latest events
    latest_events_subquery = db_subquery(
        db_select(
            [
                SqlEventLogStorageTable.c.dagster_event_type,
                SqlEventLogStorageTable.c.partition,
                SqlEventLogStorageTable.c.run_id,
                SqlEventLogStorageTable.c.id,
            ]
        ).select_from(
            latest_event_ids_subquery.join(
                SqlEventLogStorageTable,
                SqlEventLogStorageTable.c.id == latest_event_ids_subquery.c.id,
            ),
        ),
        "latest_events_subquery",
    )
    materialization_planned_events = db_select(
        [
            latest_events_subquery.c.dagster_event_type,
            latest_events_subquery.c.partition,
            latest_events_subquery.c.run_id,
            latest_events_subquery.c.id,
        ]
    ).where(
        latest_events_subquery.c.dagster_event_type
        == DagsterEventType.ASSET_MATERIALIZATION_PLANNED.value
    )
    materialization_events = db_select(
        [
            latest_events_subquery.c.dagster_event_type,
            latest_events_subquery.c.partition,
            latest_events_subquery.c.run_id,
            latest_events_subquery.c.id,
        ]
    ).where(
        latest_events_subquery.c.dagster_event_type
        == DagsterEventType.ASSET_MATERIALIZATION.value
    )
    with self.index_connection() as conn:
        materialization_planned_rows = db_fetch_mappings(conn, materialization_planned_events)
        materialization_rows = db_fetch_mappings(conn, materialization_events)
    materialization_planned_rows_by_partition = {
        row["partition"]: (row["run_id"], row["id"]) for row in materialization_planned_rows
    }
    # discard planned events that were followed by an actual materialization
    for mat_row in materialization_rows:
        mat_partition = mat_row["partition"]
        mat_event_id = mat_row["id"]
        if mat_partition not in materialization_planned_rows_by_partition:
            continue
        _, planned_event_id = materialization_planned_rows_by_partition[mat_partition]
        if planned_event_id < mat_event_id:
            # this planned materialization event was followed by a materialization event
            materialization_planned_rows_by_partition.pop(mat_partition)
    return materialization_planned_rows_by_partition
def _check_partitions_table(self) -> None:
    """Raise if the dynamic partitions table has not been created yet."""
    # Guards against cases where the user is not running the latest migration for
    # partitions storage. Should be updated when the partitions storage schema changes.
    if not self.has_table("dynamic_partitions"):
        raise DagsterInvalidInvocationError(
            "Using dynamic partitions definitions requires the dynamic partitions table, which"
            " currently does not exist. Add this table by running `dagster"
            " instance migrate`."
        )
def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:
    """Get the list of partition keys for a partition definition."""
    self._check_partitions_table()
    query = (
        db_select(
            [
                DynamicPartitionsTable.c.partitions_def_name,
                DynamicPartitionsTable.c.partition,
            ]
        )
        .where(DynamicPartitionsTable.c.partitions_def_name == partitions_def_name)
        .order_by(DynamicPartitionsTable.c.id)
    )
    with self.index_connection() as conn:
        # rows are (partitions_def_name, partition), ordered by insertion id
        return [cast("str", row[1]) for row in conn.execute(query).fetchall()]
def get_paginated_dynamic_partitions(
    self, partitions_def_name: str, limit: int, ascending: bool, cursor: Optional[str] = None
) -> PaginatedResults[str]:
    """Fetch one page of partition keys for a dynamic partitions definition.

    Pagination is cursor-based on the row's storage id, in ascending or
    descending insertion order.
    """
    self._check_partitions_table()
    order_by = (
        DynamicPartitionsTable.c.id.asc() if ascending else DynamicPartitionsTable.c.id.desc()
    )
    query = (
        db_select(
            [
                DynamicPartitionsTable.c.id,
                DynamicPartitionsTable.c.partition,
            ]
        )
        .where(DynamicPartitionsTable.c.partitions_def_name == partitions_def_name)
        .order_by(order_by)
        .limit(limit)
    )
    if cursor:
        # resume from the storage id encoded in the caller's cursor
        last_storage_id = StorageIdCursor.from_cursor(cursor).storage_id
        if ascending:
            query = query.where(DynamicPartitionsTable.c.id > last_storage_id)
        else:
            query = query.where(DynamicPartitionsTable.c.id < last_storage_id)
    with self.index_connection() as conn:
        rows = conn.execute(query).fetchall()
    if rows:
        next_cursor = StorageIdCursor(storage_id=cast("int", rows[-1][0])).to_string()
    elif cursor:
        # empty page: keep the caller's cursor stable
        next_cursor = cursor
    else:
        # no rows and no incoming cursor: emit a sentinel cursor
        next_cursor = StorageIdCursor(storage_id=-1).to_string()
    return PaginatedResults(
        results=[cast("str", row[1]) for row in rows],
        cursor=next_cursor,
        has_more=len(rows) == limit,
    )
def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:
    """Return True if the given partition key exists for the dynamic partitions definition."""
    self._check_partitions_table()
    query = (
        db_select([DynamicPartitionsTable.c.partition])
        .where(
            db.and_(
                DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,
                DynamicPartitionsTable.c.partition == partition_key,
            )
        )
        .limit(1)
    )
    with self.index_connection() as conn:
        # limit(1) means any returned row implies existence
        return conn.execute(query).fetchone() is not None
def add_dynamic_partitions(
    self, partitions_def_name: str, partition_keys: Sequence[str]
) -> None:
    """Insert the given partition keys for a dynamic partitions definition,
    skipping any keys that already exist.
    """
    self._check_partitions_table()
    with self.index_connection() as conn:
        existing_rows = conn.execute(
            db_select([DynamicPartitionsTable.c.partition]).where(
                db.and_(
                    DynamicPartitionsTable.c.partition.in_(partition_keys),
                    DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,
                )
            )
        ).fetchall()
        # idiom: set comprehension instead of set([...]) — no throwaway list
        existing_keys = {row[0] for row in existing_rows}
        new_keys = [
            partition_key
            for partition_key in partition_keys
            if partition_key not in existing_keys
        ]
        if new_keys:
            # bulk-insert all new keys in a single statement
            conn.execute(
                DynamicPartitionsTable.insert(),
                [
                    dict(partitions_def_name=partitions_def_name, partition=partition_key)
                    for partition_key in new_keys
                ],
            )
def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:
    """Delete a single partition key from a dynamic partitions definition."""
    self._check_partitions_table()
    delete_stmt = DynamicPartitionsTable.delete().where(
        db.and_(
            DynamicPartitionsTable.c.partitions_def_name == partitions_def_name,
            DynamicPartitionsTable.c.partition == partition_key,
        )
    )
    with self.index_connection() as conn:
        # no-op if the partition key does not exist
        conn.execute(delete_stmt)
@cached_property
def supports_global_concurrency_limits(self) -> bool:  # pyright: ignore[reportIncompatibleMethodOverride]
    # concurrency limiting requires the slots table, which only exists after the
    # relevant migration; cached since table existence won't change at runtime
    return self.has_table(ConcurrencySlotsTable.name)
@cached_property
def has_default_pool_limit_col(self) -> bool:
    """Whether the concurrency limits table has the using_default_limit column.

    The column was added after the table itself, so it is detected at runtime
    to avoid forcing a migration.
    """
    if not self.has_table(ConcurrencyLimitsTable.name):
        return False
    with self.index_connection() as conn:
        column_names = {
            col.get("name")
            for col in db.inspect(conn).get_columns(ConcurrencyLimitsTable.name)
        }
    return ConcurrencyLimitsTable.c.using_default_limit.name in column_names
@cached_property
def has_concurrency_limits_table(self) -> bool:
    # This table was added later, and to avoid forcing a migration
    # we handle in the code whether it has been added or not.
    return self.has_table(ConcurrencyLimitsTable.name)
def _reconcile_concurrency_limits_from_slots(self) -> None:
    """Helper function that reconciles the concurrency limits table from the concurrency
    slots table. This should only run when the concurrency limits table exists and is empty,
    since all of the slot configuration operations should keep them in sync. We reconcile from
    the slots table because the initial implementation did not have the limits table.
    """
    if not self.has_concurrency_limits_table:
        return
    if not self._has_rows(ConcurrencySlotsTable) or self._has_rows(ConcurrencyLimitsTable):
        # nothing to reconcile: either no slots exist, or limits already populated
        return
    with self.index_transaction() as conn:
        # the count of active (non-deleted) slots per key is that key's limit
        rows = conn.execute(
            db_select(
                [
                    ConcurrencySlotsTable.c.concurrency_key,
                    db.func.count().label("count"),
                ]
            )
            .where(
                ConcurrencySlotsTable.c.deleted == False,  # noqa: E712
            )
            .group_by(
                ConcurrencySlotsTable.c.concurrency_key,
            )
        ).fetchall()
        conn.execute(
            ConcurrencyLimitsTable.insert().values(
                [
                    {
                        "concurrency_key": row[0],
                        "limit": row[1],
                    }
                    for row in rows
                ]
            )
        )
def _has_rows(self, table) -> bool:
    """Return True if the given table contains at least one row."""
    with self.index_connection() as conn:
        first = conn.execute(db_select([True]).select_from(table).limit(1)).fetchone()
    return first is not None and bool(first[0])
def initialize_concurrency_limit_to_default(self, concurrency_key: str) -> bool:
    """Apply the instance-wide default op-concurrency limit to the given pool.

    Returns True when the default was applied (or, with the using_default_limit
    column present, when a missing default caused the pool to be cleared);
    returns False when the storage lacks the limits table, there is no bound
    instance, or (legacy schema only) no default is configured.
    """
    if not self.has_concurrency_limits_table:
        return False
    self._reconcile_concurrency_limits_from_slots()
    if not self.has_instance:
        return False
    default_limit = self._instance.global_op_concurrency_default_limit
    # initialize outside of connection context
    has_default_pool_limit_col = self.has_default_pool_limit_col
    if has_default_pool_limit_col:
        with self.index_transaction() as conn:
            if default_limit is None:
                # no default configured: drop the pool's limit row and its slots
                conn.execute(
                    ConcurrencyLimitsTable.delete().where(
                        ConcurrencyLimitsTable.c.concurrency_key == concurrency_key,
                    )
                )
                self._allocate_concurrency_slots(conn, concurrency_key, 0)
            else:
                try:
                    conn.execute(
                        ConcurrencyLimitsTable.insert().values(
                            concurrency_key=concurrency_key,
                            limit=default_limit,
                            using_default_limit=True,
                        )
                    )
                except db_exc.IntegrityError:
                    # the row already exists; bump it to the default if it differs
                    conn.execute(
                        ConcurrencyLimitsTable.update()
                        .values(limit=default_limit)
                        .where(
                            db.and_(
                                ConcurrencyLimitsTable.c.concurrency_key == concurrency_key,
                                ConcurrencyLimitsTable.c.limit != default_limit,
                            )
                        )
                    )
                self._allocate_concurrency_slots(conn, concurrency_key, default_limit)
            return True
    if default_limit is None:
        return False
    with self.index_transaction() as conn:
        try:
            conn.execute(
                ConcurrencyLimitsTable.insert().values(
                    concurrency_key=concurrency_key, limit=default_limit
                )
            )
        except db_exc.IntegrityError:
            # the row already exists; leave its configured limit untouched
            pass
        self._allocate_concurrency_slots(conn, concurrency_key, default_limit)
    return True
def _upsert_and_lock_limit_row(
    self, conn, concurrency_key: str, num: int, has_default_pool_limit_col: bool
):
    """Helper function that can be overridden by each implementing sql variant which obtains a
    lock on the concurrency limits row for the given key and updates it to the given value.
    """
    if not self.has_concurrency_limits_table:
        # no need to grab the lock on the concurrency limits row if the table does not exist
        return None
    locked_row = conn.execute(
        db_select([ConcurrencyLimitsTable.c.id])
        .select_from(ConcurrencyLimitsTable)
        .where(ConcurrencyLimitsTable.c.concurrency_key == concurrency_key)
        .with_for_update()
        .limit(1)
    ).fetchone()
    # an explicitly-set limit clears the using_default_limit flag (when the
    # column exists in this schema)
    extra_values = {"using_default_limit": False} if has_default_pool_limit_col else {}
    if not locked_row:
        conn.execute(
            ConcurrencyLimitsTable.insert().values(
                concurrency_key=concurrency_key, limit=num, **extra_values
            )
        )
    else:
        conn.execute(
            ConcurrencyLimitsTable.update()
            .where(ConcurrencyLimitsTable.c.concurrency_key == concurrency_key)
            .values(limit=num, **extra_values)
        )
def set_concurrency_slots(self, concurrency_key: str, num: int) -> None:
"""Allocate a set of concurrency slots.
Args:
concurrency_key (str): The key to allocate the slots for.
num (int): The number of slots to allocate.
"""
max_limit = get_max_concurrency_limit_value()
if num > max_limit:
raise DagsterInvalidInvocationError(
f"Cannot have more than {max_limit} slots per concurrency key."
)
if num < 0:
raise DagsterInvalidInvocationError("Cannot have a negative number of slots.")
# ensure that we have concurrency limits set for all keys
self._reconcile_concurrency_limits_from_slots()
# initialize outside of connection context
has_default_pool_limit_col = self.has_default_pool_limit_col
with self.index_transaction() as conn:
self._upsert_and_lock_limit_row(conn, concurrency_key, num, has_default_pool_limit_col)
keys_to_assign = self._allocate_concurrency_slots(conn, concurrency_key, num)
if keys_to_assign:
# we've added some slots... if there are any pending steps, we can assign them now or
# they will be unutilized until free_concurrency_slots is called
self.assign_pending_steps(keys_to_assign)
def delete_concurrency_limit(self, concurrency_key: str) -> None:
"""Delete a concurrency limit and its associated slots.
Args:
concurrency_key (str): The key to delete.
"""
# ensure that we have concurrency limits set for all keys
self._reconcile_concurrency_limits_from_slots()
with self.index_transaction() as conn:
if self.has_concurrency_limits_table:
conn.execute(
ConcurrencyLimitsTable.delete().where(
ConcurrencyLimitsTable.c.concurrency_key == concurrency_key
)
)
self._allocate_concurrency_slots(conn, concurrency_key, 0)
def _allocate_concurrency_slots(self, conn, concurrency_key: str, num: int) -> list[str]:
keys_to_assign = []
count_row = conn.execute(
db_select([db.func.count()])
.select_from(ConcurrencySlotsTable)
.where(
db.and_(
ConcurrencySlotsTable.c.concurrency_key == concurrency_key,
ConcurrencySlotsTable.c.deleted == False, # noqa: E712
)
)
).fetchone()
existing = cast("int", count_row[0]) if count_row else 0
if existing > num:
# need to delete some slots, favoring ones where the slot is unallocated
rows = conn.execute(
db_select([ConcurrencySlotsTable.c.id])
.select_from(ConcurrencySlotsTable)
.where(
db.and_(
ConcurrencySlotsTable.c.concurrency_key == concurrency_key,
ConcurrencySlotsTable.c.deleted == False, # noqa: E712
)
)
.order_by(
db_case([(ConcurrencySlotsTable.c.run_id.is_(None), 1)], else_=0).desc(),
ConcurrencySlotsTable.c.id.desc(),
)
.limit(existing - num)
).fetchall()
if rows:
# mark rows as deleted
conn.execute(
ConcurrencySlotsTable.update()
.values(deleted=True)
.where(ConcurrencySlotsTable.c.id.in_([row[0] for row in rows]))
)
# actually delete rows that are marked as deleted and are not claimed... the rest
# will be deleted when the slots are released by the free_concurrency_slots
conn.execute(
ConcurrencySlotsTable.delete().where(
db.and_(
ConcurrencySlotsTable.c.deleted == True, # noqa: E712
ConcurrencySlotsTable.c.run_id == None, # noqa: E711
)
)
)
elif num > existing:
# need to add some slots
rows = [
{
"concurrency_key": concurrency_key,
"run_id": None,
"step_key": None,
"deleted": False,
}
for _ in range(existing, num)
]
conn.execute(ConcurrencySlotsTable.insert().values(rows))
keys_to_assign.extend([concurrency_key for _ in range(existing, num)])
return keys_to_assign
def has_unassigned_slots(self, concurrency_key: str) -> bool:
with self.index_connection() as conn:
pending_row = conn.execute(
db_select([db.func.count()])
.select_from(PendingStepsTable)
.where(
db.and_(
PendingStepsTable.c.concurrency_key == concurrency_key,
PendingStepsTable.c.assigned_timestamp != None, # noqa: E711
)
)
).fetchone()
slots = conn.execute(
db_select([db.func.count()])
.select_from(ConcurrencySlotsTable)
.where(
db.and_(
ConcurrencySlotsTable.c.concurrency_key == concurrency_key,
ConcurrencySlotsTable.c.deleted == False, # noqa: E712
)
)
).fetchone()
pending_count = cast("int", pending_row[0]) if pending_row else 0
slots_count = cast("int", slots[0]) if slots else 0
return slots_count > pending_count
def check_concurrency_claim(
self, concurrency_key: str, run_id: str, step_key: str
) -> ConcurrencyClaimStatus:
with self.index_connection() as conn:
pending_row = conn.execute(
db_select(
[
PendingStepsTable.c.assigned_timestamp,
PendingStepsTable.c.priority,
PendingStepsTable.c.create_timestamp,
]
).where(
db.and_(
PendingStepsTable.c.run_id == run_id,
PendingStepsTable.c.step_key == step_key,
PendingStepsTable.c.concurrency_key == concurrency_key,
)
)
).fetchone()
if not pending_row:
# no pending step pending_row exists, the slot is blocked and the enqueued timestamp is None
return ConcurrencyClaimStatus(
concurrency_key=concurrency_key,
slot_status=ConcurrencySlotStatus.BLOCKED,
priority=None,
assigned_timestamp=None,
enqueued_timestamp=None,
)
priority = cast("int", pending_row[1]) if pending_row[1] else None
assigned_timestamp = cast("datetime", pending_row[0]) if pending_row[0] else None
create_timestamp = cast("datetime", pending_row[2]) if pending_row[2] else None
if assigned_timestamp is None:
return ConcurrencyClaimStatus(
concurrency_key=concurrency_key,
slot_status=ConcurrencySlotStatus.BLOCKED,
priority=priority,
assigned_timestamp=None,
enqueued_timestamp=create_timestamp,
)
# pending step is assigned, check to see if it's been claimed
slot_row = conn.execute(
db_select([db.func.count()]).where(
db.and_(
ConcurrencySlotsTable.c.concurrency_key == concurrency_key,
ConcurrencySlotsTable.c.run_id == run_id,
ConcurrencySlotsTable.c.step_key == step_key,
)
)
).fetchone()
return ConcurrencyClaimStatus(
concurrency_key=concurrency_key,
slot_status=(
ConcurrencySlotStatus.CLAIMED
if slot_row and slot_row[0]
else ConcurrencySlotStatus.BLOCKED
),
priority=priority,
assigned_timestamp=assigned_timestamp,
enqueued_timestamp=create_timestamp,
)
def can_claim_from_pending(self, concurrency_key: str, run_id: str, step_key: str):
with self.index_connection() as conn:
row = conn.execute(
db_select([PendingStepsTable.c.assigned_timestamp]).where(
db.and_(
PendingStepsTable.c.run_id == run_id,
PendingStepsTable.c.step_key == step_key,
PendingStepsTable.c.concurrency_key == concurrency_key,
)
)
).fetchone()
return row and row[0] is not None
def has_pending_step(self, concurrency_key: str, run_id: str, step_key: str):
with self.index_connection() as conn:
row = conn.execute(
db_select([db.func.count()])
.select_from(PendingStepsTable)
.where(
db.and_(
PendingStepsTable.c.concurrency_key == concurrency_key,
PendingStepsTable.c.run_id == run_id,
PendingStepsTable.c.step_key == step_key,
)
)
).fetchone()
return row and cast("int", row[0]) > 0
def assign_pending_steps(self, concurrency_keys: Sequence[str]):
if not concurrency_keys:
return
with self.index_connection() as conn:
for key in concurrency_keys:
row = conn.execute(
db_select([PendingStepsTable.c.id])
.where(
db.and_(
PendingStepsTable.c.concurrency_key == key,
PendingStepsTable.c.assigned_timestamp == None, # noqa: E711
)
)
.order_by(
PendingStepsTable.c.priority.desc(),
PendingStepsTable.c.create_timestamp.asc(),
)
.limit(1)
).fetchone()
if row:
conn.execute(
PendingStepsTable.update()
.where(PendingStepsTable.c.id == row[0])
.values(assigned_timestamp=db.func.now())
)
def add_pending_step(
self,
concurrency_key: str,
run_id: str,
step_key: str,
priority: Optional[int] = None,
should_assign: bool = False,
):
with self.index_connection() as conn:
try:
conn.execute(
PendingStepsTable.insert().values(
[
dict(
run_id=run_id,
step_key=step_key,
concurrency_key=concurrency_key,
priority=priority or 0,
assigned_timestamp=db.func.now() if should_assign else None,
)
]
)
)
except db_exc.IntegrityError:
# do nothing
pass
def _remove_pending_steps(self, run_id: str, step_key: Optional[str] = None) -> Sequence[str]:
# fetch the assigned steps to delete, while grabbing the concurrency keys so that we can
# assign the next set of queued steps, if necessary
select_query = (
db_select(
[
PendingStepsTable.c.id,
PendingStepsTable.c.assigned_timestamp,
PendingStepsTable.c.concurrency_key,
]
)
.select_from(PendingStepsTable)
.where(PendingStepsTable.c.run_id == run_id)
.with_for_update()
)
if step_key:
select_query = select_query.where(PendingStepsTable.c.step_key == step_key)
with self.index_connection() as conn:
rows = conn.execute(select_query).fetchall()
if not rows:
return []
# now, actually delete the pending steps
conn.execute(
PendingStepsTable.delete().where(
PendingStepsTable.c.id.in_([row[0] for row in rows])
)
)
# return the concurrency keys for the freed slots which were assigned
to_assign = [cast("str", row[2]) for row in rows if row[1] is not None]
return to_assign
def claim_concurrency_slot(
self, concurrency_key: str, run_id: str, step_key: str, priority: Optional[int] = None
) -> ConcurrencyClaimStatus:
"""Claim concurrency slot for step.
Args:
concurrency_keys (str): The concurrency key to claim.
run_id (str): The run id to claim for.
step_key (str): The step key to claim for.
"""
# first, register the step by adding to pending queue
if not self.has_pending_step(
concurrency_key=concurrency_key, run_id=run_id, step_key=step_key
):
has_unassigned_slots = self.has_unassigned_slots(concurrency_key)
self.add_pending_step(
concurrency_key=concurrency_key,
run_id=run_id,
step_key=step_key,
priority=priority,
should_assign=has_unassigned_slots,
)
# if the step is not assigned (i.e. has not been popped from queue), block the claim
claim_status = self.check_concurrency_claim(
concurrency_key=concurrency_key, run_id=run_id, step_key=step_key
)
if claim_status.is_claimed or not claim_status.is_assigned:
return claim_status
# attempt to claim a concurrency slot... this should generally work because we only assign
# based on the number of unclaimed slots, but this should act as a safeguard, using the slot
# rows as a semaphore
slot_status = self._claim_concurrency_slot(
concurrency_key=concurrency_key, run_id=run_id, step_key=step_key
)
return claim_status.with_slot_status(slot_status)
def _claim_concurrency_slot(
self, concurrency_key: str, run_id: str, step_key: str
) -> ConcurrencySlotStatus:
"""Claim a concurrency slot for the step. Helper method that is called for steps that are
popped off the priority queue.
Args:
concurrency_key (str): The concurrency key to claim.
run_id (str): The run id to claim a slot for.
step_key (str): The step key to claim a slot for.
"""
with self.index_connection() as conn:
result = conn.execute(
db_select([ConcurrencySlotsTable.c.id])
.select_from(ConcurrencySlotsTable)
.where(
db.and_(
ConcurrencySlotsTable.c.concurrency_key == concurrency_key,
ConcurrencySlotsTable.c.step_key == None, # noqa: E711
ConcurrencySlotsTable.c.deleted == False, # noqa: E712
)
)
.with_for_update(skip_locked=True)
.limit(1)
).fetchone()
if not result or not result[0]:
return ConcurrencySlotStatus.BLOCKED
if not conn.execute(
ConcurrencySlotsTable.update()
.values(run_id=run_id, step_key=step_key)
.where(ConcurrencySlotsTable.c.id == result[0])
).rowcount:
return ConcurrencySlotStatus.BLOCKED
return ConcurrencySlotStatus.CLAIMED
def get_pool_limits(self) -> Sequence[PoolLimit]:
self._reconcile_concurrency_limits_from_slots()
# initialize outside of connection context
has_default_col = self.has_default_pool_limit_col
with self.index_connection() as conn:
if self.has_concurrency_limits_table and has_default_col:
query = db_select(
[
ConcurrencyLimitsTable.c.concurrency_key,
ConcurrencyLimitsTable.c.limit,
ConcurrencyLimitsTable.c.using_default_limit,
]
).select_from(ConcurrencyLimitsTable)
rows = conn.execute(query).fetchall()
return [
PoolLimit(
name=cast("str", row[0]),
limit=cast("int", row[1]),
from_default=cast("bool", row[2]),
)
for row in rows
]
if self.has_concurrency_limits_table:
query = db_select(
[
ConcurrencyLimitsTable.c.concurrency_key,
ConcurrencyLimitsTable.c.limit,
]
).select_from(ConcurrencyLimitsTable)
else:
query = (
db_select(
[
ConcurrencySlotsTable.c.concurrency_key,
db.func.count().label("count"),
]
)
.where(
ConcurrencySlotsTable.c.deleted == False, # noqa: E712
)
.group_by(
ConcurrencySlotsTable.c.concurrency_key,
)
)
rows = conn.execute(query).fetchall()
return [
PoolLimit(
name=cast("str", row[0]),
limit=cast("int", row[1]),
from_default=False,
)
for row in rows
]
def get_concurrency_keys(self) -> set[str]:
self._reconcile_concurrency_limits_from_slots()
"""Get the set of concurrency limited keys."""
with self.index_connection() as conn:
if self.has_concurrency_limits_table:
query = db_select([ConcurrencyLimitsTable.c.concurrency_key]).select_from(
ConcurrencyLimitsTable
)
else:
query = (
db_select([ConcurrencySlotsTable.c.concurrency_key])
.select_from(ConcurrencySlotsTable)
.where(ConcurrencySlotsTable.c.deleted == False) # noqa: E712
.distinct()
)
rows = conn.execute(query).fetchall()
return {cast("str", row[0]) for row in rows}
def get_concurrency_info(self, concurrency_key: str) -> ConcurrencyKeyInfo:
"""Get the list of concurrency slots for a given concurrency key.
Args:
concurrency_key (str): The concurrency key to get the slots for.
Returns:
List[Tuple[str, int]]: A list of tuples of run_id and the number of slots it is
occupying for the given concurrency key.
"""
with self.index_connection() as conn:
slot_query = (
db_select(
[
ConcurrencySlotsTable.c.run_id,
ConcurrencySlotsTable.c.step_key,
ConcurrencySlotsTable.c.deleted,
]
)
.select_from(ConcurrencySlotsTable)
.where(ConcurrencySlotsTable.c.concurrency_key == concurrency_key)
)
slot_rows = db_fetch_mappings(conn, slot_query)
slot_count = len([slot_row for slot_row in slot_rows if not slot_row["deleted"]])
limit = slot_count
using_default = False
if self.has_concurrency_limits_table:
limit_row = conn.execute(
db_select(
[
ConcurrencyLimitsTable.c.limit,
ConcurrencyLimitsTable.c.using_default_limit,
]
).where(ConcurrencyLimitsTable.c.concurrency_key == concurrency_key)
).fetchone()
if limit_row:
limit = cast("int", limit_row[0])
using_default = cast("bool", limit_row[1])
elif not slot_count:
limit = self._instance.global_op_concurrency_default_limit
using_default = True
pending_query = (
db_select(
[
PendingStepsTable.c.run_id,
PendingStepsTable.c.step_key,
PendingStepsTable.c.assigned_timestamp,
PendingStepsTable.c.create_timestamp,
PendingStepsTable.c.priority,
]
)
.select_from(PendingStepsTable)
.where(PendingStepsTable.c.concurrency_key == concurrency_key)
)
pending_rows = db_fetch_mappings(conn, pending_query)
return ConcurrencyKeyInfo(
concurrency_key=concurrency_key,
slot_count=slot_count,
claimed_slots=[
ClaimedSlotInfo(run_id=slot_row["run_id"], step_key=slot_row["step_key"])
for slot_row in slot_rows
if slot_row["run_id"]
],
pending_steps=[
PendingStepInfo(
run_id=row["run_id"],
step_key=row["step_key"],
enqueued_timestamp=utc_datetime_from_naive(row["create_timestamp"]),
assigned_timestamp=utc_datetime_from_naive(row["assigned_timestamp"])
if row["assigned_timestamp"]
else None,
priority=row["priority"],
)
for row in pending_rows
],
limit=limit,
using_default_limit=using_default,
)
def get_concurrency_run_ids(self) -> set[str]:
with self.index_connection() as conn:
rows = conn.execute(db_select([PendingStepsTable.c.run_id]).distinct()).fetchall()
return set([cast("str", row[0]) for row in rows])
def free_concurrency_slots_for_run(self, run_id: str) -> None:
self._free_concurrency_slots(run_id=run_id)
removed_assigned_concurrency_keys = self._remove_pending_steps(run_id=run_id)
if removed_assigned_concurrency_keys:
# assign any pending steps that can now claim a slot
self.assign_pending_steps(removed_assigned_concurrency_keys)
def free_concurrency_slot_for_step(self, run_id: str, step_key: str) -> None:
self._free_concurrency_slots(run_id=run_id, step_key=step_key)
removed_assigned_concurrency_keys = self._remove_pending_steps(
run_id=run_id, step_key=step_key
)
if removed_assigned_concurrency_keys:
# assign any pending steps that can now claim a slot
self.assign_pending_steps(removed_assigned_concurrency_keys)
def _free_concurrency_slots(self, run_id: str, step_key: Optional[str] = None) -> Sequence[str]:
"""Frees concurrency slots for a given run/step.
Args:
run_id (str): The run id to free the slots for.
step_key (Optional[str]): The step key to free the slots for. If not provided, all the
slots for all the steps of the run will be freed.
"""
with self.index_connection() as conn:
# first delete any rows that apply and are marked as deleted. This happens when the
# configured number of slots has been reduced, and some of the pruned slots included
# ones that were already allocated to the run/step
delete_query = ConcurrencySlotsTable.delete().where(
db.and_(
ConcurrencySlotsTable.c.run_id == run_id,
ConcurrencySlotsTable.c.deleted == True, # noqa: E712
)
)
if step_key:
delete_query = delete_query.where(ConcurrencySlotsTable.c.step_key == step_key)
conn.execute(delete_query)
# next, fetch the slots to free up, while grabbing the concurrency keys so that we can
# allocate any pending steps from the queue for the freed slots, if necessary
select_query = (
db_select([ConcurrencySlotsTable.c.id, ConcurrencySlotsTable.c.concurrency_key])
.select_from(ConcurrencySlotsTable)
.where(ConcurrencySlotsTable.c.run_id == run_id)
.with_for_update()
)
if step_key:
select_query = select_query.where(ConcurrencySlotsTable.c.step_key == step_key)
rows = conn.execute(select_query).fetchall()
if not rows:
return []
# now, actually free the slots
conn.execute(
ConcurrencySlotsTable.update()
.values(run_id=None, step_key=None)
.where(
db.and_(
ConcurrencySlotsTable.c.id.in_([row[0] for row in rows]),
)
)
)
# return the concurrency keys for the freed slots
return [cast("str", row[1]) for row in rows]
def store_asset_check_event(self, event: EventLogEntry, event_id: Optional[int]) -> None:
check.inst_param(event, "event", EventLogEntry)
check.opt_int_param(event_id, "event_id")
check.invariant(
self.supports_asset_checks,
"Asset checks require a database schema migration. Run `dagster instance migrate`.",
)
if event.dagster_event_type == DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED:
self._store_asset_check_evaluation_planned(event, event_id)
if event.dagster_event_type == DagsterEventType.ASSET_CHECK_EVALUATION:
if event.run_id == "" or event.run_id is None:
self._store_runless_asset_check_evaluation(event, event_id)
else:
self._update_asset_check_evaluation(event, event_id)
def _store_asset_check_evaluation_planned(
self, event: EventLogEntry, event_id: Optional[int]
) -> None:
planned = cast(
"AssetCheckEvaluationPlanned", check.not_none(event.dagster_event).event_specific_data
)
with self.index_connection() as conn:
conn.execute(
AssetCheckExecutionsTable.insert().values(
asset_key=planned.asset_key.to_string(),
check_name=planned.check_name,
run_id=event.run_id,
execution_status=AssetCheckExecutionRecordStatus.PLANNED.value,
evaluation_event=serialize_value(event),
evaluation_event_timestamp=self._event_insert_timestamp(event),
)
)
def _event_insert_timestamp(self, event):
# Postgres requires a datetime that is in UTC but has no timezone info
return datetime.fromtimestamp(event.timestamp, timezone.utc).replace(tzinfo=None)
def _store_runless_asset_check_evaluation(
self, event: EventLogEntry, event_id: Optional[int]
) -> None:
evaluation = cast(
"AssetCheckEvaluation", check.not_none(event.dagster_event).event_specific_data
)
with self.index_connection() as conn:
conn.execute(
AssetCheckExecutionsTable.insert().values(
asset_key=evaluation.asset_key.to_string(),
check_name=evaluation.check_name,
run_id=event.run_id,
execution_status=(
AssetCheckExecutionRecordStatus.SUCCEEDED.value
if evaluation.passed
else AssetCheckExecutionRecordStatus.FAILED.value
),
evaluation_event=serialize_value(event),
evaluation_event_timestamp=self._event_insert_timestamp(event),
evaluation_event_storage_id=event_id,
materialization_event_storage_id=(
evaluation.target_materialization_data.storage_id
if evaluation.target_materialization_data
else None
),
)
)
def _update_asset_check_evaluation(self, event: EventLogEntry, event_id: Optional[int]) -> None:
evaluation = cast(
"AssetCheckEvaluation", check.not_none(event.dagster_event).event_specific_data
)
with self.index_connection() as conn:
rows_updated = conn.execute(
AssetCheckExecutionsTable.update()
.where(
# (asset_key, check_name, run_id) uniquely identifies the row created for the planned event
db.and_(
AssetCheckExecutionsTable.c.asset_key == evaluation.asset_key.to_string(),
AssetCheckExecutionsTable.c.check_name == evaluation.check_name,
AssetCheckExecutionsTable.c.run_id == event.run_id,
)
)
.values(
execution_status=(
AssetCheckExecutionRecordStatus.SUCCEEDED.value
if evaluation.passed
else AssetCheckExecutionRecordStatus.FAILED.value
),
evaluation_event=serialize_value(event),
evaluation_event_timestamp=self._event_insert_timestamp(event),
evaluation_event_storage_id=event_id,
materialization_event_storage_id=(
evaluation.target_materialization_data.storage_id
if evaluation.target_materialization_data
else None
),
)
).rowcount
# TODO fix the idx_asset_check_executions_unique index so that this can be a single
# upsert
if rows_updated == 0:
rows_updated = conn.execute(
AssetCheckExecutionsTable.insert().values(
asset_key=evaluation.asset_key.to_string(),
check_name=evaluation.check_name,
run_id=event.run_id,
execution_status=(
AssetCheckExecutionRecordStatus.SUCCEEDED.value
if evaluation.passed
else AssetCheckExecutionRecordStatus.FAILED.value
),
evaluation_event=serialize_value(event),
evaluation_event_timestamp=datetime.utcfromtimestamp(event.timestamp),
evaluation_event_storage_id=event_id,
materialization_event_storage_id=(
evaluation.target_materialization_data.storage_id
if evaluation.target_materialization_data
else None
),
)
).rowcount
# 0 isn't normally expected, but occurs with the external instance of step launchers where
# they don't have planned events.
if rows_updated > 1:
raise DagsterInvariantViolationError(
f"Updated {rows_updated} rows for asset check evaluation {evaluation.asset_check_key} "
"as a result of duplicate AssetCheckPlanned events."
)
def get_asset_check_execution_history(
self,
check_key: AssetCheckKey,
limit: int,
cursor: Optional[int] = None,
status: Optional[AbstractSet[AssetCheckExecutionRecordStatus]] = None,
) -> Sequence[AssetCheckExecutionRecord]:
check.inst_param(check_key, "key", AssetCheckKey)
check.int_param(limit, "limit")
check.opt_int_param(cursor, "cursor")
query = (
db_select(
[
AssetCheckExecutionsTable.c.id,
AssetCheckExecutionsTable.c.run_id,
AssetCheckExecutionsTable.c.execution_status,
AssetCheckExecutionsTable.c.evaluation_event,
AssetCheckExecutionsTable.c.create_timestamp,
]
)
.where(
db.and_(
AssetCheckExecutionsTable.c.asset_key == check_key.asset_key.to_string(),
AssetCheckExecutionsTable.c.check_name == check_key.name,
)
)
.order_by(AssetCheckExecutionsTable.c.id.desc())
).limit(limit)
if cursor:
query = query.where(AssetCheckExecutionsTable.c.id < cursor)
if status:
query = query.where(
AssetCheckExecutionsTable.c.execution_status.in_([s.value for s in status])
)
with self.index_connection() as conn:
rows = db_fetch_mappings(conn, query)
return [AssetCheckExecutionRecord.from_db_row(row, key=check_key) for row in rows]
def get_latest_asset_check_execution_by_key(
self, check_keys: Sequence[AssetCheckKey]
) -> Mapping[AssetCheckKey, AssetCheckExecutionRecord]:
if not check_keys:
return {}
latest_ids_subquery = db_subquery(
db_select(
[
db.func.max(AssetCheckExecutionsTable.c.id).label("id"),
]
)
.where(
db.and_(
AssetCheckExecutionsTable.c.asset_key.in_(
[key.asset_key.to_string() for key in check_keys]
),
AssetCheckExecutionsTable.c.check_name.in_([key.name for key in check_keys]),
)
)
.group_by(
AssetCheckExecutionsTable.c.asset_key,
AssetCheckExecutionsTable.c.check_name,
)
)
query = db_select(
[
AssetCheckExecutionsTable.c.id,
AssetCheckExecutionsTable.c.asset_key,
AssetCheckExecutionsTable.c.check_name,
AssetCheckExecutionsTable.c.run_id,
AssetCheckExecutionsTable.c.execution_status,
AssetCheckExecutionsTable.c.evaluation_event,
AssetCheckExecutionsTable.c.create_timestamp,
]
).select_from(
AssetCheckExecutionsTable.join(
latest_ids_subquery,
db.and_(
AssetCheckExecutionsTable.c.id == latest_ids_subquery.c.id,
),
)
)
with self.index_connection() as conn:
rows = db_fetch_mappings(conn, query)
results = {}
for row in rows:
check_key = AssetCheckKey(
asset_key=check.not_none(AssetKey.from_db_string(cast("str", row["asset_key"]))),
name=cast("str", row["check_name"]),
)
results[check_key] = AssetCheckExecutionRecord.from_db_row(row, key=check_key)
return results
@property
def supports_asset_checks(self): # pyright: ignore[reportIncompatibleMethodOverride]
return self.has_table(AssetCheckExecutionsTable.name)
def get_latest_planned_materialization_info(
self,
asset_key: AssetKey,
partition: Optional[str] = None,
) -> Optional[PlannedMaterializationInfo]:
records = self._get_event_records(
event_records_filter=EventRecordsFilter(
DagsterEventType.ASSET_MATERIALIZATION_PLANNED,
asset_key=asset_key,
asset_partitions=[partition] if partition else None,
),
limit=1,
ascending=False,
)
if not records:
return None
return PlannedMaterializationInfo(
storage_id=records[0].storage_id,
run_id=records[0].run_id,
)
def _get_partition_data_versions(
self,
asset_key: AssetKey,
partitions: Sequence[str],
before_storage_id: Optional[int] = None,
after_storage_id: Optional[int] = None,
) -> dict[str, str]:
partition_subquery = db_select(
[SqlEventLogStorageTable.c.partition, SqlEventLogStorageTable.c.id]
).where(
db.and_(
db.or_(
SqlEventLogStorageTable.c.dagster_event_type
== DagsterEventType.ASSET_MATERIALIZATION.value,
SqlEventLogStorageTable.c.dagster_event_type
== DagsterEventType.ASSET_OBSERVATION.value,
),
SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),
SqlEventLogStorageTable.c.partition.in_(partitions),
)
)
data_version_subquery = db_select(
[
AssetEventTagsTable.c.event_id,
AssetEventTagsTable.c.value,
]
).where(
db.and_(
AssetEventTagsTable.c.key == DATA_VERSION_TAG,
AssetEventTagsTable.c.asset_key == asset_key.to_string(),
)
)
if before_storage_id is not None:
partition_subquery = partition_subquery.where(
SqlEventLogStorageTable.c.id < before_storage_id
)
data_version_subquery = data_version_subquery.where(
AssetEventTagsTable.c.event_id < before_storage_id
)
if after_storage_id is not None:
partition_subquery = partition_subquery.where(
SqlEventLogStorageTable.c.id > after_storage_id
)
data_version_subquery = data_version_subquery.where(
AssetEventTagsTable.c.event_id > after_storage_id
)
partition_subquery = db_subquery(partition_subquery, "partition_subquery")
data_version_subquery = db_subquery(data_version_subquery, "data_version_subquery")
data_version_by_partition_subquery = db_subquery(
db_select(
[
partition_subquery.c.partition,
data_version_subquery.c.value,
db.func.rank()
.over(
order_by=db.desc(partition_subquery.c.id),
partition_by=partition_subquery.c.partition,
)
.label("rank"),
]
).select_from(
partition_subquery.join(
data_version_subquery,
data_version_subquery.c.event_id == partition_subquery.c.id,
)
),
"data_version_by_partition_subquery",
)
latest_data_version_by_partition_query = (
db_select(
[
data_version_by_partition_subquery.c.partition,
data_version_by_partition_subquery.c.value,
]
)
.order_by(data_version_by_partition_subquery.c.rank.asc())
.where(data_version_by_partition_subquery.c.rank == 1)
)
with self.index_connection() as conn:
rows = conn.execute(latest_data_version_by_partition_query).fetchall()
return {cast("str", row[0]): cast("str", row[1]) for row in rows}
def get_updated_data_version_partitions(
self, asset_key: AssetKey, partitions: Iterable[str], since_storage_id: int
) -> set[str]:
previous_data_versions = self._get_partition_data_versions(
asset_key=asset_key,
partitions=list(partitions),
before_storage_id=since_storage_id + 1,
)
current_data_versions = self._get_partition_data_versions(
asset_key=asset_key,
partitions=list(partitions),
after_storage_id=since_storage_id,
)
updated_partitions = set()
for partition, data_version in current_data_versions.items():
previous_data_version = previous_data_versions.get(partition)
if data_version and data_version != previous_data_version:
updated_partitions.add(partition)
return updated_partitions
def _get_from_row(row: SqlAlchemyRow, column: str) -> object:
"""Utility function for extracting a column from a sqlalchemy row proxy, since '_asdict' is not
supported in sqlalchemy 1.3.
"""
if column not in row.keys():
return None
return row[column]
| SqlEventLogStorage |
python | kamyu104__LeetCode-Solutions | Python/number-of-beautiful-partitions.py | {
"start": 38,
"end": 871
} | class ____(object):
def beautifulPartitions(self, s, k, minLength):
"""
:type s: str
:type k: int
:type minLength: int
:rtype: int
"""
MOD = 10**9+7
PRIMES = {'2', '3', '5', '7'}
dp = [0]*len(s) # dp[i] at j : number of j beautiful partitions in s[:i+1]
for i in xrange(minLength-1, len(s)):
if s[0] in PRIMES and s[i] not in PRIMES:
dp[i] = 1
for j in xrange(2, k+1):
new_dp = [0]*len(s)
curr = int(j == 1)
for i in xrange(j*minLength-1, len(s)):
if s[i-minLength+1] in PRIMES:
curr = (curr+dp[i-minLength])%MOD
if s[i] not in PRIMES:
new_dp[i] = curr
dp = new_dp
return dp[-1]
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 774150,
"end": 774383
} | class ____(sgqlc.types.Type, GitSignature):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("key_id",)
key_id = sgqlc.types.Field(String, graphql_name="keyId")
| GpgSignature |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock/llama_index/llms/bedrock/utils.py | {
"start": 9379,
"end": 9711
} | class ____(Provider):
max_tokens_key = "max_tokens"
def __init__(self) -> None:
self.messages_to_prompt = messages_to_llama_prompt
self.completion_to_prompt = completion_to_llama_prompt
def get_text_from_response(self, response: dict) -> str:
return response["outputs"][0]["text"]
| MistralProvider |
python | simplejson__simplejson | simplejson/tests/test_bigint_as_string.py | {
"start": 59,
"end": 2238
} | class ____(TestCase):
# Python 2.5, at least the one that ships on Mac OS X, calculates
# 2 ** 53 as 0! It manages to calculate 1 << 53 correctly.
values = [(200, 200),
((1 << 53) - 1, 9007199254740991),
((1 << 53), '9007199254740992'),
((1 << 53) + 1, '9007199254740993'),
(-100, -100),
((-1 << 53), '-9007199254740992'),
((-1 << 53) - 1, '-9007199254740993'),
((-1 << 53) + 1, -9007199254740991)]
options = (
{"bigint_as_string": True},
{"int_as_string_bitcount": 53}
)
def test_ints(self):
for opts in self.options:
for val, expect in self.values:
self.assertEqual(
val,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
def test_lists(self):
for opts in self.options:
for val, expect in self.values:
val = [val, val]
expect = [expect, expect]
self.assertEqual(
val,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
def test_dicts(self):
for opts in self.options:
for val, expect in self.values:
val = {'k': val}
expect = {'k': expect}
self.assertEqual(
val,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
def test_dict_keys(self):
for opts in self.options:
for val, _ in self.values:
expect = {str(val): 'value'}
val = {val: 'value'}
self.assertEqual(
expect,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
| TestBigintAsString |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/normalization.py | {
"start": 8075,
"end": 10049
} | class ____(torch.nn.InstanceNorm3d):
r"""This is the quantized version of :class:`~torch.nn.InstanceNorm3d`.
Additional args:
* **scale** - quantization scale of the output, type: double.
* **zero_point** - quantization zero point of the output, type: long.
"""
def __init__(
self,
num_features,
weight,
bias,
scale,
zero_point,
eps=1e-5,
momentum=0.1,
affine=False,
track_running_stats=False,
device=None,
dtype=None,
) -> None:
factory_kwargs = {"device": device, "dtype": dtype}
super().__init__(
num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
)
self.weight = weight
self.bias = bias
# pyrefly: ignore [bad-argument-type]
self.register_buffer("scale", torch.tensor(scale, **factory_kwargs))
# pyrefly: ignore [bad-argument-type]
self.register_buffer("zero_point", torch.tensor(zero_point, **factory_kwargs))
def forward(self, input):
return torch.ops.quantized.instance_norm(
input, self.weight, self.bias, self.eps, self.scale, self.zero_point
)
def _get_name(self):
return "QuantizedInstanceNorm3d"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False):
scale, zero_point = mod.activation_post_process.calculate_qparams()
new_mod = cls(
mod.num_features,
mod.weight,
mod.bias,
float(scale),
int(zero_point),
mod.eps,
mod.affine,
)
return new_mod
@classmethod
def from_reference(cls, mod, scale, zero_point):
return cls(
mod.num_features,
mod.weight,
mod.bias,
float(scale),
int(zero_point),
mod.eps,
mod.affine,
)
| InstanceNorm3d |
python | Netflix__metaflow | metaflow/_vendor/click/types.py | {
"start": 12481,
"end": 12951
} | class ____(ParamType):
name = "boolean"
def convert(self, value, param, ctx):
if isinstance(value, bool):
return bool(value)
value = value.lower()
if value in ("true", "t", "1", "yes", "y"):
return True
elif value in ("false", "f", "0", "no", "n"):
return False
self.fail("{} is not a valid boolean".format(value), param, ctx)
def __repr__(self):
return "BOOL"
| BoolParamType |
python | PrefectHQ__prefect | src/prefect/server/exceptions.py | {
"start": 407,
"end": 596
} | class ____(PrefectException):
"""An error raised by the Prefect REST API when attempting to create or update a
deployment with missing required variables.
"""
| MissingVariableError |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 292083,
"end": 292731
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("EnterpriseMemberEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("EnterpriseMember"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| EnterpriseMemberConnection |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 88414,
"end": 91814
} | class ____(Response):
"""
Response of events.get_multi_task_plots endpoint.
:param plots: Plots mapping (keyed by task name)
:type plots: dict
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query. In case there
are more than 10000 results it is set to 10000
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_multi_task_plots"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"plots": {
"description": "Plots mapping (keyed by task name)",
"type": ["object", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query. In case there are more than 10000 results it is set to 10000",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
plots: Optional[dict] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetMultiTaskPlotsResponse, self).__init__(**kwargs)
self.plots = plots
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("plots")
def plots(self) -> Optional[dict]:
return self._property_plots
@plots.setter
def plots(self, value: Optional[dict]) -> None:
if value is None:
self._property_plots = None
return
self.assert_isinstance(value, "plots", (dict,))
self._property_plots = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetMultiTaskPlotsResponse |
python | cython__cython | tests/run/withstat_py.py | {
"start": 3619,
"end": 4700
} | class ____(object):
def get(self, *args):
return ContextManager(*args)
def manager_from_expression():
"""
>>> manager_from_expression()
enter
1
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
enter
2
exit <type 'NoneType'> <type 'NoneType'> <type 'NoneType'>
"""
with GetManager().get(1) as x:
print(x)
g = GetManager()
with g.get(2) as x:
print(x)
def manager_from_ternary(use_first):
"""
>>> manager_from_ternary(True)
enter
exit <type 'type'> <type 'ValueError'> <type 'traceback'>
>>> manager_from_ternary(False)
enter
exit <type 'type'> <type 'ValueError'> <type 'traceback'>
In except
"""
# This is mostly testing a parsing problem, hence the
# result of the ternary must be callable
cm1_getter = lambda: ContextManager("1", exit_ret=True)
cm2_getter = lambda: ContextManager("2")
try:
with (cm1_getter if use_first else cm2_getter)():
raise ValueError
except ValueError:
print("In except")
| GetManager |
python | django__django | tests/prefetch_related/test_uuid.py | {
"start": 2090,
"end": 4965
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
house = House.objects.create(name="Redwood", address="Arcata")
room = Room.objects.create(name="Racoon", house=house)
fleas = [Flea.objects.create(current_room=room) for i in range(3)]
pet = Pet.objects.create(name="Spooky")
pet.fleas_hosted.add(*fleas)
person = Person.objects.create(name="Bob")
person.houses.add(house)
person.pets.add(pet)
person.fleas_hosted.add(*fleas)
def test_from_uuid_pk_lookup_uuid_pk_integer_pk(self):
# From uuid-pk model, prefetch <uuid-pk model>.<integer-pk model>:
with self.assertNumQueries(4):
spooky = Pet.objects.prefetch_related(
"fleas_hosted__current_room__house"
).get(name="Spooky")
with self.assertNumQueries(0):
self.assertEqual("Racoon", spooky.fleas_hosted.all()[0].current_room.name)
def test_from_uuid_pk_lookup_integer_pk2_uuid_pk2(self):
# From uuid-pk model, prefetch
# <integer-pk model>.<integer-pk model>.<uuid-pk model>.<uuid-pk
# model>:
with self.assertNumQueries(5):
spooky = Pet.objects.prefetch_related("people__houses__rooms__fleas").get(
name="Spooky"
)
with self.assertNumQueries(0):
self.assertEqual(
3,
len(spooky.people.all()[0].houses.all()[0].rooms.all()[0].fleas.all()),
)
def test_from_integer_pk_lookup_uuid_pk_integer_pk(self):
# From integer-pk model, prefetch <uuid-pk model>.<integer-pk model>:
with self.assertNumQueries(3):
racoon = Room.objects.prefetch_related("fleas__people_visited").get(
name="Racoon"
)
with self.assertNumQueries(0):
self.assertEqual("Bob", racoon.fleas.all()[0].people_visited.all()[0].name)
def test_from_integer_pk_lookup_integer_pk_uuid_pk(self):
# From integer-pk model, prefetch <integer-pk model>.<uuid-pk model>:
with self.assertNumQueries(3):
redwood = House.objects.prefetch_related("rooms__fleas").get(name="Redwood")
with self.assertNumQueries(0):
self.assertEqual(3, len(redwood.rooms.all()[0].fleas.all()))
def test_from_integer_pk_lookup_integer_pk_uuid_pk_uuid_pk(self):
# From integer-pk model, prefetch
# <integer-pk model>.<uuid-pk model>.<uuid-pk model>:
with self.assertNumQueries(4):
redwood = House.objects.prefetch_related("rooms__fleas__pets_visited").get(
name="Redwood"
)
with self.assertNumQueries(0):
self.assertEqual(
"Spooky",
redwood.rooms.all()[0].fleas.all()[0].pets_visited.all()[0].name,
)
| UUIDPrefetchRelatedLookups |
python | readthedocs__readthedocs.org | readthedocs/organizations/migrations/0010_add_stripe_customer.py | {
"start": 182,
"end": 1292
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("djstripe", "0010_alter_customer_balance"),
("organizations", "0009_update_meta_options"),
]
operations = [
migrations.AddField(
model_name="historicalorganization",
name="stripe_customer",
field=models.ForeignKey(
blank=True,
db_constraint=False,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="+",
to="djstripe.customer",
verbose_name="Stripe customer",
),
),
migrations.AddField(
model_name="organization",
name="stripe_customer",
field=models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="rtd_organization",
to="djstripe.customer",
verbose_name="Stripe customer",
),
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/framework/type_spec_test.py | {
"start": 2042,
"end": 3119
} | class ____(type_spec.TypeSpec):
"""A TypeSpec for the TwoTensors value type."""
def __init__(self, x_shape, x_dtype, y_shape, y_dtype, color="red"):
self.x_shape = tensor_shape.as_shape(x_shape)
self.x_dtype = dtypes.as_dtype(x_dtype)
self.y_shape = tensor_shape.as_shape(y_shape)
self.y_dtype = dtypes.as_dtype(y_dtype)
self.color = color
value_type = property(lambda self: TwoTensors)
@property
def _component_specs(self):
return (tensor_spec.TensorSpec(self.x_shape, self.x_dtype),
tensor_spec.TensorSpec(self.y_shape, self.y_dtype))
def _to_components(self, value):
return (value.x, value.y)
def _from_components(self, components):
x, y = components
return TwoTensors(x, y, self.color)
def _serialize(self):
return (self.x_shape, self.x_dtype, self.y_shape, self.y_dtype, self.color)
@classmethod
def from_value(cls, value):
return cls(value.x.shape, value.x.dtype, value.y.shape, value.y.dtype,
value.color)
@type_spec_registry.register("tf.TwoTensorsSpecTwin")
| TwoTensorsSpec |
python | keon__algorithms | tests/test_matrix.py | {
"start": 5046,
"end": 6361
} | class ____(unittest.TestCase):
"""[summary]
Test for the file matrix_inversion.py
Arguments:
unittest {[type]} -- [description]
"""
def test_inversion(self):
from fractions import Fraction
m1 = [[1, 1], [1, 2]]
self.assertEqual(matrix_inversion.invert_matrix(m1),
[[2, -1], [-1, 1]])
m2 = [[1, 2], [3, 4, 5]]
self.assertEqual(matrix_inversion.invert_matrix(m2), [[-1]])
m3 = [[1, 1, 1, 1], [2, 2, 2, 2]]
self.assertEqual(matrix_inversion.invert_matrix(m3), [[-2]])
m4 = [[1]]
self.assertEqual(matrix_inversion.invert_matrix(m4), [[-3]])
m5 = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
self.assertEqual(matrix_inversion.invert_matrix(m5), [[-4]])
m6 = [[3, 5, 1], [2, 5, 0], [1, 9, 8]]
self.assertEqual(matrix_inversion.invert_matrix(m6),
[[Fraction(40, 53),
Fraction(-31, 53),
Fraction(-5, 53)],
[Fraction(-16, 53),
Fraction(23, 53),
Fraction(2, 53)],
[Fraction(13, 53),
Fraction(-22, 53),
Fraction(5, 53)]])
| TestInversion |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 123469,
"end": 123866
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| PullRequestOrder |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/unit_tests/test_streams.py | {
"start": 3055,
"end": 7170
} | class ____:
def generate_records(self, stream_name, count) -> Union[Mapping[str, List[Mapping[str, Any]]], Mapping[str, Any]]:
if not stream_name:
return {f"record_{1}": f"test_{1}"}
result = []
for i in range(0, count):
result.append({f"record_{i}": f"test_{i}"})
return {stream_name: result}
@pytest.mark.parametrize(
"stream_cls, use_deprecated_api, cursor_response, expected",
[
(Orders, True, {}, {"page": 2}),
(Orders, False, {"next_cursor": "some next cursor"}, {"cursor": "some next cursor"}),
],
)
def test_next_page_token(self, config, use_deprecated_api, stream_cls, cursor_response, requests_mock, expected) -> None:
test_config = use_orders_deprecated_api_config(config, use_deprecated_api)
stream = stream_cls(test_config, authenticator=None)
stream.page_size = 2
url = f"{stream.url_base}{stream.path()}"
response = {**cursor_response, **self.generate_records(stream.data_path, 2)}
requests_mock.get(url, json=response)
response = requests.get(url)
assert stream.next_page_token(response) == expected
@pytest.mark.parametrize(
"stream_cls, use_deprecated_api, next_page_token, stream_slice, expected",
[
(
Orders,
True,
None,
{"start_date": "2023-01-01 00:00:01", "end_date": "2023-01-31 00:00:01"},
{
"limit": 250,
"sort_by": "updated_at-asc",
"updated_at_min": "2023-01-01 00:00:01",
"updated_at_max": "2023-01-31 00:00:01",
},
),
(
Orders,
False,
None,
{"start_date": "2023-01-01 00:00:01", "end_date": "2023-01-31 00:00:01"},
{
"limit": 250,
"sort_by": "updated_at-asc",
"updated_at_min": "2023-01-01 00:00:01",
"updated_at_max": "2023-01-31 00:00:01",
},
),
],
)
def test_request_params(self, config, stream_cls, use_deprecated_api, next_page_token, stream_slice, expected) -> None:
test_config = use_orders_deprecated_api_config(config, use_deprecated_api)
stream = stream_cls(test_config, authenticator=None)
result = stream.request_params(stream_slice, next_page_token)
assert result == expected
@pytest.mark.parametrize(
"stream_cls, use_deprecated_api, data, expected",
[
(Orders, True, [{"test": 123}], [{"test": 123}]),
(Orders, False, [{"test": 123}], [{"test": 123}]),
],
)
def test_parse_response(self, config, stream_cls, use_deprecated_api, data, requests_mock, expected) -> None:
test_config = use_orders_deprecated_api_config(config, use_deprecated_api)
stream = stream_cls(test_config, authenticator=None)
url = f"{stream.url_base}{stream.path()}"
data = {stream.data_path: data} if stream.data_path else data
requests_mock.get(url, json=data)
response = requests.get(url)
assert list(stream.parse_response(response)) == expected
@pytest.mark.parametrize(
"stream_cls, use_deprecated_api, data, expected",
[
(Orders, True, [{"test": 123}], [{"test": 123}]),
(Orders, False, [{"test": 123}], [{"test": 123}]),
],
)
def get_stream_data(self, config, stream_cls, use_deprecated_api, data, requests_mock, expected) -> None:
test_config = use_orders_deprecated_api_config(config, use_deprecated_api)
stream = stream_cls(test_config, authenticator=None)
url = f"{stream.url_base}{stream.path()}"
data = {stream.data_path: data} if stream.data_path else data
requests_mock.get(url, json=data)
response = requests.get(url)
assert list(stream.parse_response(response)) == expected
| TestFullRefreshStreams |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 487211,
"end": 487528
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("PackageFile", graphql_name="node")
| PackageFileEdge |
python | Pylons__pyramid | src/pyramid/interfaces.py | {
"start": 15426,
"end": 15679
} | class ____(Interface):
def __call__(info):
"""Return an object that implements
:class:`pyramid.interfaces.IRenderer`. ``info`` is an
object that implements :class:`pyramid.interfaces.IRendererInfo`.
"""
| IRendererFactory |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/subset/default.py | {
"start": 574,
"end": 8310
} | class ____(
PartitionsSubset,
NamedTuple("_DefaultPartitionsSubset", [("subset", Set[str])]),
):
# Every time we change the serialization format, we should increment the version number.
# This will ensure that we can gracefully degrade when deserializing old data.
SERIALIZATION_VERSION = 1
def __new__(
cls,
subset: Optional[Set[str]] = None,
):
check.opt_set_param(subset, "subset")
return super().__new__(cls, subset or set())
@property
def is_empty(self) -> bool:
return len(self.subset) == 0
def get_partition_keys_not_in_subset(
self, partitions_def: PartitionsDefinition
) -> Iterable[str]:
return [key for key in partitions_def.get_partition_keys() if key not in self.subset]
def get_partition_keys(self) -> Iterable[str]:
return self.subset
def __sub__(self, other: "PartitionsSubset") -> "PartitionsSubset":
if not isinstance(other, DefaultPartitionsSubset):
return super().__sub__(other)
if self is other:
return self.empty_subset()
if other.is_empty:
return self
return DefaultPartitionsSubset(self.subset - other.subset)
def __or__(self, other: "PartitionsSubset") -> "PartitionsSubset":
if not isinstance(other, DefaultPartitionsSubset):
return super().__or__(other)
if self is other or other.is_empty:
return self
if self.is_empty:
return other
return DefaultPartitionsSubset(self.subset | other.subset)
def __and__(self, other: "PartitionsSubset") -> "PartitionsSubset":
if not isinstance(other, DefaultPartitionsSubset):
return super().__and__(other)
if self is other:
return self
if other.is_empty:
return other
if self.is_empty:
return self
return DefaultPartitionsSubset(self.subset & other.subset)
def get_ranges_for_keys(self, partition_keys: Sequence[str]) -> Sequence[PartitionKeyRange]:
cur_range_start = None
cur_range_end = None
result = []
for partition_key in partition_keys:
if partition_key in self.subset:
if cur_range_start is None:
cur_range_start = partition_key
cur_range_end = partition_key
else:
if cur_range_start is not None and cur_range_end is not None:
result.append(PartitionKeyRange(cur_range_start, cur_range_end))
cur_range_start = cur_range_end = None
if cur_range_start is not None and cur_range_end is not None:
result.append(PartitionKeyRange(cur_range_start, cur_range_end))
return result
def get_partition_key_ranges(
self, partitions_def: PartitionsDefinition
) -> Sequence[PartitionKeyRange]:
from dagster._core.definitions.partitions.definition.multi import MultiPartitionsDefinition
if isinstance(partitions_def, MultiPartitionsDefinition):
# For multi-partitions, we construct the ranges by holding one dimension constant
# and constructing the range for the other dimension
primary_dimension = partitions_def.primary_dimension
secondary_dimension = partitions_def.secondary_dimension
primary_keys_in_subset = set()
secondary_keys_in_subset = set()
for partition_key in self.subset:
primary_keys_in_subset.add(
partitions_def.get_partition_key_from_str(partition_key).keys_by_dimension[
primary_dimension.name
]
)
secondary_keys_in_subset.add(
partitions_def.get_partition_key_from_str(partition_key).keys_by_dimension[
secondary_dimension.name
]
)
# for efficiency, group the keys by whichever dimension has fewer distinct keys
grouping_dimension = (
primary_dimension
if len(primary_keys_in_subset) <= len(secondary_keys_in_subset)
else secondary_dimension
)
grouping_keys = (
primary_keys_in_subset
if grouping_dimension == primary_dimension
else secondary_keys_in_subset
)
results = []
for grouping_key in grouping_keys:
keys = partitions_def.get_multipartition_keys_with_dimension_value(
dimension_name=grouping_dimension.name,
dimension_partition_key=grouping_key,
)
results.extend(self.get_ranges_for_keys(keys))
return results
else:
partition_keys = partitions_def.get_partition_keys()
return self.get_ranges_for_keys(partition_keys)
def with_partition_keys(self, partition_keys: Iterable[str]) -> "DefaultPartitionsSubset":
return DefaultPartitionsSubset(
self.subset | set(partition_keys),
)
def serialize(self) -> str:
# Serialize version number, so attempting to deserialize old versions can be handled gracefully.
# Any time the serialization format changes, we should increment the version number.
return json.dumps(
{
"version": self.SERIALIZATION_VERSION,
# sort to ensure that equivalent partition subsets have identical serialized forms
"subset": sorted(list(self.subset)),
}
)
@classmethod
def from_serialized(
cls, partitions_def: PartitionsDefinition, serialized: str
) -> "PartitionsSubset":
# Check the version number, so only valid versions can be deserialized.
data = json.loads(serialized)
if isinstance(data, list):
# backwards compatibility
return cls(subset=set(data))
else:
if data.get("version") != cls.SERIALIZATION_VERSION:
raise DagsterInvalidDeserializationVersionError(
f"Attempted to deserialize partition subset with version {data.get('version')},"
f" but only version {cls.SERIALIZATION_VERSION} is supported."
)
return cls(subset=set(data.get("subset")))
@classmethod
def can_deserialize(
cls,
partitions_def: PartitionsDefinition,
serialized: str,
serialized_partitions_def_unique_id: Optional[str],
serialized_partitions_def_class_name: Optional[str],
) -> bool:
if serialized_partitions_def_class_name is not None:
return serialized_partitions_def_class_name == partitions_def.__class__.__name__
data = json.loads(serialized)
return isinstance(data, list) or (
data.get("subset") is not None and data.get("version") == cls.SERIALIZATION_VERSION
)
def __eq__(self, other: object) -> bool:
return isinstance(other, DefaultPartitionsSubset) and self.subset == other.subset
def __len__(self) -> int:
return len(self.subset)
def __contains__(self, value) -> bool:
return value in self.subset
def __repr__(self) -> str:
return f"DefaultPartitionsSubset(subset={self.subset})"
@classmethod
def create_empty_subset(
cls, partitions_def: Optional[PartitionsDefinition] = None
) -> "DefaultPartitionsSubset":
return cls()
def empty_subset(
self,
) -> "DefaultPartitionsSubset":
return DefaultPartitionsSubset()
| DefaultPartitionsSubset |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/flare/base.py | {
"start": 2989,
"end": 10725
} | class ____(Chain):
"""Flare chain.
Chain that combines a retriever, a question generator,
and a response generator.
See [Active Retrieval Augmented Generation](https://arxiv.org/abs/2305.06983) paper.
"""
question_generator_chain: Runnable
"""Chain that generates questions from uncertain spans."""
response_chain: Runnable
"""Chain that generates responses from user input and context."""
output_parser: FinishedOutputParser = Field(default_factory=FinishedOutputParser)
"""Parser that determines whether the chain is finished."""
retriever: BaseRetriever
"""Retriever that retrieves relevant documents from a user input."""
min_prob: float = 0.2
"""Minimum probability for a token to be considered low confidence."""
min_token_gap: int = 5
"""Minimum number of tokens between two low confidence spans."""
num_pad_tokens: int = 2
"""Number of tokens to pad around a low confidence span."""
max_iter: int = 10
"""Maximum number of iterations."""
start_with_retrieval: bool = True
"""Whether to start with retrieval."""
@property
def input_keys(self) -> list[str]:
"""Input keys for the chain."""
return ["user_input"]
@property
def output_keys(self) -> list[str]:
"""Output keys for the chain."""
return ["response"]
def _do_generation(
self,
questions: list[str],
user_input: str,
response: str,
_run_manager: CallbackManagerForChainRun,
) -> tuple[str, bool]:
callbacks = _run_manager.get_child()
docs = []
for question in questions:
docs.extend(self.retriever.invoke(question))
context = "\n\n".join(d.page_content for d in docs)
result = self.response_chain.invoke(
{
"user_input": user_input,
"context": context,
"response": response,
},
{"callbacks": callbacks},
)
if isinstance(result, AIMessage):
result = result.content
marginal, finished = self.output_parser.parse(result)
return marginal, finished
def _do_retrieval(
self,
low_confidence_spans: list[str],
_run_manager: CallbackManagerForChainRun,
user_input: str,
response: str,
initial_response: str,
) -> tuple[str, bool]:
question_gen_inputs = [
{
"user_input": user_input,
"current_response": initial_response,
"uncertain_span": span,
}
for span in low_confidence_spans
]
callbacks = _run_manager.get_child()
if isinstance(self.question_generator_chain, LLMChain):
question_gen_outputs = self.question_generator_chain.apply(
question_gen_inputs,
callbacks=callbacks,
)
questions = [
output[self.question_generator_chain.output_keys[0]]
for output in question_gen_outputs
]
else:
questions = self.question_generator_chain.batch(
question_gen_inputs,
config={"callbacks": callbacks},
)
_run_manager.on_text(
f"Generated Questions: {questions}",
color="yellow",
end="\n",
)
return self._do_generation(questions, user_input, response, _run_manager)
def _call(
self,
inputs: dict[str, Any],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
user_input = inputs[self.input_keys[0]]
response = ""
for _i in range(self.max_iter):
_run_manager.on_text(
f"Current Response: {response}",
color="blue",
end="\n",
)
_input = {"user_input": user_input, "context": "", "response": response}
tokens, log_probs = _extract_tokens_and_log_probs(
self.response_chain.invoke(
_input,
{"callbacks": _run_manager.get_child()},
),
)
low_confidence_spans = _low_confidence_spans(
tokens,
log_probs,
self.min_prob,
self.min_token_gap,
self.num_pad_tokens,
)
initial_response = response.strip() + " " + "".join(tokens)
if not low_confidence_spans:
response = initial_response
final_response, finished = self.output_parser.parse(response)
if finished:
return {self.output_keys[0]: final_response}
continue
marginal, finished = self._do_retrieval(
low_confidence_spans,
_run_manager,
user_input,
response,
initial_response,
)
response = response.strip() + " " + marginal
if finished:
break
return {self.output_keys[0]: response}
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel | None,
max_generation_len: int = 32,
**kwargs: Any,
) -> FlareChain:
"""Creates a FlareChain from a language model.
Args:
llm: Language model to use.
max_generation_len: Maximum length of the generated response.
kwargs: Additional arguments to pass to the constructor.
Returns:
FlareChain class with the given language model.
"""
try:
from langchain_openai import ChatOpenAI
except ImportError as e:
msg = (
"OpenAI is required for FlareChain. "
"Please install langchain-openai."
"pip install langchain-openai"
)
raise ImportError(msg) from e
# Preserve supplied llm instead of always creating a new ChatOpenAI.
# Enforce ChatOpenAI requirement (token logprobs needed for FLARE).
if llm is None:
llm = ChatOpenAI(
max_completion_tokens=max_generation_len,
logprobs=True,
temperature=0,
)
else:
if not isinstance(llm, ChatOpenAI):
msg = (
f"FlareChain.from_llm requires ChatOpenAI; got "
f"{type(llm).__name__}."
)
raise TypeError(msg)
if not getattr(llm, "logprobs", False): # attribute presence may vary
msg = (
"Provided ChatOpenAI instance must be constructed with "
"logprobs=True for FlareChain."
)
raise ValueError(msg)
current_max = getattr(llm, "max_completion_tokens", None)
if current_max is not None and current_max != max_generation_len:
logger.debug(
"FlareChain.from_llm: supplied llm max_completion_tokens=%s "
"differs from requested max_generation_len=%s; "
"leaving model unchanged.",
current_max,
max_generation_len,
)
response_chain = PROMPT | llm
question_gen_chain = QUESTION_GENERATOR_PROMPT | llm | StrOutputParser()
return cls(
question_generator_chain=question_gen_chain,
response_chain=response_chain,
**kwargs,
)
| FlareChain |
python | falconry__falcon | tests/test_error_handlers.py | {
"start": 509,
"end": 882
} | class ____(CustomBaseException):
@staticmethod
def handle(req, resp, ex, params):
raise falcon.HTTPError(
falcon.HTTP_792,
title='Internet crashed!',
description='Catastrophic weather event',
href='http://example.com/api/inconvenient-truth',
href_text='Drill, baby drill!',
)
| CustomException |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 18998,
"end": 20500
} | class ____(HelperFunction):
def _calculate(self, X, y, logger, feat_type):
numerical = {
key: True if value.lower() == "numerical" else False
for key, value in feat_type.items()
}
kurts = []
for i in range(X.shape[1]):
if numerical[X.columns[i] if hasattr(X, "columns") else i]:
if np.isclose(
np.var(X.iloc[:, i] if hasattr(X, "iloc") else X[:, i]), 0
):
kurts.append(0)
else:
kurts.append(
scipy.stats.kurtosis(
X.iloc[:, i] if hasattr(X, "iloc") else X[:, i]
)
)
return kurts
def _calculate_sparse(self, X, y, logger, feat_type):
numerical = {
key: True if value.lower() == "numerical" else False
for key, value in feat_type.items()
}
kurts = []
X_new = X.tocsc()
for i in range(X_new.shape[1]):
if numerical[X.columns[i] if hasattr(X, "columns") else i]:
start = X_new.indptr[i]
stop = X_new.indptr[i + 1]
if np.isclose(np.var(X_new.data[start:stop]), 0):
kurts.append(0)
else:
kurts.append(scipy.stats.kurtosis(X_new.data[start:stop]))
return kurts
@metafeatures.define("KurtosisMin", dependency="Kurtosisses")
| Kurtosisses |
python | huggingface__transformers | src/transformers/models/smollm3/modeling_smollm3.py | {
"start": 22916,
"end": 23305
} | class ____(GenericForQuestionAnswering, SmolLM3PreTrainedModel):
base_model_prefix = "transformer" # For BC, where `transformer` was used instead of `model`
__all__ = [
"SmolLM3PreTrainedModel",
"SmolLM3Model",
"SmolLM3ForCausalLM",
"SmolLM3ForSequenceClassification",
"SmolLM3ForTokenClassification",
"SmolLM3ForQuestionAnswering",
]
| SmolLM3ForQuestionAnswering |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/completion/word_completer.py | {
"start": 320,
"end": 3435
} | class ____(Completer):
"""
Simple autocompletion on a list of words.
:param words: List of words or callable that returns a list of words.
:param ignore_case: If True, case-insensitive completion.
:param meta_dict: Optional dict mapping words to their meta-text. (This
should map strings to strings or formatted text.)
:param WORD: When True, use WORD characters.
:param sentence: When True, don't complete by comparing the word before the
cursor, but by comparing all the text before the cursor. In this case,
the list of words is just a list of strings, where each string can
contain spaces. (Can not be used together with the WORD option.)
:param match_middle: When True, match not only the start, but also in the
middle of the word.
:param pattern: Optional compiled regex for finding the word before
the cursor to complete. When given, use this regex pattern instead of
default one (see document._FIND_WORD_RE)
"""
def __init__(
self,
words: Sequence[str] | Callable[[], Sequence[str]],
ignore_case: bool = False,
display_dict: Mapping[str, AnyFormattedText] | None = None,
meta_dict: Mapping[str, AnyFormattedText] | None = None,
WORD: bool = False,
sentence: bool = False,
match_middle: bool = False,
pattern: Pattern[str] | None = None,
) -> None:
assert not (WORD and sentence)
self.words = words
self.ignore_case = ignore_case
self.display_dict = display_dict or {}
self.meta_dict = meta_dict or {}
self.WORD = WORD
self.sentence = sentence
self.match_middle = match_middle
self.pattern = pattern
def get_completions(
self, document: Document, complete_event: CompleteEvent
) -> Iterable[Completion]:
# Get list of words.
words = self.words
if callable(words):
words = words()
# Get word/text before cursor.
if self.sentence:
word_before_cursor = document.text_before_cursor
else:
word_before_cursor = document.get_word_before_cursor(
WORD=self.WORD, pattern=self.pattern
)
if self.ignore_case:
word_before_cursor = word_before_cursor.lower()
def word_matches(word: str) -> bool:
"""True when the word before the cursor matches."""
if self.ignore_case:
word = word.lower()
if self.match_middle:
return word_before_cursor in word
else:
return word.startswith(word_before_cursor)
for a in words:
if word_matches(a):
display = self.display_dict.get(a, a)
display_meta = self.meta_dict.get(a, "")
yield Completion(
text=a,
start_position=-len(word_before_cursor),
display=display,
display_meta=display_meta,
)
| WordCompleter |
python | joke2k__faker | tests/test_factory.py | {
"start": 258,
"end": 12282
} | class ____(unittest.TestCase):
def setUp(self):
self.generator = Generator()
def test_documentor(self):
from faker.cli import print_doc
output = io.StringIO()
print_doc(output=output)
print_doc("address", output=output)
print_doc("faker.providers.person.it_IT", output=output)
assert output.getvalue()
def test_command(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = io.StringIO()
command = Command(["faker", "address"])
command.execute()
assert sys.stdout.getvalue()
finally:
sys.stdout = orig_stdout
def test_command_custom_provider(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = io.StringIO()
command = Command(["faker", "foo", "-i", "tests.mymodule.en_US"])
command.execute()
assert sys.stdout.getvalue()
finally:
sys.stdout = orig_stdout
def test_cli_seed(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = io.StringIO()
base_args = ["faker", "address"]
target_args = ["--seed", "967"]
commands = [
Command(base_args + target_args),
Command(base_args + target_args),
]
cli_output = [None] * 2
for i in range(2):
commands[i].execute()
cli_output[i] = sys.stdout.getvalue()
cli_output[1] = cli_output[1][len(cli_output[0]) :]
assert cli_output[0][:10] == cli_output[1][:10]
finally:
sys.stdout = orig_stdout
def test_cli_seed_with_repeat(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = io.StringIO()
base_args = ["faker", "address", "-r", "3"]
target_args = ["--seed", "967"]
commands = [
Command(base_args + target_args),
Command(base_args + target_args),
]
cli_output = [None] * 2
for i in range(2):
commands[i].execute()
cli_output[i] = sys.stdout.getvalue()
cli_output[1] = cli_output[1][len(cli_output[0]) :]
assert cli_output[0] == cli_output[1]
finally:
sys.stdout = orig_stdout
def test_cli_verbosity(self):
from faker.cli import Command
orig_stdout = sys.stdout
try:
sys.stdout = io.StringIO()
base_args = ["faker", "address", "--seed", "769"]
target_args = ["-v"]
commands = [Command(base_args), Command(base_args + target_args)]
cli_output = [None] * 2
for i in range(2):
commands[i].execute()
cli_output[i] = sys.stdout.getvalue()
simple_output, verbose_output = cli_output
assert simple_output != verbose_output
finally:
sys.stdout = orig_stdout
def test_unknown_provider(self):
with pytest.raises(ModuleNotFoundError) as excinfo:
Factory.create(providers=["dummy_provider"])
assert str(excinfo.value) == "No module named 'dummy_provider'"
with pytest.raises(ModuleNotFoundError) as excinfo:
Factory.create(providers=["dummy_provider"], locale="it_IT")
assert str(excinfo.value) == "No module named 'dummy_provider'"
def test_unknown_locale(self):
with pytest.raises(AttributeError) as excinfo:
Factory.create(locale="77")
assert str(excinfo.value) == "Invalid configuration for faker locale `77`"
with pytest.raises(AttributeError) as excinfo:
Factory.create(locale="77_US")
assert str(excinfo.value) == "Invalid configuration for faker locale `77_US`"
def test_lang_unlocalized_provider(self):
for locale in (None, "", "en_GB", "it_IT"):
factory = Factory.create(providers=["faker.providers.file"], locale=locale)
assert len(factory.providers) == 1
assert factory.providers[0].__provider__ == "faker.providers.file"
assert factory.providers[0].__lang__ is None
def test_lang_localized_provider(self, with_default=True):
class DummyProviderModule:
localized = True
def __init__(self):
if with_default:
self.default_locale = "ar_EG"
@property
def __name__(self):
return self.__class__.__name__
class Provider:
def __init__(self, *args, **kwargs):
pass
# There's a cache based on the provider name, so when the provider changes behaviour we need
# a new name:
provider_path = f"test_lang_localized_provider_{with_default}"
with patch.multiple(
"faker.factory",
import_module=MagicMock(return_value=DummyProviderModule()),
list_module=MagicMock(return_value=("en_GB", "it_IT")),
DEFAULT_LOCALE="ko_KR",
):
test_cases = [
(None, False),
("", False),
("ar", False),
("es_CO", False),
("en", False),
("en_GB", True),
("ar_EG", with_default), # True if module defines a default locale
]
for locale, expected_used in test_cases:
factory = Factory.create(providers=[provider_path], locale=locale)
assert factory.providers[0].__provider__ == provider_path
from faker.config import DEFAULT_LOCALE
print(f"requested locale = {locale} , DEFAULT LOCALE {DEFAULT_LOCALE}")
expected_locale = locale if expected_used else ("ar_EG" if with_default else "ko_KR")
assert factory.providers[0].__lang__ == expected_locale
def test_lang_localized_provider_without_default(self):
self.test_lang_localized_provider(with_default=False)
def test_slugify(self):
slug = text.slugify("a'b/c")
assert slug == "abc"
slug = text.slugify("àeìöú")
assert slug == "aeiou"
slug = text.slugify("àeì.öú")
assert slug == "aeiou"
slug = text.slugify("àeì.öú", allow_dots=True)
assert slug == "aei.ou"
slug = text.slugify("àeì.öú", allow_unicode=True)
assert slug == "àeìöú"
slug = text.slugify("àeì.öú", allow_unicode=True, allow_dots=True)
assert slug == "àeì.öú"
@decorators.slugify
def fn(s):
return s
slug = fn("a'b/c")
assert slug == "abc"
@decorators.slugify_domain
def fn(s):
return s
slug = fn("a'b/.c")
assert slug == "ab.c"
@decorators.slugify_unicode
def fn(s):
return s
slug = fn("a'b/.cé")
assert slug == "abcé"
def test_binary(self):
from faker.providers.misc import Provider
provider = Provider(self.generator)
for _ in range(999):
length = random.randint(0, 2**10)
binary = provider.binary(length)
assert isinstance(binary, (bytes, bytearray))
assert len(binary) == length
for _ in range(999):
self.generator.seed(_)
binary1 = provider.binary(_)
self.generator.seed(_)
binary2 = provider.binary(_)
assert binary1 == binary2
def test_password(self):
from faker.providers.misc import Provider
provider = Provider(self.generator)
def in_string(char, _str):
return char in _str
for _ in range(999):
password = provider.password()
assert any(in_string(char, password) for char in "!@#$%^&*()_+")
assert any(in_string(char, password) for char in string.digits)
assert any(in_string(char, password) for char in string.ascii_uppercase)
assert any(in_string(char, password) for char in string.ascii_lowercase)
with pytest.raises(AssertionError):
provider.password(length=2)
def test_prefix_suffix_always_string(self):
# Locales known to contain `*_male` and `*_female`.
for locale in ("bg_BG", "dk_DK", "en", "ru_RU", "tr_TR"):
fake = Faker(locale=locale)
for x in range(20): # Probabilistic testing.
self.assertIsInstance(fake.prefix(), str)
self.assertIsInstance(fake.suffix(), str)
def test_random_pystr_characters(self):
from faker.providers.python import Provider
provider = Provider(self.generator)
characters = provider.pystr()
assert len(characters) == 20
characters = provider.pystr(max_chars=255)
assert len(characters) == 255
characters = provider.pystr(max_chars=0)
assert characters == ""
characters = provider.pystr(max_chars=-10)
assert characters == ""
characters = provider.pystr(min_chars=10, max_chars=255)
assert len(characters) >= 10
def test_random_pyfloat(self):
from faker.providers.python import Provider
provider = Provider(self.generator)
assert 0 <= abs(provider.pyfloat(left_digits=1)) < 10
assert 0 <= abs(provider.pyfloat(left_digits=0)) < 1
x = abs(provider.pyfloat(right_digits=0))
assert x - int(x) == 0
with pytest.raises(ValueError):
provider.pyfloat(left_digits=0, right_digits=0)
def test_pyfloat_in_range(self):
# tests for https://github.com/joke2k/faker/issues/994
fake = Faker()
for i in range(20):
for min_value, max_value in [
(0, 1),
(-1, 1),
(None, -5),
(-5, None),
(None, 5),
(5, None),
]:
fake.seed_instance(i)
result = fake.pyfloat(min_value=min_value, max_value=max_value)
if min_value is not None:
assert result >= min_value
if max_value is not None:
assert result <= max_value
def test_negative_pyfloat(self):
# tests for https://github.com/joke2k/faker/issues/813
fake = Faker()
fake.seed_instance(32167)
assert any(fake.pyfloat(left_digits=0, positive=False) < 0 for _ in range(100))
assert any(fake.pydecimal(left_digits=0, positive=False) < 0 for _ in range(100))
def test_arbitrary_digits_pydecimal(self):
# tests for https://github.com/joke2k/faker/issues/1462
fake = Faker()
assert any(
len(str(fake.pydecimal(left_digits=sys.float_info.dig + i))) > sys.float_info.dig for i in range(100)
)
assert any(len(str(fake.pydecimal())) > sys.float_info.dig for _ in range(100))
def test_pyfloat_empty_range_error(self):
# tests for https://github.com/joke2k/faker/issues/1048
fake = Faker()
fake.seed_instance(8038)
assert fake.pyfloat(max_value=9999) < 9999
def test_pyfloat_same_min_max(self):
# tests for https://github.com/joke2k/faker/issues/1048
fake = Faker()
with pytest.raises(ValueError):
assert fake.pyfloat(min_value=9999, max_value=9999)
def test_instance_seed_chain(self):
factory = Factory.create()
names = ["Real Name0", "Real Name1", "Real Name2", "Real Name0", "Real Name2"]
anonymized = [factory.seed_instance(name).name() for name in names]
assert anonymized[0] == anonymized[3]
assert anonymized[2] == anonymized[4]
if __name__ == "__main__":
unittest.main() # pragma: no cover
| FactoryTestCase |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 58421,
"end": 59170
} | class ____(PrefectFilterBaseModel):
"""Filter by `BlockSchema.capabilities`"""
all_: Optional[list[str]] = Field(
default=None,
examples=[["write-storage", "read-storage"]],
description=(
"A list of block capabilities. Block entities will be returned only if an"
" associated block schema has a superset of the defined capabilities."
),
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnElement[bool]] = []
if self.all_ is not None:
filters.append(db.BlockSchema.capabilities.has_all(_as_array(self.all_)))
return filters
| BlockSchemaFilterCapabilities |
python | pola-rs__polars | py-polars/src/polars/io/cloud/credential_provider/_providers.py | {
"start": 18212,
"end": 19380
} | class ____(CredentialProvider):
"""User-provided GCP token in storage_options."""
def __init__(self, token: str) -> None:
self.token = token
def __call__(self) -> CredentialProviderFunctionReturn:
return {"bearer_token": self.token}, None
def _get_credentials_from_provider_expiry_aware(
credential_provider: CredentialProviderFunction,
) -> dict[str, str] | None:
if (
isinstance(credential_provider, CredentialProviderAWS)
and not credential_provider._can_use_as_provider()
):
return None
creds, opt_expiry = credential_provider()
if (
opt_expiry is not None
and (expires_in := opt_expiry - int(datetime.now().timestamp())) < 7
):
from time import sleep
if verbose():
eprint(f"waiting for {expires_in} seconds for refreshed credentials")
sleep(1 + expires_in)
creds, _ = credential_provider()
# Loads the endpoint_url
if isinstance(credential_provider, CredentialProviderAWS) and (
v := credential_provider._storage_update_options()
):
creds = {**creds, **v}
return creds
| UserProvidedGCPToken |
python | qdrant__qdrant-client | qdrant_client/http/api_client.py | {
"start": 9649,
"end": 9800
} | class ____:
async def __call__(self, request: Request, call_next: SendAsync) -> Response:
return await call_next(request)
| BaseAsyncMiddleware |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/sqlparser.py | {
"start": 7688,
"end": 21022
} | class ____(LoggingMixin):
"""
Interface for openlineage-sql.
:param dialect: dialect specific to the database
:param default_schema: schema applied to each table with no schema parsed
"""
def __init__(self, dialect: str | None = None, default_schema: str | None = None) -> None:
super().__init__()
self.dialect = dialect
self.default_schema = default_schema
def parse(self, sql: list[str] | str) -> SqlMeta | None:
"""Parse a single or a list of SQL statements."""
self.log.debug(
"OpenLineage calling SQL parser with SQL %s dialect %s schema %s",
sql,
self.dialect,
self.default_schema,
)
return parse(sql=sql, dialect=self.dialect, default_schema=self.default_schema)
def parse_table_schemas(
self,
hook: BaseHook,
inputs: list[DbTableMeta],
outputs: list[DbTableMeta],
database_info: DatabaseInfo,
namespace: str = DEFAULT_NAMESPACE,
database: str | None = None,
sqlalchemy_engine: Engine | None = None,
) -> tuple[list[Dataset], ...]:
"""Parse schemas for input and output tables."""
database_kwargs: GetTableSchemasParams = {
"normalize_name": database_info.normalize_name_method,
"is_cross_db": database_info.is_information_schema_cross_db,
"information_schema_columns": database_info.information_schema_columns,
"information_schema_table": database_info.information_schema_table_name,
"is_uppercase_names": database_info.is_uppercase_names,
"database": database or database_info.database,
"use_flat_cross_db_query": database_info.use_flat_cross_db_query,
}
return get_table_schemas(
hook,
namespace,
self.default_schema,
database or database_info.database,
self.create_information_schema_query(
tables=inputs, sqlalchemy_engine=sqlalchemy_engine, **database_kwargs
)
if inputs
else None,
self.create_information_schema_query(
tables=outputs, sqlalchemy_engine=sqlalchemy_engine, **database_kwargs
)
if outputs
else None,
)
@staticmethod
def get_metadata_from_parser(
inputs: list[DbTableMeta],
outputs: list[DbTableMeta],
database_info: DatabaseInfo,
namespace: str = DEFAULT_NAMESPACE,
database: str | None = None,
) -> tuple[list[Dataset], ...]:
database = database if database else database_info.database
return [
from_table_meta(dataset, database, namespace, database_info.is_uppercase_names)
for dataset in inputs
], [
from_table_meta(dataset, database, namespace, database_info.is_uppercase_names)
for dataset in outputs
]
def attach_column_lineage(
self, datasets: list[Dataset], database: str | None, parse_result: SqlMeta
) -> None:
"""
Attaches column lineage facet to the list of datasets.
Note that currently each dataset has the same column lineage information set.
This would be a matter of change after OpenLineage SQL Parser improvements.
"""
if not len(parse_result.column_lineage):
return
for dataset in datasets:
dataset.facets = dataset.facets or {}
dataset.facets["columnLineage"] = column_lineage_dataset.ColumnLineageDatasetFacet(
fields={
column_lineage.descendant.name: column_lineage_dataset.Fields(
inputFields=[
column_lineage_dataset.InputField(
namespace=dataset.namespace,
name=".".join(
filter(
None,
(
column_meta.origin.database or database,
column_meta.origin.schema or self.default_schema,
column_meta.origin.name,
),
)
)
if column_meta.origin
else "",
field=column_meta.name,
)
for column_meta in column_lineage.lineage
],
transformationType="",
transformationDescription="",
)
for column_lineage in parse_result.column_lineage
}
)
def generate_openlineage_metadata_from_sql(
self,
sql: list[str] | str,
hook: BaseHook,
database_info: DatabaseInfo,
database: str | None = None,
sqlalchemy_engine: Engine | None = None,
use_connection: bool = True,
) -> OperatorLineage:
"""
Parse SQL statement(s) and generate OpenLineage metadata.
Generated OpenLineage metadata contains:
* input tables with schemas parsed
* output tables with schemas parsed
* run facets
* job facets.
:param sql: a SQL statement or list of SQL statement to be parsed
:param hook: Airflow Hook used to connect to the database
:param database_info: database specific information
:param database: when passed it takes precedence over parsed database name
:param sqlalchemy_engine: when passed, engine's dialect is used to compile SQL queries
:param use_connection: if call to db should be performed to enrich datasets (e.g., with schema)
"""
job_facets: dict[str, JobFacet] = {"sql": sql_job.SQLJobFacet(query=self.normalize_sql(sql))}
parse_result = self.parse(sql=self.split_sql_string(sql))
if not parse_result:
return OperatorLineage(job_facets=job_facets)
run_facets: dict[str, RunFacet] = {}
if parse_result.errors:
run_facets["extractionError"] = extraction_error_run.ExtractionErrorRunFacet(
totalTasks=len(sql) if isinstance(sql, list) else 1,
failedTasks=len(parse_result.errors),
errors=[
extraction_error_run.Error(
errorMessage=error.message,
stackTrace=None,
task=error.origin_statement,
taskNumber=error.index,
)
for error in parse_result.errors
],
)
namespace = self.create_namespace(database_info=database_info)
inputs: list[Dataset] = []
outputs: list[Dataset] = []
if use_connection:
try:
inputs, outputs = self.parse_table_schemas(
hook=hook,
inputs=parse_result.in_tables,
outputs=parse_result.out_tables,
namespace=namespace,
database=database,
database_info=database_info,
sqlalchemy_engine=sqlalchemy_engine,
)
except Exception as e:
self.log.warning(
"OpenLineage method failed to enrich datasets using db metadata. Exception: `%s`",
e,
)
self.log.debug("OpenLineage failure details:", exc_info=True)
# If call to db failed or was not performed, use datasets from sql parsing alone
if not inputs and not outputs:
inputs, outputs = self.get_metadata_from_parser(
inputs=parse_result.in_tables,
outputs=parse_result.out_tables,
namespace=namespace,
database=database,
database_info=database_info,
)
self.attach_column_lineage(outputs, database or database_info.database, parse_result)
return OperatorLineage(
inputs=inputs,
outputs=outputs,
run_facets=run_facets,
job_facets=job_facets,
)
@staticmethod
def create_namespace(database_info: DatabaseInfo) -> str:
return (
f"{database_info.scheme}://{database_info.authority}"
if database_info.authority
else database_info.scheme
)
@classmethod
def normalize_sql(cls, sql: list[str] | str) -> str:
"""Make sure to return a semicolon-separated SQL statement."""
return ";\n".join(stmt.rstrip(" ;\r\n") for stmt in cls.split_sql_string(sql))
@classmethod
def split_sql_string(cls, sql: list[str] | str) -> list[str]:
"""
Split SQL string into list of statements.
Tries to use `DbApiHook.split_sql_string` if available.
Otherwise, uses the same logic.
"""
try:
from airflow.providers.common.sql.hooks.sql import DbApiHook
split_statement = DbApiHook.split_sql_string
except (ImportError, AttributeError):
# No common.sql Airflow provider available or version is too old.
def split_statement(sql: str, strip_semicolon: bool = False) -> list[str]:
splits = sqlparse.split(
sql=sqlparse.format(sql, strip_comments=True),
strip_semicolon=strip_semicolon,
)
return [s for s in splits if s]
if isinstance(sql, str):
return split_statement(sql)
return [obj for stmt in sql for obj in cls.split_sql_string(stmt) if obj != ""]
def create_information_schema_query(
self,
tables: list[DbTableMeta],
normalize_name: Callable[[str], str],
is_cross_db: bool,
information_schema_columns: list[str],
information_schema_table: str,
is_uppercase_names: bool,
use_flat_cross_db_query: bool,
database: str | None = None,
sqlalchemy_engine: Engine | None = None,
) -> str:
"""Create SELECT statement to query information schema table."""
tables_hierarchy = self._get_tables_hierarchy(
tables,
normalize_name=normalize_name,
database=database,
is_cross_db=is_cross_db,
)
return create_information_schema_query(
columns=information_schema_columns,
information_schema_table_name=information_schema_table,
tables_hierarchy=tables_hierarchy,
use_flat_cross_db_query=use_flat_cross_db_query,
uppercase_names=is_uppercase_names,
sqlalchemy_engine=sqlalchemy_engine,
)
@staticmethod
def _get_tables_hierarchy(
tables: list[DbTableMeta],
normalize_name: Callable[[str], str],
database: str | None = None,
is_cross_db: bool = False,
) -> TablesHierarchy:
"""
Create a hierarchy of database -> schema -> table name.
This helps to create simpler information schema query grouped by
database and schema.
:param tables: List of tables.
:param normalize_name: A method to normalize all names.
:param is_cross_db: If false, set top (database) level to None
when creating hierarchy.
"""
hierarchy: TablesHierarchy = {}
for table in tables:
if is_cross_db:
db = table.database or database
else:
db = None
schemas = hierarchy.setdefault(normalize_name(db) if db else db, {})
tables = schemas.setdefault(normalize_name(table.schema) if table.schema else None, [])
tables.append(table.name)
return hierarchy
def get_openlineage_facets_with_sql(
hook: DbApiHook, sql: str | list[str], conn_id: str, database: str | None
) -> OperatorLineage | None:
connection = hook.get_connection(conn_id)
try:
database_info = hook.get_openlineage_database_info(connection)
except AttributeError:
database_info = None
if database_info is None:
log.debug("%s has no database info provided", hook)
return None
try:
sql_parser = SQLParser(
dialect=hook.get_openlineage_database_dialect(connection),
default_schema=hook.get_openlineage_default_schema(),
)
except AttributeError:
log.debug("%s failed to get database dialect", hook)
return None
try:
sqlalchemy_engine = hook.get_sqlalchemy_engine()
except Exception as e:
log.debug("Failed to get sql alchemy engine: %s", e)
sqlalchemy_engine = None
operator_lineage = sql_parser.generate_openlineage_metadata_from_sql(
sql=sql,
hook=hook,
database_info=database_info,
database=database,
sqlalchemy_engine=sqlalchemy_engine,
use_connection=should_use_external_connection(hook),
)
return operator_lineage
| SQLParser |
python | huggingface__transformers | tests/models/depth_pro/test_modeling_depth_pro.py | {
"start": 12435,
"end": 14785
} | class ____(unittest.TestCase):
def test_inference_depth_estimation(self):
model_path = "apple/DepthPro-hf"
image_processor = DepthProImageProcessor.from_pretrained(model_path)
model = DepthProForDepthEstimation.from_pretrained(model_path).to(torch_device)
config = model.config
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the predicted depth
n_fusion_blocks = len(config.intermediate_hook_ids) + len(config.scaled_images_ratios)
out_size = config.image_model_config.image_size // config.image_model_config.patch_size
expected_depth_size = 2 ** (n_fusion_blocks + 1) * out_size
expected_shape = torch.Size((1, expected_depth_size, expected_depth_size))
self.assertEqual(outputs.predicted_depth.shape, expected_shape)
expected_slice = torch.tensor(
[[1.0582, 1.1225, 1.1335], [1.1154, 1.1398, 1.1486], [1.1434, 1.1500, 1.1643]]
).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, atol=1e-4, rtol=1e-4)
# verify the predicted fov
expected_shape = torch.Size((1,))
self.assertEqual(outputs.field_of_view.shape, expected_shape)
expected_slice = torch.tensor([47.2459]).to(torch_device)
torch.testing.assert_close(outputs.field_of_view, expected_slice, atol=1e-4, rtol=1e-4)
def test_post_processing_depth_estimation(self):
model_path = "apple/DepthPro-hf"
image_processor = DepthProImageProcessor.from_pretrained(model_path)
model = DepthProForDepthEstimation.from_pretrained(model_path)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt")
# forward pass
with torch.no_grad():
outputs = model(**inputs)
outputs = image_processor.post_process_depth_estimation(
outputs,
target_sizes=[[image.height, image.width]],
)
predicted_depth = outputs[0]["predicted_depth"]
expected_shape = torch.Size((image.height, image.width))
self.assertTrue(predicted_depth.shape == expected_shape)
| DepthProModelIntegrationTest |
python | qdrant__qdrant-client | qdrant_client/local/distances.py | {
"start": 1994,
"end": 10161
} | class ____:
def __init__(self, context_pairs: list[ContextPair]):
self.context_pairs = context_pairs
DenseQueryVector = Union[
DiscoveryQuery,
ContextQuery,
RecoQuery,
]
def distance_to_order(distance: models.Distance) -> DistanceOrder:
"""
Convert distance to order
Args:
distance: distance to convert
Returns:
order
"""
if distance == models.Distance.EUCLID:
return DistanceOrder.SMALLER_IS_BETTER
elif distance == models.Distance.MANHATTAN:
return DistanceOrder.SMALLER_IS_BETTER
return DistanceOrder.BIGGER_IS_BETTER
def cosine_similarity(query: types.NumpyArray, vectors: types.NumpyArray) -> types.NumpyArray:
"""
Calculate cosine distance between query and vectors
Args:
query: query vector
vectors: vectors to calculate distance with
Returns:
distances
"""
vectors_norm = np.linalg.norm(vectors, axis=-1)[:, np.newaxis]
vectors /= np.where(vectors_norm != 0.0, vectors_norm, EPSILON)
if len(query.shape) == 1:
query_norm = np.linalg.norm(query)
query /= np.where(query_norm != 0.0, query_norm, EPSILON)
return np.dot(vectors, query)
query_norm = np.linalg.norm(query, axis=-1)[:, np.newaxis]
query /= np.where(query_norm != 0.0, query_norm, EPSILON)
return np.dot(query, vectors.T)
def dot_product(query: types.NumpyArray, vectors: types.NumpyArray) -> types.NumpyArray:
"""
Calculate dot product between query and vectors
Args:
query: query vector.
vectors: vectors to calculate distance with
Returns:
distances
"""
if len(query.shape) == 1:
return np.dot(vectors, query)
else:
return np.dot(query, vectors.T)
def euclidean_distance(query: types.NumpyArray, vectors: types.NumpyArray) -> types.NumpyArray:
"""
Calculate euclidean distance between query and vectors
Args:
query: query vector.
vectors: vectors to calculate distance with
Returns:
distances
"""
if len(query.shape) == 1:
return np.linalg.norm(vectors - query, axis=-1)
else:
return np.linalg.norm(vectors - query[:, np.newaxis], axis=-1)
def manhattan_distance(query: types.NumpyArray, vectors: types.NumpyArray) -> types.NumpyArray:
"""
Calculate manhattan distance between query and vectors
Args:
query: query vector.
vectors: vectors to calculate distance with
Returns:
distances
"""
if len(query.shape) == 1:
return np.sum(np.abs(vectors - query), axis=-1)
else:
return np.sum(np.abs(vectors - query[:, np.newaxis]), axis=-1)
def calculate_distance(
query: types.NumpyArray, vectors: types.NumpyArray, distance_type: models.Distance
) -> types.NumpyArray:
assert not np.isnan(query).any(), "Query vector must not contain NaN"
if distance_type == models.Distance.COSINE:
return cosine_similarity(query, vectors)
elif distance_type == models.Distance.DOT:
return dot_product(query, vectors)
elif distance_type == models.Distance.EUCLID:
return euclidean_distance(query, vectors)
elif distance_type == models.Distance.MANHATTAN:
return manhattan_distance(query, vectors)
else:
raise ValueError(f"Unknown distance type {distance_type}")
def calculate_distance_core(
query: types.NumpyArray, vectors: types.NumpyArray, distance_type: models.Distance
) -> types.NumpyArray:
"""
Calculate same internal distances as in core, rather than the final displayed distance
"""
assert not np.isnan(query).any(), "Query vector must not contain NaN"
if distance_type == models.Distance.EUCLID:
return -np.square(vectors - query, dtype=np.float32).sum(axis=1, dtype=np.float32)
if distance_type == models.Distance.MANHATTAN:
return -np.abs(vectors - query, dtype=np.float32).sum(axis=1, dtype=np.float32)
else:
return calculate_distance(query, vectors, distance_type)
def fast_sigmoid(x: np.float32) -> np.float32:
if np.isnan(x) or np.isinf(x):
# To avoid divisions on NaNs or inf, which gets: RuntimeWarning: invalid value encountered in scalar divide
return x
return x / np.add(1.0, abs(x))
def scaled_fast_sigmoid(x: np.float32) -> np.float32:
return 0.5 * (np.add(fast_sigmoid(x), 1.0))
def calculate_recommend_best_scores(
query: RecoQuery, vectors: types.NumpyArray, distance_type: models.Distance
) -> types.NumpyArray:
def get_best_scores(examples: list[types.NumpyArray]) -> types.NumpyArray:
vector_count = vectors.shape[0]
# Get scores to all examples
scores: list[types.NumpyArray] = []
for example in examples:
score = calculate_distance_core(example, vectors, distance_type)
scores.append(score)
# Keep only max for each vector
if len(scores) == 0:
scores.append(np.full(vector_count, -np.inf))
best_scores = np.array(scores, dtype=np.float32).max(axis=0)
return best_scores
pos = get_best_scores(query.positive)
neg = get_best_scores(query.negative)
# Choose from best positive or best negative,
# in in both cases we apply sigmoid and then negate depending on the order
return np.where(
pos > neg,
np.fromiter((scaled_fast_sigmoid(xi) for xi in pos), pos.dtype),
np.fromiter((-scaled_fast_sigmoid(xi) for xi in neg), neg.dtype),
)
def calculate_recommend_sum_scores(
query: RecoQuery, vectors: types.NumpyArray, distance_type: models.Distance
) -> types.NumpyArray:
def get_sum_scores(examples: list[types.NumpyArray]) -> types.NumpyArray:
vector_count = vectors.shape[0]
scores: list[types.NumpyArray] = []
for example in examples:
score = calculate_distance_core(example, vectors, distance_type)
scores.append(score)
if len(scores) == 0:
scores.append(np.zeros(vector_count))
sum_scores = np.array(scores, dtype=np.float32).sum(axis=0)
return sum_scores
pos = get_sum_scores(query.positive)
neg = get_sum_scores(query.negative)
return pos - neg
def calculate_discovery_ranks(
context: list[ContextPair],
vectors: types.NumpyArray,
distance_type: models.Distance,
) -> types.NumpyArray:
overall_ranks = np.zeros(vectors.shape[0], dtype=np.int32)
for pair in context:
# Get distances to positive and negative vectors
pos = calculate_distance_core(pair.positive, vectors, distance_type)
neg = calculate_distance_core(pair.negative, vectors, distance_type)
pair_ranks = np.array(
[
1 if is_bigger else 0 if is_equal else -1
for is_bigger, is_equal in zip(pos > neg, pos == neg)
]
)
overall_ranks += pair_ranks
return overall_ranks
def calculate_discovery_scores(
query: DiscoveryQuery, vectors: types.NumpyArray, distance_type: models.Distance
) -> types.NumpyArray:
ranks = calculate_discovery_ranks(query.context, vectors, distance_type)
# Get distances to target
distances_to_target = calculate_distance_core(query.target, vectors, distance_type)
sigmoided_distances = np.fromiter(
(scaled_fast_sigmoid(xi) for xi in distances_to_target), np.float32
)
return ranks + sigmoided_distances
def calculate_context_scores(
query: ContextQuery, vectors: types.NumpyArray, distance_type: models.Distance
) -> types.NumpyArray:
overall_scores = np.zeros(vectors.shape[0], dtype=np.float32)
for pair in query.context_pairs:
# Get distances to positive and negative vectors
pos = calculate_distance_core(pair.positive, vectors, distance_type)
neg = calculate_distance_core(pair.negative, vectors, distance_type)
difference = pos - neg - EPSILON
pair_scores = np.fromiter(
(fast_sigmoid(xi) for xi in np.minimum(difference, 0.0)), np.float32
)
overall_scores += pair_scores
return overall_scores
| ContextQuery |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/property2.py | {
"start": 120,
"end": 610
} | class ____(abc.ABC):
@abc.abstractproperty
def x(self) -> int:
raise NotImplementedError
@x.setter
def x(self, value: int):
raise NotImplementedError
@abc.abstractproperty
def y(self) -> float:
raise NotImplementedError
a = Foo()
requires_int(a.x)
a.x = 3
# This should generate an error because a.y is not an int
requires_int(a.y)
# This should generate an error because the assigned type
# isn't compatible with the setter.
a.x = 4.5
| Foo |
python | pypa__setuptools | setuptools/_distutils/errors.py | {
"start": 689,
"end": 888
} | class ____(DistutilsError):
"""Unable to load an expected module, or to find an expected class
within some module (in particular, command modules and classes)."""
pass
| DistutilsModuleError |
python | Lightning-AI__lightning | tests/tests_pytorch/checkpointing/test_model_checkpoint.py | {
"start": 42141,
"end": 42385
} | class ____(Callback):
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx):
if not trainer.sanity_checking and batch_idx == 1:
raise RuntimeError("Trouble!")
| TroubledCallbackOnValidationBatchEnd |
python | django-extensions__django-extensions | django_extensions/management/commands/dumpscript.py | {
"start": 15519,
"end": 20922
} | class ____(Code):
"""Produces a complete python script that can recreate data for the given apps."""
def __init__(self, models, context=None, stdout=None, stderr=None, options=None):
super().__init__(stdout=stdout, stderr=stderr)
self.imports = {}
self.models = models
if context is None:
context = {}
self.context = context
self.context["__available_models"] = set(models)
self.context["__extra_imports"] = {}
self.options = options
def _queue_models(self, models, context):
"""
Work an an appropriate ordering for the models.
This isn't essential, but makes the script look nicer because
more instances can be defined on their first try.
"""
model_queue = []
number_remaining_models = len(models)
# Max number of cycles allowed before we call it an infinite loop.
MAX_CYCLES = number_remaining_models
allowed_cycles = MAX_CYCLES
while number_remaining_models > 0:
previous_number_remaining_models = number_remaining_models
model = models.pop(0)
# If the model is ready to be processed, add it to the list
if check_dependencies(model, model_queue, context["__available_models"]):
model_class = ModelCode(
model=model,
context=context,
stdout=self.stdout,
stderr=self.stderr,
options=self.options,
)
model_queue.append(model_class)
# Otherwise put the model back at the end of the list
else:
models.append(model)
# Check for infinite loops.
# This means there is a cyclic foreign key structure
# That cannot be resolved by re-ordering
number_remaining_models = len(models)
if number_remaining_models == previous_number_remaining_models:
allowed_cycles -= 1
if allowed_cycles <= 0:
# Add remaining models, but do not remove them from the model list
missing_models = [
ModelCode(
model=m,
context=context,
stdout=self.stdout,
stderr=self.stderr,
options=self.options,
)
for m in models
]
model_queue += missing_models
# Replace the models with the model class objects
# (sure, this is a little bit of hackery)
models[:] = missing_models
break
else:
allowed_cycles = MAX_CYCLES
return model_queue
def get_lines(self):
"""
Return a list of lists or strings, representing the code body.
Each list is a block, each string is a statement.
"""
code = [self.FILE_HEADER.strip()]
# Queue and process the required models
for model_class in self._queue_models(self.models, context=self.context):
msg = "Processing model: %s.%s\n" % (
model_class.model.__module__,
model_class.model.__name__,
)
self.stderr.write(msg)
code.append(" # " + msg)
code.append(model_class.import_lines)
code.append("")
code.append(model_class.lines)
# Process left over foreign keys from cyclic models
for model in self.models:
msg = "Re-processing model: %s.%s\n" % (
model.model.__module__,
model.model.__name__,
)
self.stderr.write(msg)
code.append(" # " + msg)
for instance in model.instances:
if instance.waiting_list or instance.many_to_many_waiting_list:
code.append(instance.get_lines(force=True))
code.insert(1, " # Initial Imports")
code.insert(2, "")
for key, value in self.context["__extra_imports"].items():
code.insert(2, " from %s import %s" % (value, key))
return code
lines = property(get_lines)
# A user-friendly file header
FILE_HEADER = """
#!/usr/bin/env python
# This file has been automatically generated.
# Instead of changing it, create a file called import_helper.py
# and put there a class called ImportHelper(object) in it.
#
# This class will be specially cast so that instead of extending object,
# it will actually extend the class BasicImportHelper()
#
# That means you just have to overload the methods you want to
# change, leaving the other ones intact.
#
# Something that you might want to do is use transactions, for example.
#
# Also, don't forget to add the necessary Django imports.
#
# This file was generated with the following command:
# %s
#
# to restore it, run
# manage.py runscript module_name.this_script_name
#
# example: if manage.py is at ./manage.py
# and the script is at ./some_folder/some_script.py
# you must make sure ./some_folder/__init__.py exists
# and run ./manage.py runscript some_folder.some_script
import os, sys
from django.db import transaction
| Script |
python | pytorch__pytorch | test/dynamo/test_generator.py | {
"start": 467,
"end": 1374
} | class ____(torch._dynamo.test_case.TestCaseWithNestedGraphBreaks):
def setUp(self):
super().setUp()
self._old = torch._dynamo.config.enable_faithful_generator_behavior
torch._dynamo.config.enable_faithful_generator_behavior = True
self._unittest_old = torch._dynamo.config.enable_trace_unittest
torch._dynamo.config.enable_trace_unittest = True
def tearDown(self):
super().tearDown()
torch._dynamo.config.enable_faithful_generator_behavior = self._old
torch._dynamo.config.enable_trace_unittest = self._unittest_old
def _compile_check(self, fn, args=None, fullgraph=True):
eager = EagerAndRecordGraphs()
if args is None:
args = (torch.randn(2),)
r = torch.compile(fn, backend=eager, fullgraph=fullgraph)(*args)
self.assertGreater(len(eager.graphs), 0)
return r
| GeneratorTestsBase |
python | google__pytype | pytype/tests/test_cmp1.py | {
"start": 1491,
"end": 2797
} | class ____(test_base.BaseTest):
"""Test for "x not in y". Also test overloading of this operator."""
def test_concrete(self):
ty, errors = self.InferWithErrors("""
def f(x, y):
return x not in y # unsupported-operands[e]
f(1, [1])
f(1, [2])
f("x", "x")
f("y", "x")
f("y", (1,))
f("y", object())
""")
self.assertTypesMatchPytd(ty, "def f(x, y) -> bool: ...")
self.assertErrorRegexes(errors, {"e": r"'in'.*object"})
# "not in" maps to the inverse of __contains__
def test_overloaded(self):
ty = self.Infer("""
class Foo:
def __contains__(self, x):
return 3j
def f():
return Foo() not in []
def g():
# The result of __contains__ is coerced to a bool.
return 3 not in Foo()
""")
self.assertTypesMatchPytd(
ty,
"""
class Foo:
def __contains__(self, x) -> complex: ...
def f() -> bool: ...
def g() -> bool: ...
""",
)
def test_none(self):
errors = self.CheckWithErrors("""
x = None
if "" not in x: # unsupported-operands[e1]
x[""] = 42 # unsupported-operands[e2]
""")
self.assertErrorRegexes(
errors, {"e1": r"'in'.*None", "e2": r"item assignment.*None"}
)
| NotInTest |
python | kamyu104__LeetCode-Solutions | Python/verbal-arithmetic-puzzle.py | {
"start": 64,
"end": 1711
} | class ____(object):
def isSolvable(self, words, result):
"""
:type words: List[str]
:type result: str
:rtype: bool
"""
def backtracking(words, result, i, j, carry, lookup, used):
if j == len(result):
return carry == 0
if i != len(words):
if j >= len(words[i]) or words[i][j] in lookup:
return backtracking(words, result, i+1, j, carry, lookup, used)
for val in xrange(10):
if val in used or (val == 0 and j == len(words[i])-1):
continue
lookup[words[i][j]] = val
used.add(val)
if backtracking(words, result, i+1, j, carry, lookup, used):
return True
used.remove(val)
del lookup[words[i][j]]
return False
carry, val = divmod(carry + sum(lookup[w[j]] for w in words if j < len(w)), 10)
if result[j] in lookup:
return val == lookup[result[j]] and \
backtracking(words, result, 0, j+1, carry, lookup, used)
if val in used or (val == 0 and j == len(result)-1):
return False
lookup[result[j]] = val
used.add(val)
if backtracking(words, result, 0, j+1, carry, lookup, used):
return True
used.remove(val)
del lookup[result[j]]
return False
return backtracking([w[::-1] for w in words], result[::-1], 0, 0, 0, {}, set())
| Solution |
python | pytorch__pytorch | .ci/lumen_cli/cli/lib/core/vllm/vllm_test.py | {
"start": 1729,
"end": 9941
} | class ____(BaseRunner):
def __init__(self, args: Any):
self.work_directory = "vllm"
self.test_plan = ""
self.test_type = TestInpuType.UNKNOWN
self.shard_id = args.shard_id
self.num_shards = args.num_shards
if args.test_plan:
self.test_plan = args.test_plan
self.test_type = TestInpuType.TEST_PLAN
# Matches the structeur in the artifacts.zip from torcb build
self.TORCH_WHL_PATH_REGEX = "torch*.whl"
self.TORCH_WHL_EXTRA = "opt-einsum"
self.TORCH_ADDITIONAL_WHLS_REGEX = [
"vision/torchvision*.whl",
"audio/torchaudio*.whl",
]
# Match the structure of the artifacts.zip from vllm external build
self.VLLM_TEST_WHLS_REGEX = [
"xformers/*.whl",
"vllm/vllm*.whl",
]
def prepare(self):
"""
prepare test environment for vllm. This includes clone vllm repo, install all wheels, test dependencies and set env
"""
params = VllmTestParameters()
logger.info("Display VllmTestParameters %s", params)
self._set_envs(params)
clone_vllm(dst=self.work_directory)
self.cp_torch_cleaning_script(params)
with working_directory(self.work_directory):
remove_dir(Path("vllm"))
self._install_wheels(params)
self._install_dependencies()
# verify the torches are not overridden by test dependencies
check_versions()
def run(self):
"""
main function to run vllm test
"""
self.prepare()
try:
with working_directory(self.work_directory):
if self.test_type == TestInpuType.TEST_PLAN:
if self.num_shards > 1:
run_test_plan(
self.test_plan,
"vllm",
sample_vllm_test_library(),
self.shard_id,
self.num_shards,
)
else:
run_test_plan(
self.test_plan, "vllm", sample_vllm_test_library()
)
else:
raise ValueError(f"Unknown test type {self.test_type}")
finally:
# double check the torches are not overridden by other packages
check_versions()
def cp_torch_cleaning_script(self, params: VllmTestParameters):
script = get_path(params.cleaning_script, resolve=True)
vllm_script = Path(f"./{self.work_directory}/use_existing_torch.py")
copy(script, vllm_script)
def _install_wheels(self, params: VllmTestParameters):
logger.info("Running vllm test with inputs: %s", params)
if not pkg_exists("torch"):
# install torch from local whls if it's not installed yet.
torch_p = f"{str(params.torch_whls_path)}/{self.TORCH_WHL_PATH_REGEX}"
pip_install_first_match(torch_p, self.TORCH_WHL_EXTRA)
torch_whls_path = [
f"{str(params.torch_whls_path)}/{whl_path}"
for whl_path in self.TORCH_ADDITIONAL_WHLS_REGEX
]
for torch_whl in torch_whls_path:
pip_install_first_match(torch_whl)
logger.info("Done. Installed torch and other torch-related wheels ")
logger.info("Installing vllm wheels")
vllm_whls_path = [
f"{str(params.vllm_whls_path)}/{whl_path}"
for whl_path in self.VLLM_TEST_WHLS_REGEX
]
for vllm_whl in vllm_whls_path:
pip_install_first_match(vllm_whl)
logger.info("Done. Installed vllm wheels")
def _install_test_dependencies(self):
"""
This method replaces torch dependencies with local torch wheel info in
requirements/test.in file from vllm repo. then generates the test.txt
in runtime
"""
logger.info("generate test.txt from requirements/test.in with local torch whls")
preprocess_test_in()
copy("requirements/test.txt", "snapshot_constraint.txt")
run_command(
f"{sys.executable} -m uv pip compile requirements/test.in "
"-o test.txt "
"--index-strategy unsafe-best-match "
"--constraint snapshot_constraint.txt "
"--torch-backend cu128"
)
pip_install_packages(requirements="test.txt", prefer_uv=True)
logger.info("Done. installed requirements for test dependencies")
def _install_dependencies(self):
pip_install_packages(packages=["-e", "tests/vllm_test_utils"], prefer_uv=True)
pip_install_packages(packages=["hf_transfer"], prefer_uv=True)
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
# using script from vllm repo to remove all torch packages from requirements txt
run_python("use_existing_torch.py")
# install common packages
for requirements in ["requirements/common.txt", "requirements/build.txt"]:
pip_install_packages(
requirements=requirements,
prefer_uv=True,
)
# install test packages
self._install_test_dependencies()
def _set_envs(self, inputs: VllmTestParameters):
os.environ["TORCH_CUDA_ARCH_LIST"] = inputs.torch_cuda_arch_list
if not validate_cuda(get_env("TORCH_CUDA_ARCH_LIST")):
logger.warning(
"Missing supported TORCH_CUDA_ARCH_LIST. "
"Currently support TORCH_CUDA_ARCH_LIST env var "
"with supported arch [8.0, 8.9, 9.0]"
)
os.environ["HF_TOKEN"] = os.getenv("VLLM_TEST_HUGGING_FACE_TOKEN", "")
if not get_env("HF_TOKEN"):
raise ValueError(
"missing required HF_TOKEN, please set VLLM_TEST_HUGGING_FACE_TOKEN env var"
)
if not get_env("TORCH_CUDA_ARCH_LIST"):
raise ValueError(
"missing required TORCH_CUDA_ARCH_LIST, please set TORCH_CUDA_ARCH_LIST env var"
)
def preprocess_test_in(
target_file: str = "requirements/test.in", additional_packages: Iterable[str] = ()
):
"""
This modifies the target_file file in place in vllm work directory.
It removes torch and unwanted packages in target_file and replace with local torch whls
package with format "$WHEEL_PACKAGE_NAME @ file://<LOCAL_PATH>"
"""
additional_package_to_move = list(additional_packages or ())
pkgs_to_remove = [
"torch",
"torchvision",
"torchaudio",
"xformers",
"mamba_ssm",
] + additional_package_to_move
# Read current requirements
target_path = Path(target_file)
lines = target_path.read_text().splitlines()
pkgs_to_add = []
# Remove lines starting with the package names (==, @, >=) — case-insensitive
pattern = re.compile(rf"^({'|'.join(pkgs_to_remove)})\s*(==|@|>=)", re.IGNORECASE)
kept_lines = [line for line in lines if not pattern.match(line)]
# Get local installed torch/vision/audio from pip freeze
# This is hacky, but it works
pip_freeze = subprocess.check_output(["pip", "freeze"], text=True)
header_lines = [
line
for line in pip_freeze.splitlines()
if re.match(
r"^(torch|torchvision|torchaudio)\s*@\s*file://", line, re.IGNORECASE
)
]
# Write back: header_lines + blank + kept_lines
out_lines = header_lines + [""] + kept_lines
if pkgs_to_add:
out_lines += [""] + pkgs_to_add
out = "\n".join(out_lines) + "\n"
target_path.write_text(out)
logger.info("[INFO] Updated %s", target_file)
def validate_cuda(value: str) -> bool:
VALID_VALUES = {"8.0", "8.9", "9.0"}
return all(v in VALID_VALUES for v in value.split())
def check_versions():
"""
check installed packages version
"""
logger.info("Double check installed packages")
patterns = ["torch", "xformers", "torchvision", "torchaudio", "vllm"]
for pkg in patterns:
pkg_exists(pkg)
logger.info("Done. checked installed packages")
| VllmTestRunner |
python | ray-project__ray | python/ray/serve/tests/unit/test_cli.py | {
"start": 914,
"end": 1478
} | class ____:
def __init__(self):
self._deployed_config: Optional[Dict] = None
@property
def deployed_config(self) -> Optional[Dict]:
return self._deployed_config
def deploy_applications(self, config: Dict):
self._deployed_config = config
@pytest.fixture
def fake_serve_client() -> FakeServeSubmissionClient:
fake_client = FakeServeSubmissionClient()
with patch(
"ray.serve.scripts.ServeSubmissionClient",
new=Mock(return_value=fake_client),
):
yield fake_client
| FakeServeSubmissionClient |
python | jina-ai__jina | tests/integration/hub_usage/hub-mwu/mwu_encoder.py | {
"start": 62,
"end": 292
} | class ____(Executor):
def __init__(self, greetings: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self._greetings = greetings
@requests
def encode(self, **kwargs) -> Any:
pass
| MWUEncoder |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 7677,
"end": 8079
} | class ____(PrefectBaseModel, OperatorMixin):
"""Filter by `FlowRun.parent_task_run_id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of flow run parent_task_run_ids to include"
)
is_null_: Optional[bool] = Field(
default=None,
description="If true, only include flow runs without parent_task_run_id",
)
| FlowRunFilterParentTaskRunId |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_config_migrations.py | {
"start": 5424,
"end": 8701
} | class ____:
OLD_TEST1_CONFIG_PATH = _config_path(f"{_EXCLUDE_DELETE_CONFIGS_PATH}/test_old_config.json")
NEW_TEST1_CONFIG_PATH = _config_path(f"{_EXCLUDE_DELETE_CONFIGS_PATH}/test_new_config.json")
OLD_TEST2_CONFIG_PATH = _config_path(f"{_INCLUDE_DELETE_CONFIGS_PATH}/test_old_config.json")
NEW_TEST2_CONFIG_PATH = _config_path(f"{_INCLUDE_DELETE_CONFIGS_PATH}/test_new_config.json")
UPGRADED_TEST_CONFIG_PATH = _config_path("test_migrations/account_id_to_array/test_upgraded_config.json")
filter_properties = ["ad_statuses", "adset_statuses", "campaign_statuses"]
def revert_migration(self, config_path: str) -> None:
with open(config_path, "r") as test_config:
config = json.load(test_config)
for filter in self.filter_properties:
config.pop(filter)
with open(config_path, "w") as updated_config:
config = json.dumps(config)
updated_config.write(config)
@pytest.mark.parametrize(
"old_config_path, new_config_path, include_deleted",
[(OLD_TEST1_CONFIG_PATH, NEW_TEST1_CONFIG_PATH, False), (OLD_TEST2_CONFIG_PATH, NEW_TEST2_CONFIG_PATH, True)],
)
def test_migrate_config(self, old_config_path, new_config_path, include_deleted, capsys):
migration_instance = MigrateIncludeDeletedToStatusFilters()
# migrate the test_config
migration_instance.migrate([CMD, "--config", old_config_path], SOURCE)
# load the updated config
test_migrated_config = load_config(old_config_path)
# load expected updated config
expected_new_config = load_config(new_config_path)
# compare expected with migrated
assert expected_new_config == test_migrated_config
# check migrated property
if include_deleted:
assert all([filter in test_migrated_config for filter in self.filter_properties])
# check the old property is in place
assert "include_deleted" in test_migrated_config
assert test_migrated_config["include_deleted"] == include_deleted
# check the migration should be skipped, once already done
assert not migration_instance.should_migrate(test_migrated_config)
if include_deleted:
# test CONTROL MESSAGE was emitted
control_msg = json.loads(capsys.readouterr().out)
assert control_msg["type"] == Type.CONTROL.value
assert control_msg["control"]["type"] == OrchestratorType.CONNECTOR_CONFIG.value
# revert the test_config to the starting point
self.revert_migration(old_config_path)
@pytest.mark.parametrize("new_config_path", [NEW_TEST1_CONFIG_PATH, NEW_TEST2_CONFIG_PATH])
def test_should_not_migrate_new_config(self, new_config_path):
new_config = load_config(new_config_path)
migration_instance = MigrateIncludeDeletedToStatusFilters()
assert not migration_instance.should_migrate(new_config)
def test_should_not_migrate_upgraded_config(self):
new_config = load_config(self.UPGRADED_TEST_CONFIG_PATH)
migration_instance = MigrateIncludeDeletedToStatusFilters()
assert not migration_instance.should_migrate(new_config)
| TestMigrateIncludeDeletedToStatusFilters |
python | ipython__ipython | IPython/lib/backgroundjobs.py | {
"start": 15958,
"end": 16756
} | class ____(BackgroundJobBase):
"""Evaluate an expression as a background job (uses a separate thread)."""
def __init__(self, expression, glob=None, loc=None):
"""Create a new job from a string which can be fed to eval().
global/locals dicts can be provided, which will be passed to the eval
call."""
# fail immediately if the given expression can't be compiled
self.code = compile(expression,'<BackgroundJob compilation>','eval')
glob = {} if glob is None else glob
loc = {} if loc is None else loc
self.expression = self.strform = expression
self.glob = glob
self.loc = loc
self._init()
def call(self):
return eval(self.code,self.glob,self.loc)
| BackgroundJobExpr |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/12_Proximal_Policy_Optimization/simply_PPO.py | {
"start": 867,
"end": 6467
} | class ____(object):
def __init__(self):
self.sess = tf.Session()
self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')
# critic
with tf.variable_scope('critic'):
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
self.v = tf.layers.dense(l1, 1)
self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
self.advantage = self.tfdc_r - self.v
self.closs = tf.reduce_mean(tf.square(self.advantage))
self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)
# actor
pi, pi_params = self._build_anet('pi', trainable=True)
oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
with tf.variable_scope('sample_action'):
self.sample_op = tf.squeeze(pi.sample(1), axis=0) # choosing action
with tf.variable_scope('update_oldpi'):
self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]
self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
with tf.variable_scope('loss'):
with tf.variable_scope('surrogate'):
# ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
surr = ratio * self.tfadv
if METHOD['name'] == 'kl_pen':
self.tflam = tf.placeholder(tf.float32, None, 'lambda')
kl = tf.distributions.kl_divergence(oldpi, pi)
self.kl_mean = tf.reduce_mean(kl)
self.aloss = -(tf.reduce_mean(surr - self.tflam * kl))
else: # clipping method, find this is better
self.aloss = -tf.reduce_mean(tf.minimum(
surr,
tf.clip_by_value(ratio, 1.-METHOD['epsilon'], 1.+METHOD['epsilon'])*self.tfadv))
with tf.variable_scope('atrain'):
self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)
tf.summary.FileWriter("log/", self.sess.graph)
self.sess.run(tf.global_variables_initializer())
def update(self, s, a, r):
self.sess.run(self.update_oldpi_op)
adv = self.sess.run(self.advantage, {self.tfs: s, self.tfdc_r: r})
# adv = (adv - adv.mean())/(adv.std()+1e-6) # sometimes helpful
# update actor
if METHOD['name'] == 'kl_pen':
for _ in range(A_UPDATE_STEPS):
_, kl = self.sess.run(
[self.atrain_op, self.kl_mean],
{self.tfs: s, self.tfa: a, self.tfadv: adv, self.tflam: METHOD['lam']})
if kl > 4*METHOD['kl_target']: # this in in google's paper
break
if kl < METHOD['kl_target'] / 1.5: # adaptive lambda, this is in OpenAI's paper
METHOD['lam'] /= 2
elif kl > METHOD['kl_target'] * 1.5:
METHOD['lam'] *= 2
METHOD['lam'] = np.clip(METHOD['lam'], 1e-4, 10) # sometimes explode, this clipping is my solution
else: # clipping method, find this is better (OpenAI's paper)
[self.sess.run(self.atrain_op, {self.tfs: s, self.tfa: a, self.tfadv: adv}) for _ in range(A_UPDATE_STEPS)]
# update critic
[self.sess.run(self.ctrain_op, {self.tfs: s, self.tfdc_r: r}) for _ in range(C_UPDATE_STEPS)]
def _build_anet(self, name, trainable):
with tf.variable_scope(name):
l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu, trainable=trainable)
mu = 2 * tf.layers.dense(l1, A_DIM, tf.nn.tanh, trainable=trainable)
sigma = tf.layers.dense(l1, A_DIM, tf.nn.softplus, trainable=trainable)
norm_dist = tf.distributions.Normal(loc=mu, scale=sigma)
params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
return norm_dist, params
def choose_action(self, s):
s = s[np.newaxis, :]
a = self.sess.run(self.sample_op, {self.tfs: s})[0]
return np.clip(a, -2, 2)
def get_v(self, s):
if s.ndim < 2: s = s[np.newaxis, :]
return self.sess.run(self.v, {self.tfs: s})[0, 0]
env = gym.make('Pendulum-v0').unwrapped
ppo = PPO()
all_ep_r = []
for ep in range(EP_MAX):
s = env.reset()
buffer_s, buffer_a, buffer_r = [], [], []
ep_r = 0
for t in range(EP_LEN): # in one episode
env.render()
a = ppo.choose_action(s)
s_, r, done, _ = env.step(a)
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append((r+8)/8) # normalize reward, find to be useful
s = s_
ep_r += r
# update ppo
if (t+1) % BATCH == 0 or t == EP_LEN-1:
v_s_ = ppo.get_v(s_)
discounted_r = []
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs, ba, br = np.vstack(buffer_s), np.vstack(buffer_a), np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
ppo.update(bs, ba, br)
if ep == 0: all_ep_r.append(ep_r)
else: all_ep_r.append(all_ep_r[-1]*0.9 + ep_r*0.1)
print(
'Ep: %i' % ep,
"|Ep_r: %i" % ep_r,
("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',
)
plt.plot(np.arange(len(all_ep_r)), all_ep_r)
plt.xlabel('Episode');plt.ylabel('Moving averaged episode reward');plt.show() | PPO |
python | PyCQA__pylint | tests/functional/c/class_attributes.py | {
"start": 122,
"end": 415
} | class ____:
"dummy class"
def __init__(self):
self.topic = 5
self._data = 45
def change_type(self, new_class):
"""Change type"""
self.__class__ = new_class
def do_nothing(self):
"I do nothing useful"
return self.topic + 56
| Clazz |
python | scikit-image__scikit-image | src/skimage/transform/_geometric.py | {
"start": 31459,
"end": 45011
} | class ____(_HMatrixTransform):
r"""Projective transformation.
Apply a projective transformation (homography) on coordinates.
For each homogeneous coordinate :math:`\mathbf{x} = [x, y, 1]^T`, its
target position is calculated by multiplying with the given matrix,
:math:`H`, to give :math:`H \mathbf{x}`::
[[a0 a1 a2]
[b0 b1 b2]
[c0 c1 1 ]].
E.g., to rotate by theta degrees clockwise, the matrix should be::
[[cos(theta) -sin(theta) 0]
[sin(theta) cos(theta) 0]
[0 0 1]]
or, to translate x by 10 and y by 20::
[[1 0 10]
[0 1 20]
[0 0 1 ]].
Parameters
----------
matrix : (D+1, D+1) array_like, optional
Homogeneous transformation matrix.
dimensionality : int, optional
Fallback number of dimensions when `matrix` not specified.
Attributes
----------
params : (D+1, D+1) array
Homogeneous transformation matrix.
Examples
--------
>>> import numpy as np
>>> import skimage as ski
Define a transform with an homogeneous transformation matrix:
>>> tform = ski.transform.ProjectiveTransform(np.diag([2., 3., 1.]))
>>> tform.params
array([[2., 0., 0.],
[0., 3., 0.],
[0., 0., 1.]])
You can estimate a transformation to map between source and destination
points:
>>> src = np.array([[150, 150],
... [250, 100],
... [150, 200]])
>>> dst = np.array([[200, 200],
... [300, 150],
... [150, 400]])
>>> tform = ski.transform.ProjectiveTransform.from_estimate(src, dst)
>>> np.allclose(tform.params, [[ -16.56, 5.82, 895.81],
... [ -10.31, -8.29, 2075.43],
... [ -0.05, 0.02, 1. ]], atol=0.01)
True
Apply the transformation to some image data.
>>> img = ski.data.astronaut()
>>> warped = ski.transform.warp(img, inverse_map=tform.inverse)
The estimation can fail - for example, if all the input or output points
are the same. If this happens, you will get a transform that is not
"truthy" - meaning that ``bool(tform)`` is ``False``:
>>> # A successfully estimated model is truthy (applying ``bool()``
>>> # gives ``True``):
>>> if tform:
... print("Estimation succeeded.")
Estimation succeeded.
>>> # Not so for a degenerate transform with identical points.
>>> bad_src = np.ones((3, 2))
>>> bad_tform = ski.transform.ProjectiveTransform.from_estimate(
... bad_src, dst)
>>> if not bad_tform:
... print("Estimation failed.")
Estimation failed.
Trying to use this failed estimation transform result will give a suitable
error:
>>> bad_tform.params # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
FailedEstimationAccessError: No attribute "params" for failed estimation ...
"""
scaling = 'rms'
@property
def _coeff_inds(self):
"""Indices into flat ``self.params`` with coefficients to estimate"""
return range(self.params.size - 1)
def _check_dims(self, d):
if d >= 2:
return
raise NotImplementedError(
f'Input for {type(self)} should result in transform of >=2D'
)
@property
def _inv_matrix(self):
return np.linalg.inv(self.params)
def __array__(self, dtype=None, copy=None):
return self.params if dtype is None else self.params.astype(dtype)
def __call__(self, coords):
"""Apply forward transformation.
Parameters
----------
coords : (N, D) array_like
Source coordinates.
Returns
-------
coords_out : (N, D) array
Destination coordinates.
"""
return _apply_homogeneous(self.params, coords)
@property
def inverse(self):
"""Return a transform object representing the inverse."""
return type(self)(matrix=self._inv_matrix)
@classmethod
def from_estimate(cls, src, dst, weights=None):
"""Estimate the transformation from a set of corresponding points.
You can determine the over-, well- and under-determined parameters
with the total least-squares method.
Number of source and destination coordinates must match.
The transformation is defined as::
X = (a0*x + a1*y + a2) / (c0*x + c1*y + 1)
Y = (b0*x + b1*y + b2) / (c0*x + c1*y + 1)
These equations can be transformed to the following form::
0 = a0*x + a1*y + a2 - c0*x*X - c1*y*X - X
0 = b0*x + b1*y + b2 - c0*x*Y - c1*y*Y - Y
which exist for each set of corresponding points, so we have a set of
N * 2 equations. The coefficients appear linearly so we can write
A x = 0, where::
A = [[x y 1 0 0 0 -x*X -y*X -X]
[0 0 0 x y 1 -x*Y -y*Y -Y]
...
...
]
x.T = [a0 a1 a2 b0 b1 b2 c0 c1 c3]
In case of total least-squares the solution of this homogeneous system
of equations is the right singular vector of A which corresponds to the
smallest singular value normed by the coefficient c3.
Weights can be applied to each pair of corresponding points to
indicate, particularly in an overdetermined system, if point pairs have
higher or lower confidence or uncertainties associated with them. From
the matrix treatment of least squares problems, these weight values are
normalized, square-rooted, then built into a diagonal matrix, by which
A is multiplied.
In case of the affine transformation the coefficients c0 and c1 are 0.
Thus the system of equations is::
A = [[x y 1 0 0 0 -X]
[0 0 0 x y 1 -Y]
...
...
]
x.T = [a0 a1 a2 b0 b1 b2 c3]
Parameters
----------
src : (N, 2) array_like
Source coordinates.
dst : (N, 2) array_like
Destination coordinates.
weights : (N,) array_like, optional
Relative weight values for each pair of points.
Returns
-------
tf : Self or ``FailedEstimation``
An instance of the transformation if the estimation succeeded.
Otherwise, we return a special ``FailedEstimation`` object to
signal a failed estimation. Testing the truth value of the failed
estimation object will return ``False``. E.g.
.. code-block:: python
tf = ProjectiveTransform.from_estimate(...)
if not tf:
raise RuntimeError(f"Failed estimation: {tf}")
"""
return super().from_estimate(src, dst, weights)
def _estimate(self, src, dst, weights=None):
src = np.asarray(src)
dst = np.asarray(dst)
n, d = src.shape
fail_matrix = np.full((d + 1, d + 1), np.nan)
src_matrix, src = _center_and_normalize_points(src)
dst_matrix, dst = _center_and_normalize_points(dst)
if not np.all(np.isfinite(src_matrix + dst_matrix)):
self.params = fail_matrix
return 'Scaling generated NaN values'
# params: a0, a1, a2, b0, b1, b2, c0, c1
A = np.zeros((n * d, (d + 1) ** 2))
# fill the A matrix with the appropriate block matrices; see docstring
# for 2D example — this can be generalised to more blocks in the 3D and
# higher-dimensional cases.
for ddim in range(d):
A[ddim * n : (ddim + 1) * n, ddim * (d + 1) : ddim * (d + 1) + d] = src
A[ddim * n : (ddim + 1) * n, ddim * (d + 1) + d] = 1
A[ddim * n : (ddim + 1) * n, -d - 1 : -1] = src
A[ddim * n : (ddim + 1) * n, -1] = -1
A[ddim * n : (ddim + 1) * n, -d - 1 :] *= -dst[:, ddim : (ddim + 1)]
# Select relevant columns, depending on params
A = A[:, list(self._coeff_inds) + [-1]]
# Get the vectors that correspond to singular values, also applying
# the weighting if provided
if weights is None:
_, _, V = np.linalg.svd(A)
else:
weights = np.asarray(weights)
W = np.diag(np.tile(np.sqrt(weights / np.max(weights)), d))
_, _, V = np.linalg.svd(W @ A)
H = np.zeros((d + 1, d + 1))
# Solution is right singular vector that corresponds to smallest
# singular value.
if np.isclose(V[-1, -1], 0):
self.params = fail_matrix
return 'Right singular vector has 0 final element'
H.flat[list(self._coeff_inds) + [-1]] = -V[-1, :-1] / V[-1, -1]
H[d, d] = 1
# De-center and de-normalize
H = np.linalg.inv(dst_matrix) @ H @ src_matrix
# Small errors can creep in if points are not exact, causing the last
# element of H to deviate from unity. Correct for that here.
H /= H[-1, -1]
self.params = H
return None
def __add__(self, other):
"""Combine this transformation with another."""
if isinstance(other, ProjectiveTransform):
# combination of the same types result in a transformation of this
# type again, otherwise use general projective transformation
if type(self) == type(other):
tform = self.__class__
else:
tform = ProjectiveTransform
return tform(other.params @ self.params)
else:
raise TypeError("Cannot combine transformations of differing " "types.")
def __nice__(self):
"""common 'paramstr' used by __str__ and __repr__"""
if not hasattr(self, 'params'):
return '<not yet initialized>'
npstring = np.array2string(self.params, separator=', ')
return 'matrix=\n' + textwrap.indent(npstring, ' ')
def __repr__(self):
"""Add standard repr formatting around a __nice__ string"""
return f'<{type(self).__name__}({self.__nice__()}) at {hex(id(self))}>'
def __str__(self):
"""Add standard str formatting around a __nice__ string"""
return f'<{type(self).__name__}({self.__nice__()})>'
@property
def dimensionality(self):
"""The dimensionality of the transformation."""
return self.params.shape[0] - 1
@classmethod
def identity(cls, dimensionality=None):
"""Identity transform
Parameters
----------
dimensionality : {None, int}, optional
Dimensionality of identity transform.
Returns
-------
tform : transform
Transform such that ``np.all(tform(pts) == pts)``.
"""
return super().identity(dimensionality=dimensionality)
@_deprecate_estimate
def estimate(self, src, dst, weights=None):
"""Estimate the transformation from a set of corresponding points.
You can determine the over-, well- and under-determined parameters
with the total least-squares method.
Number of source and destination coordinates must match.
The transformation is defined as::
X = (a0*x + a1*y + a2) / (c0*x + c1*y + 1)
Y = (b0*x + b1*y + b2) / (c0*x + c1*y + 1)
These equations can be transformed to the following form::
0 = a0*x + a1*y + a2 - c0*x*X - c1*y*X - X
0 = b0*x + b1*y + b2 - c0*x*Y - c1*y*Y - Y
which exist for each set of corresponding points, so we have a set of
N * 2 equations. The coefficients appear linearly so we can write
A x = 0, where::
A = [[x y 1 0 0 0 -x*X -y*X -X]
[0 0 0 x y 1 -x*Y -y*Y -Y]
...
...
]
x.T = [a0 a1 a2 b0 b1 b2 c0 c1 c3]
In case of total least-squares the solution of this homogeneous system
of equations is the right singular vector of A which corresponds to the
smallest singular value normed by the coefficient c3.
Weights can be applied to each pair of corresponding points to
indicate, particularly in an overdetermined system, if point pairs have
higher or lower confidence or uncertainties associated with them. From
the matrix treatment of least squares problems, these weight values are
normalized, square-rooted, then built into a diagonal matrix, by which
A is multiplied.
In case of the affine transformation the coefficients c0 and c1 are 0.
Thus the system of equations is::
A = [[x y 1 0 0 0 -X]
[0 0 0 x y 1 -Y]
...
...
]
x.T = [a0 a1 a2 b0 b1 b2 c3]
Parameters
----------
src : (N, 2) array_like
Source coordinates.
dst : (N, 2) array_like
Destination coordinates.
weights : (N,) array_like, optional
Relative weight values for each pair of points.
Returns
-------
success : bool
True, if model estimation succeeds.
"""
return self._estimate(src, dst, weights) is None
@_update_from_estimate_docstring
@_deprecate_inherited_estimate
| ProjectiveTransform |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/blob/fake_blob_client.py | {
"start": 4863,
"end": 5135
} | class ____:
"""Mock of a Blob file downloader for testing."""
def __init__(self, contents):
self.contents = contents
def readall(self):
return self.contents
def readinto(self, fileobj):
fileobj.write(self.contents)
| FakeBlobDownloader |
python | dagster-io__dagster | python_modules/libraries/dagster-azure/dagster_azure/fakes/fake_adls2_resource.py | {
"start": 4423,
"end": 6142
} | class ____:
"""Stateful mock of an ADLS2 file client for testing."""
def __init__(self, name, fs_client):
self.name = name
self.contents = None
self._lease = None
self.fs_client = fs_client
@property
def lease(self):
return self._lease if self._lease is None else self._lease.id
def get_file_properties(self):
if self.contents is None:
raise ResourceNotFoundError("File does not exist!")
lease_id = None if self._lease is None else self._lease.id
return {"lease": lease_id}
def upload_data(self, contents, overwrite=False, lease=None):
if self._lease is not None:
if not self._lease.is_valid(lease):
raise Exception("Invalid lease!")
if self.contents is not None or overwrite is True:
if isinstance(contents, str):
self.contents = contents.encode("utf8")
elif isinstance(contents, io.BytesIO):
self.contents = contents.read()
elif isinstance(contents, io.StringIO):
self.contents = contents.read().encode("utf8")
elif isinstance(contents, bytes):
self.contents = contents
else:
self.contents = contents
def download_file(self):
if self.contents is None:
raise ResourceNotFoundError("File does not exist!")
return FakeADLS2FileDownloader(contents=self.contents)
def delete_file(self, lease=None):
if self._lease is not None:
if not self._lease.is_valid(lease):
raise Exception("Invalid lease!")
self.fs_client.delete_file(self.name)
| FakeADLS2FileClient |
python | ansible__ansible | lib/ansible/modules/user.py | {
"start": 64435,
"end": 64745
} | class ____(FreeBsdUser):
"""
This is a DragonFlyBSD User manipulation class - it inherits the
FreeBsdUser class behaviors, such as using the pw command to
manipulate the user database, followed by the chpass command
to change the password.
"""
platform = 'DragonFly'
| DragonFlyBsdUser |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 136726,
"end": 138179
} | class ____(ASTTemplateParam):
def __init__(self, data: ASTTemplateKeyParamPackIdDefault) -> None:
assert data
self.data = data
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTTemplateParamType):
return NotImplemented
return self.data == other.data
def __hash__(self) -> int:
return hash(self.data)
@property
def name(self) -> ASTNestedName:
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
@property
def isPack(self) -> bool:
return self.data.parameterPack
def get_identifier(self) -> ASTIdentifier:
return self.data.get_identifier()
def get_id(
self, version: int, objectType: str | None = None, symbol: Symbol | None = None
) -> str:
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
# the anchor will be our parent
return symbol.parent.declaration.get_id(version, prefixed=False)
else:
return self.data.get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.data)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
self.data.describe_signature(signode, mode, env, symbol)
| ASTTemplateParamType |
python | numba__numba | numba/tests/test_range.py | {
"start": 5324,
"end": 5643
} | class ____(TestCase):
def test_range_safe_cast_mixed(self):
"""Test that mixing `uint64` and `int64` works."""
a = my_arange(np.uint64(6), np.uint64(0), np.int64(-1))
self.assertPreciseEqual(a, np.arange(6, 0, -1, dtype=np.uint64))
if __name__ == '__main__':
unittest.main()
| TestRangeNumpy |
python | doocs__leetcode | solution/2600-2699/2663.Lexicographically Smallest Beautiful String/Solution.py | {
"start": 0,
"end": 769
} | class ____:
def smallestBeautifulString(self, s: str, k: int) -> str:
n = len(s)
cs = list(s)
for i in range(n - 1, -1, -1):
p = ord(cs[i]) - ord('a') + 1
for j in range(p, k):
c = chr(ord('a') + j)
if (i > 0 and cs[i - 1] == c) or (i > 1 and cs[i - 2] == c):
continue
cs[i] = c
for l in range(i + 1, n):
for m in range(k):
c = chr(ord('a') + m)
if (l > 0 and cs[l - 1] == c) or (l > 1 and cs[l - 2] == c):
continue
cs[l] = c
break
return ''.join(cs)
return ''
| Solution |
python | numpy__numpy | numpy/_core/tests/test_scalarmath.py | {
"start": 5836,
"end": 8022
} | class ____:
@pytest.mark.xfail(check_support_sve(), reason="gh-22982")
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
type='binary',
max_size=sz):
exp1 = np.ones_like(inp1)
inp1[...] = np.ones_like(inp1)
inp2[...] = np.zeros_like(inp2)
assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)
np.add(inp1, inp2, out=out)
assert_almost_equal(out, exp1, err_msg=msg)
inp2[...] += np.arange(inp2.size, dtype=dt) + 1
assert_almost_equal(np.square(inp2),
np.multiply(inp2, inp2), err_msg=msg)
# skip true divide for ints
if dt != np.int32:
assert_almost_equal(np.reciprocal(inp2),
np.divide(1, inp2), err_msg=msg)
inp1[...] = np.ones_like(inp1)
np.add(inp1, 2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
inp2[...] = np.ones_like(inp2)
np.add(2, inp2, out=out)
assert_almost_equal(out, exp1 + 2, err_msg=msg)
def test_lower_align(self):
# check data that is not aligned to element size
# i.e doubles are aligned to 4 bytes on i386
d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
assert_almost_equal(d + d, d * 2)
np.add(d, d, out=o)
np.add(np.ones_like(d), d, out=o)
np.add(d, np.ones_like(d), out=o)
np.add(np.ones_like(d), d)
np.add(d, np.ones_like(d))
| TestBaseMath |
python | plotly__plotly.py | plotly/graph_objs/contourcarpet/_stream.py | {
"start": 233,
"end": 3541
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contourcarpet"
_path_str = "contourcarpet.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contourcarpet.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contourcarpet.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contourcarpet.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | modin-project__modin | modin/error_message.py | {
"start": 928,
"end": 5397
} | class ____(object):
# Only print full ``default to pandas`` warning one time.
printed_default_to_pandas = False
printed_warnings: Set[int] = set() # Set of hashes of printed warnings
@classmethod
def not_implemented(cls, message: str = "") -> NoReturn:
if message == "":
message = "This functionality is not yet available in Modin."
get_logger().info(f"Modin Error: NotImplementedError: {message}")
raise NotImplementedError(
f"{message}\n"
+ "To request implementation, file an issue at "
+ "https://github.com/modin-project/modin/issues or, if that's "
+ "not possible, send an email to feature_requests@modin.org."
)
@classmethod
def single_warning(
cls, message: str, category: Optional[type[Warning]] = None
) -> None:
# note that there should not be identical messages with different categories since
# only the message is used as the hash key.
message_hash = hash(message)
logger = get_logger()
if message_hash in cls.printed_warnings:
logger.debug(
f"Modin Warning: Single Warning: {message} was raised and suppressed."
)
return
logger.debug(f"Modin Warning: Single Warning: {message} was raised.")
warnings.warn(message, category=category)
cls.printed_warnings.add(message_hash)
@classmethod
def default_to_pandas(cls, message: str = "", reason: str = "") -> None:
# TODO(https://github.com/modin-project/modin/issues/7429): Use
# frame-level engine config.
if message != "":
execution_str = get_current_execution()
message = (
f"{message} is not currently supported by {execution_str}, "
+ "defaulting to pandas implementation."
)
else:
message = "Defaulting to pandas implementation."
if not cls.printed_default_to_pandas:
message = (
f"{message}\n"
+ "Please refer to "
+ "https://modin.readthedocs.io/en/stable/supported_apis/defaulting_to_pandas.html for explanation."
)
cls.printed_default_to_pandas = True
if reason:
message += f"\nReason: {reason}"
get_logger().debug(f"Modin Warning: Default to pandas: {message}")
warnings.warn(message)
@classmethod
def catch_bugs_and_request_email(
cls, failure_condition: bool, extra_log: str = ""
) -> None:
if failure_condition:
get_logger().info(f"Modin Error: Internal Error: {extra_log}")
raise Exception(
"Internal Error. "
+ "Please visit https://github.com/modin-project/modin/issues "
+ "to file an issue with the traceback and the command that "
+ "caused this error. If you can't file a GitHub issue, "
+ f"please email bug_reports@modin.org.\n{extra_log}"
)
@classmethod
def non_verified_udf(cls) -> None:
get_logger().debug("Modin Warning: Non Verified UDF")
warnings.warn(
"User-defined function verification is still under development in Modin. "
+ "The function provided is not verified."
)
@classmethod
def bad_type_for_numpy_op(cls, function_name: str, operand_type: type) -> None:
cls.single_warning(
f"Modin NumPy only supports objects of modin.numpy.array types for {function_name}, not {operand_type}. Defaulting to NumPy."
)
@classmethod
def mismatch_with_pandas(cls, operation: str, message: str) -> None:
get_logger().debug(
f"Modin Warning: {operation} mismatch with pandas: {message}"
)
cls.single_warning(
f"`{operation}` implementation has mismatches with pandas:\n{message}."
)
@classmethod
def warn(cls, message: str) -> None:
warnings.warn(message)
@classmethod
def not_initialized(cls, engine: str, code: str) -> None:
get_logger().debug(f"Modin Warning: Not Initialized: {engine}")
warnings.warn(
f"{engine} execution environment not yet initialized. Initializing...\n"
+ "To remove this warning, run the following python code before doing dataframe operations:\n"
+ f"{code}"
)
| ErrorMessage |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_descriptors.py | {
"start": 7925,
"end": 8610
} | class ____:
def test___init__(self) -> None:
f = Alias("bar")
d = bcpd.AliasPropertyDescriptor("foo", f)
assert d.name == "foo"
assert d.aliased_name == "bar"
assert d.property == f
assert d.__doc__ == "This is a compatibility alias for the 'bar' property."
def test_values(self) -> None:
class Some(Model):
p0 = Int(default=17)
p1 = Alias("p0")
obj = Some()
assert obj.p0 == 17
assert obj.p1 == 17
obj.p0 = 18
assert obj.p0 == 18
assert obj.p1 == 18
obj.p1 = 19
assert obj.p0 == 19
assert obj.p1 == 19
| Test_AliasDescriptor |
python | huggingface__transformers | src/transformers/models/cvt/modeling_cvt.py | {
"start": 2920,
"end": 3534
} | class ____(nn.Module):
"""
Construct the CvT embeddings.
"""
def __init__(self, patch_size, num_channels, embed_dim, stride, padding, dropout_rate):
super().__init__()
self.convolution_embeddings = CvtConvEmbeddings(
patch_size=patch_size, num_channels=num_channels, embed_dim=embed_dim, stride=stride, padding=padding
)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, pixel_values):
hidden_state = self.convolution_embeddings(pixel_values)
hidden_state = self.dropout(hidden_state)
return hidden_state
| CvtEmbeddings |
python | ethereum__web3.py | web3/exceptions.py | {
"start": 4377,
"end": 4482
} | class ____(Web3Exception):
"""
Raised when a supplied value is invalid.
"""
| Web3ValidationError |
python | cython__cython | Cython/Compiler/FlowControl.py | {
"start": 9971,
"end": 10429
} | class ____:
"""Exception handling helper.
entry_point ControlBlock Exception handling entry point
finally_enter ControlBlock Normal finally clause entry point
finally_exit ControlBlock Normal finally clause exit point
"""
def __init__(self, entry_point, finally_enter=None, finally_exit=None):
self.entry_point = entry_point
self.finally_enter = finally_enter
self.finally_exit = finally_exit
| ExceptionDescr |
python | ansible__ansible | test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/lookup/lookup_subdir/my_subdir_lookup.py | {
"start": 84,
"end": 212
} | class ____(LookupBase):
def run(self, terms, variables, **kwargs):
return ['subdir_lookup_from_user_dir']
| LookupModule |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.