language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
pydantic__pydantic
|
pydantic-core/tests/validators/test_model_fields.py
|
{
"start": 434,
"end": 673
}
|
class ____:
def __init__(self, **attributes):
for k, v in attributes.items():
setattr(self, k, v)
def __repr__(self):
return 'Cls({})'.format(', '.join(f'{k}={v!r}' for k, v in self.__dict__.items()))
|
Cls
|
python
|
django-extensions__django-extensions
|
django_extensions/mongodb/fields/__init__.py
|
{
"start": 6522,
"end": 6965
}
|
class ____(CreationDateTimeField):
    """
    ModificationDateTimeField

    By default, sets editable=False, blank=True, default=datetime.now

    Sets value to datetime.now() on each save of the model.
    """

    def pre_save(self, model, add):
        # Refresh the field with the current timestamp on every save and
        # mirror it onto the model instance so callers see the new value.
        now = datetime.datetime.now()
        setattr(model, self.attname, now)
        return now

    def get_internal_type(self):
        # Stored in the database exactly like a plain DateTimeField.
        return "DateTimeField"
|
ModificationDateTimeField
|
python
|
plotly__plotly.py
|
plotly/graph_objs/densitymap/legendgrouptitle/_font.py
|
{
"start": 233,
"end": 9942
}
|
class ____(_BaseTraceHierarchyType):
    # Generated-style plotly property container: the legend-group-title font
    # of a densitymap trace. Storage and validation of every property are
    # inherited from _BaseTraceHierarchyType item access (self[...]); each
    # property below merely delegates to it.

    # Dotted paths locating this object within the figure hierarchy.
    _parent_path_str = "densitymap.legendgrouptitle"
    _path_str = "densitymap.legendgrouptitle.font"
    # Complete set of property names this object accepts; anything else is
    # rejected (or skipped when skip_invalid is set) by the base class.
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color: see https://plotly.com/python/css-colors/ for a list

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser can only apply a font if it is
        available on the system where it runs. Provide multiple font
        families, separated by commas, to indicate the order in which
        to apply fonts if they aren't available.

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.

        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')

        Returns
        -------
        Any
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.

        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.

        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']

        Returns
        -------
        Any
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.

        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']

        Returns
        -------
        Any
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    @property
    def variant(self):
        """
        Sets the variant of the font.

        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps',
                'all-petite-caps', 'petite-caps', 'unicase']

        Returns
        -------
        Any
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.

        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')

        Returns
        -------
        int
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    @property
    def _prop_descriptions(self):
        # Human-readable summary of all properties, consumed by the base
        # class when building help text / error messages.
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser can only apply a font
            if it is available on the system where it runs. Provide
            multiple font families, separated by commas, to
            indicate the order in which to apply fonts if they
            aren't available.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets this legend group's title font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.densitymap.leg
            endgrouptitle.Font`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser can only apply a font
            if it is available on the system where it runs. Provide
            multiple font families, separated by commas, to
            indicate the order in which to apply fonts if they
            aren't available.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size

        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.

        Returns
        -------
        Font
        """
        super().__init__("font")
        # Internal fast-path: when re-parenting an existing object the base
        # class passes _parent via kwargs and no further init is needed.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict of initial property values.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so the caller's dict is not mutated below.
            arg = _copy.copy(arg)
        else:
            raise ValueError("""\
The first argument to the plotly.graph_objs.densitymap.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.densitymap.legendgrouptitle.Font`""")

        # Validation flags consumed by _set_property/_process_kwargs.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over values in `arg`.
        self._set_property("color", arg, color)
        self._set_property("family", arg, family)
        self._set_property("lineposition", arg, lineposition)
        self._set_property("shadow", arg, shadow)
        self._set_property("size", arg, size)
        self._set_property("style", arg, style)
        self._set_property("textcase", arg, textcase)
        self._set_property("variant", arg, variant)
        self._set_property("weight", arg, weight)
        self._process_kwargs(**dict(arg, **kwargs))
        # Only skip invalid input during construction, never afterwards.
        self._skip_invalid = False
|
Font
|
python
|
astropy__astropy
|
astropy/table/table.py
|
{
"start": 13979,
"end": 19699
}
|
class ____(TableAttribute):
    """Maintain tuple that controls table column visibility for print output.

    This is a descriptor that inherits from MetaAttribute so that the attribute
    value is stored in the table meta['__attributes__'].

    This gets used for the ``pprint_include_names`` and ``pprint_exclude_names`` Table
    attributes.
    """
    # NOTE(review): the docstring says "inherits from MetaAttribute" while the
    # visible base is TableAttribute -- presumably TableAttribute subclasses
    # MetaAttribute; confirm against astropy.utils.metadata.

    def __get__(self, instance, owner_cls):
        """Get the attribute.

        This normally returns an instance of this class which is stored on the
        owner object.
        """
        # For getting from class not an instance
        if instance is None:
            return self
        # If not already stored on `instance`, make a copy of the class
        # descriptor object and put it onto the instance.
        value = instance.__dict__.get(self.name)
        if value is None:
            value = deepcopy(self)
            instance.__dict__[self.name] = value
        # We set _instance_ref on every call, since if one makes copies of
        # instances, this attribute will be copied as well, which will lose the
        # reference.
        value._instance_ref = weakref.ref(instance)
        return value

    def __set__(self, instance, names):
        """Set value of ``instance`` attribute to ``names``.

        Parameters
        ----------
        instance : object
            Instance that owns the attribute
        names : None, str, list, tuple
            Column name(s) to store, or None to clear
        """
        # A bare string means a single column name.
        if isinstance(names, str):
            names = [names]
        if names is None:
            # Remove attribute value from the meta['__attributes__'] dict.
            # Subsequent access will just return None.
            delattr(instance, self.name)
        else:
            # This stores names into instance.meta['__attributes__'] as tuple
            return super().__set__(instance, tuple(names))

    def __call__(self):
        """Get the value of the attribute.

        Returns
        -------
        names : None, tuple
            Include/exclude names
        """
        # Get the value from instance.meta['__attributes__']
        instance = self._instance_ref()
        return super().__get__(instance, instance.__class__)

    def __repr__(self):
        # Only instances bound to a table (via __get__) have _instance_ref;
        # the bare class-level descriptor falls back to the default repr.
        if hasattr(self, "_instance_ref"):
            out = f"<{self.__class__.__name__} name={self.name} value={self()}>"
        else:
            out = super().__repr__()
        return out

    def _add_remove_setup(self, names):
        """Common setup for add and remove.

        - Coerce attribute value to a list
        - Coerce names into a list
        - Get the parent table instance
        """
        names = [names] if isinstance(names, str) else list(names)
        # Get the value. This is the same as self() but we need `instance` here.
        instance = self._instance_ref()
        value = super().__get__(instance, instance.__class__)
        value = [] if value is None else list(value)
        return instance, names, value

    def add(self, names):
        """Add ``names`` to the include/exclude attribute.

        Parameters
        ----------
        names : str, list, tuple
            Column name(s) to add
        """
        instance, names, value = self._add_remove_setup(names)
        # Append only names not already present, preserving order.
        value.extend(name for name in names if name not in value)
        super().__set__(instance, tuple(value))

    def remove(self, names):
        """Remove ``names`` from the include/exclude attribute.

        Parameters
        ----------
        names : str, list, tuple
            Column name(s) to remove
        """
        self._remove(names, raise_exc=True)

    def _remove(self, names, raise_exc=False):
        """Remove ``names`` with optional checking if they exist."""
        instance, names, value = self._add_remove_setup(names)
        # Return now if there are no attributes and thus no action to be taken.
        if not raise_exc and "__attributes__" not in instance.meta:
            return
        # Remove one by one, optionally raising an exception if name is missing.
        for name in names:
            if name in value:
                value.remove(name)  # Using the list.remove method
            elif raise_exc:
                raise ValueError(f"{name} not in {self.name}")
        # Change to either None or a tuple for storing back to attribute
        value = None if value == [] else tuple(value)
        self.__set__(instance, value)

    def _rename(self, name, new_name):
        """Rename ``name`` to ``new_name`` if ``name`` is in the list."""
        names = self() or ()
        if name in names:
            new_names = list(names)
            new_names[new_names.index(name)] = new_name
            self.set(new_names)

    def set(self, names):
        """Set value of include/exclude attribute to ``names``.

        Parameters
        ----------
        names : None, str, list, tuple
            Column name(s) to store, or None to clear
        """
        # Returned context manager restores the previous value on __exit__,
        # so `with tbl.pprint_include_names.set(...)` is a temporary override.
        class _Context:
            def __init__(self, descriptor_self):
                self.descriptor_self = descriptor_self
                # Snapshot the current value before it is overwritten below.
                self.names_orig = descriptor_self()

            def __enter__(self):
                pass

            def __exit__(self, type, value, tb):
                descriptor_self = self.descriptor_self
                instance = descriptor_self._instance_ref()
                descriptor_self.__set__(instance, self.names_orig)

            def __repr__(self):
                return repr(self.descriptor_self)

        ctx = _Context(descriptor_self=self)
        instance = self._instance_ref()
        self.__set__(instance, names)
        return ctx
|
PprintIncludeExclude
|
python
|
geekcomputers__Python
|
JARVIS/JARVIS_2.0.py
|
{
"start": 4548,
"end": 9435
}
|
class ____:
def __init__(self, Q):
self.query = Q
def sub_call(self, exe_file):
'''
This method can directly use call method of subprocess module and according to the
argument(exe_file) passed it returns the output.
exe_file:- must pass the exe file name as str object type.
'''
return subprocess.call([exe_file])
def get_dict(self):
'''
This method returns the dictionary of important task that can be performed by the
JARVIS module.
Later on this can also be used by the user itself to add or update their preferred apps.
'''
_dict = dict(
time=datetime.now(),
notepad='Notepad.exe',
calculator='calc.exe',
stickynot='StickyNot.exe',
shell='powershell.exe',
paint='mspaint.exe',
cmd='cmd.exe',
browser='C:\\Program Files\\Internet Explorer\\iexplore.exe',
)
return _dict
@property
def get_app(self):
task_dict = self.get_dict()
task = task_dict.get(self.query, None)
if task is None:
engine.say("Sorry Try Again")
engine.runAndWait()
else:
if 'exe' in str(task):
return self.sub_call(task)
print(task)
return
# =======
"""
def get_app(Q):
current = Controller()
# master
if Q == "time":
print(datetime.now())
x = datetime.now()
voice(x)
elif Q == "news":
speak_news()
elif Q == "open notepad":
subprocess.call(["Notepad.exe"])
elif Q == "open calculator":
subprocess.call(["calc.exe"])
elif Q == "open stikynot":
subprocess.call(["StikyNot.exe"])
elif Q == "open shell":
subprocess.call(["powershell.exe"])
elif Q == "open paint":
subprocess.call(["mspaint.exe"])
elif Q == "open cmd":
subprocess.call(["cmd.exe"])
elif Q == "open discord":
subprocess.call(["discord.exe"])
elif Q == "open browser":
subprocess.call(["C:\\Program Files\\Internet Explorer\\iexplore.exe"])
# patch-1
elif Q == "open youtube":
webbrowser.open("https://www.youtube.com/") # open youtube
elif Q == "open google":
webbrowser.open("https://www.google.com/") # open google
elif Q == "open github":
webbrowser.open("https://github.com/")
elif Q == "search for":
que = Q.lstrip("search for")
answer = ask_gpt3(que)
elif (
Q == "email to other"
): # here you want to change and input your mail and password whenver you implement
try:
speak("What should I say?")
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
to = "abc@gmail.com"
content = input("Enter content")
sendEmail(to, content)
speak("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry, I can't send the email.")
# =======
# master
elif Q == "Take screenshot":
snapshot = ImageGrab.grab()
drive_letter = "C:\\"
folder_name = r"downloaded-files"
folder_time = datetime.datetime.now().strftime("%Y-%m-%d_%I-%M-%S_%p")
extention = ".jpg"
folder_to_save_files = drive_letter + folder_name + folder_time + extention
snapshot.save(folder_to_save_files)
elif Q == "Jokes":
speak(pyjokes.get_joke())
elif Q == "start recording":
current.add("Win", "Alt", "r")
speak("Started recording. just say stop recording to stop.")
elif Q == "stop recording":
current.add("Win", "Alt", "r")
speak("Stopped recording. check your game bar folder for the video")
elif Q == "clip that":
current.add("Win", "Alt", "g")
speak("Clipped. check you game bar file for the video")
with keyboard.Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
elif Q == "take a break":
exit()
else:
answer = ask_gpt3(Q)
# master
apps = {
"time": datetime.datetime.now(),
"notepad": "Notepad.exe",
"calculator": "calc.exe",
"stikynot": "StikyNot.exe",
"shell": "powershell.exe",
"paint": "mspaint.exe",
"cmd": "cmd.exe",
"browser": r"C:\\Program Files\Internet Explorer\iexplore.exe",
"vscode": r"C:\\Users\\Users\\User\\AppData\\Local\\Programs\Microsoft VS Code",
}
# master
# Call get_app(Query) Func.
if __name__ == "__main__":
while not exit_jarvis:
Query = takecommand().lower()
get_app(Query)
exit_jarvis = True
|
Jarvis
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/definitions/_internal/mixins.py
|
{
"start": 1338,
"end": 4447
}
|
class ____:
"""Mixing implementing common dependency setting methods like >> and <<."""
@property
def roots(self) -> Iterable[DependencyMixin]:
"""
List of root nodes -- ones with no upstream dependencies.
a.k.a. the "start" of this sub-graph
"""
raise NotImplementedError()
@property
def leaves(self) -> Iterable[DependencyMixin]:
"""
List of leaf nodes -- ones with only upstream dependencies.
a.k.a. the "end" of this sub-graph
"""
raise NotImplementedError()
@abstractmethod
def set_upstream(
self, other: DependencyMixin | Sequence[DependencyMixin], edge_modifier: EdgeModifier | None = None
):
"""Set a task or a task list to be directly upstream from the current task."""
raise NotImplementedError()
@abstractmethod
def set_downstream(
self, other: DependencyMixin | Sequence[DependencyMixin], edge_modifier: EdgeModifier | None = None
):
"""Set a task or a task list to be directly downstream from the current task."""
raise NotImplementedError()
def as_setup(self) -> DependencyMixin:
"""Mark a task as setup task."""
raise NotImplementedError()
def as_teardown(
self,
*,
setups: BaseOperator | Iterable[BaseOperator] | None = None,
on_failure_fail_dagrun: bool | None = None,
) -> DependencyMixin:
"""Mark a task as teardown and set its setups as direct relatives."""
raise NotImplementedError()
def update_relative(
self, other: DependencyMixin, upstream: bool = True, edge_modifier: EdgeModifier | None = None
) -> None:
"""
Update relationship information about another TaskMixin. Default is no-op.
Override if necessary.
"""
def __lshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Implement Task << Task."""
self.set_upstream(other)
return other
def __rshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Implement Task >> Task."""
self.set_downstream(other)
return other
def __rrshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Implement Task >> [Task] because list don't have __rshift__ operators."""
self.__lshift__(other)
return self
def __rlshift__(self, other: DependencyMixin | Sequence[DependencyMixin]):
"""Implement Task << [Task] because list don't have __lshift__ operators."""
self.__rshift__(other)
return self
@classmethod
def _iter_references(cls, obj: Any) -> Iterable[tuple[DependencyMixin, str]]:
from airflow.sdk.definitions._internal.abstractoperator import AbstractOperator
if isinstance(obj, AbstractOperator):
yield obj, "operator"
elif isinstance(obj, ResolveMixin):
yield from obj.iter_references()
elif isinstance(obj, Sequence):
for o in obj:
yield from cls._iter_references(o)
|
DependencyMixin
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/image_ops_test.py
|
{
"start": 60475,
"end": 62318
}
|
class ____(test_util.TensorFlowTestCase):
    """Unit tests for image_ops.adjust_brightness across dtypes."""

    def _testBrightness(self, x_np, y_np, delta, tol=1e-6):
        # Run adjust_brightness on x_np and check the result against y_np.
        with self.cached_session():
            image = constant_op.constant(x_np, shape=x_np.shape)
            adjusted = image_ops.adjust_brightness(image, delta)
            self.assertAllClose(self.evaluate(adjusted), y_np, tol)

    def testPositiveDeltaUint8(self):
        shape = [2, 2, 3]
        source = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        # uint8 output saturates at 255 (255 + 10 stays 255).
        expected = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 255, 11]
        self._testBrightness(
            np.array(source, dtype=np.uint8).reshape(shape),
            np.array(expected, dtype=np.uint8).reshape(shape),
            delta=10. / 255.)

    def testPositiveDeltaFloat32(self):
        shape = [2, 2, 3]
        source = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        # Float images are not clipped, so 255 + 10 -> 265 here.
        expected = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
        self._testBrightness(
            np.array(source, dtype=np.float32).reshape(shape) / 255.,
            np.array(expected, dtype=np.float32).reshape(shape) / 255.,
            delta=10. / 255.)

    def testPositiveDeltaFloat16(self):
        shape = [2, 2, 3]
        source = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        expected = [10, 15, 23, 64, 145, 236, 47, 18, 244, 100, 265, 11]
        # Looser tolerance to accommodate float16 precision.
        self._testBrightness(
            np.array(source, dtype=np.float16).reshape(shape) / 255.,
            np.array(expected, dtype=np.float16).reshape(shape) / 255.,
            delta=10. / 255., tol=1e-3)

    def testNegativeDelta(self):
        shape = [2, 2, 3]
        source = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
        # uint8 output saturates at 0 on the low end.
        expected = [0, 0, 3, 44, 125, 216, 27, 0, 224, 80, 245, 0]
        self._testBrightness(
            np.array(source, dtype=np.uint8).reshape(shape),
            np.array(expected, dtype=np.uint8).reshape(shape),
            delta=-10. / 255.)
|
AdjustBrightnessTest
|
python
|
spyder-ide__spyder
|
spyder/plugins/run/models.py
|
{
"start": 13150,
"end": 14968
}
|
class ____(QAbstractListModel):
    """List model of executor names that expose a configuration widget.

    Built from a RunExecutorListModel: an executor is listed only if at
    least one of its input configurations declares a
    'configuration_widget'.
    """

    def __init__(self, parent, executor_model: RunExecutorListModel):
        super().__init__(parent)
        self.executor_model = executor_model
        # executor id -> set of input configurations for which that executor
        # provides a configuration widget.
        self.executor_configurations: OrderedDict[
            str, Set[Tuple[str, str]]] = OrderedDict({})

        for input_conf in self.executor_model.executors_per_input:
            executors_available = self.executor_model.executors_per_input[
                input_conf]
            for executor in executors_available:
                exec_input_conf = self.executor_model[(input_conf, executor)]
                ConfigWidget = exec_input_conf.get(
                    'configuration_widget', None)
                # Only executors with a configuration widget are kept.
                if ConfigWidget is not None:
                    input_set = self.executor_configurations.get(
                        executor,set({}))
                    input_set |= {input_conf}
                    self.executor_configurations[executor] = input_set

        # Bidirectional row-index <-> executor-id maps for the Qt model API.
        self.executor_indexed_list: Dict[int, str] = dict(
            enumerate(self.executor_configurations))
        self.executor_list: Dict[str, int] = {
            v: k for k, v in self.executor_indexed_list.items()}

    def data(self, index: QModelIndex, role: int = Qt.DisplayRole) -> str:
        """Return the human-readable executor name for the given row.

        Other roles implicitly return None, which Qt treats as "no data".
        """
        row = index.row()
        if role == Qt.DisplayRole or role == Qt.EditRole:
            executor_id = self.executor_indexed_list[row]
            return self.executor_model.executor_names[executor_id]

    def rowCount(self, parent: QModelIndex = None) -> int:
        """Number of executors that have at least one configurable input."""
        return len(self.executor_list)

    def selected_executor(
            self, index: int) -> Tuple[str, Set[Tuple[str, str]]]:
        """Return (executor id, its configurable input set) for a row index."""
        executor_name = self.executor_indexed_list[index]
        return executor_name, self.executor_configurations[executor_name]
|
RunExecutorNamesListModel
|
python
|
Pylons__pyramid
|
tests/test_config/test_actions.py
|
{
"start": 22018,
"end": 31092
}
|
class ____(unittest.TestCase):
    """Tests for pyramid.config.actions.resolveConflicts.

    Action tuples have the shape
    (discriminator, callable, args, kw, includepath, info, order);
    trailing elements are optional.
    """

    def _callFUT(self, actions):
        from pyramid.config.actions import resolveConflicts

        return resolveConflicts(actions)

    def test_it_success_tuples(self):
        # Actions supplied as tuples: within a discriminator, the action with
        # the shortest includepath wins; ordering of the output follows
        # (order, original position).
        from . import dummyfactory as f

        result = self._callFUT(
            [
                (None, f),
                (1, f, (1,), {}, (), 'first'),
                (1, f, (2,), {}, ('x',), 'second'),
                (1, f, (3,), {}, ('y',), 'third'),
                (4, f, (4,), {}, ('y',), 'should be last', 99999),
                (3, f, (3,), {}, ('y',)),
                (None, f, (5,), {}, ('y',)),
            ]
        )
        result = list(result)
        self.assertEqual(
            result,
            [
                {
                    'info': None,
                    'args': (),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': None,
                    'includepath': (),
                    'order': 0,
                },
                {
                    'info': 'first',
                    'args': (1,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 1,
                    'includepath': (),
                    'order': 0,
                },
                {
                    'info': None,
                    'args': (3,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 3,
                    'includepath': ('y',),
                    'order': 0,
                },
                {
                    'info': None,
                    'args': (5,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': None,
                    'includepath': ('y',),
                    'order': 0,
                },
                {
                    'info': 'should be last',
                    'args': (4,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 4,
                    'includepath': ('y',),
                    'order': 99999,
                },
            ],
        )

    def test_it_success_dicts(self):
        # Same input/expectations as test_it_success_tuples; kept to exercise
        # the dict-normalized output shape of resolveConflicts.
        from . import dummyfactory as f

        result = self._callFUT(
            [
                (None, f),
                (1, f, (1,), {}, (), 'first'),
                (1, f, (2,), {}, ('x',), 'second'),
                (1, f, (3,), {}, ('y',), 'third'),
                (4, f, (4,), {}, ('y',), 'should be last', 99999),
                (3, f, (3,), {}, ('y',)),
                (None, f, (5,), {}, ('y',)),
            ]
        )
        result = list(result)
        self.assertEqual(
            result,
            [
                {
                    'info': None,
                    'args': (),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': None,
                    'includepath': (),
                    'order': 0,
                },
                {
                    'info': 'first',
                    'args': (1,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 1,
                    'includepath': (),
                    'order': 0,
                },
                {
                    'info': None,
                    'args': (3,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 3,
                    'includepath': ('y',),
                    'order': 0,
                },
                {
                    'info': None,
                    'args': (5,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': None,
                    'includepath': ('y',),
                    'order': 0,
                },
                {
                    'info': 'should be last',
                    'args': (4,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 4,
                    'includepath': ('y',),
                    'order': 99999,
                },
            ],
        )

    def test_it_conflict(self):
        # Two same-discriminator actions from sibling includepaths ('x' vs
        # 'y') cannot be resolved and must raise on iteration.
        from . import dummyfactory as f

        result = self._callFUT(
            [
                (None, f),
                (1, f, (2,), {}, ('x',), 'eek'),  # will conflict
                (1, f, (3,), {}, ('y',), 'ack'),  # will conflict
                (4, f, (4,), {}, ('y',)),
                (3, f, (3,), {}, ('y',)),
                (None, f, (5,), {}, ('y',)),
            ]
        )
        self.assertRaises(ConfigurationConflictError, list, result)

    def test_it_with_actions_grouped_by_order(self):
        # Lines marked X are the expected survivors; output must be grouped
        # by (order, i) rather than by input position.
        from . import dummyfactory as f

        result = self._callFUT(
            [
                (None, f),  # X
                (1, f, (1,), {}, (), 'third', 10),  # X
                (1, f, (2,), {}, ('x',), 'fourth', 10),
                (1, f, (3,), {}, ('y',), 'fifth', 10),
                (2, f, (1,), {}, (), 'sixth', 10),  # X
                (3, f, (1,), {}, (), 'seventh', 10),  # X
                (5, f, (4,), {}, ('y',), 'eighth', 99999),  # X
                (4, f, (3,), {}, (), 'first', 5),  # X
                (4, f, (5,), {}, ('y',), 'second', 5),
            ]
        )
        result = list(result)
        self.assertEqual(len(result), 6)
        # resolved actions should be grouped by (order, i)
        self.assertEqual(
            result,
            [
                {
                    'info': None,
                    'args': (),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': None,
                    'includepath': (),
                    'order': 0,
                },
                {
                    'info': 'first',
                    'args': (3,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 4,
                    'includepath': (),
                    'order': 5,
                },
                {
                    'info': 'third',
                    'args': (1,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 1,
                    'includepath': (),
                    'order': 10,
                },
                {
                    'info': 'sixth',
                    'args': (1,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 2,
                    'includepath': (),
                    'order': 10,
                },
                {
                    'info': 'seventh',
                    'args': (1,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 3,
                    'includepath': (),
                    'order': 10,
                },
                {
                    'info': 'eighth',
                    'args': (4,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 5,
                    'includepath': ('y',),
                    'order': 99999,
                },
            ],
        )

    def test_override_success_across_orders(self):
        # ('x', 'y') extends ('x',), so the later action is an override, not
        # a conflict, even though the orders differ.
        from . import dummyfactory as f

        result = self._callFUT(
            [
                (1, f, (2,), {}, ('x',), 'eek', 0),
                (1, f, (3,), {}, ('x', 'y'), 'ack', 10),
            ]
        )
        result = list(result)
        self.assertEqual(
            result,
            [
                {
                    'info': 'eek',
                    'args': (2,),
                    'callable': f,
                    'introspectables': (),
                    'kw': {},
                    'discriminator': 1,
                    'includepath': ('x',),
                    'order': 0,
                }
            ],
        )

    def test_conflicts_across_orders(self):
        # NOTE(review): ('x') here is the plain string 'x', not a one-element
        # tuple -- an includepath of ('x',) would be a prefix of ('x', 'y')
        # and resolve as an override instead of conflicting.
        from . import dummyfactory as f

        result = self._callFUT(
            [
                (1, f, (2,), {}, ('x', 'y'), 'eek', 0),
                (1, f, (3,), {}, ('x'), 'ack', 10),
            ]
        )
        self.assertRaises(ConfigurationConflictError, list, result)
|
Test_resolveConflicts
|
python
|
apache__airflow
|
providers/amazon/tests/unit/amazon/aws/operators/test_emr_serverless.py
|
{
"start": 45474,
"end": 49853
}
|
class ____:
    """Tests for EmrServerlessDeleteApplicationOperator (stop + delete flow)."""

    @mock.patch.object(EmrServerlessHook, "get_waiter")
    @mock.patch.object(EmrServerlessHook, "conn")
    def test_delete_application_with_wait_for_completion_successfully(self, mock_conn, mock_get_waiter):
        mock_get_waiter().wait.return_value = True
        mock_conn.stop_application.return_value = {}
        mock_conn.delete_application.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}}
        operator = EmrServerlessDeleteApplicationOperator(
            task_id=task_id, application_id=application_id_delete_operator
        )
        operator.execute(None)
        # wait_for_completion defaults to True: the operator waits once for
        # the stop and once for the delete, hence two waiter invocations.
        assert operator.wait_for_completion is True
        assert mock_get_waiter().wait.call_count == 2
        mock_conn.stop_application.assert_called_once()
        mock_conn.delete_application.assert_called_once_with(applicationId=application_id_delete_operator)

    @mock.patch.object(EmrServerlessHook, "get_waiter")
    @mock.patch.object(EmrServerlessHook, "conn")
    def test_delete_application_without_wait_for_completion_successfully(self, mock_conn, mock_get_waiter):
        mock_get_waiter().wait.return_value = True
        mock_conn.stop_application.return_value = {}
        mock_conn.delete_application.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}}
        operator = EmrServerlessDeleteApplicationOperator(
            task_id=task_id,
            application_id=application_id_delete_operator,
            wait_for_completion=False,
        )
        operator.execute(None)
        # Only the stop is awaited when wait_for_completion is False.
        mock_get_waiter().wait.assert_called_once()
        mock_conn.stop_application.assert_called_once()
        mock_conn.delete_application.assert_called_once_with(applicationId=application_id_delete_operator)

    @mock.patch.object(EmrServerlessHook, "get_waiter")
    @mock.patch.object(EmrServerlessHook, "conn")
    def test_delete_application_failed_deletion(self, mock_conn, mock_get_waiter):
        # A non-200 HTTP status from delete_application must surface as an
        # AirflowException after the stop has completed.
        mock_get_waiter().wait.return_value = True
        mock_conn.stop_application.return_value = {}
        mock_conn.delete_application.return_value = {"ResponseMetadata": {"HTTPStatusCode": 400}}
        operator = EmrServerlessDeleteApplicationOperator(
            task_id=task_id, application_id=application_id_delete_operator
        )
        with pytest.raises(AirflowException) as ex_message:
            operator.execute(None)
        assert "Application deletion failed:" in str(ex_message.value)
        mock_get_waiter().wait.assert_called_once()
        mock_conn.stop_application.assert_called_once()
        mock_conn.delete_application.assert_called_once_with(applicationId=application_id_delete_operator)

    @pytest.mark.parametrize(
        ("waiter_delay", "waiter_max_attempts", "expected"),
        [
            # NOTSET falls back to the operator defaults (60s delay, 25 attempts).
            (NOTSET, NOTSET, [60, 25]),
            (30, 10, [30, 10]),
        ],
    )
    def test_delete_application_waiter_params(
        self,
        waiter_delay,
        waiter_max_attempts,
        expected,
    ):
        operator = EmrServerlessDeleteApplicationOperator(
            task_id=task_id,
            application_id=application_id,
            waiter_delay=waiter_delay,
            waiter_max_attempts=waiter_max_attempts,
        )
        assert operator.wait_for_completion is True
        assert operator.waiter_delay == expected[0]
        assert operator.waiter_max_attempts == expected[1]

    @mock.patch.object(EmrServerlessHook, "conn")
    def test_delete_application_deferrable(self, mock_conn):
        # In deferrable mode the operator defers instead of polling inline.
        mock_conn.delete_application.return_value = {"ResponseMetadata": {"HTTPStatusCode": 200}}
        operator = EmrServerlessDeleteApplicationOperator(
            task_id=task_id,
            application_id=application_id,
            deferrable=True,
        )
        with pytest.raises(TaskDeferred):
            operator.execute(None)

    def test_template_fields(self):
        # Every declared template field must exist as an instance attribute,
        # otherwise Jinja templating would silently skip it.
        operator = EmrServerlessDeleteApplicationOperator(
            task_id=task_id, application_id=application_id_delete_operator
        )
        template_fields = list(operator.template_fields) + list(operator.template_fields_renderers.keys())
        class_fields = operator.__dict__
        missing_fields = [field for field in template_fields if field not in class_fields]
        assert not missing_fields, f"Templated fields are not available {missing_fields}"
|
TestEmrServerlessDeleteOperator
|
python
|
PyCQA__pylint
|
tests/functional/b/base_init_vars.py
|
{
"start": 309,
"end": 722
}
|
class ____(BaseClass):
    """Inherits from BaseClass
    """

    def __init__(self):
        BaseClass.__init__(self)
        self.var = {}

    def met(self):
        """Checks that base_var is not seen as defined outsite '__init__'
        """
        # Touch both the locally defined dict and the inherited one.
        self.var[1] = 'one'
        self.base_var[1] = 'one'
        return self.base_var, self.var
if __name__ == '__main__':
OBJ = MyClass()
OBJ.met()
|
MyClass
|
python
|
psf__black
|
src/blib2to3/pygram.py
|
{
"start": 900,
"end": 3235
}
|
class ____(Symbols):
and_expr: int
and_test: int
annassign: int
arglist: int
argument: int
arith_expr: int
asexpr_test: int
assert_stmt: int
async_funcdef: int
async_stmt: int
atom: int
augassign: int
break_stmt: int
case_block: int
classdef: int
comp_for: int
comp_if: int
comp_iter: int
comp_op: int
comparison: int
compound_stmt: int
continue_stmt: int
decorated: int
decorator: int
decorators: int
del_stmt: int
dictsetmaker: int
dotted_as_name: int
dotted_as_names: int
dotted_name: int
encoding_decl: int
eval_input: int
except_clause: int
expr: int
expr_stmt: int
exprlist: int
factor: int
file_input: int
flow_stmt: int
for_stmt: int
fstring: int
fstring_format_spec: int
fstring_middle: int
fstring_replacement_field: int
funcdef: int
global_stmt: int
guard: int
if_stmt: int
import_as_name: int
import_as_names: int
import_from: int
import_name: int
import_stmt: int
lambdef: int
listmaker: int
match_stmt: int
namedexpr_test: int
not_test: int
old_comp_for: int
old_comp_if: int
old_comp_iter: int
old_lambdef: int
old_test: int
or_test: int
parameters: int
paramspec: int
pass_stmt: int
pattern: int
patterns: int
power: int
raise_stmt: int
return_stmt: int
shift_expr: int
simple_stmt: int
single_input: int
sliceop: int
small_stmt: int
subject_expr: int
star_expr: int
stmt: int
subscript: int
subscriptlist: int
suite: int
term: int
test: int
testlist: int
testlist1: int
testlist_gexp: int
testlist_safe: int
testlist_star_expr: int
tfpdef: int
tfplist: int
tname: int
tname_star: int
trailer: int
try_stmt: int
tstring: int
tstring_format_spec: int
tstring_middle: int
tstring_replacement_field: int
type_stmt: int
typedargslist: int
typeparam: int
typeparams: int
typevar: int
typevartuple: int
varargslist: int
vfpdef: int
vfplist: int
vname: int
while_stmt: int
with_stmt: int
xor_expr: int
yield_arg: int
yield_expr: int
yield_stmt: int
|
_python_symbols
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_name01.py
|
{
"start": 315,
"end": 1369
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_name01.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line", "name": "New Chart Name"})
chart.axis_ids = [47335296, 56029952]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-snowflake-pandas/dagster_snowflake_pandas/snowflake_pandas_type_handler.py
|
{
"start": 2886,
"end": 9943
}
|
class ____(DbTypeHandler[pd.DataFrame]):
"""Plugin for the Snowflake I/O Manager that can store and load Pandas DataFrames as Snowflake tables.
Examples:
.. code-block:: python
from dagster_snowflake import SnowflakeIOManager
from dagster_snowflake_pandas import SnowflakePandasTypeHandler
from dagster_snowflake_pyspark import SnowflakePySparkTypeHandler
from dagster import Definitions, EnvVar
class MySnowflakeIOManager(SnowflakeIOManager):
@staticmethod
def type_handlers() -> Sequence[DbTypeHandler]:
return [SnowflakePandasTypeHandler(), SnowflakePySparkTypeHandler()]
@asset(
key_prefix=["my_schema"], # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={
"io_manager": MySnowflakeIOManager(database="MY_DATABASE", account=EnvVar("SNOWFLAKE_ACCOUNT"), ...)
}
)
"""
def handle_output(
self, context: OutputContext, table_slice: TableSlice, obj: pd.DataFrame, connection
) -> Mapping[str, RawMetadataValue]:
from snowflake import connector
connector.paramstyle = "pyformat"
with_uppercase_cols = obj.rename(str.upper, copy=False, axis="columns")
column_types = _get_table_column_types(table_slice, connection)
if context.resource_config and context.resource_config.get(
"store_timestamps_as_strings", False
):
with_uppercase_cols = with_uppercase_cols.apply(
lambda x: _convert_timestamp_to_string(x, column_types, table_slice.table),
axis="index",
)
write_pandas(
conn=connection,
df=with_uppercase_cols,
# originally we used pd.to_sql with pd_writer method to write the df to snowflake. pd_writer
# forced the database, schema, and table name to be uppercase, so we mimic that behavior here for feature parity
# in the future we could allow non-uppercase names
table_name=table_slice.table.upper(),
schema=table_slice.schema.upper(),
database=table_slice.database.upper() if table_slice.database else None,
auto_create_table=True,
use_logical_type=True,
quote_identifiers=True,
)
return {
# output object may be a slice/partition, so we output different metadata keys based on
# whether this output represents an entire table or just a slice/partition
**(
TableMetadataSet(partition_row_count=obj.shape[0])
if context.has_partition_key
else TableMetadataSet(row_count=obj.shape[0])
),
"dataframe_columns": MetadataValue.table_schema(
TableSchema(
columns=[
TableColumn(name=str(name), type=str(dtype))
for name, dtype in obj.dtypes.items()
]
)
),
}
def load_input(
self, context: InputContext, table_slice: TableSlice, connection
) -> pd.DataFrame:
if table_slice.partition_dimensions and len(context.asset_partition_keys) == 0:
return pd.DataFrame()
result = pd.read_sql(
sql=SnowflakeDbClient.get_select_statement(table_slice), con=connection
)
if context.resource_config and context.resource_config.get(
"store_timestamps_as_strings", False
):
result = result.apply(_convert_string_to_timestamp, axis="index")
result.columns = map(str.lower, result.columns) # type: ignore # (bad stubs)
return result
@property
def supported_types(self):
return [pd.DataFrame]
snowflake_pandas_io_manager = build_snowflake_io_manager(
[SnowflakePandasTypeHandler()], default_load_type=pd.DataFrame
)
snowflake_pandas_io_manager.__doc__ = """
An I/O manager definition that reads inputs from and writes Pandas DataFrames to Snowflake. When
using the snowflake_pandas_io_manager, any inputs and outputs without type annotations will be loaded
as Pandas DataFrames.
Returns:
IOManagerDefinition
Examples:
.. code-block:: python
from dagster_snowflake_pandas import snowflake_pandas_io_manager
from dagster import asset, Definitions
@asset(
key_prefix=["my_schema"], # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame: # the name of the asset will be the table name
...
Definitions(
assets=[my_table],
resources={
"io_manager": snowflake_pandas_io_manager.configured({
"database": "my_database",
"account": {"env": "SNOWFLAKE_ACCOUNT"}
})
}
)
You can set a default schema to store the assets using the ``schema`` configuration value of the Snowflake I/O
Manager. This schema will be used if no other schema is specified directly on an asset or op.
.. code-block:: python
Definitions(
assets=[my_table],
resources={"io_manager": snowflake_pandas_io_manager.configured(
{"database": "my_database", "schema": "my_schema"} # will be used as the schema
)}
)
On individual assets, you an also specify the schema where they should be stored using metadata or
by adding a ``key_prefix`` to the asset key. If both ``key_prefix`` and metadata are defined, the metadata will
take precedence.
.. code-block:: python
@asset(
key_prefix=["my_schema"], # will be used as the schema in snowflake
)
def my_table() -> pd.DataFrame:
...
@asset(
metadata={"schema": "my_schema"} # will be used as the schema in snowflake
)
def my_other_table() -> pd.DataFrame:
...
For ops, the schema can be specified by including a "schema" entry in output metadata.
.. code-block:: python
@op(
out={"my_table": Out(metadata={"schema": "my_schema"})}
)
def make_my_table() -> pd.DataFrame:
...
If none of these is provided, the schema will default to "public".
To only use specific columns of a table as input to a downstream op or asset, add the metadata "columns" to the
In or AssetIn.
.. code-block:: python
@asset(
ins={"my_table": AssetIn("my_table", metadata={"columns": ["a"]})}
)
def my_table_a(my_table: pd.DataFrame) -> pd.DataFrame:
# my_table will just contain the data from column "a"
...
"""
|
SnowflakePandasTypeHandler
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_dataflow.py
|
{
"start": 17000,
"end": 19034
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.dataflow.DataflowHook")
def test_exec_job_id(self, dataflow_mock):
self.dataflow = DataflowStopJobOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
job_id=JOB_ID,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
)
"""
Test DataflowHook is created and the right args are passed to cancel_job.
"""
cancel_job_hook = dataflow_mock.return_value.cancel_job
mock_context = {"task_instance": mock.MagicMock()}
self.dataflow.execute(mock_context)
assert dataflow_mock.called
cancel_job_hook.assert_called_once_with(
job_name=None,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
job_id=JOB_ID,
)
@mock.patch("airflow.providers.google.cloud.operators.dataflow.DataflowHook")
def test_exec_job_name_prefix(self, dataflow_mock):
self.dataflow = DataflowStopJobOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
job_name_prefix=JOB_NAME,
poll_sleep=POLL_SLEEP,
location=TEST_LOCATION,
)
"""
Test DataflowHook is created and the right args are passed to cancel_job
and is_job_dataflow_running.
"""
is_job_running_hook = dataflow_mock.return_value.is_job_dataflow_running
cancel_job_hook = dataflow_mock.return_value.cancel_job
mock_context = {"task_instance": mock.MagicMock()}
self.dataflow.execute(mock_context)
assert dataflow_mock.called
is_job_running_hook.assert_called_once_with(
name=JOB_NAME,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
)
cancel_job_hook.assert_called_once_with(
job_name=JOB_NAME,
project_id=TEST_PROJECT,
location=TEST_LOCATION,
job_id=None,
)
|
TestDataflowStopJobOperator
|
python
|
charliermarsh__ruff
|
crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py
|
{
"start": 10543,
"end": 10677
}
|
class ____(Generic[T]):
def __new__(cls: type[Generic2]) -> Generic2: ...
def __enter__(self: Generic2) -> Generic2: ...
|
Generic2
|
python
|
has2k1__plotnine
|
plotnine/facets/labelling.py
|
{
"start": 7194,
"end": 7781
}
|
class ____(_core_labeller):
"""
Use a function turn facet value into a label
Parameters
----------
func : callable
Function to label an individual string
"""
def __init__(self, func: Callable[[str], str]):
self.func = func
def __call__(self, label_info: strip_label_details) -> strip_label_details:
label_info = label_info.copy()
variables = label_info.variables
for facet_var, facet_value in variables.items():
variables[facet_var] = self.func(facet_value)
return label_info
|
_function_labeller
|
python
|
django__django
|
tests/raw_query/models.py
|
{
"start": 1271,
"end": 1347
}
|
class ____(models.Model):
reviewed = models.ManyToManyField(Book)
|
Reviewer
|
python
|
sympy__sympy
|
sympy/polys/polyerrors.py
|
{
"start": 4705,
"end": 4744
}
|
class ____(OptionError):
pass
|
FlagError
|
python
|
dask__distributed
|
distributed/exceptions.py
|
{
"start": 622,
"end": 1482
}
|
class ____(TimeoutError):
"""Raised when the expected number of workers to not start within the timeout period."""
#: Number of workers that are available.
available_workers: int
#: Number of workers that were expected to be available.
expected_workers: int
#: Timeout period in seconds.
timeout: float
def __init__(
self, available_workers: int, expected_workers: int, timeout: float
) -> None:
self.available_workers = available_workers
self.expected_workers = expected_workers
self.timeout = timeout
super().__init__(available_workers, expected_workers, timeout)
def __str__(self) -> str:
return "Only %d/%d workers arrived after %s" % (
self.available_workers,
self.expected_workers,
self.timeout,
)
|
WorkerStartTimeoutError
|
python
|
django__django
|
tests/generic_inline_admin/tests.py
|
{
"start": 9569,
"end": 11126
}
|
class ____(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_add(self):
category_id = Category.objects.create(name="male").pk
prefix = "generic_inline_admin-phonenumber-content_type-object_id"
post_data = {
"name": "John Doe",
# inline data
f"{prefix}-TOTAL_FORMS": "1",
f"{prefix}-INITIAL_FORMS": "0",
f"{prefix}-MAX_NUM_FORMS": "0",
f"{prefix}-0-id": "",
f"{prefix}-0-phone_number": "555-555-5555",
f"{prefix}-0-category": str(category_id),
}
response = self.client.get(reverse("admin:generic_inline_admin_contact_add"))
self.assertEqual(response.status_code, 200)
response = self.client.post(
reverse("admin:generic_inline_admin_contact_add"), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_delete(self):
from .models import Contact
c = Contact.objects.create(name="foo")
PhoneNumber.objects.create(
object_id=c.id,
content_type=ContentType.objects.get_for_model(Contact),
phone_number="555-555-5555",
)
response = self.client.post(
reverse("admin:generic_inline_admin_contact_delete", args=[c.pk])
)
self.assertContains(response, "Are you sure you want to delete")
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
|
GenericInlineAdminWithUniqueTogetherTest
|
python
|
facebook__pyre-check
|
client/coverage_data.py
|
{
"start": 17649,
"end": 23545
}
|
class ____(matchers.MatcherDecoratableVisitor):
"""
Collects all empty containers in the module.
"""
# An empty container is:
# - an empty list literal
# - an empty dict literal
# - a call to set(), frozenset(), list(), or dict() with no arguments
METADATA_DEPENDENCIES = (PositionProvider,)
def __init__(self) -> None:
super().__init__()
self.empty_containers: List[EmptyContainerInfo] = []
def location(self, node: libcst.CSTNode) -> Location:
return Location.from_code_range(self.get_metadata(PositionProvider, node))
def record_empty_container(self, kind: EmptyContainerKind, loc: Location) -> None:
self.empty_containers.append(EmptyContainerInfo(kind=kind, location=loc))
@matchers.visit(
matchers.Assign(
targets=matchers.MatchIfTrue(_matches_names),
value=matchers.List(elements=matchers.MatchIfTrue(lambda x: len(x) == 0)),
)
)
def record_empty_list(self, node: libcst.Assign) -> None:
self.record_empty_container(
EmptyContainerKind.LIST_LITERAL, self.location(node)
)
@matchers.visit(
matchers.Assign(
targets=matchers.MatchIfTrue(_matches_names),
value=matchers.Dict(elements=matchers.MatchIfTrue(lambda x: len(x) == 0)),
)
)
def record_empty_dict(self, node: libcst.Assign) -> None:
self.record_empty_container(
EmptyContainerKind.DICT_LITERAL, self.location(node)
)
@matchers.leave(
matchers.Assign(
targets=matchers.MatchIfTrue(_matches_names),
value=matchers.Call(
func=matchers.Name("list")
| matchers.Name("dict")
| matchers.Name("set")
| matchers.Name("frozenset"),
args=matchers.MatchIfTrue(lambda x: len(x) == 0),
),
)
)
def record_constructor_call(self, node: libcst.Assign) -> None:
func = libcst.ensure_type(node.value, libcst.Call)
func_name = libcst.ensure_type(func.func, libcst.Name)
if func_name.value == "list":
kind = EmptyContainerKind.LIST_CALL
elif func_name.value == "dict":
kind = EmptyContainerKind.DICT_CALL
elif func_name.value == "set":
kind = EmptyContainerKind.SET_CALL
elif func_name.value == "frozenset":
kind = EmptyContainerKind.FROZENSET_CALL
else:
raise ValueError(f"Unexpected function call: {func_name}")
self.record_empty_container(kind, self.location(node))
def collect_mode(
module: libcst.MetadataWrapper,
strict_by_default: bool,
path: Path,
ignored: bool = False, # means the module was ignored in the pyre configuration
) -> ModuleModeInfo:
is_test_regex = compile(r".*\/(test|tests)\/.*\.py$")
visitor = ModuleModeCollector(strict_by_default)
module.visit(visitor)
mode = ModuleMode.IGNORE_ALL if ignored else visitor.mode
return ModuleModeInfo(
mode=mode,
explicit_comment_line=visitor.explicit_comment_line,
is_generated=visitor.is_generated,
is_test=bool(is_test_regex.match(str(path))),
)
def collect_functions(
module: libcst.MetadataWrapper,
) -> Sequence[FunctionAnnotationInfo]:
visitor = AnnotationCollector()
module.visit(visitor)
return visitor.functions
def collect_suppressions(
module: libcst.MetadataWrapper,
) -> Sequence[TypeErrorSuppression]:
visitor = SuppressionCollector()
module.visit(visitor)
return visitor.suppressions
def collect_empty_containers(
module: libcst.MetadataWrapper,
) -> Sequence[EmptyContainerInfo]:
visitor = EmptyContainerCollector()
module.visit(visitor)
return visitor.empty_containers
def module_from_code(code: str) -> Optional[libcst.MetadataWrapper]:
try:
raw_module = libcst.parse_module(code)
return libcst.MetadataWrapper(raw_module)
except Exception:
LOG.exception("Error reading code at path %s.", code)
return None
def module_from_path(path: Path) -> Optional[libcst.MetadataWrapper]:
try:
return module_from_code(path.read_text())
except FileNotFoundError:
return None
def _is_excluded(
path: Path,
excludes: Sequence[str],
) -> bool:
try:
return any(re.match(exclude_pattern, str(path)) for exclude_pattern in excludes)
except re.error:
LOG.warning("Could not parse `excludes`: %s", excludes)
return False
def _should_ignore(
path: Path,
excludes: Sequence[str],
) -> bool:
return (
path.suffix != ".py"
or path.name.startswith("__")
or path.name.startswith(".")
or _is_excluded(path, excludes)
)
def find_module_paths(
paths: Iterable[Path],
excludes: Sequence[str],
) -> List[Path]:
"""
Given a set of paths (which can be file paths or directory paths)
where we want to collect data, return an iterable of all the module
paths after recursively expanding directories, and ignoring directory
exclusions specified in `excludes`.
"""
def _get_paths_for_file(target_file: Path) -> Iterable[Path]:
return [target_file] if not _should_ignore(target_file, excludes) else []
def _get_paths_in_directory(target_directory: Path) -> Iterable[Path]:
return (
path
for path in target_directory.glob("**/*.py")
if not _should_ignore(path, excludes)
)
return sorted(
set(
itertools.chain.from_iterable(
(
_get_paths_for_file(path)
if not path.is_dir()
else _get_paths_in_directory(path)
)
for path in paths
)
)
)
|
EmptyContainerCollector
|
python
|
lepture__authlib
|
authlib/oauth2/rfc8628/models.py
|
{
"start": 301,
"end": 827
}
|
class ____(dict, DeviceCredentialMixin):
def get_client_id(self):
return self["client_id"]
def get_scope(self):
return self.get("scope")
def get_user_code(self):
return self["user_code"]
def get_nonce(self):
return self.get("nonce")
def get_auth_time(self):
return self.get("auth_time")
def is_expired(self):
expires_at = self.get("expires_at")
if expires_at:
return expires_at < time.time()
return False
|
DeviceCredentialDict
|
python
|
wandb__wandb
|
wandb/sdk/lib/wb_logging.py
|
{
"start": 683,
"end": 4139
}
|
class ____:
"""Sentinel for `not_run_specific()`."""
_NOT_RUN_SPECIFIC = _NotRunSpecific()
_run_id: contextvars.ContextVar[str | _NotRunSpecific | None] = contextvars.ContextVar(
"_run_id",
default=None,
)
_logger = logging.getLogger("wandb")
def configure_wandb_logger() -> None:
"""Configures the global 'wandb' logger.
The wandb logger is not intended to be customized by users. Instead, it is
used as a mechanism to redirect log messages into wandb run-specific log
files.
This function is idempotent: calling it multiple times has the same effect.
"""
# Send all DEBUG and above messages to registered handlers.
#
# Per-run handlers can set different levels.
_logger.setLevel(logging.DEBUG)
# Do not propagate wandb logs to the root logger, which the user may have
# configured to point elsewhere. All wandb log messages should go to a run's
# log file.
_logger.propagate = False
# If no handlers are configured for the 'wandb' logger, don't activate the
# "lastResort" handler which sends messages to stderr with a level of
# WARNING by default.
#
# This occurs in wandb code that runs outside the context of a Run and
# not as part of the CLI.
#
# Most such code uses the `termlog` / `termwarn` / `termerror` methods
# to communicate with the user. When that code executes while a run is
# active, its logger messages go to that run's log file.
if not _logger.handlers:
_logger.addHandler(logging.NullHandler())
@contextlib.contextmanager
def log_to_run(run_id: str | None) -> Iterator[None]:
"""Direct all wandb log messages to the given run.
Args:
id: The current run ID, or None if actions in the context manager are
not associated to a specific run. In the latter case, log messages
will go to all runs.
Usage:
with wb_logging.run_id(...):
... # Log messages here go to the specified run's logger.
"""
token = _run_id.set(run_id)
try:
yield
finally:
_run_id.reset(token)
@contextlib.contextmanager
def log_to_all_runs() -> Iterator[None]:
"""Direct wandb log messages to all runs.
Unlike `log_to_run(None)`, this indicates an intentional choice.
This is often convenient to use as a decorator:
@wb_logging.log_to_all_runs()
def my_func():
... # Log messages here go to the specified run's logger.
"""
token = _run_id.set(_NOT_RUN_SPECIFIC)
try:
yield
finally:
_run_id.reset(token)
def add_file_handler(run_id: str, filepath: pathlib.Path) -> logging.Handler:
"""Direct log messages for a run to a file.
Args:
run_id: The run for which to create a log file.
filepath: The file to write log messages to.
Returns:
The added handler which can then be configured further or removed
from the 'wandb' logger directly.
The default logging level is INFO.
"""
handler = logging.FileHandler(filepath)
handler.setLevel(logging.INFO)
handler.addFilter(_RunIDFilter(run_id))
handler.setFormatter(
logging.Formatter(
"%(asctime)s %(levelname)-7s %(threadName)-10s:%(process)d"
" [%(filename)s:%(funcName)s():%(lineno)s]%(run_id_tag)s"
" %(message)s"
)
)
_logger.addHandler(handler)
return handler
|
_NotRunSpecific
|
python
|
jina-ai__jina
|
tests/unit/jaml/test_type_parse.py
|
{
"start": 3672,
"end": 6957
}
|
class ____:
def __init__(self, envs):
self._env_keys_added = envs
def __enter__(self):
for key, val in self._env_keys_added.items():
os.environ[key] = str(val)
def __exit__(self, exc_type, exc_val, exc_tb):
for key in self._env_keys_added.keys():
os.unsetenv(key)
def test_parsing_brackets_in_envvar():
flow_yaml = '''
jtype: Flow
executors:
- name: a
env:
var1: ${{ env.VAR1 }}
var4: -${{ env.VAR1 }}
var2: ${{root.executors[0].name}}
var3: ${{ env.VAR1 }}-${{root.executors[0].name}}
'''
with EnvironmentVarCtxtManager(
envs={
'VAR1': '{"1": "2"}',
}
):
b = JAML.load(flow_yaml, substitute=True)
assert b['executors'][0]['env']['var1'] == '{"1": "2"}'
assert b['executors'][0]['env']['var2'] == 'a'
assert b['executors'][0]['env']['var3'] == '{"1": "2"}-a'
assert b['executors'][0]['env']['var4'] == '-{"1": "2"}'
def test_exception_invalid_yaml():
cur_dir = os.path.dirname(os.path.abspath(__file__))
yaml = os.path.join(cur_dir, 'invalid.yml')
with pytest.raises(BadConfigSource):
BaseExecutor.load_config(yaml)
with pytest.raises(BadConfigSource):
Flow.load_config(yaml)
def test_jtype(tmpdir):
flow_path = os.path.join(tmpdir, 'flow.yml')
f = Flow()
f.save_config(flow_path)
with open(flow_path, 'r', encoding='utf-8') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'Flow'
assert type(Flow.load_config(flow_path)) == Flow
exec_path = os.path.join(tmpdir, 'exec.yml')
e = BaseExecutor()
e.save_config(exec_path)
with open(exec_path, 'r', encoding='utf-8') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'BaseExecutor'
assert type(BaseExecutor.load_config(exec_path)) == BaseExecutor
dep_path = os.path.join(tmpdir, 'dep.yml')
dep = Deployment(uses='YourExecutor', port=12345, replicas=3, shards=2)
dep.save_config(dep_path)
with open(dep_path, 'r', encoding='utf-8') as file:
conf = yaml.safe_load(file)
assert 'jtype' in conf
assert conf['jtype'] == 'Deployment'
assert conf['with']['shards'] == 2
assert conf['with']['replicas'] == 3
assert conf['with']['port'] == 12345
loaded_deployment = Deployment.load_config(dep_path)
assert type(loaded_deployment) == Deployment
assert loaded_deployment.port == 12345
assert loaded_deployment.args.shards == 2
assert loaded_deployment.args.replicas == 3
def test_load_dataclass_executor():
executor_yaml = '''
jtype: MyDataClassExecutor
with:
my_field: this is my field
metas:
name: test-name-updated
workspace: test-work-space-updated
requests:
/foo: baz
'''
exec = BaseExecutor.load_config(executor_yaml)
assert exec.my_field == 'this is my field'
assert exec.requests['/foo'].fn == MyDataClassExecutor.baz
assert exec.metas.name == 'test-name-updated'
assert exec.metas.workspace == 'test-work-space-updated'
|
EnvironmentVarCtxtManager
|
python
|
getsentry__sentry
|
src/sentry/types/releaseactivity.py
|
{
"start": 24,
"end": 341
}
|
class ____(Enum):
CREATED = 0
DEPLOYED = 1
FINISHED = 2
ISSUE = 3
CHOICES = tuple(
(i.value, i.name.lower())
for i in [
ReleaseActivityType.CREATED,
ReleaseActivityType.DEPLOYED,
ReleaseActivityType.FINISHED,
ReleaseActivityType.ISSUE,
]
)
|
ReleaseActivityType
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/types/dagster_type.py
|
{
"start": 14730,
"end": 15633
}
|
class ____(DagsterType):
def __init__(
self,
key: t.Optional[str],
name: t.Optional[str],
loader: t.Optional[DagsterTypeLoader] = None,
is_builtin: bool = False,
description: t.Optional[str] = None,
):
super(Anyish, self).__init__(
key=key,
name=name,
kind=DagsterTypeKind.ANY,
loader=loader,
is_builtin=is_builtin,
type_check_fn=self.type_check_method,
description=description,
typing_type=t.Any,
)
def type_check_method(
self, _context: "TypeCheckContext", _value: object
) -> TypeCheck:
return TypeCheck(success=True)
@property
def supports_fan_in(self) -> bool:
return True
def get_inner_type_for_fan_in(self) -> DagsterType:
# Anyish all the way down
return self
|
Anyish
|
python
|
scikit-image__scikit-image
|
benchmarks/benchmark_measure.py
|
{
"start": 925,
"end": 1358
}
|
class ____:
param_names = ['cache']
params = (False, True)
def setup(self, cache):
self.label_image, self.intensity_image = init_regionprops_data()
def time_regionprops_table_all(self, cache):
measure.regionprops_table(
self.label_image, self.intensity_image, properties=PROP_VALS, cache=cache
)
# omit peakmem tests to save time (memory usage was minimal)
|
RegionpropsTableAll
|
python
|
huggingface__transformers
|
src/transformers/models/mistral3/modular_mistral3.py
|
{
"start": 4279,
"end": 9743
}
|
class ____(LlavaModel):
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and apply multimodal projection.
Args:
pixel_values (`torch.FloatTensor]` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
vision_feature_layer (`Union[int, list[int]]`, *optional*):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
image_sizes (`torch.Tensor`, *optional*):
Tensor containing the image sizes as returned by the processor.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`).
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
# this is not memory efficient at all (output_hidden_states=True) will save all the hidden states.
image_outputs = self.vision_tower(pixel_values, image_sizes=image_sizes, output_hidden_states=True, **kwargs)
# If we have one vision feature layer, return the corresponding hidden states,
# otherwise, select the hidden states of each feature layer and concatenate them
if isinstance(vision_feature_layer, int):
selected_image_feature = image_outputs.hidden_states[vision_feature_layer]
else:
hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer]
selected_image_feature = torch.cat(hs_pool, dim=-1)
image_features = self.multi_modal_projector(selected_image_feature.squeeze(0), image_sizes)
downsample_ratio = self.vision_tower.patch_size * self.config.spatial_merge_size
split_sizes = [(height // downsample_ratio) * (width // downsample_ratio) for height, width in image_sizes]
image_features = torch.split(image_features.squeeze(0), split_sizes)
return image_features
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, Mistral3ModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
image_sizes=image_sizes,
)
image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
return Mistral3ModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
|
Mistral3Model
|
python
|
pytorch__pytorch
|
torch/distributed/_shard/sharded_tensor/metadata.py
|
{
"start": 166,
"end": 302
}
|
class ____(Enum):
TORCH_CONTIGUOUS_FORMAT = 0
TORCH_CHANNELS_LAST = 1
TORCH_PRESERVE_FORMAT = 2
@dataclass
|
MEM_FORMAT_ENCODING
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/orm/state_changes.py
|
{
"start": 888,
"end": 6806
}
|
class ____:
"""Supplies state assertion decorators.
The current use case is for the :class:`_orm.SessionTransaction` class. The
:class:`_StateChange` class itself is agnostic of the
:class:`_orm.SessionTransaction` class so could in theory be generalized
for other systems as well.
"""
_next_state: _StateChangeState = _StateChangeStates.ANY
_state: _StateChangeState = _StateChangeStates.NO_CHANGE
_current_fn: Optional[Callable[..., Any]] = None
def _raise_for_prerequisite_state(
self, operation_name: str, state: _StateChangeState
) -> NoReturn:
raise sa_exc.IllegalStateChangeError(
f"Can't run operation '{operation_name}()' when Session "
f"is in state {state!r}",
code="isce",
)
@classmethod
def declare_states(
cls,
prerequisite_states: Union[
Literal[_StateChangeStates.ANY], Tuple[_StateChangeState, ...]
],
moves_to: _StateChangeState,
) -> Callable[[_F], _F]:
"""Method decorator declaring valid states.
:param prerequisite_states: sequence of acceptable prerequisite
states. Can be the single constant _State.ANY to indicate no
prerequisite state
:param moves_to: the expected state at the end of the method, assuming
no exceptions raised. Can be the constant _State.NO_CHANGE to
indicate state should not change at the end of the method.
"""
assert prerequisite_states, "no prequisite states sent"
has_prerequisite_states = (
prerequisite_states is not _StateChangeStates.ANY
)
prerequisite_state_collection = cast(
"Tuple[_StateChangeState, ...]", prerequisite_states
)
expect_state_change = moves_to is not _StateChangeStates.NO_CHANGE
@util.decorator
def _go(fn: _F, self: Any, *arg: Any, **kw: Any) -> Any:
current_state = self._state
if (
has_prerequisite_states
and current_state not in prerequisite_state_collection
):
self._raise_for_prerequisite_state(fn.__name__, current_state)
next_state = self._next_state
existing_fn = self._current_fn
expect_state = moves_to if expect_state_change else current_state
if (
# destination states are restricted
next_state is not _StateChangeStates.ANY
# method seeks to change state
and expect_state_change
# destination state incorrect
and next_state is not expect_state
):
if existing_fn and next_state in (
_StateChangeStates.NO_CHANGE,
_StateChangeStates.CHANGE_IN_PROGRESS,
):
raise sa_exc.IllegalStateChangeError(
f"Method '{fn.__name__}()' can't be called here; "
f"method '{existing_fn.__name__}()' is already "
f"in progress and this would cause an unexpected "
f"state change to {moves_to!r}",
code="isce",
)
else:
raise sa_exc.IllegalStateChangeError(
f"Cant run operation '{fn.__name__}()' here; "
f"will move to state {moves_to!r} where we are "
f"expecting {next_state!r}",
code="isce",
)
self._current_fn = fn
self._next_state = _StateChangeStates.CHANGE_IN_PROGRESS
try:
ret_value = fn(self, *arg, **kw)
except:
raise
else:
if self._state is expect_state:
return ret_value
if self._state is current_state:
raise sa_exc.IllegalStateChangeError(
f"Method '{fn.__name__}()' failed to "
"change state "
f"to {moves_to!r} as expected",
code="isce",
)
elif existing_fn:
raise sa_exc.IllegalStateChangeError(
f"While method '{existing_fn.__name__}()' was "
"running, "
f"method '{fn.__name__}()' caused an "
"unexpected "
f"state change to {self._state!r}",
code="isce",
)
else:
raise sa_exc.IllegalStateChangeError(
f"Method '{fn.__name__}()' caused an unexpected "
f"state change to {self._state!r}",
code="isce",
)
finally:
self._next_state = next_state
self._current_fn = existing_fn
return _go
@contextlib.contextmanager
def _expect_state(self, expected: _StateChangeState) -> Iterator[Any]:
"""called within a method that changes states.
method must also use the ``@declare_states()`` decorator.
"""
assert self._next_state is _StateChangeStates.CHANGE_IN_PROGRESS, (
"Unexpected call to _expect_state outside of "
"state-changing method"
)
self._next_state = expected
try:
yield
except:
raise
else:
if self._state is not expected:
raise sa_exc.IllegalStateChangeError(
f"Unexpected state change to {self._state!r}", code="isce"
)
finally:
self._next_state = _StateChangeStates.CHANGE_IN_PROGRESS
|
_StateChange
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/douban/provider.py
|
{
"start": 219,
"end": 430
}
|
class ____(ProviderAccount):
def get_profile_url(self):
return self.account.extra_data.get("alt")
def get_avatar_url(self):
return self.account.extra_data.get("large_avatar")
|
DoubanAccount
|
python
|
has2k1__plotnine
|
plotnine/iapi.py
|
{
"start": 3916,
"end": 4010
}
|
class ____:
"""
Position Scales
"""
x: scale
y: scale
@dataclass
|
pos_scales
|
python
|
django__django
|
tests/postgres_tests/test_array.py
|
{
"start": 33155,
"end": 36854
}
|
class ____(TransactionTestCase):
available_apps = ["postgres_tests"]
def test_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(kwargs.keys(), {"base_field"})
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.contrib.postgres.fields.ArrayField")
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "postgres_tests.models.ArrayFieldSubclass")
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
}
)
def test_adding_field_with_default(self):
# See #22962
table_name = "postgres_tests_integerarraydefaultmodel"
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
}
)
def test_adding_arrayfield_with_index(self):
"""
ArrayField shouldn't have varchar_patterns_ops or text_patterns_ops
indexes.
"""
table_name = "postgres_tests_chartextarrayindexmodel"
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v["columns"]
for k, v in list(
connection.introspection.get_constraints(cursor, table_name).items()
)
if k.endswith("_like")
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [["char2"]])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table_name
).values()
if c["index"] and len(c["columns"]) == 1
]
self.assertIn("char", indexes)
self.assertIn("char2", indexes)
self.assertIn("text", indexes)
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
|
TestMigrations
|
python
|
dask__dask
|
dask/utils_test.py
|
{
"start": 362,
"end": 4819
}
|
class ____:
"""
The GetFunctionTestCase class can be imported and used to test foreign
implementations of the `get` function specification. It aims to enforce all
known expectations of `get` functions.
To use the class, inherit from it and override the `get` function. For
example:
> from dask.utils_test import GetFunctionTestMixin
> class TestCustomGet(GetFunctionTestMixin):
get = staticmethod(myget)
Note that the foreign `myget` function has to be explicitly decorated as a
staticmethod.
"""
def test_get(self):
d = {":x": 1, ":y": (inc, ":x"), ":z": (add, ":x", ":y")}
assert self.get(d, ":x") == 1
assert self.get(d, ":y") == 2
assert self.get(d, ":z") == 3
def test_badkey(self):
d = {":x": 1, ":y": (inc, ":x"), ":z": (add, ":x", ":y")}
try:
result = self.get(d, "badkey")
except KeyError:
pass
else:
msg = "Expected `{}` with badkey to raise KeyError.\n"
msg += f"Obtained '{result}' instead."
assert False, msg.format(self.get.__name__)
def test_nested_badkey(self):
d = {"x": 1, "y": 2, "z": (sum, ["x", "y"])}
try:
result = self.get(d, [["badkey"], "y"])
except KeyError:
pass
else:
msg = "Expected `{}` with badkey to raise KeyError.\n"
msg += f"Obtained '{result}' instead."
assert False, msg.format(self.get.__name__)
def test_data_not_in_dict_is_ok(self):
d = {"x": 1, "y": (add, "x", 10)}
assert self.get(d, "y") == 11
def test_get_with_list(self):
d = {"x": 1, "y": 2, "z": (sum, ["x", "y"])}
assert self.get(d, ["x", "y"]) == (1, 2)
assert self.get(d, "z") == 3
def test_get_with_list_top_level(self):
d = {
"a": [1, 2, 3],
"b": "a",
"c": [1, (inc, 1)],
"d": [(sum, "a")],
"e": ["a", "b"],
"f": [[[(sum, "a"), "c"], (sum, "b")], 2],
}
assert self.get(d, "a") == [1, 2, 3]
assert self.get(d, "b") == [1, 2, 3]
assert self.get(d, "c") == [1, 2]
assert self.get(d, "d") == [6]
assert self.get(d, "e") == [[1, 2, 3], [1, 2, 3]]
assert self.get(d, "f") == [[[6, [1, 2]], 6], 2]
def test_get_with_nested_list(self):
d = {"x": 1, "y": 2, "z": (sum, ["x", "y"])}
assert self.get(d, [["x"], "y"]) == ((1,), 2)
assert self.get(d, "z") == 3
def test_get_works_with_unhashables_in_values(self):
f = lambda x, y: x + len(y)
d = {"x": 1, "y": (f, "x", {1})}
assert self.get(d, "y") == 2
def test_nested_tasks(self):
d = {"x": 1, "y": (inc, "x"), "z": (add, (inc, "x"), "y")}
assert self.get(d, "z") == 4
def test_get_stack_limit(self):
d = {f"x{i + 1}": (inc, f"x{i}") for i in range(10000)}
d["x0"] = 0
assert self.get(d, "x10000") == 10000
def test_with_HighLevelGraph(self):
from dask.highlevelgraph import HighLevelGraph
layers = {"a": {"x": 1, "y": (inc, "x")}, "b": {"z": (add, (inc, "x"), "y")}}
dependencies = {"a": (), "b": {"a"}}
graph = HighLevelGraph(layers, dependencies)
assert self.get(graph, "z") == 4
def import_or_none(name):
"""Import a module and return it; in case of failure; return None"""
try:
return importlib.import_module(name)
except (ImportError, AttributeError):
return None
def hlg_layer(hlg: HighLevelGraph, prefix: str) -> Layer:
"Get the first layer from a HighLevelGraph whose name starts with a prefix"
for key, lyr in hlg.layers.items():
if key.startswith(prefix):
return lyr
raise KeyError(f"No layer starts with {prefix!r}: {list(hlg.layers)}")
def hlg_layer_topological(hlg: HighLevelGraph, i: int) -> Layer:
"Get the layer from a HighLevelGraph at position ``i``, topologically"
return hlg.layers[hlg._toposort_layers()[i]]
@contextlib.contextmanager
def _check_warning(condition: bool, category: type[Warning], message: str):
"""Conditionally check if a warning is raised"""
if condition:
import pytest
with pytest.warns(category, match=message) as ctx:
yield ctx
else:
with contextlib.nullcontext() as ctx:
yield ctx
|
GetFunctionTestMixin
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
|
{
"start": 15913,
"end": 16008
}
|
class ____(IterableExportStreamAdjustableRange):
data_field = "emailComplaint"
|
EmailComplaint
|
python
|
simonw__datasette
|
datasette/utils/asgi.py
|
{
"start": 731,
"end": 924
}
|
class ____(NotFound):
def __init__(self, database_name, table):
super().__init__("Table not found")
self.database_name = database_name
self.table = table
|
TableNotFound
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/base/llms/types.py
|
{
"start": 21045,
"end": 21929
}
|
class ____(BaseModel):
"""
Completion response.
Fields:
text: Text content of the response if not streaming, or if streaming,
the current extent of streamed text.
additional_kwargs: Additional information on the response(i.e. token
counts, function calling information).
raw: Optional raw JSON that was parsed to populate text, if relevant.
delta: New text that just streamed in (only relevant when streaming).
"""
text: str
additional_kwargs: dict = Field(default_factory=dict)
raw: Optional[Any] = None
logprobs: Optional[List[List[LogProb]]] = None
delta: Optional[str] = None
def __str__(self) -> str:
return self.text
CompletionResponseGen = Generator[CompletionResponse, None, None]
CompletionResponseAsyncGen = AsyncGenerator[CompletionResponse, None]
|
CompletionResponse
|
python
|
pandas-dev__pandas
|
pandas/_typing.py
|
{
"start": 8079,
"end": 8302
}
|
class ____(BaseBuffer, Protocol[AnyStr_co]):
__module__: str = "pandas.api.typing.aliases"
def read(self, n: int = ..., /) -> AnyStr_co:
# for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
...
|
ReadBuffer
|
python
|
numpy__numpy
|
numpy/_globals.py
|
{
"start": 3093,
"end": 4155
}
|
class ____:
# A descriptor to store on the ufunc __dict__ that avoids definig a
# signature for the ufunc class/type but allows the instance to have one.
# This is needed because inspect.signature() chokes on normal properties
# (as of 3.14 at least).
# We could also set __signature__ on the instance but this allows deferred
# computation of the signature.
def __get__(self, obj, objtype=None):
# Delay import, not a critical path but need to avoid circular import.
from numpy._core._internal import _ufunc_inspect_signature_builder
if obj is None:
# could also return None, which is accepted as "not set" by
# inspect.signature().
raise AttributeError(
"type object 'numpy.ufunc' has no attribute '__signature__'")
# Store on the instance, after this the descriptor won't be used.
obj.__signature__ = _ufunc_inspect_signature_builder(obj)
return obj.__signature__
_signature_descriptor = _SignatureDescriptor()
|
_SignatureDescriptor
|
python
|
pytorch__pytorch
|
test/dynamo/test_higher_order_ops.py
|
{
"start": 253431,
"end": 254974
}
|
class ____(torch.nn.Module):
def forward(self, L_x_: "f32[2, 4, 3]"):
l_x_ = L_x_
lazy_load_decompositions = torch._functorch.predispatch.lazy_load_decompositions(); lazy_load_decompositions = None
_vmap_increment_nesting = torch._functorch.predispatch._vmap_increment_nesting(2, 'error'); _vmap_increment_nesting = None
_add_batch_dim: "f32[4, 3]" = torch._functorch.predispatch._add_batch_dim(l_x_, 0, 1); l_x_ = None
child: "f32[3]" = _add_batch_dim.sum(0)
child_1: "f32[4]" = _add_batch_dim.sum(1); _add_batch_dim = None
_remove_batch_dim: "f32[2, 3]" = torch._functorch.predispatch._remove_batch_dim(child, 1, 2, 0); child = None
_remove_batch_dim_1: "f32[2, 4]" = torch._functorch.predispatch._remove_batch_dim(child_1, 1, 2, 0); child_1 = None
_vmap_decrement_nesting = torch._functorch.predispatch._vmap_decrement_nesting(); _vmap_decrement_nesting = None
return (_remove_batch_dim, _remove_batch_dim_1)
""",
)
def test_vmap_multiple_outputs_diff_dims(self):
x = torch.ones(2, 4, 3)
def fn(x):
return torch.vmap(lambda x: (x.sum(0), x.sum(1)), out_dims=(1, 0))(x)
wrapped_gm = self._compile_check(fn, (x,))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
|
GraphModule
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/optimize/calibrator_test.py
|
{
"start": 1714,
"end": 10752
}
|
class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters(
# Activation type Int8
('UseActivationTypeInt8', dtypes.int8),
# Activation type Int16
('UseActivationTypeInt16', dtypes.int16),
)
def test_calibration_with_quantization(self, activations_type):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate_and_quantize(
input_gen, dtypes.float32, dtypes.float32, False, activations_type
)
self.assertIsNotNone(quantized_model)
@parameterized.named_parameters(
# Activation type Int8
('UseActivationTypeInt8', dtypes.int8),
# Activation type Int16
('UseActivationTypeInt16', dtypes.int16),
)
def test_calibration_with_quantization_allow_float(self, activations_type):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate_and_quantize(
input_gen, dtypes.float32, dtypes.float32, True, activations_type
)
self.assertIsNotNone(quantized_model)
def test_calibration_with_quantization_single_op(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate_and_quantize_single(
input_gen, dtypes.float32, dtypes.float32, True, 'conv2d_8/BiasAdd'
)
self.assertIsNotNone(quantized_model)
def test_calibration_with_string_input(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/string_input_flex_model.bin'
)
with open(model_path, 'rb') as fp:
model_with_string_input = fp.read()
quantizer = _calibrator.Calibrator(model_with_string_input)
# Input generator for the model.
def input_gen():
for i in range(10):
yield [np.array('Test' + str(i))]
quantized_model = quantizer.calibrate_and_quantize_single(
input_gen, dtypes.float32, dtypes.float32, True, 'Identity'
)
self.assertIsNotNone(quantized_model)
@parameterized.named_parameters(
# Activation type Int8
('UseActivationTypeInt8 - EnableMlirQuantizer', dtypes.int8),
# Activation type Int16
('UseActivationTypeInt16 - DisableEnableMlirQuantizer', dtypes.int16),
)
def test_calibration_with_quantization_multiple_inputs(
self, activations_type
):
# Load multi add model from test data.
# This model has 4 inputs of size (1, 8, 8, 3).
model_path = resource_loader.get_path_to_datafile(
'../../testdata/multi_add.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 8, 8, 3), dtype=np.float32) for _ in range(4)]
quantized_model = quantizer.calibrate_and_quantize(
input_gen, dtypes.float32, dtypes.float32, False, activations_type
)
self.assertIsNotNone(quantized_model)
def test_invalid_model_buffer(self):
float_model = b'\0' * 100
with self.assertRaisesRegex(ValueError, 'Failed to parse the model'):
_calibrator.Calibrator(float_model)
# TODO(fengliuai): enable mlir quantizer
def test_empty_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
def empty_input_gen():
for i in ():
yield i
with self.assertRaises(RuntimeError):
quantizer.calibrate_and_quantize(
empty_input_gen, dtypes.float32, dtypes.float32, False
)
def test_invalid_shape_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator with incorrect shape.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 2, 2, 3), dtype=np.float32)]
with self.assertRaisesRegex(ValueError, 'Size mismatch'):
quantizer.calibrate_and_quantize(
input_gen,
dtypes.float32,
dtypes.float32,
False,
activations_type=dtypes.int8,
bias_type=dtypes.int32,
resize_input=False,
)
def test_invalid_type_calibrator_gen(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator with incorrect type.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.int32)]
with self.assertRaises(ValueError):
quantizer.calibrate_and_quantize(
input_gen, dtypes.float32, dtypes.float32, False, dtypes.int8
)
def test_calibration(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
float_model = open(model_path, 'rb').read()
quantizer = _calibrator.Calibrator(float_model)
# Input generator for the model.
def input_gen():
for _ in range(10):
yield [np.ones(shape=(1, 5, 5, 3), dtype=np.float32)]
quantized_model = quantizer.calibrate(input_gen)
self.assertIsNotNone(quantized_model)
def test_add_intermediate_tensors(self):
model_path = resource_loader.get_path_to_datafile(
'test_data/mobilenet_like_model.bin'
)
model = open(model_path, 'rb').read()
added_model = _calibrator.add_intermediate_tensors(model)
self.assertIsNotNone(added_model)
def test_calibrate_model_with_offset_buffer(self):
# Define a simple model to run calibration with.
class MatMulModel(tf.Module):
def __init__(self):
# Use ones for predictable calibration results.
self.filter = np.ones((4, 3)).astype(np.float32)
@tf.function(
input_signature=[tf.TensorSpec(shape=(1, 4), dtype=dtypes.float32)]
)
def __call__(self, input_tensor: tf.Tensor) -> tf.Tensor:
output_tensor = tf.linalg.matmul(input_tensor, self.filter)
return {'output': output_tensor}
model = MatMulModel()
saved_model_path = self.create_tempdir().full_path
tf.saved_model.save(model, saved_model_path)
converter = lite.TFLiteConverter.from_saved_model(saved_model_path)
# Enable the use of buffer offsets.
# pylint: disable=protected-access
converter._experimental_use_buffer_offset = True
# pylint: enable=protected-access
converter.exclude_conversion_metadata = True
model_serialized = converter.convert()
model = flatbuffer_utils.convert_bytearray_to_object(model_serialized)
self.assertTrue(_uses_buffer_offset(model))
quantizer = _calibrator.Calibrator(model_serialized)
# Input generator for the model.
def input_gen():
for _ in range(2):
yield [np.array([1.0, 1.0, 1.0, 1.0], dtype=np.float32)]
calibrated_model_serialized = quantizer.calibrate(input_gen)
self.assertIsNotNone(calibrated_model_serialized)
calibrated_model = flatbuffer_utils.convert_bytearray_to_object(
calibrated_model_serialized
)
self.assertTrue(_uses_buffer_offset(calibrated_model))
# Confirm that the tensors are correctly calibrated.
subgraph = calibrated_model.subgraphs[0]
matmul_input_tensor = subgraph.tensors[0]
self.assertAllClose(matmul_input_tensor.quantization.min, [1.0])
self.assertAllClose(matmul_input_tensor.quantization.max, [1.0])
matmul_filter_tensor = subgraph.tensors[1]
self.assertAllClose(matmul_filter_tensor.quantization.min, [1.0])
self.assertAllClose(matmul_filter_tensor.quantization.max, [1.0])
# The matmul is performed with all ones so the output is expected to be 4s.
matmul_output_tensor = subgraph.tensors[2]
self.assertAllClose(matmul_output_tensor.quantization.min, [4.0])
self.assertAllClose(matmul_output_tensor.quantization.max, [4.0])
if __name__ == '__main__':
test.main()
|
CalibratorTest
|
python
|
huggingface__transformers
|
src/transformers/models/emu3/modeling_emu3.py
|
{
"start": 23757,
"end": 24241
}
|
class ____(nn.GroupNorm):
"""
Same as the torch GroupNorm with the only difference that this ones accepts
an optional kwarg `quant_states` which is not used. This class makes it easier to
use SpatialNorm or GroupNorm without conditionals
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def forward(self, input, quant_states=None):
return F.group_norm(input, self.num_groups, self.weight, self.bias, self.eps)
|
Emu3VQVAEGroupNorm
|
python
|
facebook__pyre-check
|
client/commands/tests/language_server_test.py
|
{
"start": 4957,
"end": 13209
}
|
class ____(testslide.TestCase):
@setup.async_test
async def test_try_initialize_success(self) -> None:
input_channel = await server_setup.create_input_channel_with_requests(
[
json_rpc.Request(
id=0,
method="initialize",
parameters=json_rpc.ByNameParameters(
{
"processId": 42,
"rootUri": None,
"capabilities": {
"textDocument": {
"publishDiagnostics": {},
"synchronization": {
"didSave": True,
},
},
},
}
),
),
json_rpc.Request(
method="initialized", parameters=json_rpc.ByNameParameters({})
),
]
)
bytes_writer = connections.MemoryBytesWriter()
result = await init.async_try_initialize(
input_channel,
connections.AsyncTextWriter(bytes_writer),
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationSuccess)
self.assertEqual(len(bytes_writer.items()), 1)
@setup.async_test
async def test_try_initialize_failure__not_a_request(self) -> None:
input_channel = await server_setup.create_input_channel_with_requests(
[json_rpc.Request(method="derp", parameters=None)]
)
output_channel = connections.create_memory_text_writer()
result = await init.async_try_initialize(
input_channel,
output_channel,
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationFailure)
@setup.async_test
async def test_try_initialize_failure__invalid_parameters(self) -> None:
input_channel = await server_setup.create_input_channel_with_requests(
[json_rpc.Request(id=0, method="initialize", parameters=None)]
)
output_channel = connections.create_memory_text_writer()
result = await init.async_try_initialize(
input_channel,
output_channel,
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationFailure)
@setup.async_test
async def test_try_initialize_failure__no_initialized(self) -> None:
input_channel = await server_setup.create_input_channel_with_requests(
[
json_rpc.Request(
id=0,
method="initialize",
parameters=json_rpc.ByNameParameters(
{
"processId": 42,
"rootUri": None,
"capabilities": {
"textDocument": {
"publishDiagnostics": {},
"synchronization": {
"didSave": True,
},
},
},
}
),
),
json_rpc.Request(
method="derp", parameters=json_rpc.ByNameParameters({})
),
]
)
output_channel = connections.create_memory_text_writer()
result = await init.async_try_initialize(
input_channel,
output_channel,
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationFailure)
@setup.async_test
async def test_try_initialize_exit(self) -> None:
input_channel = await server_setup.create_input_channel_with_requests(
[json_rpc.Request(method="exit", parameters=None)]
)
output_channel = connections.create_memory_text_writer()
result = await init.async_try_initialize(
input_channel,
output_channel,
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationExit)
@setup.async_test
async def test_try_initialize_exit__shutdown_after_initialize(self) -> None:
input_channel = await server_setup.create_input_channel_with_requests(
[
json_rpc.Request(
id=0,
method="initialize",
parameters=json_rpc.ByNameParameters(
{
"processId": 42,
"rootUri": None,
"capabilities": {
"textDocument": {
"publishDiagnostics": {},
"synchronization": {
"didSave": True,
},
},
},
}
),
),
json_rpc.Request(method="shutdown", parameters=None),
json_rpc.Request(method="exit", parameters=None),
]
)
output_channel = connections.create_memory_text_writer()
result = await init.async_try_initialize(
input_channel,
output_channel,
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationExit)
@setup.async_test
async def test_try_initialize_exit__shutdown_without_exit(self) -> None:
input_channel = await server_setup.create_input_channel_with_requests(
[
json_rpc.Request(
id=0,
method="initialize",
parameters=json_rpc.ByNameParameters(
{
"processId": 42,
"rootUri": None,
"capabilities": {},
}
),
),
json_rpc.Request(method="shutdown", parameters=None),
]
)
output_channel = connections.create_memory_text_writer()
result = await init.async_try_initialize(
input_channel,
output_channel,
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationExit)
@setup.async_test
async def test_try_initialize_exit__without_anything(self) -> None:
result = await init.async_try_initialize(
connections.create_memory_text_reader(""),
connections.create_memory_text_writer(),
compute_initialize_result=lambda parameters: persistent.process_initialize_request(
parameters,
server_setup.DEFAULT_FEATURES,
),
)
self.assertIsInstance(result, init.InitializationExit)
|
InitializeTest
|
python
|
PyCQA__pylint
|
tests/functional/t/too/too_many_instance_attributes_py37.py
|
{
"start": 388,
"end": 573
}
|
class ____:
a_1: int
a_2: int
a_3: int
a_4: int
a_5: int
a_6: int
a_7: int
a_8: InitVar[int]
def __post_init__(self, a_8):
self.a_1 += a_8
|
Hello
|
python
|
walkccc__LeetCode
|
solutions/2355. Maximum Number of Books You Can Take/2355.py
|
{
"start": 0,
"end": 864
}
|
class ____:
def maximumBooks(self, books: list[int]) -> int:
# dp[i] := the maximum the number of books we can take from books[0..i] with taking all of
# books[i]
dp = [0] * len(books)
stack = [] # the possible indices we can reach
for i, book in enumerate(books):
# We may take all of books[j], where books[j] < books[i] - (i - j).
while stack and books[stack[-1]] >= book - (i - stack[-1]):
stack.pop()
# We can now take books[j + 1..i].
j = stack[-1] if stack else -1
lastPicked = book - (i - j) + 1
if lastPicked > 1:
# book + (book - 1) + ... + (book - (i - j) + 1)
dp[i] = (book + lastPicked) * (i - j) // 2
else:
# 1 + 2 + ... + book
dp[i] = book * (book + 1) // 2
if j >= 0:
dp[i] += dp[j]
stack.append(i)
return max(dp)
|
Solution
|
python
|
openai__openai-python
|
src/openai/types/evals/create_eval_completions_run_data_source_param.py
|
{
"start": 4026,
"end": 4607
}
|
class ____(TypedDict, total=False):
content: Required[InputMessagesTemplateTemplateEvalItemContent]
"""Inputs to the model - can contain template strings."""
role: Required[Literal["user", "assistant", "system", "developer"]]
"""The role of the message input.
One of `user`, `assistant`, `system`, or `developer`.
"""
type: Literal["message"]
"""The type of the message input. Always `message`."""
InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateEvalItem]
|
InputMessagesTemplateTemplateEvalItem
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/indices/knowledge_graph/base.py
|
{
"start": 1514,
"end": 14513
}
|
class ____(BaseIndex[KG]):
"""
Knowledge Graph Index.
Build a KG by extracting triplets, and leveraging the KG during query-time.
Args:
kg_triplet_extract_template (BasePromptTemplate): The prompt to use for
extracting triplets.
max_triplets_per_chunk (int): The maximum number of triplets to extract.
storage_context (Optional[StorageContext]): The storage context to use.
graph_store (Optional[GraphStore]): The graph store to use.
show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
include_embeddings (bool): Whether to include embeddings in the index.
Defaults to False.
max_object_length (int): The maximum length of the object in a triplet.
Defaults to 128.
kg_triplet_extract_fn (Optional[Callable]): The function to use for
extracting triplets. Defaults to None.
"""
index_struct_cls = KG
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[KG] = None,
llm: Optional[LLM] = None,
embed_model: Optional[BaseEmbedding] = None,
storage_context: Optional[StorageContext] = None,
kg_triplet_extract_template: Optional[BasePromptTemplate] = None,
max_triplets_per_chunk: int = 10,
include_embeddings: bool = False,
show_progress: bool = False,
max_object_length: int = 128,
kg_triplet_extract_fn: Optional[Callable] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
# need to set parameters before building index in base class.
self.include_embeddings = include_embeddings
self.max_triplets_per_chunk = max_triplets_per_chunk
self.kg_triplet_extract_template = (
kg_triplet_extract_template or DEFAULT_KG_TRIPLET_EXTRACT_PROMPT
)
# NOTE: Partially format keyword extract template here.
self.kg_triplet_extract_template = (
self.kg_triplet_extract_template.partial_format(
max_knowledge_triplets=self.max_triplets_per_chunk
)
)
self._max_object_length = max_object_length
self._kg_triplet_extract_fn = kg_triplet_extract_fn
self._llm = llm or Settings.llm
self._embed_model = embed_model or Settings.embed_model
super().__init__(
nodes=nodes,
index_struct=index_struct,
storage_context=storage_context,
show_progress=show_progress,
objects=objects,
**kwargs,
)
# TODO: legacy conversion - remove in next release
if (
len(self.index_struct.table) > 0
and isinstance(self.graph_store, SimpleGraphStore)
and len(self.graph_store._data.graph_dict) == 0
):
logger.warning("Upgrading previously saved KG index to new storage format.")
self.graph_store._data.graph_dict = self.index_struct.rel_map
@property
def graph_store(self) -> GraphStore:
return self._graph_store
def as_retriever(
self,
retriever_mode: Optional[str] = None,
embed_model: Optional[BaseEmbedding] = None,
**kwargs: Any,
) -> BaseRetriever:
from llama_index.core.indices.knowledge_graph.retrievers import (
KGRetrieverMode,
KGTableRetriever,
)
if len(self.index_struct.embedding_dict) > 0 and retriever_mode is None:
retriever_mode = KGRetrieverMode.HYBRID
elif retriever_mode is None:
retriever_mode = KGRetrieverMode.KEYWORD
elif isinstance(retriever_mode, str):
retriever_mode = KGRetrieverMode(retriever_mode)
else:
retriever_mode = retriever_mode
return KGTableRetriever(
self,
object_map=self._object_map,
llm=self._llm,
embed_model=embed_model or self._embed_model,
retriever_mode=retriever_mode,
**kwargs,
)
def _extract_triplets(self, text: str) -> List[Tuple[str, str, str]]:
if self._kg_triplet_extract_fn is not None:
return self._kg_triplet_extract_fn(text)
else:
return self._llm_extract_triplets(text)
def _llm_extract_triplets(self, text: str) -> List[Tuple[str, str, str]]:
"""Extract keywords from text."""
response = self._llm.predict(
self.kg_triplet_extract_template,
text=text,
)
return self._parse_triplet_response(
response, max_length=self._max_object_length
)
@staticmethod
def _parse_triplet_response(
response: str, max_length: int = 128
) -> List[Tuple[str, str, str]]:
knowledge_strs = response.strip().split("\n")
results = []
for text in knowledge_strs:
if "(" not in text or ")" not in text or text.index(")") < text.index("("):
# skip empty lines and non-triplets
continue
triplet_part = text[text.index("(") + 1 : text.index(")")]
tokens = triplet_part.split(",")
if len(tokens) != 3:
continue
if any(len(s.encode("utf-8")) > max_length for s in tokens):
# We count byte-length instead of len() for UTF-8 chars,
# will skip if any of the tokens are too long.
# This is normally due to a poorly formatted triplet
# extraction, in more serious KG building cases
# we'll need NLP models to better extract triplets.
continue
subj, pred, obj = map(str.strip, tokens)
if not subj or not pred or not obj:
# skip partial triplets
continue
# Strip double quotes and Capitalize triplets for disambiguation
subj, pred, obj = (
entity.strip('"').capitalize() for entity in [subj, pred, obj]
)
results.append((subj, pred, obj))
return results
def _build_index_from_nodes(
self, nodes: Sequence[BaseNode], **build_kwargs: Any
) -> KG:
"""Build the index from nodes."""
# do simple concatenation
index_struct = self.index_struct_cls()
nodes_with_progress = get_tqdm_iterable(
nodes, self._show_progress, "Processing nodes"
)
for n in nodes_with_progress:
triplets = self._extract_triplets(
n.get_content(metadata_mode=MetadataMode.LLM)
)
logger.debug(f"> Extracted triplets: {triplets}")
for triplet in triplets:
subj, _, obj = triplet
self.upsert_triplet(triplet)
index_struct.add_node([subj, obj], n)
if self.include_embeddings:
triplet_texts = [str(t) for t in triplets]
embed_outputs = self._embed_model.get_text_embedding_batch(
triplet_texts, show_progress=self._show_progress
)
for rel_text, rel_embed in zip(triplet_texts, embed_outputs):
index_struct.add_to_embedding_dict(rel_text, rel_embed)
return index_struct
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
for n in nodes:
triplets = self._extract_triplets(
n.get_content(metadata_mode=MetadataMode.LLM)
)
logger.debug(f"Extracted triplets: {triplets}")
for triplet in triplets:
subj, _, obj = triplet
triplet_str = str(triplet)
self.upsert_triplet(triplet)
self._index_struct.add_node([subj, obj], n)
if (
self.include_embeddings
and triplet_str not in self._index_struct.embedding_dict
):
rel_embedding = self._embed_model.get_text_embedding(triplet_str)
self._index_struct.add_to_embedding_dict(triplet_str, rel_embedding)
# Update the storage context's index_store
self._storage_context.index_store.add_index_struct(self._index_struct)
def upsert_triplet(
self, triplet: Tuple[str, str, str], include_embeddings: bool = False
) -> None:
"""
Insert triplets and optionally embeddings.
Used for manual insertion of KG triplets (in the form
of (subject, relationship, object)).
Args:
triplet (tuple): Knowledge triplet
embedding (Any, optional): Embedding option for the triplet. Defaults to None.
"""
self._graph_store.upsert_triplet(*triplet)
triplet_str = str(triplet)
if include_embeddings:
set_embedding = self._embed_model.get_text_embedding(triplet_str)
self._index_struct.add_to_embedding_dict(str(triplet), set_embedding)
self._storage_context.index_store.add_index_struct(self._index_struct)
def add_node(self, keywords: List[str], node: BaseNode) -> None:
"""
Add node.
Used for manual insertion of nodes (keyed by keywords).
Args:
keywords (List[str]): Keywords to index the node.
node (Node): Node to be indexed.
"""
self._index_struct.add_node(keywords, node)
self._docstore.add_documents([node], allow_update=True)
def upsert_triplet_and_node(
self,
triplet: Tuple[str, str, str],
node: BaseNode,
include_embeddings: bool = False,
) -> None:
"""
Upsert KG triplet and node.
Calls both upsert_triplet and add_node.
Behavior is idempotent; if Node already exists,
only triplet will be added.
Args:
keywords (List[str]): Keywords to index the node.
node (Node): Node to be indexed.
include_embeddings (bool): Option to add embeddings for triplets. Defaults to False
"""
subj, _, obj = triplet
self.upsert_triplet(triplet)
self.add_node([subj, obj], node)
triplet_str = str(triplet)
if include_embeddings:
set_embedding = self._embed_model.get_text_embedding(triplet_str)
self._index_struct.add_to_embedding_dict(str(triplet), set_embedding)
self._storage_context.index_store.add_index_struct(self._index_struct)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Delete is not supported for KG index yet.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids_sets = list(self._index_struct.table.values())
node_doc_ids = list(set().union(*node_doc_ids_sets))
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
def get_networkx_graph(self, limit: int = 100) -> Any:
"""
Get networkx representation of the graph structure.
Args:
limit (int): Number of starting nodes to be included in the graph.
NOTE: This function requires networkx to be installed.
NOTE: This is a beta feature.
"""
try:
import networkx as nx
except ImportError:
raise ImportError(
"Please install networkx to visualize the graph: `pip install networkx`"
)
g = nx.Graph()
subjs = list(self.index_struct.table.keys())
# add edges
rel_map = self._graph_store.get_rel_map(subjs=subjs, depth=1, limit=limit)
added_nodes = set()
for keyword in rel_map:
for path in rel_map[keyword]:
subj = keyword
for i in range(0, len(path), 2):
if i + 2 >= len(path):
break
if subj not in added_nodes:
g.add_node(subj)
added_nodes.add(subj)
rel = path[i + 1]
obj = path[i + 2]
g.add_edge(subj, obj, label=rel, title=rel)
subj = obj
return g
@property
def query_context(self) -> Dict[str, Any]:
return {GRAPH_STORE_KEY: self._graph_store}
|
KnowledgeGraphIndex
|
python
|
apache__airflow
|
airflow-core/tests/unit/models/test_renderedtifields.py
|
{
"start": 2844,
"end": 19782
}
|
class ____:
"""Unit tests for RenderedTaskInstanceFields."""
@staticmethod
def clean_db():
clear_db_runs()
clear_db_dags()
clear_rendered_ti_fields()
def setup_method(self):
self.clean_db()
def teardown_method(self):
self.clean_db()
@pytest.mark.parametrize(
("templated_field", "expected_rendered_field"),
[
pytest.param(None, None, id="None"),
pytest.param([], [], id="list"),
pytest.param({}, {}, id="empty_dict"),
pytest.param((), [], id="empty_tuple"),
pytest.param(set(), "set()", id="empty_set"),
pytest.param("test-string", "test-string", id="string"),
pytest.param({"foo": "bar"}, {"foo": "bar"}, id="dict"),
pytest.param(("foo", "bar"), ["foo", "bar"], id="tuple"),
pytest.param({"foo"}, "{'foo'}", id="set"),
pytest.param("{{ task.task_id }}", "test", id="templated_string"),
(date(2018, 12, 6), "2018-12-06"),
pytest.param(datetime(2018, 12, 6, 10, 55), "2018-12-06 10:55:00+00:00", id="datetime"),
pytest.param(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes({'att1': 'test', 'att2': '{{ task.task_id }}', "
"'template_fields': ['att1']})",
id="class_with_custom_attributes",
),
pytest.param(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes({'nested1': ClassWithCustomAttributes("
"{'att1': 'test', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes("
"{'att3': '{{ task.task_id }}', 'att4': '{{ task.task_id }}', 'template_fields': ['att3']}), "
"'template_fields': ['nested1']})",
id="nested_class_with_custom_attributes",
),
pytest.param(
"a" * 5000,
f"Truncated. You can change this behaviour in [core]max_templated_field_length. {('a' * 5000)[: max_length - 79]!r}... ",
id="large_string",
),
pytest.param(
LargeStrObject(),
f"Truncated. You can change this behaviour in [core]max_templated_field_length. {str(LargeStrObject())[: max_length - 79]!r}... ",
id="large_object",
),
],
)
def test_get_templated_fields(self, templated_field, expected_rendered_field, dag_maker):
"""
Test that template_fields are rendered correctly, stored in the Database,
and are correctly fetched using RTIF.get_templated_fields
"""
with dag_maker("test_serialized_rendered_fields"):
task = BashOperator(task_id="test", bash_command=templated_field)
task_2 = BashOperator(task_id="test2", bash_command=templated_field)
dr = dag_maker.create_dagrun()
session = dag_maker.session
ti, ti2 = dr.task_instances
ti.task = task
ti2.task = task_2
rtif = RTIF(ti=ti)
assert ti.dag_id == rtif.dag_id
assert ti.task_id == rtif.task_id
assert ti.run_id == rtif.run_id
assert expected_rendered_field == rtif.rendered_fields.get("bash_command")
session.add(rtif)
session.flush()
assert RTIF.get_templated_fields(ti=ti, session=session) == {
"bash_command": expected_rendered_field,
"env": None,
"cwd": None,
}
# Test the else part of get_templated_fields
# i.e. for the TIs that are not stored in RTIF table
# Fetching them will return None
assert RTIF.get_templated_fields(ti=ti2) is None
@pytest.mark.enable_redact
def test_secrets_are_masked_when_large_string(self, dag_maker):
"""
Test that secrets are masked when the templated field is a large string
"""
Variable.set(
key="api_key",
value="test api key are still masked" * 5000,
)
with dag_maker("test_serialized_rendered_fields"):
task = BashOperator(task_id="test", bash_command="echo {{ var.value.api_key }}")
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
ti.task = task
rtif = RTIF(ti=ti)
assert "***" in rtif.rendered_fields.get("bash_command")
@mock.patch("airflow.models.BaseOperator.render_template")
def test_pandas_dataframes_works_with_the_string_compare(self, render_mock, dag_maker):
"""Test that rendered dataframe gets passed through the serialized template fields."""
import pandas
render_mock.return_value = pandas.DataFrame({"a": [1, 2, 3]})
with dag_maker("test_serialized_rendered_fields"):
@task_decorator
def generate_pd():
return pandas.DataFrame({"a": [1, 2, 3]})
@task_decorator
def consume_pd(data):
return data
consume_pd(generate_pd())
dr = dag_maker.create_dagrun()
ti, ti2 = dr.task_instances
rtif = RTIF(ti=ti2)
rtif.write()
@pytest.mark.parametrize(
("rtif_num", "num_to_keep", "remaining_rtifs", "expected_query_count"),
[
(0, 1, 0, 1),
(1, 1, 1, 1),
(1, 0, 1, 0),
(3, 1, 1, 1),
(4, 2, 2, 1),
(5, 2, 2, 1),
],
)
def test_delete_old_records(
self, rtif_num, num_to_keep, remaining_rtifs, expected_query_count, dag_maker, session
):
"""
Test that old records are deleted from rendered_task_instance_fields table
for a given task_id and dag_id.
"""
with dag_maker("test_delete_old_records") as dag:
task = BashOperator(task_id="test", bash_command="echo {{ ds }}")
rtif_list = []
for num in range(rtif_num):
dr = dag_maker.create_dagrun(run_id=str(num), logical_date=dag.start_date + timedelta(days=num))
ti = dr.task_instances[0]
ti.task = task
rtif_list.append(RTIF(ti))
session.add_all(rtif_list)
session.flush()
result = session.query(RTIF).filter(RTIF.dag_id == dag.dag_id, RTIF.task_id == task.task_id).all()
for rtif in rtif_list:
assert rtif in result
assert rtif_num == len(result)
with assert_queries_count(expected_query_count):
RTIF.delete_old_records(task_id=task.task_id, dag_id=task.dag_id, num_to_keep=num_to_keep)
result = session.query(RTIF).filter(RTIF.dag_id == dag.dag_id, RTIF.task_id == task.task_id).all()
assert remaining_rtifs == len(result)
@pytest.mark.parametrize(
("num_runs", "num_to_keep", "remaining_rtifs", "expected_query_count"),
[
(3, 1, 1, 1),
(4, 2, 2, 1),
(5, 2, 2, 1),
],
)
def test_delete_old_records_mapped(
self, num_runs, num_to_keep, remaining_rtifs, expected_query_count, dag_maker, session
):
"""
Test that old records are deleted from rendered_task_instance_fields table
for a given task_id and dag_id with mapped tasks.
"""
with dag_maker("test_delete_old_records", session=session, serialized=True) as dag:
mapped = BashOperator.partial(task_id="mapped").expand(bash_command=["a", "b"])
for num in range(num_runs):
dr = dag_maker.create_dagrun(
run_id=f"run_{num}", logical_date=dag.start_date + timedelta(days=num)
)
TaskMap.expand_mapped_task(dag.task_dict[mapped.task_id], dr.run_id, session=dag_maker.session)
session.refresh(dr)
for ti in dr.task_instances:
ti.task = mapped
session.add(RTIF(ti))
session.flush()
result = session.query(RTIF).filter(RTIF.dag_id == dag.dag_id).all()
assert len(result) == num_runs * 2
with assert_queries_count(expected_query_count):
RTIF.delete_old_records(
task_id=mapped.task_id, dag_id=dr.dag_id, num_to_keep=num_to_keep, session=session
)
result = session.query(RTIF).filter_by(dag_id=dag.dag_id, task_id=mapped.task_id).all()
rtif_num_runs = Counter(rtif.run_id for rtif in result)
assert len(rtif_num_runs) == remaining_rtifs
# Check that we have _all_ the data for each row
assert len(result) == remaining_rtifs * 2
def test_write(self, dag_maker):
"""
Test records can be written and overwritten
"""
Variable.set(key="test_key", value="test_val")
session = settings.Session()
result = session.query(RTIF).all()
assert result == []
with dag_maker("test_write"):
task = BashOperator(task_id="test", bash_command="echo {{ var.value.test_key }}")
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
ti.task = task
rtif = RTIF(ti)
rtif.write()
result = (
session.query(RTIF.dag_id, RTIF.task_id, RTIF.rendered_fields)
.filter(
RTIF.dag_id == rtif.dag_id,
RTIF.task_id == rtif.task_id,
RTIF.run_id == rtif.run_id,
)
.first()
)
assert result == ("test_write", "test", {"bash_command": "echo test_val", "env": None, "cwd": None})
# Test that overwrite saves new values to the DB
Variable.delete("test_key")
Variable.set(key="test_key", value="test_val_updated")
self.clean_db()
with dag_maker("test_write"):
updated_task = BashOperator(task_id="test", bash_command="echo {{ var.value.test_key }}")
dr = dag_maker.create_dagrun()
ti = dr.task_instances[0]
ti.task = updated_task
rtif_updated = RTIF(ti)
rtif_updated.write()
result_updated = (
session.query(RTIF.dag_id, RTIF.task_id, RTIF.rendered_fields)
.filter(
RTIF.dag_id == rtif_updated.dag_id,
RTIF.task_id == rtif_updated.task_id,
RTIF.run_id == rtif_updated.run_id,
)
.first()
)
assert result_updated == (
"test_write",
"test",
{"bash_command": "echo test_val_updated", "env": None, "cwd": None},
)
@mock.patch.dict(os.environ, {"AIRFLOW_VAR_API_KEY": "secret"})
def test_redact(self, dag_maker):
with mock.patch("airflow._shared.secrets_masker.redact", autospec=True) as redact:
with dag_maker("test_ritf_redact", serialized=True):
task = BashOperator(
task_id="test",
bash_command="echo {{ var.value.api_key }}",
env={"foo": "secret", "other_api_key": "masked based on key name"},
)
dr = dag_maker.create_dagrun()
redact.side_effect = [
# Order depends on order in Operator template_fields
"val 1", # bash_command
"val 2", # env
"val 3", # cwd
]
ti = dr.task_instances[0]
ti.task = task
rtif = RTIF(ti=ti)
assert rtif.rendered_fields == {
"bash_command": "val 1",
"env": "val 2",
"cwd": "val 3",
}
def test_rtif_deletion_stale_data_error(self, dag_maker, session):
"""
Here we verify bad behavior. When we rerun a task whose RTIF
will get removed, we get a stale data error.
"""
with dag_maker(dag_id="test_retry_handling"):
task = PythonOperator(
task_id="test_retry_handling_op",
python_callable=lambda a, b: print(f"{a}\n{b}\n"),
op_args=[
"dag {{dag.dag_id}};",
"try_number {{ti.try_number}};yo",
],
)
def popuate_rtif(date):
run_id = f"abc_{date.to_date_string()}"
dr = session.scalar(select(DagRun).where(DagRun.logical_date == date, DagRun.run_id == run_id))
if not dr:
dr = dag_maker.create_dagrun(logical_date=date, run_id=run_id)
ti: TaskInstance = dr.task_instances[0]
ti.state = TaskInstanceState.SUCCESS
rtif = RTIF(ti=ti, render_templates=False, rendered_fields={"a": "1"})
session.merge(rtif)
session.flush()
return dr
base_date = pendulum.datetime(2021, 1, 1)
exec_dates = [base_date.add(days=x) for x in range(40)]
for when in exec_dates:
popuate_rtif(date=when)
session.commit()
session.expunge_all()
# find oldest dag run
dr = session.scalar(select(DagRun).join(RTIF.dag_run).order_by(DagRun.run_after).limit(1))
assert dr
ti: TaskInstance = dr.task_instances[0]
ti.state = None
session.flush()
# rerun the old run. this will shouldn't fail
ti.task = task
ti.run()
def test_nested_dictionary_template_field_rendering(self, dag_maker):
"""
Test that nested dictionary items in template fields are properly rendered
when using template_fields_renderers with dot-separated paths.
This test verifies the fix for rendering dictionary items in templates.
Before the fix, nested dictionary items specified in template_fields_renderers
(e.g., "configuration.query.sql") would not be rendered. After the fix,
these nested items are properly extracted and rendered.
"""
# Create a custom operator with a dictionary template field
class MyConfigOperator(BaseOperator):
template_fields: Sequence[str] = ("configuration",)
template_fields_renderers = {
"configuration": "json",
"configuration.query.sql": "sql",
}
def __init__(self, configuration: dict, **kwargs):
super().__init__(**kwargs)
self.configuration = configuration
# Create a configuration dictionary with nested structure
configuration = {
"query": {
"job_id": "123",
"sql": "select * from my_table where date = '{{ ds }}'",
}
}
with dag_maker("test_nested_dict_rendering"):
task = MyConfigOperator(task_id="test_config", configuration=configuration)
dr = dag_maker.create_dagrun()
session = dag_maker.session
ti = dr.task_instances[0]
ti.task = task
rtif = RTIF(ti=ti)
# Verify that the base configuration field is rendered
assert "configuration" in rtif.rendered_fields
rendered_config = rtif.rendered_fields["configuration"]
assert isinstance(rendered_config, dict)
assert rendered_config["query"]["job_id"] == "123"
# The SQL should be templated (ds should be replaced with actual date)
assert "select * from my_table where date = '" in rendered_config["query"]["sql"]
assert rendered_config["query"]["sql"] != configuration["query"]["sql"]
# Verify that the nested dictionary item is also rendered
# This is the key test - before the fix, this would not exist
assert "configuration.query.sql" in rtif.rendered_fields
rendered_sql = rtif.rendered_fields["configuration.query.sql"]
assert isinstance(rendered_sql, str)
assert "select * from my_table where date = '" in rendered_sql
# The template should be rendered (ds should be replaced)
assert "{{ ds }}" not in rendered_sql
# Store in database and verify retrieval
session.add(rtif)
session.flush()
retrieved_fields = RTIF.get_templated_fields(ti=ti, session=session)
assert retrieved_fields is not None
assert "configuration" in retrieved_fields
assert "configuration.query.sql" in retrieved_fields
assert retrieved_fields["configuration.query.sql"] == rendered_sql
|
TestRenderedTaskInstanceFields
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/nocover/test_stateful.py
|
{
"start": 3469,
"end": 3717
}
|
class ____(RuleBasedStateMachine):
b1 = Bundle("b1")
b2 = Bundle("b2")
@rule(targets=(b1, b2))
def populate(self):
return 1
@rule(x=b1, y=b2)
def fail(self, x, y):
raise AssertionError
|
PopulateMultipleTargets
|
python
|
chroma-core__chroma
|
chromadb/proto/utils.py
|
{
"start": 173,
"end": 2570
}
|
class ____(
grpc.UnaryUnaryClientInterceptor, grpc.UnaryStreamClientInterceptor
):
"""
A gRPC client interceptor that retries RPCs on specific status codes. By default, it retries on UNAVAILABLE and UNKNOWN status codes.
This interceptor should be placed after the OpenTelemetry interceptor in the interceptor list.
"""
max_attempts: int
retryable_status_codes: Set[grpc.StatusCode]
def __init__(
self,
max_attempts: int = 5,
retryable_status_codes: Set[grpc.StatusCode] = set(
[grpc.StatusCode.UNAVAILABLE, grpc.StatusCode.UNKNOWN]
),
) -> None:
self.max_attempts = max_attempts
self.retryable_status_codes = retryable_status_codes
def _intercept_call(self, continuation, client_call_details, request_or_iterator):
sleep_span: Optional[Span] = None
def before_sleep(_):
from chromadb.telemetry.opentelemetry import tracer
nonlocal sleep_span
if tracer is not None:
sleep_span = tracer.start_span("Waiting to retry RPC")
@retry(
wait=wait_exponential_jitter(0.1, jitter=0.1),
stop=stop_after_attempt(self.max_attempts),
retry=retry_if_result(lambda x: x.code() in self.retryable_status_codes),
before_sleep=before_sleep,
)
def wrapped(*args, **kwargs):
nonlocal sleep_span
if sleep_span is not None:
sleep_span.end()
sleep_span = None
return continuation(*args, **kwargs)
return wrapped(client_call_details, request_or_iterator)
def intercept_unary_unary(self, continuation, client_call_details, request):
return self._intercept_call(continuation, client_call_details, request)
def intercept_unary_stream(self, continuation, client_call_details, request):
return self._intercept_call(continuation, client_call_details, request)
def intercept_stream_unary(
self, continuation, client_call_details, request_iterator
):
return self._intercept_call(continuation, client_call_details, request_iterator)
def intercept_stream_stream(
self, continuation, client_call_details, request_iterator
):
return self._intercept_call(continuation, client_call_details, request_iterator)
|
RetryOnRpcErrorClientInterceptor
|
python
|
numpy__numpy
|
numpy/polynomial/tests/test_hermite.py
|
{
"start": 9970,
"end": 11307
}
|
class ____:
def test_hermder(self):
# check exceptions
assert_raises(TypeError, herm.hermder, [0], .5)
assert_raises(ValueError, herm.hermder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0] * i + [1]
res = herm.hermder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = herm.hermder(herm.hermint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0] * i + [1]
res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T
res = herm.hermder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermder(c) for c in c2d])
res = herm.hermder(c2d, axis=1)
assert_almost_equal(res, tgt)
|
TestDerivative
|
python
|
modin-project__modin
|
modin/experimental/core/storage_formats/pandas/parsers.py
|
{
"start": 3755,
"end": 4508
}
|
class ____(PandasParser):
@staticmethod
@doc(_doc_parse_func, parameters=_doc_parse_parameters_common)
def parse(fname, **kwargs):
warnings.filterwarnings("ignore")
num_splits = 1
single_worker_read = kwargs.pop("single_worker_read", None)
df = pandas.read_pickle(fname, **kwargs)
if single_worker_read:
return df
assert isinstance(
df, pandas.DataFrame
), f"Pickled obj type: [{type(df)}] in [{fname}]; works only with pandas.DataFrame"
length = len(df)
width = len(df.columns)
return _split_result_for_readers(1, num_splits, df) + [length, width]
@doc(_doc_pandas_parser_class, data_type="parquet files")
|
ExperimentalPandasPickleParser
|
python
|
getsentry__sentry
|
src/sentry/monitors/processing_errors/errors.py
|
{
"start": 1521,
"end": 1865
}
|
class ____(TypedDict):
"""
The guid for the checkin matched a checkin that was related to a different
project than the one provided in the DSN
"""
type: Literal[ProcessingErrorType.CHECKIN_GUID_PROJECT_MISMATCH]
guid: str
"""
The guid which is associated to a different project
"""
|
CheckinGuidProjectMismatch
|
python
|
ray-project__ray
|
python/ray/exceptions.py
|
{
"start": 27724,
"end": 28238
}
|
class ____(RayError):
"""Raised when a runtime environment fails to be set up.
Args:
error_message: The error message that explains
why runtime env setup has failed.
"""
def __init__(self, error_message: str = None):
self.error_message = error_message
def __str__(self):
msgs = ["Failed to set up runtime environment."]
if self.error_message:
msgs.append(self.error_message)
return "\n".join(msgs)
@PublicAPI
|
RuntimeEnvSetupError
|
python
|
ansible__ansible
|
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/filter/filter_subdir/my_subdir_filters.py
|
{
"start": 126,
"end": 258
}
|
class ____(object):
def filters(self):
return {
'test_subdir_filter': test_subdir_filter
}
|
FilterModule
|
python
|
google__jax
|
jax/_src/api.py
|
{
"start": 77723,
"end": 103901
}
|
class ____(NamedTuple):
version: int # For forward and backward compatibility
xla_executable: xc.LoadedExecutable
in_handler: Any
out_handler: Any
out_pytree_def: Any
# Data needed to handle the inputs.
input_devices: Sequence[xc.Device]
input_indices: Sequence[sharding_specs.Index]
input_array_shardings: Sequence[Any]
# Data needed to build the Array from C++.
out_avals: Sequence[Any]
out_array_shardings: Sequence[Any]
out_committed: Sequence[Any]
def _cpp_pmap(
fun: Callable,
axis_name: AxisName | None = None,
*,
in_axes=0,
out_axes=0,
static_broadcasted_argnums: int | Iterable[int] = (),
devices: Sequence[xc.Device] | None = None, # noqa: F811
backend: str | None = None,
axis_size: int | None = None,
donate_argnums: int | Iterable[int] = (),
) -> Any:
axis_name, static_broadcasted_tuple, donate_tuple = _shared_code_pmap(
fun, axis_name, static_broadcasted_argnums, donate_argnums, in_axes,
out_axes)
del static_broadcasted_argnums, donate_argnums
prepare_pmap_fn = partial(_prepare_pmap,
fun, in_axes, out_axes, static_broadcasted_tuple, donate_tuple,
devices, backend, axis_size)
@api_boundary
def cache_miss(*args, **kwargs):
p = prepare_pmap_fn(args, kwargs)
for arg in p.flat_args:
dispatch.check_arg(arg)
params = dict(
backend=backend,
axis_name=axis_name,
axis_size=p.local_axis_size,
global_axis_size=p.global_axis_size,
devices=p.devices,
in_axes=p.in_axes_flat,
out_axes_thunk=p.out_axes_thunk,
name=p.flat_fun.__name__,
donated_invars=p.donated_invars,
is_explicit_global_axis_size=p.is_explicit_global_axis_size,
)
execute: Callable | None = None
with core.take_current_trace() as trace:
try:
if isinstance(trace, core.EvalTrace):
execute, const_args = pxla.xla_pmap_impl_lazy(p.flat_fun, *p.flat_args, **params)
out = execute(*const_args, *p.flat_args)
else:
out = pxla.xla_pmap_p.bind_with_trace(trace, (p.flat_fun, *p.flat_args), params)
except api_util.InternalFloatingPointError as e:
raise FloatingPointError(f'Invalid value ({e.ty}) encountered in parallel computation.')
out_tree, out_flat = p.out_tree, out
out_pytree_def = out_tree()
out = tree_unflatten(out_pytree_def, out_flat)
### Decide whether we can support the C++ fast path
use_fastpath = False
if execute is not None and isinstance(execute, pxla.ExecuteReplicated):
execute_replicated = typing.cast(pxla.ExecuteReplicated, execute)
use_fastpath = (
# TODO(sharadmv): Enable effects in replicated computation
not execute_replicated.has_unordered_effects
and not execute_replicated.has_host_callbacks and
len(const_args) == 0 and
# No tracers in the outputs.
all(isinstance(x, xc.ArrayImpl) for x in out_flat))
### If we can use the fastpath, we return required info to the caller.
if use_fastpath:
execute_replicated = typing.cast(pxla.ExecuteReplicated, execute)
out_handler = execute_replicated.out_handler
in_handler = execute_replicated.in_handler
out_array_shardings = [out.sharding for out in out_flat]
out_committed = [out._committed for out in out_flat]
fastpath_data = _PmapFastpathData(
version=1,
xla_executable=execute_replicated.xla_executable,
in_handler=in_handler,
out_handler=out_handler,
out_pytree_def=out_pytree_def,
input_devices=in_handler.local_devices,
input_indices=in_handler.input_indices,
input_array_shardings=in_handler.in_shardings,
out_avals=out_handler.out_avals,
out_array_shardings=out_array_shardings,
out_committed=out_committed,
)
else:
fastpath_data = None
return out, fastpath_data
cpp_mapped_f = pmap_lib.pmap(
fun, cache_miss, static_broadcasted_tuple,
lambda x, s: pxla.shard_args([s], [None],
[xc.ArrayCopySemantics.REUSE_INPUT], [x])[0],
pytree_registry=tree_util.default_registry)
_pmap_cache_clears.add(cpp_mapped_f)
pmap_f = wraps(fun)(cpp_mapped_f)
# Store some data for the `lower` and `trace` methods
pmap_f._fun = fun
pmap_f._prepare_pmap = prepare_pmap_fn
pmap_f._backend = backend
pmap_f._axis_name = axis_name
pmap_f._donate_tuple = donate_tuple
# TODO(necula): move these to top-level; we don't need to do this for
# every pmap
cpp_mapped_f_class = type(pmap_f)
cpp_mapped_f_class.lower = _cpp_mapped_lower
# We return directly the function produced by pmap_lib.pmap, because we do not
# want to have Python in the dispatch path.
return pmap_f
@api_boundary
def _cpp_mapped_lower(pmap_f, *args, **kwargs):
  """Implements the ``.lower`` method attached to C++-dispatched pmap functions.

  Traces ``pmap_f`` at the abstract shapes/dtypes of ``args``/``kwargs`` and
  returns a ``stages.Lowered``; nothing is compiled or executed here.
  """
  # Flatten inputs and resolve axis/device parameters the same way the
  # dispatch path would (stored on the wrapper by the pmap constructor).
  p = pmap_f._prepare_pmap(args, kwargs)
  abstract_args = list(map(shaped_abstractify, p.flat_args))
  # Stage 1: trace the flattened function to a closed jaxpr plus replication
  # and sharding metadata.
  closed_jaxpr, xc_backend, replicas, shards, pci = pxla.get_pmap_jaxpr(
      p.flat_fun, pmap_f._backend, pmap_f._axis_name,
      axis_size=p.local_axis_size, global_axis_size=p.global_axis_size,
      devices=p.devices, name=p.flat_fun.__name__, in_axes=p.in_axes_flat,
      out_axes_thunk=p.out_axes_thunk, avals=abstract_args)
  # Stage 2: lower the jaxpr to a (still-uncompiled) parallel computation.
  lowering = pxla.lower_parallel_callable(
      p.flat_fun, pmap_f._axis_name, axis_size=p.local_axis_size,
      global_axis_size=p.global_axis_size, devices=p.devices,
      name=p.flat_fun.__name__, in_axes=p.in_axes_flat,
      donated_invars=p.donated_invars,
      is_explicit_global_axis_size=p.is_explicit_global_axis_size,
      avals=abstract_args, closed_jaxpr=closed_jaxpr, backend=xc_backend,
      replicas=replicas, shards=shards, pci=pci, lowering_platforms=None,
      lowering_parameters=pxla.mlir.LoweringParameters())
  args_info = stages.make_args_info(p.in_tree, abstract_args, pmap_f._donate_tuple)
  return stages.Lowered(lowering, args_info, p.out_tree())
# C++-dispatched pmap wrappers registered here have their caches cleared on
# global cache clears; a WeakSet so entries don't outlive the wrappers.
_pmap_cache_clears = weakref.WeakSet() # type: ignore
@partial(api_boundary, repro_api_name="jax.jvp")
def jvp(
    fun: Callable, primals, tangents, has_aux: bool = False
) -> tuple[Any, ...]:
  """Computes a (forward-mode) Jacobian-vector product of ``fun``.

  Args:
    fun: Function to differentiate. Takes and returns arrays, scalars, or
      standard Python containers thereof.
    primals: Tuple or list of primal input values at which to evaluate the
      Jacobian of ``fun``, one per positional parameter of ``fun``.
    tangents: Tuple or list of tangent values, with the same tree structure
      and array shapes as ``primals``.
    has_aux: If ``True``, ``fun`` returns a ``(output, aux)`` pair where only
      ``output`` is differentiated and ``aux`` is passed through. Default
      ``False``.

  Returns:
    ``(primals_out, tangents_out)`` where ``primals_out`` is ``fun(*primals)``
    and ``tangents_out`` is the Jacobian-vector product of ``fun`` at
    ``primals`` applied to ``tangents``; ``tangents_out`` matches
    ``primals_out`` in tree structure and shapes. With ``has_aux=True``,
    returns ``(primals_out, tangents_out, aux)``.

  Examples:
    >>> import jax
    >>>
    >>> primals, tangents = jax.jvp(jax.numpy.sin, (0.1,), (0.2,))
    >>> print(primals)
    0.09983342
    >>> print(tangents)
    0.19900084
  """
  check_callable(fun)
  # Both top-level arguments must be sequences (one entry per positional
  # parameter of `fun`); reject anything else up front.
  well_formed = (isinstance(primals, (tuple, list))
                 and isinstance(tangents, (tuple, list)))
  if not well_formed:
    raise TypeError("primal and tangent arguments to jax.jvp must be tuples or lists; "
                    f"found {type(primals).__name__} and {type(tangents).__name__}.")
  dbg = debug_info("jvp", fun, primals, {})
  return _jvp(lu.wrap_init(fun, debug_info=dbg),
              primals, tangents, has_aux=has_aux)
def _jvp(fun: lu.WrappedFun, primals, tangents, has_aux=False):
  """Variant of jvp() that takes an lu.WrappedFun.

  Args:
    fun: linear-util-wrapped function to differentiate.
    primals: pytree (tuple/list at top level) of primal input values.
    tangents: pytree matching ``primals`` of tangent input values.
    has_aux: whether ``fun`` returns an ``(out, aux)`` pair.

  Returns:
    ``(primals_out, tangents_out)``, or ``(primals_out, tangents_out, aux)``
    when ``has_aux`` is true.

  Raises:
    TypeError: if the primal/tangent tree structures or dtypes disagree.
    ValueError: if corresponding primal/tangent shapes disagree.
  """
  ps_flat, tree_def = tree_flatten(primals)
  ts_flat, tree_def_2 = tree_flatten(tangents)
  if tree_def != tree_def_2:
    raise TypeError("primal and tangent arguments to jax.jvp must have the same tree "
                    f"structure; primals have tree structure {tree_def} whereas tangents have "
                    f"tree structure {tree_def_2}.")
  for p, t in zip(ps_flat, ts_flat):
    # Only ShapedArray-typed leaves carry dtype/shape constraints to check.
    if not isinstance(core.typeof(p), ShapedArray): continue
    if core.primal_dtype_to_tangent_dtype(_dtype(p)) != _dtype(t):
      # NOTE: a separating space is required between the adjacent string
      # literals below, otherwise the message reads "...float0.Got...".
      raise TypeError("primal and tangent arguments to jax.jvp do not match; "
                      "dtypes must be equal, or in case of int/bool primal dtype "
                      "the tangent dtype must be float0. "
                      f"Got primal dtype {_dtype(p)} and so expected tangent dtype "
                      f"{core.primal_dtype_to_tangent_dtype(_dtype(p))}, but got "
                      f"tangent dtype {_dtype(t)} instead.")
    if np.shape(p) != np.shape(t):
      raise ValueError("jvp called with different primal and tangent shapes; "
                       f"Got primal shape {np.shape(p)} and tangent shape as {np.shape(t)}")
  if not has_aux:
    flat_fun, out_tree = flatten_fun_nokwargs(fun, tree_def)
    out_primals, out_tangents = ad.jvp(flat_fun).call_wrapped(ps_flat, ts_flat)
    out_tree = out_tree()
    return (tree_unflatten(out_tree, out_primals),
            tree_unflatten(out_tree, out_tangents))
  else:
    flat_fun, out_aux_trees = flatten_fun_nokwargs2(fun, tree_def)
    jvp_fun, aux = ad.jvp(flat_fun, has_aux=True)
    out_primals, out_tangents = jvp_fun.call_wrapped(ps_flat, ts_flat)
    out_tree, aux_tree = out_aux_trees()
    return (tree_unflatten(out_tree, out_primals),
            tree_unflatten(out_tree, out_tangents),
            tree_unflatten(aux_tree, aux()))
# Typing-only overloads for ``linearize``: with ``has_aux=False`` (the
# default) it returns a ``(primals_out, jvp_fn)`` pair; with ``has_aux=True``
# it returns ``(primals_out, jvp_fn, aux)``.
@overload
def linearize(fun: Callable, *primals, has_aux: Literal[False] = False
              ) -> tuple[Any, Callable]:
  ...
@overload
def linearize(fun: Callable, *primals, has_aux: Literal[True]
              ) -> tuple[Any, Callable, Any]:
  ...
@partial(api_boundary, repro_api_name="jax.linearize")
def linearize(fun: Callable, *primals, has_aux: bool = False
              ) -> tuple[Any, Callable] | tuple[Any, Callable, Any]:
  """Produces a linear approximation to ``fun`` using :py:func:`jvp` and partial eval.
  Args:
    fun: Function to be differentiated. Its arguments should be arrays, scalars,
      or standard Python containers of arrays or scalars. It should return an
      array, scalar, or standard python container of arrays or scalars.
    primals: The primal values at which the Jacobian of ``fun`` should be
      evaluated. Should be a tuple of arrays, scalar, or standard Python
      container thereof. The length of the tuple is equal to the number of
      positional parameters of ``fun``.
    has_aux: Optional, bool. Indicates whether ``fun`` returns a pair where the first
      element is considered the output of the mathematical function to be linearized,
      and the second is auxiliary data. Default False.
  Returns:
    If ``has_aux`` is ``False``, returns a pair where the first element is the value of
    ``f(*primals)`` and the second element is a function that evaluates the
    (forward-mode) Jacobian-vector product of ``fun`` evaluated at ``primals`` without
    re-doing the linearization work. If ``has_aux`` is ``True``, returns a
    ``(primals_out, lin_fn, aux)`` tuple where ``aux`` is the auxiliary data returned by
    ``fun``.
  In terms of values computed, :py:func:`linearize` behaves much like a curried
  :py:func:`jvp`, where these two code blocks compute the same values::
    y, out_tangent = jax.jvp(f, (x,), (in_tangent,))
    y, f_jvp = jax.linearize(f, x)
    out_tangent = f_jvp(in_tangent)
  However, the difference is that :py:func:`linearize` uses partial evaluation
  so that the function ``f`` is not re-linearized on calls to ``f_jvp``. In
  general that means the memory usage scales with the size of the computation,
  much like in reverse-mode. (Indeed, :py:func:`linearize` has a similar
  signature to :py:func:`vjp`!)
  This function is mainly useful if you want to apply ``f_jvp`` multiple times,
  i.e. to evaluate a pushforward for many different input tangent vectors at the
  same linearization point. Moreover if all the input tangent vectors are known
  at once, it can be more efficient to vectorize using :py:func:`vmap`, as in::
    pushfwd = partial(jvp, f, (x,))
    y, out_tangents = vmap(pushfwd, out_axes=(None, 0))((in_tangents,))
  By using :py:func:`vmap` and :py:func:`jvp` together like this we avoid the stored-linearization
  memory cost that scales with the depth of the computation, which is incurred
  by both :py:func:`linearize` and :py:func:`vjp`.
  Here's a more complete example of using :py:func:`linearize`:
  >>> import jax
  >>> import jax.numpy as jnp
  >>>
  >>> def f(x): return 3. * jnp.sin(x) + jnp.cos(x / 2.)
  ...
  >>> jax.jvp(f, (2.,), (3.,))
  (Array(3.2681944, dtype=float32, weak_type=True), Array(-5.007528, dtype=float32, weak_type=True))
  >>> y, f_jvp = jax.linearize(f, 2.)
  >>> print(y)
  3.2681944
  >>> print(f_jvp(3.))
  -5.007528
  >>> print(f_jvp(4.))
  -6.676704
  """
  check_callable(fun)
  f = lu.wrap_init(fun, debug_info=debug_info("linearize", fun, primals, {}))
  primals_flat, in_tree = tree_flatten(primals)
  if has_aux:
    jaxtree_fun, out_tree = flatten_fun_nokwargs2(f, in_tree)
  else:
    jaxtree_fun, out_tree = flatten_fun_nokwargs(f, in_tree)
  # Partial evaluation: out_pvals hold any statically-known outputs, jaxpr is
  # the linear (tangent) computation, consts its residuals. maybe_aux is
  # empty or a single aux value depending on has_aux.
  out_primals, out_pvals, jaxpr, consts, *maybe_aux = ad.linearize(
      jaxtree_fun, *primals_flat, has_aux=has_aux)
  if has_aux:
    out_tree, aux_tree = out_tree()
  else:
    out_tree = out_tree()
  out_primal_py = tree_unflatten(out_tree, out_primals)
  primal_avals = list(map(core.get_aval, primals_flat))
  # Ensure that lifted_jvp is a PyTree
  lifted_jvp = Partial(partial(_lift_linearized, jaxpr, primal_avals,
                               (in_tree, out_tree), out_pvals), consts)
  if has_aux:
    # Exactly one aux value is expected when has_aux=True.
    [aux] = maybe_aux
    return out_primal_py, lifted_jvp, tree_unflatten(aux_tree, aux)
  else:
    # No aux expected; this unpack asserts maybe_aux is empty.
    [] = maybe_aux
    return out_primal_py, lifted_jvp
def _lift_linearized(jaxpr, primal_avals, io_tree, out_pvals, consts, *py_args):
  """Wraps a linearized jaxpr as a user-facing JVP callable on pytrees.

  Validates that each supplied tangent matches the tangent type expected for
  the corresponding primal, evaluates the linear jaxpr on the tangents, and
  merges its outputs with the statically-known outputs in ``out_pvals``.
  """
  def fun(*tangents):
    tangent_avals = list(map(core.get_aval, tangents))
    # Type-check every tangent against the tangent aval implied by its primal.
    for primal_aval, tangent_aval in zip(primal_avals, tangent_avals):
      expected_tangent_aval = primal_aval.to_tangent_aval()
      if not core.typecompat(expected_tangent_aval, tangent_aval):
        extra_msg = ''
        # If the mismatch is only in varying-manual-axes (vma), suggest the
        # specific `pvary` applications that would reconcile the two sides.
        if (isinstance(primal_aval, core.ShapedArray) and
            isinstance(tangent_aval, core.ShapedArray) and
            primal_aval.vma != tangent_aval.vma):
          pvary_applications = []
          if left := tangent_aval.vma - primal_aval.vma:
            pvary_applications.append(
                f"applying `jax.lax.pvary(..., {tuple(left)})` to the primal"
                " value passed to `jax.linearize`")
          if left := primal_aval.vma - tangent_aval.vma:
            pvary_applications.append(
                f"applying `jax.lax.pvary(..., {tuple(left)})` to the tangent"
                " value passed to the callable `f_jvp` returned by"
                " `jax.linearize`")
          extra_msg = " \nThis might be fixed by:\n" + "\n".join(
              f" * {d};" for d in pvary_applications)
        raise ValueError(
            "linearized function called on tangent values inconsistent with "
            "the original primal values:\n"
            f"Got tangent aval {tangent_aval} for primal aval {primal_aval} "
            f"but expected {expected_tangent_aval}.{extra_msg}")
    tangents_out = eval_jaxpr(jaxpr, consts, *tangents)
    # Interleave: known outputs come from out_pvals, unknown ones are drawn in
    # order from the jaxpr's results.
    tangents_out_ = iter(tangents_out)
    full_out = [pval.get_known() if pval.is_known() else next(tangents_out_)
                for pval in out_pvals]
    assert next(tangents_out_, None) is None
    return full_out
  return apply_flat_fun_nokwargs(fun, io_tree, py_args)
@api_boundary
def _vjp_pullback_wrapper(name, out_primal_avals, io_tree, fun, *py_args_):
  """Validation wrapper around a flat VJP function returned by ``jax.vjp``.

  Checks that the pullback is called with exactly one argument, that the
  argument's pytree structure matches the primal outputs, and that each leaf
  has the expected cotangent type, before delegating to the flat ``fun``.
  """
  if len(py_args_) != 1:
    msg = (f"The function returned by `jax.vjp` applied to {name} was called "
           f"with {len(py_args_)} arguments, but functions returned by "
           "`jax.vjp` must be called with a single argument corresponding to "
           f"the single value returned by {name} (even if that returned "
           "value is a tuple or other container).\n"
           "\n"
           "For example, if we have:\n"
           "\n"
           " def f(x):\n"
           " return (x, x)\n"
           " _, f_vjp = jax.vjp(f, 1.0)\n"
           "\n"
           "the function `f` returns a single tuple as output, and so we call "
           "`f_vjp` with a single tuple as its argument:\n"
           "\n"
           " x_bar, = f_vjp((2.0, 2.0))\n"
           "\n"
           "If we instead call `f_vjp(2.0, 2.0)`, with the values 'splatted "
           "out' as arguments rather than in a tuple, this error can arise.")
    raise TypeError(msg)
  py_args, = py_args_
  in_tree_expected, out_tree = io_tree
  args, in_tree = tree_flatten(py_args)
  if in_tree != in_tree_expected:
    raise ValueError(f"unexpected tree structure of argument to vjp function: "
                     f"got {in_tree}, but expected to match {in_tree_expected}")
  # Leaf-by-leaf cotangent type check against the recorded primal-output avals.
  for arg, aval in zip(args, out_primal_avals):
    ct_aval = shaped_abstractify(arg)
    ct_aval_expected = aval.to_cotangent_aval()
    # _temporary_dtype_exception permits float0 cotangents of matching shape.
    if (not core.typecompat(ct_aval, ct_aval_expected) and
        not _temporary_dtype_exception(ct_aval, ct_aval_expected)):
      raise ValueError(
          "unexpected JAX type (e.g. shape/dtype) for argument to vjp function: "
          f"got {ct_aval.str_short()}, but expected {ct_aval_expected.str_short()} "
          f"because the corresponding output of the function {name} had JAX type "
          f"{aval.str_short()}")
  ans = fun(*args)
  return tree_unflatten(out_tree, ans)
# TODO(mattjj): see similar function in custom_derivatives.py
def _temporary_dtype_exception(a, a_) -> bool:
  """Whether a float0 cotangent of matching shape should be tolerated."""
  if not (isinstance(a, core.ShapedArray) and isinstance(a_, core.ShapedArray)):
    return False
  return a.shape == a_.shape and a_.dtype == float0
# Typing-only overloads for ``vjp``: with ``has_aux=False`` (the default) it
# returns a ``(primals_out, vjpfun)`` pair; with ``has_aux=True`` it returns
# ``(primals_out, vjpfun, aux)``.
@overload
def vjp(fun: Callable[..., T],
        *primals: Any,
        has_aux: Literal[False] = False,
        reduce_axes: Sequence[AxisName] = ()) -> tuple[T, Callable]:
  ...
@overload
def vjp(fun: Callable[..., tuple[T, U]], *primals: Any,
        has_aux: Literal[True],
        reduce_axes: Sequence[AxisName] = ()) -> tuple[T, Callable, U]:
  ...
@partial(api_boundary, repro_api_name="jax.vjp")
def vjp(
    fun: Callable, *primals, has_aux: bool = False, reduce_axes=()
) -> tuple[Any, Callable] | tuple[Any, Callable, Any]:
  """Compute a (reverse-mode) vector-Jacobian product of ``fun``.

  :py:func:`grad` is implemented as a special case of :py:func:`vjp`.

  Args:
    fun: Function to differentiate; takes and returns arrays, scalars, or
      standard Python containers thereof.
    primals: Primal input values at which to evaluate the Jacobian of
      ``fun``, one per positional parameter of ``fun``. Each may be an
      array, a scalar, or a pytree thereof.
    has_aux: If ``True``, ``fun`` returns a ``(output, aux)`` pair where only
      ``output`` is differentiated and ``aux`` is passed through unchanged.
      Default ``False``.
    reduce_axes: Deprecated; must be empty.

  Returns:
    ``(primals_out, vjpfun)`` where ``primals_out`` is ``fun(*primals)`` and
    ``vjpfun`` maps a cotangent with the same structure and shapes as
    ``primals_out`` to a tuple of cotangents with the same number and shapes
    as ``primals``. With ``has_aux=True``, returns
    ``(primals_out, vjpfun, aux)``.

  Examples:
    >>> import jax
    >>>
    >>> def f(x, y):
    ...   return jax.numpy.sin(x), jax.numpy.cos(y)
    ...
    >>> primals, f_vjp = jax.vjp(f, 0.5, 1.0)
    >>> xbar, ybar = f_vjp((-0.7, 0.3))
    >>> print(xbar)
    -0.61430776
    >>> print(ybar)
    -0.2524413
  """
  if reduce_axes:
    raise NotImplementedError("reduce_axes argument to vjp is deprecated")
  del reduce_axes
  check_callable(fun)
  dbg = debug_info("vjp", fun, primals, {})
  return _vjp(lu.wrap_init(fun, debug_info=dbg), *primals, has_aux=has_aux)
def _vjp(fun: lu.WrappedFun, *primals, has_aux=False):
  """Variant of vjp() that takes an lu.WrappedFun."""
  # Config-gated new implementation: delegate entirely to vjp3 when enabled.
  if config.vjp3.value:
    return _vjp3(fun, *primals, has_aux=has_aux)
  primals_flat, in_tree = tree_flatten(primals)
  # Canonicalize concrete leaf values; tracers must be left untouched.
  primals_flat = [canonicalize_value(v) if not isinstance(v, core.Tracer) else v
                  for v in primals_flat]
  for arg in primals_flat: dispatch.check_arg(arg)
  if not has_aux:
    flat_fun, out_tree = flatten_fun_nokwargs(fun, in_tree)
    out_primals, vjp = ad.vjp(flat_fun, primals_flat)
    out_tree = out_tree()
  else:
    flat_fun, out_aux_trees = flatten_fun_nokwargs2(fun, in_tree)
    out_primals, vjp, aux = ad.vjp(flat_fun, primals_flat, has_aux=True)
    out_tree, aux_tree = out_aux_trees()
  out_primal_avals = map(shaped_abstractify, out_primals)
  out_primal_py = tree_unflatten(out_tree, out_primals)
  # Partial keeps the returned pullback a pytree so it can flow through other
  # transformations; the wrapper validates cotangent structure and types.
  vjp_py = Partial(partial(_vjp_pullback_wrapper, fun.__name__,
                           out_primal_avals, (out_tree, in_tree)), vjp)
  if not has_aux:
    return out_primal_py, vjp_py
  else:
    return out_primal_py, vjp_py, tree_unflatten(aux_tree, aux)
@partial(api_boundary, repro_api_name="jax.experimental.saved_input_vjp")
def saved_input_vjp(f: Callable, which: Sequence[bool], *primals,
                    allow_unused: bool = True, allow_opaque: bool = True):
  """Like ``vjp``, but the caller re-supplies selected inputs to the pullback.

  ``which`` marks, per primal argument, the inputs the caller promises to pass
  again when invoking the returned ``f_vjp``; residuals that are such marked
  inputs are then not stored in the pullback closure. Residuals that are not
  marked inputs are kept and called "opaque".

  Args:
    f: function to differentiate.
    which: sequence of bools, one per entry of ``primals``.
    *primals: primal input values.
    allow_unused: if False, raise when a marked input is not actually needed
      as a residual by the backward pass.
    allow_opaque: if False, raise when the backward pass needs residuals that
      are not marked inputs.

  Returns:
    ``(out_primals, f_vjp)`` where ``f_vjp(ct, *saved_inputs)`` computes the
    input cotangents.
  """
  if len(which) != len(primals):
    raise ValueError(
        "length of 'which' argument must equal the number of primal input values, "
        f"but got {len(which)=} and {len(primals)=}")
  dbg = debug_info("saved_input_vjp", f, primals, {})
  fun = lu.wrap_init(f, debug_info=dbg)
  primals_flat, in_tree = tree_flatten(primals)
  fun, out_tree = flatten_fun_nokwargs(fun, in_tree)
  out_primals_flat, out_pvals, jaxpr, residuals = ad.linearize(fun, *primals_flat)
  out_known = [pval.is_known() for pval in out_pvals]
  primals_filt, filt_tree = tree_flatten(tuple(p for w, p in zip(which, primals) if w))
  # Map each marked input's object identity to its flat position, so residuals
  # that *are* marked inputs can be looked up by id below.
  id_map = {id(x): i for i, x in enumerate(primals_filt)}
  opaque_residuals = []
  # For each residual: either reference a marked input by position, or append
  # it to opaque_residuals (.append returns None, so `or` yields the new index).
  res_spec = [RSpec(id_map[id(r)], True) if id(r) in id_map else
              RSpec(opaque_residuals.append(r) or (len(opaque_residuals) - 1), False) # type: ignore
              for r in residuals]
  f_vjp = Partial(partial(_saved_input_vjpfun, res_spec, filt_tree, in_tree,
                          out_tree(), out_known, jaxpr), opaque_residuals)
  # Optionally reject marked inputs that the backward pass never consumes.
  if not allow_unused and not set(id_map).issubset(res_ids := {id(r) for r in residuals}):
    unused = [(i, core.get_aval(x)) for i, (x, w) in enumerate(zip(primals, which))
              if w and id(x) not in res_ids]
    assert unused
    if len(unused) == 1:
      (i, a), = unused
      start, was = "an input value", "was"
      msg = f" {dbg.arg_names[i] if dbg.arg_names is not None else 'unknown'} of type {a.str_short()}"
    else:
      start, was = "multiple input values", "were"
      msg = "\n" + "\n".join(f" * {dbg.arg_names[i] if dbg.arg_names is not None else 'unknown'} of type {a.str_short()}"
                             for i, a in unused)
    raise Exception(f"with {allow_unused=}, {start} marked to be saved {was} "
                    f"not used by the backward pass:{msg}")
  # Optionally reject any residuals that are not caller-saved inputs.
  if not allow_opaque and opaque_residuals:
    msg = ", ".join(core.get_aval(x).str_short() for x in opaque_residuals)
    raise Exception(f"with {allow_opaque=}, the backward pass requires opaque "
                    f"(non-input) residuals: {msg}")
  out_primals = tree_unflatten(out_tree(), out_primals_flat)
  return out_primals, f_vjp
def _saved_input_vjpfun(res_spec, filtered_tree, in_tree, out_tree, out_known,
                        jaxpr, opaque_residuals, ct, *saved_primals):
  """Backward pass for ``saved_input_vjp``.

  Reassembles the residual list from the caller-resupplied ``saved_primals``
  (for residuals that were marked inputs) and ``opaque_residuals`` (for the
  rest), then runs the transposed jaxpr on the cotangent ``ct``.
  """
  primals_filtered, filtered_tree_ = tree_flatten(saved_primals)
  # The resupplied inputs must match the structure captured at forward time.
  if filtered_tree != filtered_tree_:
    raise ValueError(
        "inputs passed to f_vjp must be a tuple of (pytrees of) "
        "arrays with the same structure as\n"
        " tuple(x for x, w in zip(inputs, which) if w)\n"
        "given the original call\n"
        " _, f_vjp = saved_input_vjp(f, which, *inputs, ...)\n"
        "but the structures differ:\n" +
        "\n".join(f" * inputs{keystr(path)} was a {thing1} in the original "
                  f"call, but a {thing2} here, so {explanation}"
                  for path, thing1, thing2, explanation
                  in equality_errors_pytreedef(filtered_tree, filtered_tree_)))
  # Each RSpec says whether a residual comes from the saved primals or from
  # the opaque residual list, and at which index.
  residuals = [primals_filtered[i.idx] if i.primal else opaque_residuals[i.idx]
               for i in res_spec]
  dummy_args = [ad.UndefinedPrimal(v.aval) for v in jaxpr.invars]
  cts_flat, out_tree_ = tree_flatten(ct)
  assert out_tree_ == out_tree
  # Known (constant) outputs carry no cotangent; drop their entries.
  cts_flat = [ct for ct, k in zip(cts_flat, out_known) if not k]
  arg_cts = ad.backward_pass(jaxpr, True, residuals, dummy_args, cts_flat)
  return tree_unflatten(in_tree, map(ad.instantiate_zeros, arg_cts))
@dataclasses.dataclass(frozen=True)
|
_PmapFastpathData
|
python
|
keras-team__keras
|
keras/src/losses/losses_test.py
|
{
"start": 77755,
"end": 84894
}
|
class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.CategoricalGeneralizedCrossEntropy(name="gce")
)
self.run_class_serialization_test(
losses.CategoricalGeneralizedCrossEntropy(q=0.1, name="gce")
)
def test_basic_correctness_for_binary(self):
y_true = np.array([0, 1, 0, 1])
y_pred = np.array([[0.7, 0.3], [0.2, 0.8], [0.6, 0.4], [0.4, 0.6]])
# Calculate expected GCE loss manually
# For q=0.5:
# First sample (class 0): gce = (1 - 0.7^0.5) / 0.5
# Second sample (class 1): gce = (1 - 0.8^0.5) / 0.5
# Third sample (class 0): gce = (1 - 0.6^0.5) / 0.5
# Fourth sample (class 1): gce = (1 - 0.6^0.5) / 0.5
expected = np.array(
[
(1 - np.power(0.7, 0.5)) / 0.5,
(1 - np.power(0.8, 0.5)) / 0.5,
(1 - np.power(0.6, 0.5)) / 0.5,
(1 - np.power(0.6, 0.5)) / 0.5,
]
)
output = losses.CategoricalGeneralizedCrossEntropy()(y_true, y_pred)
self.assertAllClose(output, expected.sum() / len(expected))
expected_q_08 = np.array(
[
(1 - np.power(0.7, 0.8)) / 0.8,
(1 - np.power(0.8, 0.8)) / 0.8,
(1 - np.power(0.6, 0.8)) / 0.8,
(1 - np.power(0.6, 0.8)) / 0.8,
]
)
output = losses.CategoricalGeneralizedCrossEntropy(q=0.8)(
y_true, y_pred
)
self.assertAllClose(output, expected_q_08.sum() / len(expected_q_08))
def test_basic_correctness_for_multi_class(self):
y_true = np.array([0, 1, 0, 1])
y_pred = np.array(
[[0.7, 0.3, 0.0], [0.2, 0.2, 0.6], [0.6, 0.4, 0.0], [0.2, 0.2, 0.6]]
)
# Calculate expected GCE loss manually
# For q=0.5:
# First sample (class 0): gce = (1 - 0.7^0.5) / 0.5
# Second sample (class 1): gce = (1 - 0^0.5) / 0.5
# Third sample (class 0): gce = (1 - 0.6^0.5) / 0.5
# Fourth sample (class 1): gce = (1 - 0.0^0.5) / 0.5
expected = np.array(
[
(1 - np.power(0.7, 0.5)) / 0.5,
(1 - np.power(0.2, 0.5)) / 0.5,
(1 - np.power(0.6, 0.5)) / 0.5,
(1 - np.power(0.2, 0.5)) / 0.5,
]
)
output = losses.CategoricalGeneralizedCrossEntropy()(y_true, y_pred)
self.assertAllClose(output, expected.sum() / len(expected))
expected_q_08 = np.array(
[
(1 - np.power(0.7, 0.8)) / 0.8,
(1 - np.power(0.2, 0.8)) / 0.8,
(1 - np.power(0.6, 0.8)) / 0.8,
(1 - np.power(0.2, 0.8)) / 0.8,
]
)
output = losses.CategoricalGeneralizedCrossEntropy(q=0.8)(
y_true, y_pred
)
self.assertAllClose(output, expected_q_08.sum() / len(expected_q_08))
def test_binary_segmentation(self):
y_true = np.array(
[[0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [1.0, 0.0]],
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]],
]
)
output = losses.CategoricalGeneralizedCrossEntropy(q=0.5)(
y_true, y_pred
)
self.assertAllClose(output, 0.0)
y_true = np.array(
[[0, 1, 1, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.2, 0.8]],
[[0.0, 1.0], [1.0, 0.0], [0.0, 1.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]],
[[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.6, 0.4]],
]
)
expected = np.array(
[
(1 - np.power(0.2, 0.5)) / 0.5,
(1 - np.power(0.4, 0.5)) / 0.5,
]
)
output = losses.CategoricalGeneralizedCrossEntropy(q=0.5)(
y_true, y_pred
)
self.assertAllClose(output, expected.sum() / 16.0) # 16 pixels
def test_multi_class_segmentation(self):
y_true = np.array(
[[0, 1, 2, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
],
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
],
]
)
output = losses.CategoricalGeneralizedCrossEntropy(q=0.5)(
y_true, y_pred
)
self.assertAllClose(output, 0.0)
y_true = np.array(
[[0, 1, 2, 0], [1, 0, 1, 0], [0, 0, 1, 1], [1, 1, 0, 1]]
)
y_pred = np.array(
[
[
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
[0.2, 0.0, 0.8],
],
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
],
[
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
],
[
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.5, 0.5, 0.0],
[0.0, 1.0, 0.0],
],
]
)
expected = np.array(
[
(1 - np.power(0.2, 0.5)) / 0.5,
(1 - np.power(0.0, 0.5)) / 0.5,
(1 - np.power(0.5, 0.5)) / 0.5,
]
)
output = losses.CategoricalGeneralizedCrossEntropy(q=0.5)(
y_true, y_pred
)
self.assertAllClose(output, expected.sum() / 16.0) # 16 pixels
def test_dtype_arg(self):
y_true = np.array([0, 1, 0, 1])
y_pred = np.array([[0.7, 0.3], [0.2, 0.8], [0.6, 0.4], [0.4, 0.6]])
output = losses.CategoricalGeneralizedCrossEntropy(dtype="bfloat16")(
y_true, y_pred
)
self.assertDType(output, "bfloat16")
|
CategoricalGeneralizedCrossEntropyTest
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/tasks.py
|
{
"start": 332132,
"end": 333253
}
|
class ____(Request):
"""
Get the list of task hyper parameters
:param tasks: Task IDs
:type tasks: Sequence[str]
"""
_service = "tasks"
_action = "get_hyper_params"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"tasks": {
"description": "Task IDs",
"items": {"type": "string"},
"type": "array",
}
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], **kwargs: Any) -> None:
super(GetHyperParamsRequest, self).__init__(**kwargs)
self.tasks = tasks
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
|
GetHyperParamsRequest
|
python
|
ZoranPandovski__al-go-rithms
|
data_structures/Graphs/prims/python/hemant-mst-prims.py
|
{
"start": 25,
"end": 1363
}
|
class ____():
# defining the class or data structure
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
def printMST(self, parent):
print("Edge \tWeight")
for i in range(1, self.V):
print(parent[i], "-", i, "\t", self.graph[i][parent[i]])
def minKey(self, key, mstSet):
min = Inf
for v in range(self.V):
if key[v] < min and mstSet[v] == False:
min = key[v]
min_index = v
return min_index
# Prims algorithm implementation
def primMST(self):
key = [Inf] * self.V
parent = [None] * self.V
key[0] = 0
mstSet = [False] * self.V
parent[0] = -1
for _ in range(self.V):
u = self.minKey(key, mstSet)
mstSet[u] = True
for v in range(self.V):
if self.graph[u][v] > 0 and mstSet[v] == False and key[v] > self.graph[u][v]:
key[v] = self.graph[u][v]
parent[v] = u
self.printMST(parent)
# Driver code
g = Graph(5)
g.graph = [[0, 2, 0, 6, 0],
[2, 0, 3, 8, 5],
[0, 3, 0, 0, 7],
[6, 8, 0, 0, 9],
[0, 5, 7, 9, 0]]
g.primMST()
|
Graph
|
python
|
scrapy__scrapy
|
tests/test_utils_reactor.py
|
{
"start": 250,
"end": 1129
}
|
class ____:
def test_is_asyncio_reactor_installed(self, reactor_pytest: str) -> None:
# the result should depend only on the pytest --reactor argument
assert is_asyncio_reactor_installed() == (reactor_pytest == "asyncio")
def test_install_asyncio_reactor(self):
from twisted.internet import reactor as original_reactor
with warnings.catch_warnings(record=True) as w:
install_reactor(_asyncio_reactor_path)
assert len(w) == 0, [str(warning) for warning in w]
from twisted.internet import reactor # pylint: disable=reimported
assert original_reactor == reactor
@pytest.mark.only_asyncio
@deferred_f_from_coro_f
async def test_set_asyncio_event_loop(self):
install_reactor(_asyncio_reactor_path)
assert set_asyncio_event_loop(None) is asyncio.get_running_loop()
|
TestAsyncio
|
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2/modeling_wav2vec2.py
|
{
"start": 24576,
"end": 26313
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.attention = Wav2Vec2Attention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
config=config,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = Wav2Vec2FeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if getattr(config, "adapter_attn_dim", None) is not None:
self.adapter_layer = Wav2Vec2AttnAdapterLayer(config)
else:
self.adapter_layer = None
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
if self.adapter_layer is not None:
hidden_states = hidden_states + self.adapter_layer(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
|
Wav2Vec2EncoderLayerStableLayerNorm
|
python
|
dask__dask
|
dask/diagnostics/profile.py
|
{
"start": 8507,
"end": 12136
}
|
class ____(Callback):
"""A profiler for dask execution at the scheduler cache level.
Records the following information for each task:
1. Key
2. Task
3. Size metric
4. Cache entry time in seconds since the epoch
5. Cache exit time in seconds since the epoch
Examples
--------
>>> from operator import add, mul
>>> from dask.threaded import get
>>> from dask.diagnostics import CacheProfiler
>>> dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> with CacheProfiler() as prof:
... get(dsk, 'z')
22
>>> prof.results # doctest: +SKIP
[CacheData(key='y', task=(add, 'x', 10), metric=1, cache_time=..., free_time=...),
CacheData(key='z', task=(mul, 'y', 2), metric=1, cache_time=..., free_time=...)]
The default is to count each task (``metric`` is 1 for all tasks). Other
functions may used as a metric instead through the ``metric`` keyword. For
example, the ``nbytes`` function found in ``cachey`` can be used to measure
the number of bytes in the cache.
>>> from cachey import nbytes # doctest: +SKIP
>>> with CacheProfiler(metric=nbytes) as prof: # doctest: +SKIP
... get(dsk, 'z')
22
The profiling results can be visualized in a bokeh plot using the
``visualize`` method. Note that this requires bokeh to be installed.
>>> prof.visualize() # doctest: +SKIP
You can activate the profiler globally
>>> prof.register()
If you use the profiler globally you will need to clear out old results
manually.
>>> prof.clear()
>>> prof.unregister()
"""
def __init__(self, metric=None, metric_name=None):
self.clear()
self._metric = metric if metric else lambda value: 1
if metric_name:
self._metric_name = metric_name
elif metric:
self._metric_name = metric.__name__
else:
self._metric_name = "count"
def __enter__(self):
self.clear()
self.start_time = default_timer()
return super().__enter__()
def __exit__(self, *args):
self.end_time = default_timer()
return super().__exit__(*args)
def _start(self, dsk):
self._dsk.update(dsk)
def _posttask(self, key, value, dsk, state, id):
t = default_timer()
self._cache[key] = (self._metric(value), t)
for k in state["released"] & self._cache.keys():
metric, start = self._cache.pop(k)
self.results.append(CacheData(k, dsk[k], metric, start, t))
def _finish(self, dsk, state, failed):
t = default_timer()
for k, (metric, start) in self._cache.items():
self.results.append(CacheData(k, dsk[k], metric, start, t))
self._cache.clear()
def _plot(self, **kwargs):
from dask.diagnostics.profile_visualize import plot_cache
return plot_cache(
self.results,
self._dsk,
self.start_time,
self.end_time,
self._metric_name,
**kwargs,
)
def visualize(self, **kwargs):
"""Visualize the profiling run in a bokeh plot.
See also
--------
dask.diagnostics.profile_visualize.visualize
"""
from dask.diagnostics.profile_visualize import visualize
return visualize(self, **kwargs)
def clear(self):
"""Clear out old results from profiler"""
self.results = []
self._cache = {}
self._dsk = {}
self.start_time = None
self.end_time = None
|
CacheProfiler
|
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_internal/commands/list.py
|
{
"start": 1254,
"end": 12771
}
|
class ____(IndexGroupCommand):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
ignore_require_venv = True
usage = """
%prog [options]"""
def add_options(self) -> None:
self.cmd_opts.add_option(
"-o",
"--outdated",
action="store_true",
default=False,
help="List outdated packages",
)
self.cmd_opts.add_option(
"-u",
"--uptodate",
action="store_true",
default=False,
help="List uptodate packages",
)
self.cmd_opts.add_option(
"-e",
"--editable",
action="store_true",
default=False,
help="List editable projects.",
)
self.cmd_opts.add_option(
"-l",
"--local",
action="store_true",
default=False,
help=(
"If in a virtualenv that has global access, do not list "
"globally-installed packages."
),
)
self.cmd_opts.add_option(
"--user",
dest="user",
action="store_true",
default=False,
help="Only output packages installed in user-site.",
)
self.cmd_opts.add_option(cmdoptions.list_path())
self.cmd_opts.add_option(
"--pre",
action="store_true",
default=False,
help=(
"Include pre-release and development versions. By default, "
"pip only finds stable versions."
),
)
self.cmd_opts.add_option(
"--format",
action="store",
dest="list_format",
default="columns",
choices=("columns", "freeze", "json"),
help=(
"Select the output format among: columns (default), freeze, or json. "
"The 'freeze' format cannot be used with the --outdated option."
),
)
self.cmd_opts.add_option(
"--not-required",
action="store_true",
dest="not_required",
help="List packages that are not dependencies of installed packages.",
)
self.cmd_opts.add_option(
"--exclude-editable",
action="store_false",
dest="include_editable",
help="Exclude editable package from output.",
)
self.cmd_opts.add_option(
"--include-editable",
action="store_true",
dest="include_editable",
help="Include editable package from output.",
default=True,
)
self.cmd_opts.add_option(cmdoptions.list_exclude())
index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
def handle_pip_version_check(self, options: Values) -> None:
if options.outdated or options.uptodate:
super().handle_pip_version_check(options)
def _build_package_finder(
self, options: Values, session: "PipSession"
) -> "PackageFinder":
"""
Create a package finder appropriate to this list command.
"""
# Lazy import the heavy index modules as most list invocations won't need 'em.
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
link_collector = LinkCollector.create(session, options=options)
# Pass allow_yanked=False to ignore yanked versions.
selection_prefs = SelectionPreferences(
allow_yanked=False,
allow_all_prereleases=options.pre,
)
return PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
def run(self, options: Values, args: List[str]) -> int:
if options.outdated and options.uptodate:
raise CommandError("Options --outdated and --uptodate cannot be combined.")
if options.outdated and options.list_format == "freeze":
raise CommandError(
"List format 'freeze' cannot be used with the --outdated option."
)
cmdoptions.check_list_path_option(options)
skip = set(stdlib_pkgs)
if options.excludes:
skip.update(canonicalize_name(n) for n in options.excludes)
packages: "_ProcessedDists" = [
cast("_DistWithLatestInfo", d)
for d in get_environment(options.path).iter_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=options.editable,
include_editables=options.include_editable,
skip=skip,
)
]
# get_not_required must be called firstly in order to find and
# filter out all dependencies correctly. Otherwise a package
# can't be identified as requirement because some parent packages
# could be filtered out before.
if options.not_required:
packages = self.get_not_required(packages, options)
if options.outdated:
packages = self.get_outdated(packages, options)
elif options.uptodate:
packages = self.get_uptodate(packages, options)
self.output_package_listing(packages, options)
return SUCCESS
def get_outdated(
self, packages: "_ProcessedDists", options: Values
) -> "_ProcessedDists":
return [
dist
for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version > dist.version
]
def get_uptodate(
self, packages: "_ProcessedDists", options: Values
) -> "_ProcessedDists":
return [
dist
for dist in self.iter_packages_latest_infos(packages, options)
if dist.latest_version == dist.version
]
def get_not_required(
self, packages: "_ProcessedDists", options: Values
) -> "_ProcessedDists":
dep_keys = {
canonicalize_name(dep.name)
for dist in packages
for dep in (dist.iter_dependencies() or ())
}
# Create a set to remove duplicate packages, and cast it to a list
# to keep the return type consistent with get_outdated and
# get_uptodate
return list({pkg for pkg in packages if pkg.canonical_name not in dep_keys})
def iter_packages_latest_infos(
self, packages: "_ProcessedDists", options: Values
) -> Generator["_DistWithLatestInfo", None, None]:
with self._build_session(options) as session:
finder = self._build_package_finder(options, session)
def latest_info(
dist: "_DistWithLatestInfo",
) -> Optional["_DistWithLatestInfo"]:
all_candidates = finder.find_all_candidates(dist.canonical_name)
if not options.pre:
# Remove prereleases
all_candidates = [
candidate
for candidate in all_candidates
if not candidate.version.is_prerelease
]
evaluator = finder.make_candidate_evaluator(
project_name=dist.canonical_name,
)
best_candidate = evaluator.sort_best_candidate(all_candidates)
if best_candidate is None:
return None
remote_version = best_candidate.version
if best_candidate.link.is_wheel:
typ = "wheel"
else:
typ = "sdist"
dist.latest_version = remote_version
dist.latest_filetype = typ
return dist
for dist in map(latest_info, packages):
if dist is not None:
yield dist
def output_package_listing(
self, packages: "_ProcessedDists", options: Values
) -> None:
packages = sorted(
packages,
key=lambda dist: dist.canonical_name,
)
if options.list_format == "columns" and packages:
data, header = format_for_columns(packages, options)
self.output_package_listing_columns(data, header)
elif options.list_format == "freeze":
for dist in packages:
if options.verbose >= 1:
write_output(
"%s==%s (%s)", dist.raw_name, dist.version, dist.location
)
else:
write_output("%s==%s", dist.raw_name, dist.version)
elif options.list_format == "json":
write_output(format_for_json(packages, options))
def output_package_listing_columns(
self, data: List[List[str]], header: List[str]
) -> None:
# insert the header first: we need to know the size of column names
if len(data) > 0:
data.insert(0, header)
pkg_strings, sizes = tabulate(data)
# Create and add a separator.
if len(data) > 0:
pkg_strings.insert(1, " ".join("-" * x for x in sizes))
for val in pkg_strings:
write_output(val)
def format_for_columns(
pkgs: "_ProcessedDists", options: Values
) -> Tuple[List[List[str]], List[str]]:
"""
Convert the package data into something usable
by output_package_listing_columns.
"""
header = ["Package", "Version"]
running_outdated = options.outdated
if running_outdated:
header.extend(["Latest", "Type"])
has_editables = any(x.editable for x in pkgs)
if has_editables:
header.append("Editable project location")
if options.verbose >= 1:
header.append("Location")
if options.verbose >= 1:
header.append("Installer")
data = []
for proj in pkgs:
# if we're working on the 'outdated' list, separate out the
# latest_version and type
row = [proj.raw_name, proj.raw_version]
if running_outdated:
row.append(str(proj.latest_version))
row.append(proj.latest_filetype)
if has_editables:
row.append(proj.editable_project_location or "")
if options.verbose >= 1:
row.append(proj.location or "")
if options.verbose >= 1:
row.append(proj.installer)
data.append(row)
return data, header
def format_for_json(packages: "_ProcessedDists", options: Values) -> str:
data = []
for dist in packages:
info = {
"name": dist.raw_name,
"version": str(dist.version),
}
if options.verbose >= 1:
info["location"] = dist.location or ""
info["installer"] = dist.installer
if options.outdated:
info["latest_version"] = str(dist.latest_version)
info["latest_filetype"] = dist.latest_filetype
editable_project_location = dist.editable_project_location
if editable_project_location:
info["editable_project_location"] = editable_project_location
data.append(info)
return json.dumps(data)
|
ListCommand
|
python
|
getsentry__sentry
|
tests/sentry/monitors/test_models.py
|
{
"start": 7131,
"end": 12849
}
|
class ____(TestCase):
@override_settings(MAX_ENVIRONMENTS_PER_MONITOR=2)
def test_monitor_environment_limits(self) -> None:
monitor = Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
name="Unicron",
slug="unicron",
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"checkin_margin": None,
"max_runtime": None,
},
)
for i in range(settings.MAX_ENVIRONMENTS_PER_MONITOR):
MonitorEnvironment.objects.ensure_environment(self.project, monitor, f"space-{i}")
with pytest.raises(
MonitorEnvironmentLimitsExceeded,
match=f"You may not exceed {settings.MAX_ENVIRONMENTS_PER_MONITOR} environments per monitor",
):
MonitorEnvironment.objects.ensure_environment(
self.project, monitor, f"space-{settings.MAX_ENVIRONMENTS_PER_MONITOR}"
)
def test_update_config(self) -> None:
monitor = Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
name="Unicron",
slug="unicron",
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"alert_rule_id": 1,
"checkin_margin": None,
"max_runtime": None,
},
)
new_config = {
"schedule": {
"type": "crontab",
"value": "0 0 1 2 *",
},
"max_runtime": 10,
"garbage": "data",
}
validator = ConfigValidator(data=new_config)
assert validator.is_valid()
validated_config = validator.validated_data
monitor.update_config(new_config, validated_config)
assert monitor.config == {
"schedule": "0 0 1 2 *",
"schedule_type": ScheduleType.CRONTAB,
"checkin_margin": None,
"max_runtime": 10,
"alert_rule_id": 1,
}
def test_config_validator(self) -> None:
config = {
"checkin_margin": None,
"max_runtime": None,
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"alert_rule_id": 1,
}
monitor = Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
name="Unicron",
slug="unicron",
config=config,
)
validated_config = monitor.get_validated_config()
assert validated_config == config
validated_config["bad_key"] = 100
monitor.config = validated_config
with self.assertLogs(logger="root", level=logging.WARNING) as cm:
bad_config = monitor.get_validated_config()
assert bad_config == validated_config
assert bad_config["bad_key"] == 100
assert len(cm.records) == 1
assert "invalid config" in cm.records[0].message
def test_build_occurrence_fingerprint(self):
"""Test that build_occurrence_fingerprint returns the expected format"""
monitor = self.create_monitor()
monitor_environment = self.create_monitor_environment(
monitor=monitor,
environment_id=self.environment.id,
)
fingerprint = monitor_environment.build_occurrence_fingerprint()
assert fingerprint == f"crons:{monitor_environment.id}"
def test_build_occurrence_fingerprint_different_environments(self):
"""Test that different MonitorEnvironments get different fingerprints"""
monitor = self.create_monitor()
env1 = self.create_environment(name="production")
env2 = self.create_environment(name="staging")
monitor_env1 = self.create_monitor_environment(
monitor=monitor,
environment_id=env1.id,
)
monitor_env2 = self.create_monitor_environment(
monitor=monitor,
environment_id=env2.id,
)
fingerprint1 = monitor_env1.build_occurrence_fingerprint()
fingerprint2 = monitor_env2.build_occurrence_fingerprint()
assert fingerprint1 != fingerprint2
assert fingerprint1 == f"crons:{monitor_env1.id}"
assert fingerprint2 == f"crons:{monitor_env2.id}"
def test_ensure_environment_matches_monitor_muted_state(self):
"""Test that new environments match the monitor's computed is_muted state.
When creating a new environment, it will be muted if the monitor's is_muted
property returns True (i.e., all existing environments are muted).
"""
# Create monitor with all existing environments muted
muted_monitor = self.create_monitor()
# Create first environment as muted
self.create_monitor_environment(
monitor=muted_monitor,
environment_id=self.environment.id,
is_muted=True,
)
# New environment matches the monitor's computed muted state (all envs muted = True)
muted_env = MonitorEnvironment.objects.ensure_environment(
self.project, muted_monitor, "production"
)
assert muted_env.is_muted is True
# Test with monitor that has no muted environments
unmuted_monitor = self.create_monitor()
unmuted_env = MonitorEnvironment.objects.ensure_environment(
self.project, unmuted_monitor, "staging"
)
assert unmuted_env.is_muted is False
|
MonitorEnvironmentTestCase
|
python
|
explosion__spaCy
|
spacy/lang/az/__init__.py
|
{
"start": 117,
"end": 221
}
|
class ____(BaseDefaults):
lex_attr_getters = LEX_ATTRS
stop_words = STOP_WORDS
|
AzerbaijaniDefaults
|
python
|
fastai__fastai
|
fastai/vision/augment.py
|
{
"start": 36913,
"end": 39433
}
|
class ____(AffineCoordTfm):
"Apply a random zoom of at most `max_zoom` with probability `p` to a batch of images"
def __init__(self,
min_zoom:float=1., # Minimum zoom
max_zoom:float=1.1, # Maximum zoom
p:float=0.5, # Probability of applying zoom
draw:float|MutableSequence|Callable=None, # User defined scale of the zoom
draw_x:float|MutableSequence|Callable=None, # User defined center of the zoom in x
draw_y:float|MutableSequence|Callable=None, # User defined center of the zoom in y
size:int|tuple=None, # Output size, duplicated if one value is specified
mode='bilinear', # PyTorch `F.grid_sample` interpolation
pad_mode=PadMode.Reflection, # A `PadMode`
batch=False, # Apply identical zoom to entire batch
align_corners=True # PyTorch `F.grid_sample` align_corners
):
aff_fs = partial(zoom_mat, min_zoom=min_zoom, max_zoom=max_zoom, p=p, draw=draw, draw_x=draw_x, draw_y=draw_y, batch=batch)
super().__init__(aff_fs, size=size, mode=mode, pad_mode=pad_mode, align_corners=align_corners)
# %% ../../nbs/09_vision.augment.ipynb 178
def solve(A,B):
return torch.linalg.solve(A,B)
# %% ../../nbs/09_vision.augment.ipynb 179
def find_coeffs(
p1:Tensor, # Original points
p2:Tensor, # Target points
):
"Find coefficients for warp tfm from `p1` to `p2`"
m = []
p = p1[:,0,0]
#The equations we'll need to solve.
for i in range(p1.shape[1]):
m.append(stack([p2[:,i,0], p2[:,i,1], t1(p), t0(p), t0(p), t0(p), -p1[:,i,0]*p2[:,i,0], -p1[:,i,0]*p2[:,i,1]]))
m.append(stack([t0(p), t0(p), t0(p), p2[:,i,0], p2[:,i,1], t1(p), -p1[:,i,1]*p2[:,i,0], -p1[:,i,1]*p2[:,i,1]]))
#The 8 scalars we seek are solution of AX = B
A = stack(m).permute(2, 0, 1)
B = p1.view(p1.shape[0], 8, 1)
return solve(A,B)
# %% ../../nbs/09_vision.augment.ipynb 180
def apply_perspective(
coords:Tensor, # Original coordinates
coeffs:Tensor # Warping transformation matrice
):
"Apply perspective tranform on `coords` with `coeffs`"
sz = coords.shape
coords = coords.view(sz[0], -1, 2)
coeffs = torch.cat([coeffs, t1(coeffs[:,:1])], dim=1).view(coeffs.shape[0], 3,3)
coords1 = coords @ coeffs[...,:2].transpose(1,2) + coeffs[...,2].unsqueeze(1)
if (coords1[...,2]==0.).any(): return coords[...,:2].view(*sz)
coords = coords1/coords1[...,2].unsqueeze(-1)
return coords[...,:2].view(*sz)
# %% ../../nbs/09_vision.augment.ipynb 181
|
Zoom
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_mapper.py
|
{
"start": 88207,
"end": 89062
}
|
class ____(_fixtures.FixtureTest):
def setup_test(self):
self.buf = logging.handlers.BufferingHandler(100)
for log in [logging.getLogger("sqlalchemy.orm")]:
log.addHandler(self.buf)
self.mapper = registry().map_imperatively
def teardown_test(self):
for log in [logging.getLogger("sqlalchemy.orm")]:
log.removeHandler(self.buf)
def _current_messages(self):
return [b.getMessage() for b in self.buf.buffer]
def test_mapper_info_aliased(self):
User, users = self.classes.User, self.tables.users
tb = users.select().alias()
self.mapper(User, tb)
s = fixture_session()
s.add(User(name="ed"))
s.commit()
for msg in self._current_messages():
assert msg.startswith("(User|%%(%d anon)s) " % id(tb))
|
ORMLoggingTest
|
python
|
openai__openai-python
|
src/openai/types/fine_tuning/alpha/grader_validate_params.py
|
{
"start": 598,
"end": 875
}
|
class ____(TypedDict, total=False):
grader: Required[Grader]
"""The grader used for the fine-tuning job."""
Grader: TypeAlias = Union[
StringCheckGraderParam, TextSimilarityGraderParam, PythonGraderParam, ScoreModelGraderParam, MultiGraderParam
]
|
GraderValidateParams
|
python
|
scrapy__scrapy
|
tests/test_spidermiddleware.py
|
{
"start": 11458,
"end": 11589
}
|
class ____:
async def process_spider_output(self, response, result):
return result
|
ProcessSpiderOutputCoroutineMiddleware
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/parameter_server_strategy_v2_test.py
|
{
"start": 11989,
"end": 26847
}
|
class ____(test.TestCase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(VariablePartitioningTest, cls).setUpClass()
cls.cluster = multi_worker_test_base.create_multi_process_cluster(
num_workers=2, num_ps=2, rpc_layer="grpc")
cls.cluster_resolver = cls.cluster.cluster_resolver
@classmethod
def tearDownClass(cls):
super(VariablePartitioningTest, cls).tearDownClass()
cls.cluster.stop()
def testDefaultNoPartition(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
with strategy.scope():
v = variables.Variable([0, 1, 2, 3])
self.assertIsInstance(v, variables.Variable)
def testBasic(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))
with strategy.scope():
init1 = init_ops_v2.Constant([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
v1 = variables.Variable(
initial_value=lambda: init1(shape=(5, 2), dtype=dtypes.int64),
shape=(5, 2),
dtype=dtypes.int64)
init2 = init_ops_v2.Constant([0, 1, 2, 3, 4, 5])
v2 = variables.Variable(
initial_value=lambda: init2(shape=(6, 1), dtype=dtypes.int64),
shape=(6, 1),
dtype=dtypes.int64)
self.assertIsInstance(v1, sharded_variable.ShardedVariable)
self.assertLen(v1.variables, 2)
self.assertRegex(v1.variables[0].device, "/job:ps/replica:0/task:0")
self.assertRegex(v1.variables[1].device, "/job:ps/replica:0/task:1")
self.assertAllEqual(v1.variables[0], [[0, 1], [2, 3], [4, 5]])
self.assertAllEqual(v1.variables[1], [[6, 7], [8, 9]])
self.assertIsInstance(v2, sharded_variable.ShardedVariable)
self.assertLen(v2.variables, 2)
self.assertRegex(v2.variables[0].device, "/job:ps/replica:0/task:0")
self.assertRegex(v2.variables[1].device, "/job:ps/replica:0/task:1")
self.assertAllEqual(v2.variables[0], [[0], [1], [2]])
self.assertAllEqual(v2.variables[1], [[3], [4], [5]])
def testBasicVariableWithAggregation(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
strategy.extended._allow_run_without_coordinator = True
with strategy.scope():
v = variables.Variable(
initial_value=[0, 0, 0, 0, 0, 0, 0, 0],
dtype=dtypes.float32,
aggregation=variable_scope.VariableAggregation.SUM)
if strategy.num_replicas_in_sync > 1:
self.assertIsInstance(v, ps_values.AggregatingVariable)
else:
self.assertIsInstance(v, variables.Variable)
def replica_fn():
replica_id = distribute_lib.get_replica_context(
).replica_id_in_sync_group
val = array_ops.reshape(
math_ops.cast(replica_id + 10, dtype=v.dtype), [1])
v.assign(
array_ops.concat(
[val, constant_op.constant([1., 2., 3., 4., 5., 6., 7.])], 0))
strategy.run(replica_fn)
expected_result = np.arange(8.) * strategy.num_replicas_in_sync
for i in range(strategy.num_replicas_in_sync):
expected_result[0] = expected_result[0] + i + 10
self.assertAllEqual(v, expected_result)
def testBasicShardedVariableWithAggregation(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))
strategy.extended._allow_run_without_coordinator = True
with strategy.scope():
v = variables.Variable(
initial_value=[0, 0, 0, 0, 0, 0, 0, 0],
dtype=dtypes.float32,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertIsInstance(v, sharded_variable.ShardedVariable)
self.assertLen(v.variables, 2)
if strategy.num_replicas_in_sync > 1:
self.assertIsInstance(v.variables[0], ps_values.AggregatingVariable)
else:
self.assertIsInstance(v.variables[0], variables.Variable)
def replica_fn():
replica_id = distribute_lib.get_replica_context(
).replica_id_in_sync_group
val = array_ops.reshape(
math_ops.cast(replica_id + 10, dtype=v.dtype), [1])
v.assign(
array_ops.concat(
[val, constant_op.constant([1., 2., 3., 4., 5., 6., 7.])], 0))
strategy.run(replica_fn)
expected_result = np.arange(8.) * strategy.num_replicas_in_sync
for i in range(strategy.num_replicas_in_sync):
expected_result[0] = expected_result[0] + i + 10
expected_result = np.array_split(expected_result, 2)
self.assertAllEqual(expected_result[0], v.variables[0])
self.assertAllEqual(expected_result[1], v.variables[1])
def testNonCallableInitialValue(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(4))
with strategy.scope():
v = variables.Variable([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertIsInstance(v, sharded_variable.ShardedVariable)
self.assertLen(v.variables, 4)
self.assertRegex(v.variables[0].device, "/job:ps/replica:0/task:0")
self.assertRegex(v.variables[1].device, "/job:ps/replica:0/task:1")
self.assertRegex(v.variables[2].device, "/job:ps/replica:0/task:0")
self.assertRegex(v.variables[3].device, "/job:ps/replica:0/task:1")
self.assertAllEqual(v.variables[0], [0, 1, 2])
self.assertAllEqual(v.variables[1], [3, 4, 5])
self.assertAllEqual(v.variables[2], [6, 7])
self.assertAllEqual(v.variables[3], [8, 9])
def testNumPartitionsLargerThanSize(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(4))
with strategy.scope():
v = variables.Variable([0, 1, 2])
self.assertIsInstance(v, sharded_variable.ShardedVariable)
self.assertLen(v.variables, 3)
self.assertRegex(v.variables[0].device, "/job:ps/replica:0/task:0")
self.assertRegex(v.variables[1].device, "/job:ps/replica:0/task:1")
self.assertRegex(v.variables[2].device, "/job:ps/replica:0/task:0")
self.assertAllEqual(v.variables[0], [0])
self.assertAllEqual(v.variables[1], [1])
self.assertAllEqual(v.variables[2], [2])
def testPartitionToOne(self):
# For small variables there is only one partition.
variable_partitioner = sharded_variable.MinSizePartitioner(
min_shard_bytes=64 << 20, max_shards=2)
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, variable_partitioner)
with strategy.scope():
initializer = init_ops_v2.Constant([0] * 10)
v1 = variables.Variable(
initial_value=lambda: initializer(shape=(10,), dtype=dtypes.int64),
shape=(10,),
dtype=dtypes.int64)
v2 = variables.Variable([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.assertIsInstance(v1, variables.Variable)
self.assertNotIsInstance(v1, sharded_variable.ShardedVariable)
self.assertRegex(v1.device, "/job:ps/replica:0/task:0")
self.assertAllEqual(v1, [0] * 10)
self.assertIsInstance(v2, variables.Variable)
self.assertNotIsInstance(v2, sharded_variable.ShardedVariable)
self.assertRegex(v2.device, "/job:ps/replica:0/task:1")
self.assertAllEqual(v2, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
def testColocateWith(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))
with strategy.scope():
v1 = variables.Variable([0, 1, 2, 3])
with strategy.extended.colocate_vars_with(v1.variables[0]):
v2 = variables.Variable([4, 5])
self.assertIsInstance(v1, sharded_variable.ShardedVariable)
self.assertIsInstance(v2, variables.Variable)
self.assertNotIsInstance(v2, sharded_variable.ShardedVariable)
self.assertEqual(v2.device, v1.variables[0].device)
self.assertAllEqual(v2, [4, 5])
def testCustomPartitionAwareInitializer(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))
with strategy.scope():
initializer = PartitionAwareIdentity()
initial_value = functools.partial(
initializer, shape=(4, 4), dtype=dtypes.int64)
v = variables.Variable(
initial_value=initial_value, shape=(4, 4), dtype=dtypes.int64)
self.assertIsInstance(v, sharded_variable.ShardedVariable)
self.assertLen(v.variables, 2)
self.assertRegex(v.variables[0].device, "/job:ps/replica:0/task:0")
self.assertRegex(v.variables[1].device, "/job:ps/replica:0/task:1")
self.assertAllEqual(v.variables[0], [[1, 0, 0, 0], [0, 1, 0, 0]])
self.assertAllEqual(v.variables[1], [[0, 0, 1, 0], [0, 0, 0, 1]])
def testPartitionWhenLackOfInfo(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))
with strategy.scope():
initializer = init_ops_v2.Constant([0, 1, 2, 3])
# Shape is not explicitly specified.
v1 = variables.Variable(
initial_value=lambda: initializer(shape=(4,), dtype=dtypes.int64),
dtype=dtypes.int64)
# Dtype is not explicitly specified.
v2 = variables.Variable(
initial_value=lambda: initializer(shape=(4,), dtype=dtypes.int64),
shape=(4,))
# Neither shape nor dtype is explicitly specified.
v3 = variables.Variable(
initial_value=lambda: initializer(shape=(4,), dtype=dtypes.int64))
for v in [v1, v2, v3]:
self.assertIsInstance(v, sharded_variable.ShardedVariable)
self.assertLen(v.variables, 2)
self.assertRegex(v.variables[0].device, "/job:ps/replica:0/task:0")
self.assertRegex(v.variables[1].device, "/job:ps/replica:0/task:1")
self.assertAllEqual(v.variables[0], [0, 1])
self.assertAllEqual(v.variables[1], [2, 3])
def testInvalidPartitioner(self):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, lambda shape, dtype: None)
with self.assertRaisesRegex(ValueError, "variable_partitioner"):
with strategy.scope():
variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, lambda shape, dtype: [])
with self.assertRaisesRegex(ValueError, "variable_partitioner"):
with strategy.scope():
variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, lambda shape, dtype: [0, 1, 1])
with self.assertRaisesRegex(ValueError, "variable_partitioner"):
with strategy.scope():
variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, lambda shape, dtype: [2, 2, 1])
with self.assertRaisesRegex(ValueError, "variable_partitioner"):
with strategy.scope():
variables.Variable([[[0, 1], [2, 3]], [[0, 1], [2, 3]]])
def testCreateInsideTFFunction(self):
if test_util.is_xla_enabled():
self.skipTest("TODO(b/202760274): Would raise an error that is to be "
"investigated.")
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))
collection = []
@def_function.function
def create_vars():
if not collection:
identity = init_ops_v2.Identity()
v1 = variables.Variable([[1., 0.], [0., 1.]], dtype=dtypes.float32)
v2 = variables.Variable(lambda: identity((2, 2), dtypes.float32))
v3 = variables.Variable(
lambda: identity((2, 2), dtypes.float32),
dtype=dtypes.float32,
shape=(2, 2))
collection.extend([v1, v2, v3])
with strategy.scope():
create_vars()
for v in collection:
self.assertIsInstance(v, sharded_variable.ShardedVariable)
self.assertLen(v.variables, 2)
self.assertRegex(v.variables[0].device, "/job:ps/replica:0/task:0")
self.assertRegex(v.variables[1].device, "/job:ps/replica:0/task:1")
self.assertAllEqual(v.variables[0], [[1., 0.]])
self.assertAllEqual(v.variables[1], [[0., 1.]])
@parameterized.named_parameters(
("Restore", False, 2),
("RestoreDiffShards", False, 4),
("DelayedRestore", True, 2),
("DelayedRestoreDiffShards", True, 4),
)
def testCheckpoint(self, delayed, restore_shards):
if test_util.is_xla_enabled() and not delayed and restore_shards == 4:
self.skipTest("TODO(b/202760274): Would raise an error that is to be "
"investigated.")
def make_variable(name, shape, dtype, initializer):
initial_value = functools.partial(initializer, shape, dtype=dtype)
return variables.Variable(
name=name, initial_value=initial_value, shape=shape, dtype=dtype)
class Model(autotrackable.AutoTrackable):
def build(self):
self.w = self._add_variable_with_custom_getter(
"w",
shape=(4,),
initializer=init_ops_v2.Ones(),
getter=make_variable)
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver, sharded_variable.FixedShardsPartitioner(2))
ckpt_dir = os.path.join(self.get_temp_dir(), "checkpoint")
with strategy.scope():
model1 = Model()
model1.build()
self.assertIsInstance(model1.w, sharded_variable.ShardedVariable)
self.assertLen(model1.w.variables, 2)
model1.w.assign([1., 2., 3., 4.])
cp1 = tracking_util.Checkpoint(model=model1)
cp1.write(ckpt_dir)
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver,
sharded_variable.FixedShardsPartitioner(restore_shards))
with strategy.scope():
model2 = Model()
cp2 = tracking_util.Checkpoint(model=model2)
if delayed:
cp2.restore(ckpt_dir)
model2.build()
else:
model2.build()
cp2.restore(ckpt_dir)
self.assertIsInstance(model2.w, sharded_variable.ShardedVariable)
self.assertLen(model2.w.variables, restore_shards)
if restore_shards == 2:
self.assertAllEqual(model2.w.variables[0], [1., 2.])
self.assertAllEqual(model2.w.variables[1], [3., 4.])
elif restore_shards == 4:
self.assertAllEqual(model2.w.variables[0], [1.])
self.assertAllEqual(model2.w.variables[1], [2.])
self.assertAllEqual(model2.w.variables[2], [3.])
self.assertAllEqual(model2.w.variables[3], [4.])
|
VariablePartitioningTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/closest-dessert-cost.py
|
{
"start": 2096,
"end": 2910
}
|
class ____(object):
def closestCost(self, baseCosts, toppingCosts, target):
"""
:type baseCosts: List[int]
:type toppingCosts: List[int]
:type target: int
:rtype: int
"""
max_count = 2
combs = set([0])
for t in toppingCosts:
combs = set([c+i*t for c in combs for i in xrange(max_count+1)])
result, combs = float("inf"), sorted(combs)
for b in baseCosts:
idx = bisect.bisect_left(combs, target-b)
if idx < len(combs):
result = min(result, b+combs[idx], key=lambda x: (abs(x-target), x))
if idx > 0:
result = min(result, b+combs[idx-1], key=lambda x: (abs(x-target), x))
return result
# Time: O(n * 3^m)
# Space: O(3^m)
|
Solution3
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/indices/property_graph/retriever.py
|
{
"start": 340,
"end": 2402
}
|
class ____(BaseRetriever):
    """
    A retriever that uses multiple sub-retrievers to retrieve nodes from a property graph.

    Args:
        sub_retrievers (List[BasePGRetriever]):
            The sub-retrievers to use.
        num_workers (int, optional):
            The number of workers to use for async retrieval. Defaults to 4.
        use_async (bool, optional):
            Whether to use async retrieval. Defaults to True.
        show_progress (bool, optional):
            Whether to show progress bars. Defaults to False.
    """

    def __init__(
        self,
        sub_retrievers: List[BasePGRetriever],
        num_workers: int = 4,
        use_async: bool = True,
        show_progress: bool = False,
        **kwargs: Any,
    ) -> None:
        # NOTE(review): BaseRetriever.__init__ is not invoked here — confirm intended.
        self.sub_retrievers = sub_retrievers
        self.use_async = use_async
        self.num_workers = num_workers
        self.show_progress = show_progress

    def _deduplicate(self, nodes: List[NodeWithScore]) -> List[NodeWithScore]:
        # Keep only the first occurrence of each node text, preserving order.
        unique_nodes: List[NodeWithScore] = []
        seen_texts = set()
        for candidate in nodes:
            if candidate.text in seen_texts:
                continue
            seen_texts.add(candidate.text)
            unique_nodes.append(candidate)
        return unique_nodes

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        # Delegate to the async path when configured to do so.
        if self.use_async:
            return asyncio_run(self._aretrieve(query_bundle))

        gathered: List[NodeWithScore] = []
        for retriever in tqdm(self.sub_retrievers, disable=not self.show_progress):
            gathered.extend(retriever.retrieve(query_bundle))
        return self._deduplicate(gathered)

    async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        # Fan out to all sub-retrievers concurrently, then flatten and dedupe.
        jobs = [retriever.aretrieve(query_bundle) for retriever in self.sub_retrievers]
        nested_results = await run_jobs(
            jobs, workers=self.num_workers, show_progress=self.show_progress
        )
        flattened = [node for batch in nested_results for node in batch]
        return self._deduplicate(flattened)
|
PGRetriever
|
python
|
apache__airflow
|
providers/celery/tests/integration/celery/test_celery_executor.py
|
{
"start": 12476,
"end": 18030
}
|
class ____:
    """Tests for BulkStateFetcher.get_many across the three Celery result-backend families.

    Each test patches ``Celery.backend`` with a concrete backend and checks that
    fetched states come back keyed by task_id, with unknown tasks defaulting to PENDING.
    """

    # Logger name whose DEBUG records the tests assert on via caplog.
    bulk_state_fetcher_logger = "airflow.providers.celery.executors.celery_executor_utils.BulkStateFetcher"

    @mock.patch(
        "celery.backends.base.BaseKeyValueStoreBackend.mget",
        return_value=[json.dumps({"status": "SUCCESS", "task_id": "123"})],
    )
    def test_should_support_kv_backend(self, mock_mget, caplog):
        # Key/value backends (e.g. Redis) are queried with one bulk `mget` call.
        from airflow.providers.celery.executors import celery_executor, celery_executor_utils

        caplog.set_level(logging.DEBUG, logger=self.bulk_state_fetcher_logger)
        with _prepare_app():
            mock_backend = BaseKeyValueStoreBackend(app=celery_executor.app)
            with mock.patch(
                "airflow.providers.celery.executors.celery_executor_utils.Celery.backend", mock_backend
            ):
                caplog.clear()
                fetcher = celery_executor_utils.BulkStateFetcher(1)
                result = fetcher.get_many(
                    [
                        mock.MagicMock(task_id="123"),
                        mock.MagicMock(task_id="456"),
                    ]
                )

        # Assert called - ignore order
        mget_args, _ = mock_mget.call_args
        assert set(mget_args[0]) == {b"celery-task-meta-456", b"celery-task-meta-123"}
        mock_mget.assert_called_once_with(mock.ANY)

        assert result == {"123": ("SUCCESS", None), "456": ("PENDING", None)}
        assert caplog.messages == ["Fetched 2 state(s) for 2 task(s)"]

    @mock.patch("celery.backends.database.DatabaseBackend.ResultSession")
    def test_should_support_db_backend(self, mock_session, caplog):
        # Database result backends are queried through a SQLAlchemy session.
        from airflow.providers.celery.executors import celery_executor, celery_executor_utils

        caplog.set_level(logging.DEBUG, logger=self.bulk_state_fetcher_logger)
        with _prepare_app():
            mock_backend = DatabaseBackend(app=celery_executor.app, url="sqlite3://")
            with mock.patch(
                "airflow.providers.celery.executors.celery_executor_utils.Celery.backend", mock_backend
            ):
                caplog.clear()
                # Rebind the fixture name to the backend's own session mock.
                mock_session = mock_backend.ResultSession.return_value
                mock_session.scalars.return_value.all.return_value = [
                    mock.MagicMock(**{"to_dict.return_value": {"status": "SUCCESS", "task_id": "123"}})
                ]
                fetcher = celery_executor_utils.BulkStateFetcher(1)
                result = fetcher.get_many(
                    [
                        mock.MagicMock(task_id="123"),
                        mock.MagicMock(task_id="456"),
                    ]
                )

        assert result == {"123": ("SUCCESS", None), "456": ("PENDING", None)}
        assert caplog.messages == ["Fetched 2 state(s) for 2 task(s)"]

    @mock.patch("celery.backends.database.DatabaseBackend.ResultSession")
    def test_should_retry_db_backend(self, mock_session, caplog):
        # A transient DatabaseError on the first query must be retried transparently.
        from airflow.providers.celery.executors import celery_executor, celery_executor_utils

        caplog.set_level(logging.DEBUG, logger=self.bulk_state_fetcher_logger)
        from sqlalchemy.exc import DatabaseError

        with _prepare_app():
            mock_backend = DatabaseBackend(app=celery_executor.app, url="sqlite3://")
            with mock.patch(
                "airflow.providers.celery.executors.celery_executor_utils.Celery.backend", mock_backend
            ):
                caplog.clear()
                mock_session = mock_backend.ResultSession.return_value
                mock_retry_db_result = mock_session.scalars.return_value.all
                mock_retry_db_result.return_value = [
                    mock.MagicMock(**{"to_dict.return_value": {"status": "SUCCESS", "task_id": "123"}})
                ]
                # First call raises, second call succeeds -> exercises exactly one retry.
                mock_retry_db_result.side_effect = [
                    DatabaseError("DatabaseError", "DatabaseError", "DatabaseError"),
                    mock_retry_db_result.return_value,
                ]

                fetcher = celery_executor_utils.BulkStateFetcher(1)
                result = fetcher.get_many(
                    [
                        mock.MagicMock(task_id="123"),
                        mock.MagicMock(task_id="456"),
                    ]
                )

        assert mock_retry_db_result.call_count == 2
        assert result == {"123": ("SUCCESS", None), "456": ("PENDING", None)}
        assert caplog.messages == [
            "Failed operation _query_task_cls_from_db_backend. Retrying 2 more times.",
            "Fetched 2 state(s) for 2 task(s)",
        ]

    def test_should_support_base_backend(self, caplog):
        # Generic backends fall back to per-task state lookups on the result objects.
        from airflow.providers.celery.executors import celery_executor_utils

        caplog.set_level(logging.DEBUG, logger=self.bulk_state_fetcher_logger)
        with _prepare_app():
            mock_backend = mock.MagicMock(autospec=BaseBackend)
            with mock.patch(
                "airflow.providers.celery.executors.celery_executor_utils.Celery.backend", mock_backend
            ):
                caplog.clear()
                fetcher = celery_executor_utils.BulkStateFetcher(1)
                result = fetcher.get_many(
                    [
                        ClassWithCustomAttributes(task_id="123", state="SUCCESS"),
                        ClassWithCustomAttributes(task_id="456", state="PENDING"),
                    ]
                )

        assert result == {"123": ("SUCCESS", None), "456": ("PENDING", None)}
        assert caplog.messages == ["Fetched 2 state(s) for 2 task(s)"]
|
TestBulkStateFetcher
|
python
|
Textualize__textual
|
src/textual/_compositor.py
|
{
"start": 2151,
"end": 3654
}
|
class ____(CompositorUpdate):
    """A renderable containing the result of a render for a given region."""

    def __init__(self, strips: list[Iterable[Strip]], region: Region) -> None:
        self.strips = strips
        self.region = region

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        # Emit each row as: cursor move, the row's strips, then a newline
        # (no trailing newline after the final row).
        origin_x = self.region.x
        line_break = Segment.line()
        cursor_to = Control.move_to
        for is_last, (row, strip_line) in loop_last(
            enumerate(self.strips, self.region.y)
        ):
            yield cursor_to(origin_x, row).segment
            for strip in strip_line:
                yield from strip
            if not is_last:
                yield line_break

    def render_segments(self, console: Console) -> str:
        """Render the update to raw data, suitable for writing to terminal.

        Args:
            console: Console instance.

        Returns:
            Raw data with escape sequences.
        """
        parts: list[str] = []
        origin_x = self.region.x
        cursor_to = Control.move_to
        for is_last, (row, strip_line) in loop_last(
            enumerate(self.strips, self.region.y)
        ):
            parts.append(cursor_to(origin_x, row).segment.text)
            parts.extend(strip.render(console) for strip in strip_line)
            if not is_last:
                parts.append("\n")
        return "".join(parts)

    def __rich_repr__(self) -> rich.repr.Result:
        yield self.region
@rich.repr.auto(angular=True)
|
LayoutUpdate
|
python
|
jamielennox__requests-mock
|
tests/base.py
|
{
"start": 566,
"end": 611
}
|
class ____(testtools.TestCase):
    """Suite-wide base test case; currently extends testtools.TestCase with nothing."""
    pass
|
TestCase
|
python
|
coleifer__peewee
|
tests/fields.py
|
{
"start": 34386,
"end": 34572
}
|
class ____(TestModel):
    # Four timestamp columns exercising different TimestampField resolutions.
    # NOTE(review): the exact meaning of resolution=0/1/10/2 comes from peewee's
    # TimestampField (sub-second multipliers) — confirm against its documentation.
    ts_0 = TimestampField(resolution=0)
    ts_1 = TimestampField(resolution=1)
    ts_10 = TimestampField(resolution=10)
    ts_2 = TimestampField(resolution=2)
|
TSR
|
python
|
apache__airflow
|
airflow-core/src/airflow/dag_processing/processor.py
|
{
"start": 3360,
"end": 16117
}
|
class ____(BaseModel):
    """
    Result of DAG File Parsing.

    This is the result of a successful DAG parse, in this class, we gather all serialized DAGs,
    import errors and warnings to send back to the scheduler to store in the DB.
    """

    # Path of the DAG file that was parsed.
    fileloc: str
    # All DAGs successfully serialized out of that file.
    serialized_dags: list[LazyDeserializedDAG]
    # Parse warnings (currently always sent as an empty list by _parse_file).
    warnings: list | None = None
    # Mapping of file location -> formatted traceback for DAGs that failed to serialize.
    import_errors: dict[str, str] | None = None
    # Pydantic discriminator tag used by the ToManager message union.
    type: Literal["DagFileParsingResult"] = "DagFileParsingResult"
# Discriminated union of every message the parsing subprocess can send up to the
# DagProcessorManager; pydantic selects the concrete model via the `type` field.
ToManager = Annotated[
    DagFileParsingResult
    | GetConnection
    | GetVariable
    | PutVariable
    | GetTaskStates
    | GetTICount
    | DeleteVariable
    | GetPrevSuccessfulDagRun
    | GetPreviousDagRun
    | GetXCom
    | GetXComCount
    | GetXComSequenceItem
    | GetXComSequenceSlice
    | MaskSecret,
    Field(discriminator="type"),
]

# Discriminated union of every message the manager can send down to the parsing subprocess.
ToDagProcessor = Annotated[
    DagFileParseRequest
    | ConnectionResult
    | VariableResult
    | TaskStatesResult
    | PreviousDagRunResult
    | PrevSuccessfulDagRunResult
    | ErrorResponse
    | OKResponse
    | XComCountResponse
    | XComResult
    | XComSequenceIndexResult
    | XComSequenceSliceResult,
    Field(discriminator="type"),
]
def _pre_import_airflow_modules(file_path: str, log: FilteringBoundLogger) -> None:
    """
    Pre-import Airflow modules found in the given file.

    This prevents modules from being re-imported in each processing process,
    saving CPU time and memory.

    (The default value of "parsing_pre_import_modules" is set to True)

    :param file_path: Path to the file to scan for imports
    :param log: Logger instance to use for warnings
    """
    if not conf.getboolean("dag_processor", "parsing_pre_import_modules", fallback=True):
        return
    for module in iter_airflow_imports(file_path):
        try:
            importlib.import_module(module)
        except Exception as e:
            # Best-effort: a failed pre-import is only logged; the module will be
            # imported (and fail visibly) during the actual DAG parse if needed.
            log.warning("Error when trying to pre-import module '%s' found in %s: %s", module, file_path, e)
def _parse_file_entrypoint():
    """Subprocess entrypoint: receive a DagFileParseRequest, parse, send the result back."""
    # Mark as client-side (runs user DAG code)
    # Prevents inheriting server context from parent DagProcessorManager
    os.environ["_AIRFLOW_PROCESS_CONTEXT"] = "client"
    import structlog

    from airflow.sdk.execution_time import comms, task_runner

    # Parse DAG file, send JSON back up!
    comms_decoder = comms.CommsDecoder[ToDagProcessor, ToManager](
        body_decoder=TypeAdapter[ToDagProcessor](ToDagProcessor),
    )

    msg = comms_decoder._get_response()
    if not isinstance(msg, DagFileParseRequest):
        raise RuntimeError(f"Required first message to be a DagFileParseRequest, it was {msg}")
    # Expose the comms channel to code executed during parsing.
    task_runner.SUPERVISOR_COMMS = comms_decoder
    log = structlog.get_logger(logger_name="task")

    # Put bundle root on sys.path if needed. This allows the dag bundle to add
    # code in util modules to be shared between files within the same bundle.
    if (bundle_root := os.fspath(msg.bundle_path)) not in sys.path:
        sys.path.append(bundle_root)

    # None means the request was callback-only (nothing to send back).
    result = _parse_file(msg, log)
    if result is not None:
        comms_decoder.send(result)
def _parse_file(msg: DagFileParseRequest, log: FilteringBoundLogger) -> DagFileParsingResult | None:
    """Parse one DAG file; return a parsing result, or None for callback-only requests."""
    # TODO: Set known_pool names on DagBag!
    bag = DagBag(
        dag_folder=msg.file,
        bundle_path=msg.bundle_path,
        bundle_name=msg.bundle_name,
        include_examples=False,
        load_op_links=False,
    )
    if msg.callback_requests:
        # If the request is for callback, we shouldn't serialize the DAGs
        _execute_callbacks(bag, msg.callback_requests, log)
        return None

    serialized_dags, serialization_import_errors = _serialize_dags(bag, log)
    # Serialization failures are folded into the same import-error map the scheduler stores.
    bag.import_errors.update(serialization_import_errors)
    result = DagFileParsingResult(
        fileloc=msg.file,
        serialized_dags=serialized_dags,
        import_errors=bag.import_errors,
        # TODO: Make `bag.dag_warnings` not return SQLA model objects
        warnings=[],
    )
    return result
def _serialize_dags(
    bag: DagBag,
    log: FilteringBoundLogger,
) -> tuple[list[LazyDeserializedDAG], dict[str, str]]:
    """Serialize every DAG in *bag*; collect a per-file traceback for each failure."""
    serialization_import_errors = {}
    serialized_dags = []
    for dag in bag.dags.values():
        try:
            data = SerializedDAG.to_dict(dag)
            serialized_dags.append(LazyDeserializedDAG(data=data, last_loaded=dag.last_loaded))
        except Exception:
            log.exception("Failed to serialize DAG: %s", dag.fileloc)
            # NOTE(review): if "dagbag_import_error_traceback_depth" is unset the
            # fallback is None, and `-None` below raises TypeError — confirm a
            # default is always present in the config.
            dagbag_import_error_traceback_depth = conf.getint(
                "core", "dagbag_import_error_traceback_depth", fallback=None
            )
            serialization_import_errors[dag.fileloc] = traceback.format_exc(
                limit=-dagbag_import_error_traceback_depth
            )
    return serialized_dags, serialization_import_errors
def _get_dag_with_task(
    dagbag: DagBag, dag_id: str, task_id: str | None = None
) -> tuple[DAG, BaseOperator | MappedOperator | None]:
    """
    Retrieve a DAG and optionally a task from the DagBag.

    :param dagbag: DagBag to retrieve from
    :param dag_id: DAG ID to retrieve
    :param task_id: Optional task ID to retrieve from the DAG
    :return: tuple of (dag, task) where task is None if not requested
    :raises ValueError: If DAG or task is not found
    """
    if dag_id not in dagbag.dags:
        raise ValueError(
            f"DAG '{dag_id}' not found in DagBag. "
            f"This typically indicates a race condition where the DAG was removed or failed to parse."
        )
    dag = dagbag.dags[dag_id]

    # Guard clause: task lookup only happens when explicitly requested.
    if task_id is None:
        return dag, None

    try:
        return dag, dag.get_task(task_id)
    except TaskNotFound:
        raise ValueError(
            f"Task '{task_id}' not found in DAG '{dag_id}'. "
            f"This typically indicates a race condition where the task was removed or the DAG structure changed."
        ) from None
def _execute_callbacks(
    dagbag: DagBag, callback_requests: list[CallbackRequest], log: FilteringBoundLogger
) -> None:
    """Route each callback request to the handler matching its concrete type."""
    for request in callback_requests:
        log.debug("Processing Callback Request", request=request.to_json())
        if isinstance(request, TaskCallbackRequest):
            _execute_task_callbacks(dagbag, request, log)
        elif isinstance(request, DagCallbackRequest):
            _execute_dag_callbacks(dagbag, request, log)
        elif isinstance(request, EmailRequest):
            _execute_email_callbacks(dagbag, request, log)
        # NOTE(review): any other request type is silently ignored — confirm intended.
def _execute_dag_callbacks(dagbag: DagBag, request: DagCallbackRequest, log: FilteringBoundLogger) -> None:
from airflow.sdk.api.datamodels._generated import TIRunContext
dag, _ = _get_dag_with_task(dagbag, request.dag_id)
callbacks = dag.on_failure_callback if request.is_failure_callback else dag.on_success_callback
if not callbacks:
log.warning("Callback requested, but dag didn't have any", dag_id=request.dag_id)
return
callbacks = callbacks if isinstance(callbacks, list) else [callbacks]
ctx_from_server = request.context_from_server
if ctx_from_server is not None and ctx_from_server.last_ti is not None:
task = dag.get_task(ctx_from_server.last_ti.task_id)
runtime_ti = RuntimeTaskInstance.model_construct(
**ctx_from_server.last_ti.model_dump(exclude_unset=True),
task=task,
_ti_context_from_server=TIRunContext.model_construct(
dag_run=ctx_from_server.dag_run,
max_tries=task.retries,
),
)
context = runtime_ti.get_template_context()
context["reason"] = request.msg
else:
context: Context = { # type: ignore[no-redef]
"dag": dag,
"run_id": request.run_id,
"reason": request.msg,
}
for callback in callbacks:
log.info(
"Executing on_%s dag callback",
"failure" if request.is_failure_callback else "success",
dag_id=request.dag_id,
)
try:
callback(context)
except Exception:
log.exception("Callback failed", dag_id=request.dag_id)
Stats.incr("dag.callback_exceptions", tags={"dag_id": request.dag_id})
def _execute_task_callbacks(dagbag: DagBag, request: TaskCallbackRequest, log: FilteringBoundLogger) -> None:
if not request.is_failure_callback:
log.warning(
"Task callback requested but is not a failure callback",
dag_id=request.ti.dag_id,
task_id=request.ti.task_id,
run_id=request.ti.run_id,
)
return
dag, task = _get_dag_with_task(dagbag, request.ti.dag_id, request.ti.task_id)
if TYPE_CHECKING:
assert task is not None
if request.task_callback_type is TaskInstanceState.UP_FOR_RETRY:
callbacks = task.on_retry_callback
else:
callbacks = task.on_failure_callback
if not callbacks:
log.warning(
"Callback requested but no callback found",
dag_id=request.ti.dag_id,
task_id=request.ti.task_id,
run_id=request.ti.run_id,
ti_id=request.ti.id,
)
return
callbacks = callbacks if isinstance(callbacks, Sequence) else [callbacks]
ctx_from_server = request.context_from_server
if ctx_from_server is not None:
runtime_ti = RuntimeTaskInstance.model_construct(
**request.ti.model_dump(exclude_unset=True),
task=task,
_ti_context_from_server=ctx_from_server,
max_tries=ctx_from_server.max_tries,
)
else:
runtime_ti = RuntimeTaskInstance.model_construct(
**request.ti.model_dump(exclude_unset=True),
task=task,
)
context = runtime_ti.get_template_context()
def get_callback_representation(callback):
with contextlib.suppress(AttributeError):
return callback.__name__
with contextlib.suppress(AttributeError):
return callback.__class__.__name__
return callback
for idx, callback in enumerate(callbacks):
callback_repr = get_callback_representation(callback)
log.info("Executing Task callback at index %d: %s", idx, callback_repr)
try:
callback(context)
except Exception:
log.exception("Error in callback at index %d: %s", idx, callback_repr)
def _execute_email_callbacks(dagbag: DagBag, request: EmailRequest, log: FilteringBoundLogger) -> None:
"""Execute email notification for task failure/retry."""
dag, task = _get_dag_with_task(dagbag, request.ti.dag_id, request.ti.task_id)
if TYPE_CHECKING:
assert task is not None
if not task.email:
log.warning(
"Email callback requested but no email configured",
dag_id=request.ti.dag_id,
task_id=request.ti.task_id,
run_id=request.ti.run_id,
)
return
# Check if email should be sent based on task configuration
should_send_email = False
if request.email_type == "failure" and task.email_on_failure:
should_send_email = True
elif request.email_type == "retry" and task.email_on_retry:
should_send_email = True
if not should_send_email:
log.info(
"Email not sent - task configured with email_on_%s=False",
request.email_type,
dag_id=request.ti.dag_id,
task_id=request.ti.task_id,
run_id=request.ti.run_id,
)
return
ctx_from_server = request.context_from_server
runtime_ti = RuntimeTaskInstance.model_construct(
**request.ti.model_dump(exclude_unset=True),
task=task,
_ti_context_from_server=ctx_from_server,
max_tries=ctx_from_server.max_tries,
)
log.info(
"Sending %s email for task %s",
request.email_type,
request.ti.task_id,
dag_id=request.ti.dag_id,
run_id=request.ti.run_id,
)
try:
context = runtime_ti.get_template_context()
error = Exception(request.msg) if request.msg else None
_send_error_email_notification(task, runtime_ti, context, error, log)
except Exception:
log.exception(
"Failed to send %s email",
request.email_type,
dag_id=request.ti.dag_id,
task_id=request.ti.task_id,
run_id=request.ti.run_id,
)
def in_process_api_server() -> InProcessExecutionAPI:
    """Return a fresh InProcessExecutionAPI instance."""
    # Imported lazily so the FastAPI execution app is only built when needed.
    from airflow.api_fastapi.execution_api.app import InProcessExecutionAPI

    api = InProcessExecutionAPI()
    return api
@attrs.define(kw_only=True)
|
DagFileParsingResult
|
python
|
numpy__numpy
|
benchmarks/benchmarks/bench_ufunc.py
|
{
"start": 4293,
"end": 4903
}
|
class ____(Benchmark):
    """Benchmarks the left/right shift dunder methods across integer dtypes."""

    params = [['__lshift__', '__rshift__'],
              ['intp', 'int8', 'int16',
               'int32', 'int64', 'uint8',
               'uint16', 'uint32', 'uint64']]
    param_names = ['methods', 'npdtypes']
    timeout = 10

    def setup(self, methname, npdtypes):
        # 1000 identical values of the dtype under test, scaled by a random factor.
        dtype = getattr(np, npdtypes)
        scale = np.random.randint(9)
        self.vals = np.ones(1000, dtype=dtype) * scale

    def time_ndarray_meth(self, methname, npdtypes):
        shift = getattr(operator, methname)
        shift(self.vals, 2)
|
NDArrayLRShifts
|
python
|
tox-dev__tox
|
src/tox/tox_env/python/package.py
|
{
"start": 961,
"end": 1035
}
|
class ____(PythonPathPackageWithDeps):
    """Wheel package (marker subclass of PythonPathPackageWithDeps; adds no behavior)."""
|
WheelPackage
|
python
|
hyperopt__hyperopt
|
hyperopt/exceptions.py
|
{
"start": 603,
"end": 786
}
|
class ____(ValueError):
    """Raised when an fmin evaluation returned an invalid loss value."""

    def __init__(self, result):
        # Keep the offending evaluation result so callers can inspect it.
        super().__init__()
        self.result = result
|
InvalidLoss
|
python
|
prabhupant__python-ds
|
data_structures/graphs/transpose.py
|
{
"start": 38,
"end": 753
}
|
class ____:
    """Directed graph over hashable vertices, stored as an adjacency list."""

    def __init__(self, vertices):
        # V: declared vertex count; graph: vertex -> list of successor vertices.
        self.V = vertices
        self.graph = defaultdict(list)

    def add_edge(self, u, v):
        # Directed edge u -> v (parallel edges are allowed).
        self.graph[u].append(v)

    def print_graph(self):
        # Print each vertex followed by its successors on one line.
        for i in self.graph:
            print(i, " --> ", end=" ")
            for j in self.graph[i]:
                print(j, end=" ")
            print()

    def transpose(self):
        # Build a new graph with every edge reversed.
        # NOTE(review): refers to the class by the name `Graph`, which does not
        # match this (masked) class name in the snippet — confirm the real name.
        g = Graph(self.V)
        for i in self.graph:
            for j in self.graph[i]:
                g.add_edge(j, i)
        return g
# Demo: build a small directed graph, print it, then print its transpose.
g = Graph(5)
g.add_edge(1, 0)
g.add_edge(0, 2)
g.add_edge(2, 1)
g.add_edge(0, 3)
g.add_edge(3, 4)
g.print_graph()
print('---------')
t = g.transpose()
t.print_graph()
|
Graph
|
python
|
celery__celery
|
t/unit/worker/test_worker.py
|
{
"start": 2341,
"end": 22413
}
|
class ____(ConsumerCase):
def setup_method(self):
self.buffer = FastQueue()
self.timer = Timer()
@self.app.task(shared=False)
def foo_task(x, y, z):
return x * y * z
self.foo_task = foo_task
def teardown_method(self):
self.timer.stop()
def LoopConsumer(self, buffer=None, controller=None, timer=None, app=None,
without_mingle=True, without_gossip=True,
without_heartbeat=True, **kwargs):
if controller is None:
controller = Mock(name='.controller')
buffer = buffer if buffer is not None else self.buffer.put
timer = timer if timer is not None else self.timer
app = app if app is not None else self.app
c = Consumer(
buffer,
timer=timer,
app=app,
controller=controller,
without_mingle=without_mingle,
without_gossip=without_gossip,
without_heartbeat=without_heartbeat,
**kwargs
)
c.task_consumer = Mock(name='.task_consumer')
c.qos = QoS(c.task_consumer.qos, 10)
c.connection = Mock(name='.connection')
c.controller = c.app.WorkController()
c.heart = Mock(name='.heart')
c.controller.consumer = c
c.pool = c.controller.pool = Mock(name='.controller.pool')
c.node = Mock(name='.node')
c.event_dispatcher = mock_event_dispatcher()
return c
def NoopConsumer(self, *args, **kwargs):
c = self.LoopConsumer(*args, **kwargs)
c.loop = Mock(name='.loop')
return c
def test_info(self):
c = self.NoopConsumer()
c.connection.info.return_value = {'foo': 'bar'}
c.controller.pool.info.return_value = [Mock(), Mock()]
info = c.controller.stats()
assert info['prefetch_count'] == 10
assert info['broker']
def test_start_when_closed(self):
c = self.NoopConsumer()
c.blueprint.state = CLOSE
c.start()
def test_connection(self):
c = self.NoopConsumer()
c.blueprint.start(c)
assert isinstance(c.connection, Connection)
c.blueprint.state = RUN
c.event_dispatcher = None
c.blueprint.restart(c)
assert c.connection
c.blueprint.state = RUN
c.shutdown()
assert c.connection is None
assert c.task_consumer is None
c.blueprint.start(c)
assert isinstance(c.connection, Connection)
c.blueprint.restart(c)
c.stop()
c.shutdown()
assert c.connection is None
assert c.task_consumer is None
def test_close_connection(self):
c = self.NoopConsumer()
c.blueprint.state = RUN
step = find_step(c, consumer.Connection)
connection = c.connection
step.shutdown(c)
connection.close.assert_called()
assert c.connection is None
def test_close_connection__heart_shutdown(self):
c = self.NoopConsumer()
event_dispatcher = c.event_dispatcher
heart = c.heart
c.event_dispatcher.enabled = True
c.blueprint.state = RUN
Events = find_step(c, consumer.Events)
Events.shutdown(c)
Heart = find_step(c, consumer.Heart)
Heart.shutdown(c)
event_dispatcher.close.assert_called()
heart.stop.assert_called_with()
@patch('celery.worker.consumer.consumer.warn')
def test_receive_message_unknown(self, warn):
c = self.LoopConsumer()
c.blueprint.state = RUN
c.steps.pop()
channel = Mock(name='.channeol')
m = create_message(channel, unknown={'baz': '!!!'})
callback = self._get_on_message(c)
callback(m)
warn.assert_called()
@patch('celery.worker.strategy.to_timestamp')
def test_receive_message_eta_OverflowError(self, to_timestamp):
to_timestamp.side_effect = OverflowError()
c = self.LoopConsumer()
c.blueprint.state = RUN
c.steps.pop()
m = self.create_task_message(
Mock(), self.foo_task.name,
args=('2, 2'), kwargs={},
eta=datetime.now().isoformat(),
)
c.update_strategies()
callback = self._get_on_message(c)
callback(m)
assert m.acknowledged
@patch('celery.worker.consumer.consumer.error')
def test_receive_message_InvalidTaskError(self, error):
c = self.LoopConsumer()
c.blueprint.state = RUN
c.steps.pop()
m = self.create_task_message(
Mock(), self.foo_task.name,
args=(1, 2), kwargs='foobarbaz', id=1)
c.update_strategies()
strategy = c.strategies[self.foo_task.name] = Mock(name='strategy')
strategy.side_effect = InvalidTaskError()
callback = self._get_on_message(c)
callback(m)
error.assert_called()
assert 'Received invalid task message' in error.call_args[0][0]
@patch('celery.worker.consumer.consumer.crit')
def test_on_decode_error(self, crit):
c = self.LoopConsumer()
class MockMessage(Mock):
content_type = 'application/x-msgpack'
content_encoding = 'binary'
body = 'foobarbaz'
message = MockMessage()
c.on_decode_error(message, KeyError('foo'))
assert message.ack.call_count
assert "Can't decode message body" in crit.call_args[0][0]
def _get_on_message(self, c):
if c.qos is None:
c.qos = Mock()
c.task_consumer = Mock()
c.event_dispatcher = mock_event_dispatcher()
c.connection = Mock(name='.connection')
c.connection.get_heartbeat_interval.return_value = 0
c.connection.drain_events.side_effect = WorkerShutdown()
with pytest.raises(WorkerShutdown):
c.loop(*c.loop_args())
assert c.task_consumer.on_message
return c.task_consumer.on_message
def test_receieve_message(self):
c = self.LoopConsumer()
c.blueprint.state = RUN
m = self.create_task_message(
Mock(), self.foo_task.name,
args=[2, 4, 8], kwargs={},
)
c.update_strategies()
callback = self._get_on_message(c)
callback(m)
in_bucket = self.buffer.get_nowait()
assert isinstance(in_bucket, Request)
assert in_bucket.name == self.foo_task.name
assert in_bucket.execute() == 2 * 4 * 8
assert self.timer.empty()
def test_start_channel_error(self):
def loop_side_effect():
yield KeyError('foo')
yield SyntaxError('bar')
c = self.NoopConsumer(task_events=False, pool=BasePool())
c.loop.side_effect = loop_side_effect()
c.channel_errors = (KeyError,)
try:
with pytest.raises(KeyError):
c.start()
finally:
c.timer and c.timer.stop()
def test_start_connection_error(self):
def loop_side_effect():
yield KeyError('foo')
yield SyntaxError('bar')
c = self.NoopConsumer(task_events=False, pool=BasePool())
c.loop.side_effect = loop_side_effect()
c.pool.num_processes = 2
c.connection_errors = (KeyError,)
try:
with pytest.raises(SyntaxError):
c.start()
finally:
c.timer and c.timer.stop()
def test_loop_ignores_socket_timeout(self):
class Connection(self.app.connection_for_read().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise socket.timeout(10)
c = self.NoopConsumer()
c.connection = Connection(self.app.conf.broker_url)
c.connection.obj = c
c.qos = QoS(c.task_consumer.qos, 10)
c.loop(*c.loop_args())
def test_loop_when_socket_error(self):
class Connection(self.app.connection_for_read().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
raise OSError('foo')
c = self.LoopConsumer()
c.blueprint.state = RUN
conn = c.connection = Connection(self.app.conf.broker_url)
c.connection.obj = c
c.qos = QoS(c.task_consumer.qos, 10)
with pytest.raises(socket.error):
c.loop(*c.loop_args())
c.blueprint.state = CLOSE
c.connection = conn
c.loop(*c.loop_args())
def test_loop(self):
class Connection(self.app.connection_for_read().__class__):
obj = None
def drain_events(self, **kwargs):
self.obj.connection = None
@property
def supports_heartbeats(self):
return False
c = self.LoopConsumer()
c.blueprint.state = RUN
c.connection = Connection(self.app.conf.broker_url)
c.connection.obj = c
c.connection.get_heartbeat_interval = Mock(return_value=None)
c.qos = QoS(c.task_consumer.qos, 10)
c.loop(*c.loop_args())
c.loop(*c.loop_args())
assert c.task_consumer.consume.call_count
c.task_consumer.qos.assert_called_with(prefetch_count=10)
assert c.qos.value == 10
c.qos.decrement_eventually()
assert c.qos.value == 9
c.qos.update()
assert c.qos.value == 9
c.task_consumer.qos.assert_called_with(prefetch_count=9)
def test_ignore_errors(self):
c = self.NoopConsumer()
c.connection_errors = (AttributeError, KeyError,)
c.channel_errors = (SyntaxError,)
ignore_errors(c, Mock(side_effect=AttributeError('foo')))
ignore_errors(c, Mock(side_effect=KeyError('foo')))
ignore_errors(c, Mock(side_effect=SyntaxError('foo')))
with pytest.raises(IndexError):
ignore_errors(c, Mock(side_effect=IndexError('foo')))
def test_apply_eta_task(self):
c = self.NoopConsumer()
c.qos = QoS(None, 10)
task = Mock(name='task', id='1234213')
qos = c.qos.value
c.apply_eta_task(task)
assert task in state.reserved_requests
assert c.qos.value == qos - 1
assert self.buffer.get_nowait() is task
def test_receieve_message_eta_isoformat(self):
c = self.LoopConsumer()
c.blueprint.state = RUN
c.steps.pop()
m = self.create_task_message(
Mock(), self.foo_task.name,
eta=(datetime.now() + timedelta(days=1)).isoformat(),
args=[2, 4, 8], kwargs={},
)
c.qos = QoS(c.task_consumer.qos, 1)
current_pcount = c.qos.value
c.event_dispatcher.enabled = False
c.update_strategies()
callback = self._get_on_message(c)
callback(m)
c.timer.stop()
c.timer.join(1)
items = [entry[2] for entry in self.timer.queue]
found = 0
for item in items:
if item.args[0].name == self.foo_task.name:
found = True
assert found
assert c.qos.value > current_pcount
c.timer.stop()
def test_pidbox_callback(self):
c = self.NoopConsumer()
con = find_step(c, consumer.Control).box
con.node = Mock()
con.reset = Mock()
con.on_message('foo', 'bar')
con.node.handle_message.assert_called_with('foo', 'bar')
con.node = Mock()
con.node.handle_message.side_effect = KeyError('foo')
con.on_message('foo', 'bar')
con.node.handle_message.assert_called_with('foo', 'bar')
con.node = Mock()
con.node.handle_message.side_effect = ValueError('foo')
con.on_message('foo', 'bar')
con.node.handle_message.assert_called_with('foo', 'bar')
con.reset.assert_called()
def test_revoke(self):
c = self.LoopConsumer()
c.blueprint.state = RUN
c.steps.pop()
channel = Mock(name='channel')
id = uuid()
t = self.create_task_message(
channel, self.foo_task.name,
args=[2, 4, 8], kwargs={}, id=id,
)
state.revoked.add(id)
callback = self._get_on_message(c)
callback(t)
assert self.buffer.empty()
def test_receieve_message_not_registered(self):
c = self.LoopConsumer()
c.blueprint.state = RUN
c.steps.pop()
channel = Mock(name='channel')
m = self.create_task_message(
channel, 'x.X.31x', args=[2, 4, 8], kwargs={},
)
callback = self._get_on_message(c)
assert not callback(m)
with pytest.raises(Empty):
self.buffer.get_nowait()
assert self.timer.empty()
@patch('celery.worker.consumer.consumer.warn')
@patch('celery.worker.consumer.consumer.logger')
def test_receieve_message_ack_raises(self, logger, warn):
c = self.LoopConsumer()
c.blueprint.state = RUN
channel = Mock(name='channel')
m = self.create_task_message(
channel, self.foo_task.name,
args=[2, 4, 8], kwargs={},
)
m.headers = None
c.update_strategies()
c.connection_errors = (socket.error,)
m.reject = Mock()
m.reject.side_effect = socket.error('foo')
callback = self._get_on_message(c)
assert not callback(m)
warn.assert_called()
with pytest.raises(Empty):
self.buffer.get_nowait()
assert self.timer.empty()
m.reject_log_error.assert_called_with(logger, c.connection_errors)
def test_receive_message_eta(self):
if os.environ.get('C_DEBUG_TEST'):
pp = partial(print, file=sys.__stderr__)
else:
def pp(*args, **kwargs):
pass
pp('TEST RECEIVE MESSAGE ETA')
pp('+CREATE MYKOMBUCONSUMER')
c = self.LoopConsumer()
pp('-CREATE MYKOMBUCONSUMER')
c.steps.pop()
channel = Mock(name='channel')
pp('+ CREATE MESSAGE')
m = self.create_task_message(
channel, self.foo_task.name,
args=[2, 4, 8], kwargs={},
eta=(datetime.now() + timedelta(days=1)).isoformat(),
)
pp('- CREATE MESSAGE')
try:
pp('+ BLUEPRINT START 1')
c.blueprint.start(c)
pp('- BLUEPRINT START 1')
p = c.app.conf.broker_connection_retry
c.app.conf.broker_connection_retry = False
pp('+ BLUEPRINT START 2')
c.blueprint.start(c)
pp('- BLUEPRINT START 2')
c.app.conf.broker_connection_retry = p
pp('+ BLUEPRINT RESTART')
c.blueprint.restart(c)
pp('- BLUEPRINT RESTART')
pp('+ GET ON MESSAGE')
callback = self._get_on_message(c)
pp('- GET ON MESSAGE')
pp('+ CALLBACK')
callback(m)
pp('- CALLBACK')
finally:
pp('+ STOP TIMER')
c.timer.stop()
pp('- STOP TIMER')
try:
pp('+ JOIN TIMER')
c.timer.join()
pp('- JOIN TIMER')
except RuntimeError:
pass
in_hold = c.timer.queue[0]
assert len(in_hold) == 3
eta, priority, entry = in_hold
task = entry.args[0]
assert isinstance(task, Request)
assert task.name == self.foo_task.name
assert task.execute() == 2 * 4 * 8
with pytest.raises(Empty):
self.buffer.get_nowait()
def test_reset_pidbox_node(self):
c = self.NoopConsumer()
con = find_step(c, consumer.Control).box
con.node = Mock()
chan = con.node.channel = Mock()
chan.close.side_effect = socket.error('foo')
c.connection_errors = (socket.error,)
con.reset()
chan.close.assert_called_with()
def test_reset_pidbox_node_green(self):
c = self.NoopConsumer(pool=Mock(is_green=True))
con = find_step(c, consumer.Control)
assert isinstance(con.box, gPidbox)
con.start(c)
c.pool.spawn_n.assert_called_with(con.box.loop, c)
def test_green_pidbox_node(self):
pool = Mock()
pool.is_green = True
c = self.NoopConsumer(pool=Mock(is_green=True))
controller = find_step(c, consumer.Control)
class BConsumer(Mock):
def __enter__(self):
self.consume()
return self
def __exit__(self, *exc_info):
self.cancel()
controller.box.node.listen = BConsumer()
connections = []
class Connection:
calls = 0
def __init__(self, obj):
connections.append(self)
self.obj = obj
self.default_channel = self.channel()
self.closed = False
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def channel(self):
return Mock()
def as_uri(self):
return 'dummy://'
def drain_events(self, **kwargs):
if not self.calls:
self.calls += 1
raise socket.timeout()
self.obj.connection = None
controller.box._node_shutdown.set()
def close(self):
self.closed = True
c.connection_for_read = lambda: Connection(obj=c)
controller = find_step(c, consumer.Control)
controller.box.loop(c)
controller.box.node.listen.assert_called()
assert controller.box.consumer
controller.box.consumer.consume.assert_called_with()
assert c.connection is None
assert connections[0].closed
@patch('kombu.connection.Connection._establish_connection')
@patch('kombu.utils.functional.sleep')
def test_connect_errback(self, sleep, connect):
def connect_side_effect():
yield Mock()
while True:
yield ChannelError('error')
c = self.NoopConsumer()
Transport.connection_errors = (ChannelError,)
connect.side_effect = connect_side_effect()
c.connect()
connect.assert_called_with()
def test_stop_pidbox_node(self):
c = self.NoopConsumer()
cont = find_step(c, consumer.Control)
cont._node_stopped = Event()
cont._node_shutdown = Event()
cont._node_stopped.set()
cont.stop(c)
def test_start__loop(self):
class _QoS:
prev = 3
value = 4
def update(self):
self.prev = self.value
init_callback = Mock(name='init_callback')
c = self.NoopConsumer(init_callback=init_callback)
c.qos = _QoS()
c.connection = Connection(self.app.conf.broker_url)
c.connection.get_heartbeat_interval = Mock(return_value=None)
c.iterations = 0
def raises_KeyError(*args, **kwargs):
c.iterations += 1
if c.qos.prev != c.qos.value:
c.qos.update()
if c.iterations >= 2:
raise KeyError('foo')
c.loop = raises_KeyError
with pytest.raises(KeyError):
c.start()
assert c.iterations == 2
assert c.qos.prev == c.qos.value
init_callback.reset_mock()
c = self.NoopConsumer(task_events=False, init_callback=init_callback)
c.qos = _QoS()
c.connection = Connection(self.app.conf.broker_url)
c.connection.get_heartbeat_interval = Mock(return_value=None)
c.loop = Mock(side_effect=socket.error('foo'))
with pytest.raises(socket.error):
c.start()
c.loop.assert_called()
def test_reset_connection_with_no_node(self):
c = self.NoopConsumer()
c.steps.pop()
c.blueprint.start(c)
|
test_Consumer
|
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config_vectorizers.py
|
{
"start": 15317,
"end": 15544
}
|
class ____(BaseModel):
"""Use this class when defining the fields to use in the `Multi2VecClip` and `Multi2VecBind` vectorizers."""
name: str
weight: Optional[float] = Field(default=None, exclude=True)
|
Multi2VecField
|
python
|
huggingface__transformers
|
src/transformers/models/speech_to_text/modeling_speech_to_text.py
|
{
"start": 2308,
"end": 3707
}
|
class ____(nn.Module):
"""
Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation
via gated linear units (https://huggingface.co/papers/1911.08460)
"""
def __init__(self, config):
super().__init__()
self.config = config
self.num_layers = config.num_conv_layers
self.in_channels = config.input_feat_per_channel * config.input_channels
self.mid_channels = config.conv_channels
self.out_channels = config.d_model
self.kernel_sizes = config.conv_kernel_sizes
self.conv_layers = nn.ModuleList(
nn.Conv1d(
self.in_channels if i == 0 else self.mid_channels // 2,
self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2,
kernel_size=k,
stride=2,
padding=k // 2,
)
for i, k in enumerate(self.kernel_sizes)
)
def forward(self, input_features):
hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T
for conv in self.conv_layers:
hidden_states = conv(hidden_states)
hidden_states = nn.functional.glu(hidden_states, dim=1)
hidden_states = hidden_states.transpose(1, 2).contiguous() # -> T x B x (C x D)
return hidden_states
|
Conv1dSubsampler
|
python
|
jpadilla__pyjwt
|
jwt/jwks_client.py
|
{
"start": 409,
"end": 4307
}
|
class ____:
def __init__(
self,
uri: str,
cache_keys: bool = False,
max_cached_keys: int = 16,
cache_jwk_set: bool = True,
lifespan: float = 300,
headers: Optional[Dict[str, Any]] = None,
timeout: float = 30,
ssl_context: Optional[SSLContext] = None,
):
if headers is None:
headers = {}
self.uri = uri
self.jwk_set_cache: Optional[JWKSetCache] = None
self.headers = headers
self.timeout = timeout
self.ssl_context = ssl_context
if cache_jwk_set:
# Init jwt set cache with default or given lifespan.
# Default lifespan is 300 seconds (5 minutes).
if lifespan <= 0:
raise PyJWKClientError(
f'Lifespan must be greater than 0, the input is "{lifespan}"'
)
self.jwk_set_cache = JWKSetCache(lifespan)
else:
self.jwk_set_cache = None
if cache_keys:
# Cache signing keys
# Ignore mypy (https://github.com/python/mypy/issues/2427)
self.get_signing_key = lru_cache(maxsize=max_cached_keys)(
self.get_signing_key
) # type: ignore
def fetch_data(self) -> Any:
jwk_set: Any = None
try:
r = urllib.request.Request(url=self.uri, headers=self.headers)
with urllib.request.urlopen(
r, timeout=self.timeout, context=self.ssl_context
) as response:
jwk_set = json.load(response)
except (URLError, TimeoutError) as e:
raise PyJWKClientConnectionError(
f'Fail to fetch data from the url, err: "{e}"'
) from e
else:
return jwk_set
finally:
if self.jwk_set_cache is not None:
self.jwk_set_cache.put(jwk_set)
def get_jwk_set(self, refresh: bool = False) -> PyJWKSet:
data = None
if self.jwk_set_cache is not None and not refresh:
data = self.jwk_set_cache.get()
if data is None:
data = self.fetch_data()
if not isinstance(data, dict):
raise PyJWKClientError("The JWKS endpoint did not return a JSON object")
return PyJWKSet.from_dict(data)
def get_signing_keys(self, refresh: bool = False) -> List[PyJWK]:
jwk_set = self.get_jwk_set(refresh)
signing_keys = [
jwk_set_key
for jwk_set_key in jwk_set.keys
if jwk_set_key.public_key_use in ["sig", None] and jwk_set_key.key_id
]
if not signing_keys:
raise PyJWKClientError("The JWKS endpoint did not contain any signing keys")
return signing_keys
def get_signing_key(self, kid: str) -> PyJWK:
signing_keys = self.get_signing_keys()
signing_key = self.match_kid(signing_keys, kid)
if not signing_key:
# If no matching signing key from the jwk set, refresh the jwk set and try again.
signing_keys = self.get_signing_keys(refresh=True)
signing_key = self.match_kid(signing_keys, kid)
if not signing_key:
raise PyJWKClientError(
f'Unable to find a signing key that matches: "{kid}"'
)
return signing_key
def get_signing_key_from_jwt(self, token: str | bytes) -> PyJWK:
unverified = decode_token(token, options={"verify_signature": False})
header = unverified["header"]
return self.get_signing_key(header.get("kid"))
@staticmethod
def match_kid(signing_keys: List[PyJWK], kid: str) -> Optional[PyJWK]:
signing_key = None
for key in signing_keys:
if key.key_id == kid:
signing_key = key
break
return signing_key
|
PyJWKClient
|
python
|
huggingface__transformers
|
tests/models/idefics2/test_modeling_idefics2.py
|
{
"start": 1485,
"end": 5658
}
|
class ____:
def __init__(
self,
parent,
is_training=True,
batch_size=2,
num_images=2,
seq_length=10,
vision_config={
"image_size": 12,
"patch_size": 12,
"num_channels": 3,
"hidden_size": 32,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"intermediate_size": 32,
"dropout": 0.1,
"attention_dropout": 0.1,
"initializer_range": 0.02,
},
perceiver_config={
"hidden_act": "silu",
"resampler_n_latents": 2,
"resampler_depth": 2,
"resampler_n_heads": 2,
"num_key_value_heads": 1,
"resampler_head_dim": 12,
"attention_dropout": 0.0,
},
text_config={
"vocab_size": 100,
"hidden_size": 64,
"intermediate_size": 56,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"num_key_value_heads": 2,
"hidden_act": "silu",
"max_position_embeddings": 256,
"initializer_range": 0.02,
"rms_norm_eps": 1e-6,
"pad_token_id": 0, # None in the original configuration_mistral, we set it to the unk_token_id
"bos_token_id": 1,
"eos_token_id": 2,
"image_token_id": 99,
"tie_word_embeddings": False,
"rope_theta": 10000.0,
"sliding_window": 32,
"attention_dropout": 0.0,
},
use_cache=False,
tie_word_embeddings=False,
image_token_id=99,
):
self.parent = parent
self.pad_token_id = text_config["pad_token_id"]
self.is_training = is_training
self.batch_size = batch_size
self.num_images = num_images
self.num_channels = 3
self.seq_length = seq_length
self.use_cache = use_cache
self.image_token_id = image_token_id
self.tie_word_embeddings = tie_word_embeddings
# Hack - add properties here so use common tests
self.vocab_size = text_config["vocab_size"]
self.num_hidden_layers = text_config["num_hidden_layers"]
self.num_attention_heads = text_config["num_attention_heads"]
self.hidden_size = text_config["hidden_size"]
self.vision_config = vision_config
self.perceiver_config = perceiver_config
self.text_config = text_config
def get_config(self):
return Idefics2Config(
use_cache=self.use_cache,
image_token_id=self.image_token_id,
tie_word_embeddings=self.tie_word_embeddings,
vision_config=self.vision_config,
perceiver_config=self.perceiver_config,
text_config=self.text_config,
vocab_size=self.vocab_size,
)
def prepare_config_and_inputs(self):
pixel_values = floats_tensor(
[
self.batch_size,
self.num_images,
self.vision_config["num_channels"],
self.vision_config["image_size"],
self.vision_config["image_size"],
]
)
config = self.get_config()
return config, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
# For simplicity just set the last n tokens to the image token
n_image_tokens_per_batch = self.num_images * self.perceiver_config["resampler_n_latents"]
input_ids[input_ids == self.image_token_id] = self.pad_token_id
input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id
attention_mask = input_ids.ne(1).to(torch_device)
inputs_dict = {
"pixel_values": pixel_values,
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
|
Idefics2VisionText2TextModelTester
|
python
|
bokeh__bokeh
|
src/bokeh/io/notebook.py
|
{
"start": 5251,
"end": 24665
}
|
class ____(TypedDict):
load: Load
doc: ShowDoc
app: ShowApp
def install_notebook_hook(notebook_type: NotebookType, load: Load, show_doc: ShowDoc,
show_app: ShowApp, overwrite: bool = False) -> None:
''' Install a new notebook display hook.
Bokeh comes with support for Jupyter notebooks built-in. However, there are
other kinds of notebooks in use by different communities. This function
provides a mechanism for other projects to instruct Bokeh how to display
content in other notebooks.
This function is primarily of use to developers wishing to integrate Bokeh
with new notebook types.
Args:
notebook_type (str) :
A name for the notebook type, e.e. ``'Jupyter'`` or ``'Zeppelin'``
If the name has previously been installed, a ``RuntimeError`` will
be raised, unless ``overwrite=True``
load (callable) :
A function for loading BokehJS in a notebook type. The function
will be called with the following arguments:
.. code-block:: python
load(
resources, # A Resources object for how to load BokehJS
verbose, # Whether to display verbose loading banner
hide_banner, # Whether to hide the output banner entirely
load_timeout # Time after which to report a load fail error
)
show_doc (callable) :
A function for displaying Bokeh standalone documents in the
notebook type. This function will be called with the following
arguments:
.. code-block:: python
show_doc(
obj, # the Bokeh object to display
state, # current bokeh.io "state"
notebook_handle # whether a notebook handle was requested
)
If the notebook platform is capable of supporting in-place updates
to plots then this function may return an opaque notebook handle
that can be used for that purpose. The handle will be returned by
``show()``, and can be used by as appropriate to update plots, etc.
by additional functions in the library that installed the hooks.
show_app (callable) :
A function for displaying Bokeh applications in the notebook
type. This function will be called with the following arguments:
.. code-block:: python
show_app(
app, # the Bokeh Application to display
state, # current bokeh.io "state"
notebook_url, # URL to the current active notebook page
**kw # any backend-specific keywords passed as-is
)
overwrite (bool, optional) :
Whether to allow an existing hook to be overwritten by a new
definition (default: False)
Returns:
None
Raises:
RuntimeError
If ``notebook_type`` is already installed and ``overwrite=False``
'''
if notebook_type in _HOOKS and not overwrite:
raise RuntimeError(f"hook for notebook type {notebook_type!r} already exists")
_HOOKS[notebook_type] = Hooks(load=load, doc=show_doc, app=show_app)
def push_notebook(*, document: Document | None = None, state: State | None = None,
handle: CommsHandle | None = None) -> None:
''' Update Bokeh plots in a Jupyter notebook output cells with new data
or property values.
When working inside the notebook, the ``show`` function can be passed the
argument ``notebook_handle=True``, which will cause it to return a
handle object that can be used to update the Bokeh output later. When
``push_notebook`` is called, any property updates (e.g. plot titles or
data source values, etc.) since the last call to ``push_notebook`` or
the original ``show`` call are applied to the Bokeh output in the
previously rendered Jupyter output cell.
Several example notebooks can be found in the GitHub repository in
the :bokeh-tree:`examples/output/jupyter/push_notebook` directory.
Args:
document (Document, optional):
A |Document| to push from. If None uses ``curdoc()``. (default:
None)
state (State, optional) :
A :class:`State` object. If None, then the current default
state (set by |output_file|, etc.) is used. (default: None)
Returns:
None
Examples:
Typical usage is typically similar to this:
.. code-block:: python
from bokeh.plotting import figure
from bokeh.io import output_notebook, push_notebook, show
output_notebook()
plot = figure()
plot.scatter([1,2,3], [4,6,5])
handle = show(plot, notebook_handle=True)
# Update the plot title in the earlier cell
plot.title.text = "New Title"
push_notebook(handle=handle)
'''
from ..protocol import Protocol as BokehProtocol
if state is None:
state = curstate()
if not document:
document = state.document
if not document:
from ..util.warnings import warn
warn("No document to push")
return
if handle is None:
handle = state.last_comms_handle
if not handle:
from ..util.warnings import warn
warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()")
return
events = list(handle.doc.callbacks._held_events)
# This is to avoid having an exception raised for attempting to create a
# PATCH-DOC with no events. In the notebook, we just want to silently
# ignore calls to push_notebook when there are no new events
if len(events) == 0:
return
handle.doc.callbacks._held_events = []
msg = BokehProtocol().create("PATCH-DOC", cast(list["DocumentPatchedEvent"], events)) # XXX: either fix types or filter events
handle.comms.send(msg.header_json)
handle.comms.send(msg.metadata_json)
handle.comms.send(msg.content_json)
for buffer in msg.buffers:
header = json.dumps(buffer.ref)
payload = buffer.to_bytes()
handle.comms.send(header)
handle.comms.send(buffers=[payload])
def run_notebook_hook(notebook_type: NotebookType, action: Literal["load", "doc", "app"], *args: Any, **kwargs: Any) -> Any:
''' Run an installed notebook hook with supplied arguments.
Args:
notebook_type (str) :
Name of an existing installed notebook hook
action (str) :
Name of the hook action to execute, ``'doc'`` or ``'app'``
All other arguments and keyword arguments are passed to the hook action
exactly as supplied.
Returns:
Result of the hook action, as-is
Raises:
RuntimeError
If the hook or specific action is not installed
'''
if notebook_type not in _HOOKS:
raise RuntimeError(f"no display hook installed for notebook type {notebook_type!r}")
if _HOOKS[notebook_type][action] is None:
raise RuntimeError(f"notebook hook for {notebook_type!r} did not install {action!r} action")
return _HOOKS[notebook_type][action](*args, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def destroy_server(server_id: ID) -> None:
''' Given a UUID id of a div removed or replaced in the Jupyter
notebook, destroy the corresponding server sessions and stop it.
'''
server = curstate().uuid_to_server.get(server_id, None)
if server is None:
log.debug(f"No server instance found for uuid: {server_id!r}")
return
try:
for session in server.get_sessions():
session.destroy()
server.stop()
del curstate().uuid_to_server[server_id]
except Exception as e:
log.debug(f"Could not destroy server for id {server_id!r}: {e}")
def get_comms(target_name: str) -> Comm:
''' Create a Jupyter comms object for a specific target, that can
be used to update Bokeh documents in the Jupyter notebook.
Args:
target_name (str) : the target name the Comms object should connect to
Returns
Jupyter Comms
'''
# NOTE: must defer all IPython imports inside functions
from ipykernel.comm import Comm
return Comm(target_name=target_name, data={})
def install_jupyter_hooks() -> None:
'''
'''
install_notebook_hook('jupyter', load_notebook, show_doc, show_app)
def load_notebook(resources: Resources | None = None, verbose: bool = False,
hide_banner: bool = False, load_timeout: int = 5000) -> None:
''' Prepare the IPython notebook for displaying Bokeh plots.
Args:
resources (Resource, optional) :
how and where to load BokehJS from (default: CDN)
verbose (bool, optional) :
whether to report detailed settings (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
load_timeout (int, optional) :
Timeout in milliseconds when plots assume load timed out (default: 5000)
.. warning::
Clearing the output cell containing the published BokehJS
resources HTML code may cause Bokeh CSS styling to be removed.
Returns:
None
'''
global _NOTEBOOK_LOADED
from .. import __version__
from ..core.templates import NOTEBOOK_LOAD
from ..embed.bundle import bundle_for_objs_and_resources
from ..resources import Resources
from ..settings import settings
from ..util.serialization import make_globally_unique_css_safe_id
if resources is None:
resources = Resources(mode=settings.resources())
element_id: ID | None
html: str | None
if not hide_banner:
if resources.mode == 'inline':
js_info: str | list[str] = 'inline'
css_info: str | list[str] = 'inline'
else:
js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files
warnings = ["Warning: " + msg.text for msg in resources.messages if msg.type == 'warn']
if _NOTEBOOK_LOADED and verbose:
warnings.append('Warning: BokehJS previously loaded')
element_id = make_globally_unique_css_safe_id()
html = NOTEBOOK_LOAD.render(
element_id = element_id,
verbose = verbose,
js_info = js_info,
css_info = css_info,
bokeh_version = __version__,
warnings = warnings,
)
else:
element_id = None
html = None
_NOTEBOOK_LOADED = resources
bundle = bundle_for_objs_and_resources(None, resources)
nb_js = _loading_js(bundle, element_id, load_timeout, register_mime=True)
jl_js = _loading_js(bundle, element_id, load_timeout, register_mime=False)
if html is not None:
publish_display_data({'text/html': html})
publish_display_data({
JS_MIME_TYPE: nb_js,
LOAD_MIME_TYPE: jl_js,
})
def publish_display_data(data: dict[str, Any], metadata: dict[Any, Any] | None = None, *, transient: dict[str, Any] | None = None, **kwargs: Any) -> None:
'''
'''
# This import MUST be deferred or it will introduce a hard dependency on IPython
from IPython.display import publish_display_data
publish_display_data(data, metadata, transient=transient, **kwargs)
ProxyUrlFunc: TypeAlias = Callable[[int | None], str]
def show_app(
app: Application,
state: State,
notebook_url: str | ProxyUrlFunc = DEFAULT_JUPYTER_URL,
port: int = 0,
**kw: Any,
) -> None:
''' Embed a Bokeh server application in a Jupyter Notebook output cell.
Args:
app (Application or callable) :
A Bokeh Application to embed inline in a Jupyter notebook.
state (State) :
** Unused **
notebook_url (str or callable) :
The URL of the notebook server that is running the embedded app.
If ``notebook_url`` is a string, the value string is parsed to
construct the origin and full server URLs.
If notebook_url is a callable, it must accept one parameter,
which will be the server port, or None. If passed a port,
the callable must generate the server URL, otherwise if passed
None, it must generate the origin URL for the server.
If the environment variable JUPYTER_BOKEH_EXTERNAL_URL is set
to the external URL of a JupyterHub, notebook_url is overridden
with a callable which enables Bokeh to traverse the JupyterHub
proxy without specifying this parameter.
port (int) :
A port for the embedded server will listen on.
By default the port is 0, which results in the server listening
on a random dynamic port.
Any additional keyword arguments are passed to :class:`~bokeh.server.Server` (added in version 1.1)
Returns:
None
'''
logging.basicConfig()
from tornado.ioloop import IOLoop
from ..core.types import ID
from ..server.server import Server
loop = IOLoop.current()
notebook_url = _update_notebook_url_from_env(notebook_url)
if callable(notebook_url):
origin = notebook_url(None)
else:
origin = _origin_url(notebook_url)
server = Server({"/": app}, io_loop=loop, port=port, allow_websocket_origin=[origin], **kw)
server_id = ID(uuid4().hex)
curstate().uuid_to_server[server_id] = server
server.start()
if callable(notebook_url):
url = notebook_url(server.port)
else:
url = _server_url(notebook_url, server.port)
logging.debug(f"Server URL is {url}")
logging.debug(f"Origin URL is {origin}")
from ..embed import server_document
script = server_document(url, resources=None)
publish_display_data({
HTML_MIME_TYPE: script,
EXEC_MIME_TYPE: "",
}, metadata={
EXEC_MIME_TYPE: {"server_id": server_id},
})
@overload
def show_doc(obj: Model, state: State) -> None: ...
@overload
def show_doc(obj: Model, state: State, notebook_handle: CommsHandle) -> CommsHandle: ...
def show_doc(obj: Model, state: State, notebook_handle: CommsHandle | None = None) -> CommsHandle | None:
'''
'''
if obj not in state.document.roots:
state.document.add_root(obj)
from ..embed.notebook import notebook_content
comms_target = make_id() if notebook_handle else None
(script, div, cell_doc) = notebook_content(obj, comms_target)
publish_display_data({HTML_MIME_TYPE: div})
publish_display_data({JS_MIME_TYPE: script, EXEC_MIME_TYPE: ""}, metadata={EXEC_MIME_TYPE: {"id": obj.id}})
# Comms handling relies on the fact that the cell_doc returned by
# notebook copy has models with the same IDs as the original curdoc
# they were copied from
if comms_target:
handle = CommsHandle(get_comms(comms_target), cell_doc)
state.document.callbacks.on_change_dispatch_to(handle)
state.last_comms_handle = handle
return handle
return None
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_HOOKS: dict[str, Hooks] = {}
_NOTEBOOK_LOADED: Resources | None = None
def _loading_js(bundle: Bundle, element_id: ID | None, load_timeout: int = 5000, register_mime: bool = True) -> str:
'''
'''
from ..core.templates import AUTOLOAD_NB_JS
return AUTOLOAD_NB_JS.render(
bundle = bundle,
elementid = element_id,
force = True,
timeout = load_timeout,
register_mime = register_mime,
)
def _origin_url(url: str) -> str:
'''
'''
if url.startswith("http"):
url = url.split("//")[1]
return url
def _server_url(url: str, port: int | None) -> str:
'''
'''
port_ = f":{port}" if port is not None else ""
if url.startswith("http"):
return f"{url.rsplit(':', 1)[0]}{port_}{'/'}"
else:
return f"http://{url.split(':')[0]}{port_}{'/'}"
def _remote_jupyter_proxy_url(port: int | None) -> str:
""" Callable to configure Bokeh's show method when a proxy must be
configured. If port is None we're asking about the URL
for the origin header.
Taken from documentation here:
https://docs.bokeh.org/en/latest/docs/user_guide/output/jupyter.html#jupyterhub
and made an implicit override when JUPYTER_BOKEH_EXTERNAL_URL is defined in
a user's environment to the external hostname of the hub, e.g. https://our-hub.edu
Args:
port (int):
random port generated by bokeh to avoid re-using recently closed ports
Returns:
str:
URL capable of traversing the JupyterHub proxy to return to this notebook session.
"""
base_url = os.environ['JUPYTER_BOKEH_EXTERNAL_URL']
host = urllib.parse.urlparse(base_url).netloc
# If port is None we're asking for the URL origin
# so return the public hostname.
if port is None:
return host
service_url_path = os.environ['JUPYTERHUB_SERVICE_PREFIX']
proxy_url_path = f'proxy/{port}'
user_url = urllib.parse.urljoin(base_url, service_url_path)
full_url = urllib.parse.urljoin(user_url, proxy_url_path)
return full_url
def _update_notebook_url_from_env(notebook_url: str | ProxyUrlFunc) -> str | ProxyUrlFunc:
"""If the environment variable ``JUPYTER_BOKEH_EXTERNAL_URL`` is defined, returns a function which
generates URLs which can traverse the JupyterHub proxy. Otherwise returns ``notebook_url`` unmodified.
A warning is issued if ``notebook_url`` is not the default and
``JUPYTER_BOKEH_EXTERNAL_URL`` is also defined since setting the
environment variable makes specifying ``notebook_url`` irrelevant.
Args:
notebook_url (str | ProxyUrlFunc):
Either a URL string which defaults or a function that given a port
number will generate a URL suitable for traversing the JupyterHub proxy.
Returns:
str | ProxyUrlFunc
Either a URL string or a function that generates a URL string given a port number. The
latter function may be user supplied as the input parameter or defined internally by Bokeh
when ``JUPYTER_BOKEH_EXTERNAL_URL`` is set.
"""
if os.environ.get("JUPYTER_BOKEH_EXTERNAL_URL"):
if notebook_url != DEFAULT_JUPYTER_URL:
log.warning("Environment var 'JUPYTER_BOKEH_EXTERNAL_URL' is defined. Ignoring 'notebook_url' parameter.")
return _remote_jupyter_proxy_url
else:
return notebook_url
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
Hooks
|
python
|
realpython__materials
|
inheritance-and-composition/choosing/contacts.py
|
{
"start": 509,
"end": 1282
}
|
class ____:
def __init__(self):
self._employee_addresses = {
1: Address("121 Admin Rd.", "Concord", "NH", "03301"),
2: Address("67 Paperwork Ave", "Manchester", "NH", "03101"),
3: Address("15 Rose St", "Concord", "NH", "03301", "Apt. B-1"),
4: Address("39 Sole St.", "Concord", "NH", "03301"),
5: Address("99 Mountain Rd.", "Concord", "NH", "03301"),
}
def get_employee_address(self, employee_id):
address = self._employee_addresses.get(employee_id)
if not address:
raise ValueError(employee_id)
return address
_address_book = _AddressBook()
def get_employee_address(employee_id):
return _address_book.get_employee_address(employee_id)
|
_AddressBook
|
python
|
davidhalter__jedi
|
test/refactor/inline.py
|
{
"start": 2743,
"end": 6041
}
|
class ____: pass
#? 5 error
test(A)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot inline a class
# -------------------------------------------------- function
def foo(a):
return a + 1
#? 5 error
test(foo(1))
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot inline a function
# -------------------------------------------------- for-stmt
for x in []:
#? 9 error
test(x)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
Cannot inline a for_stmt
# -------------------------------------------------- simple
def test():
#? 4
a = (30 + b, c) + 1
return test(100, a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,5 +1,4 @@
def test():
#? 4
- a = (30 + b, c) + 1
- return test(100, a)
+ return test(100, (30 + b, c) + 1)
# -------------------------------------------------- tuple
if 1:
#? 4
a = 1, 2
return test(100, a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,5 +1,4 @@
if 1:
#? 4
- a = 1, 2
- return test(100, a)
+ return test(100, (1, 2))
# -------------------------------------------------- multiplication-add-parens1
a = 1+2
#? 11
test(100 * a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,4 +1,3 @@
-a = 1+2
#? 11
-test(100 * a)
+test(100 * (1+2))
# -------------------------------------------------- multiplication-add-parens2
a = 1+2
#? 11
(x, 100 * a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,4 +1,3 @@
-a = 1+2
#? 11
-(x, 100 * a)
+(x, 100 * (1+2))
# -------------------------------------------------- multiplication-add-parens3
x
a = 1+2
#? 9
(100 ** a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,5 +1,4 @@
x
-a = 1+2
#? 9
-(100 ** a)
+(100 ** (1+2))
# -------------------------------------------------- no-add-parens1
x
a = 1+2
#? 5
test(a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,5 +1,4 @@
x
-a = 1+2
#? 5
-test(a)
+test(1+2)
# -------------------------------------------------- no-add-parens2
a = 1+2
#? 9
test(3, a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,4 +1,3 @@
-a = 1+2
#? 9
-test(3, a)
+test(3, 1+2)
# -------------------------------------------------- no-add-parens3
a = 1|2
#? 5
(3, a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,4 +1,3 @@
-a = 1|2
#? 5
-(3, a)
+(3, 1|2)
# -------------------------------------------------- comment
a = 1 and 2 # foo
#? 9
(3, 3 * a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,4 +1,4 @@
-a = 1 and 2 # foo
+ # foo
#? 9
-(3, 3 * a)
+(3, 3 * (1 and 2))
# -------------------------------------------------- semicolon
a = 1, 2 ; b = 3
#? 9
(3, 3 == a)
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,4 +1,4 @@
-a = 1, 2 ; b = 3
+ b = 3
#? 9
-(3, 3 == a)
+(3, 3 == (1, 2))
# -------------------------------------------------- no-tree-name
a = 1 + 2
#? 0
a.conjugate
# ++++++++++++++++++++++++++++++++++++++++++++++++++
--- inline.py
+++ inline.py
@@ -1,4 +1,3 @@
-a = 1 + 2
#? 0
-a.conjugate
+(1 + 2).conjugate
|
A
|
python
|
numpy__numpy
|
numpy/_core/tests/test_umath.py
|
{
"start": 87735,
"end": 90152
}
|
class ____:
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0, high=88.1, size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
def test_log_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0, high=1000, size=1000000))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
def test_sincos_float32(self):
np.random.seed(42)
N = 1000000
M = np.int_(N / 20)
index = np.random.randint(low=0, high=N, size=M)
x_f32 = np.float32(np.random.uniform(low=-100., high=100., size=N))
if not _glibc_older_than("2.17"):
# test coverage for elements > 117435.992f for which glibc is used
# this is known to be problematic on old glibc, so skip it there
x_f32[index] = np.float32(10E+10 * np.random.rand(M))
x_f64 = np.float64(x_f32)
assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
# test aliasing(issue #17761)
tx_f32 = x_f32.copy()
assert_array_max_ulp(np.sin(x_f32, out=x_f32), np.float32(np.sin(x_f64)), maxulp=2)
assert_array_max_ulp(np.cos(tx_f32, out=tx_f32), np.float32(np.cos(x_f64)), maxulp=2)
def test_strided_float32(self):
np.random.seed(42)
strides = np.array([-4, -3, -2, -1, 1, 2, 3, 4])
sizes = np.arange(2, 100)
for ii in sizes:
x_f32 = np.float32(np.random.uniform(low=0.01, high=88.1, size=ii))
x_f32_large = x_f32.copy()
x_f32_large[3:-1:4] = 120000.0
exp_true = np.exp(x_f32)
log_true = np.log(x_f32)
sin_true = np.sin(x_f32_large)
cos_true = np.cos(x_f32_large)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)
|
TestAVXFloat32Transcendental
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 949914,
"end": 950429
}
|
class ____(sgqlc.types.Type):
"""Represents a required status check for a protected branch, but not
any specific run of that check.
"""
__schema__ = github_schema
__field_names__ = ("app", "context")
app = sgqlc.types.Field("App", graphql_name="app")
"""The App that must provide this status in order for it to be
accepted.
"""
context = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="context")
"""The name of this status."""
|
RequiredStatusCheckDescription
|
python
|
PrefectHQ__prefect
|
tests/server/api/test_logs.py
|
{
"start": 1388,
"end": 3285
}
|
class ____:
async def test_create_logs_with_flow_run_id_and_returns_number_created(
self, session, client, log_data, flow_run_id
):
response = await client.post(CREATE_LOGS_URL, json=log_data)
assert response.status_code == 201
log_filter = LogFilter(flow_run_id={"any_": [flow_run_id]})
logs = await models.logs.read_logs(session=session, log_filter=log_filter)
assert len(logs) == 2
for i, log in enumerate(logs):
assert (
Log.model_validate(log, from_attributes=True).model_dump(
mode="json", exclude={"created", "id", "updated"}
)
== log_data[i]
)
async def test_create_logs_with_task_run_id_and_returns_number_created(
self, session, client, flow_run_id, task_run_id, log_data
):
response = await client.post(CREATE_LOGS_URL, json=log_data)
assert response.status_code == 201
log_filter = LogFilter(task_run_id={"any_": [task_run_id]})
logs = await models.logs.read_logs(session=session, log_filter=log_filter)
assert len(logs) == 1
assert (
Log.model_validate(logs[0], from_attributes=True).model_dump(
mode="json", exclude={"created", "id", "updated"}
)
== log_data[1]
)
async def test_database_failure(
self, client_without_exceptions, session, flow_run_id, task_run_id, log_data
):
with mock.patch("prefect.server.models.logs.create_logs") as mock_create_logs:
def raise_error(*args, **kwargs):
raise FlushError
mock_create_logs.side_effect = raise_error
response = await client_without_exceptions.post(
CREATE_LOGS_URL, json=log_data
)
assert response.status_code == 500
|
TestCreateLogs
|
python
|
encode__django-rest-framework
|
tests/test_serializer.py
|
{
"start": 21611,
"end": 22186
}
|
class ____:
def test_cache_serializer_data(self):
"""
Caching serializer data with pickle will drop the serializer info,
but does preserve the data itself.
"""
class ExampleSerializer(serializers.Serializer):
field1 = serializers.CharField()
field2 = serializers.CharField()
serializer = ExampleSerializer({'field1': 'a', 'field2': 'b'})
pickled = pickle.dumps(serializer.data)
data = pickle.loads(pickled)
assert data == {'field1': 'a', 'field2': 'b'}
|
TestCacheSerializerData
|
python
|
ray-project__ray
|
python/ray/experimental/collective/nixl_tensor_transport.py
|
{
"start": 329,
"end": 7153
}
|
class ____(TensorTransportManager):
@property
def tensor_transport_backend(self) -> Backend:
return Backend.NIXL
@staticmethod
def is_one_sided() -> bool:
return True
@staticmethod
def can_abort_transport() -> bool:
return True
def actor_has_tensor_transport(self, actor: "ray.actor.ActorHandle") -> bool:
def __ray_actor_has_tensor_transport__(
self: "ray.actor.ActorHandle",
) -> bool:
try:
from ray.util.collective.collective import get_group_handle
nixl_backend = get_group_handle(NIXL_GROUP_NAME)
return nixl_backend is not None
except Exception:
return False
return ray.get(
actor.__ray_call__.options(concurrency_group="_ray_system").remote(
__ray_actor_has_tensor_transport__
)
)
@staticmethod
def extract_tensor_transport_metadata(
obj_id: str,
gpu_object: List["torch.Tensor"],
) -> NixlTransportMetadata:
from ray._private.worker import global_worker
from ray.util.collective.collective import get_group_handle
from ray.util.collective.collective_group.nixl_backend import NixlBackend
from ray.util.collective.types import NixlTransportMetadata
gpu_object_store = global_worker.gpu_object_manager.gpu_object_store
nixl_backend: NixlBackend = get_group_handle(NIXL_GROUP_NAME)
device = None
tensor_meta = []
duplicate_meta = gpu_object_store.record_and_get_meta_if_duplicate(
obj_id, gpu_object
)
if duplicate_meta is not None:
return duplicate_meta
if gpu_object:
reg_descs, serialized_descs, agent_meta = nixl_backend.get_nixl_metadata(
gpu_object
)
# We assume all tensors in one GPU object have the same device type.
device = gpu_object[0].device
for t in gpu_object:
if t.device.type != device.type:
raise ValueError(
"All tensors in an RDT object must have the same device type."
)
tensor_meta.append((t.shape, t.dtype))
else:
reg_descs, serialized_descs, agent_meta = None, None, None
ret = NixlTransportMetadata(
tensor_meta=tensor_meta,
tensor_device=device,
nixl_reg_descs=reg_descs,
nixl_serialized_descs=serialized_descs,
nixl_agent_meta=agent_meta,
)
gpu_object_store.record_managed_meta_nixl(obj_id, ret)
return ret
@staticmethod
def get_tensor_transport_metadata(
src_actor: "ray.actor.ActorHandle",
obj_id: str,
) -> NixlTransportMetadata:
def __ray_get_tensor_transport_metadata__(
self: "ray.actor.ActorHandle",
obj_id: str,
) -> NixlTransportMetadata:
from ray._private.worker import global_worker
gpu_object_manager = global_worker.gpu_object_manager
gpu_object_store = gpu_object_manager.gpu_object_store
# NOTE: We do not specify a timeout here because the user task that returns
# it could take arbitrarily long and we don't want to trigger a spurious
# timeout.
gpu_object = gpu_object_store.wait_and_get_object(obj_id)
return NixlTensorTransport.extract_tensor_transport_metadata(
obj_id, gpu_object
)
# Submit a Ray actor task to the source actor to get the tensor metadata.
# The metadata is a list of tuples, where each tuple contains the shape and dtype
# of a tensor in the GPU object store. This function returns an ObjectRef that
# points to the tensor metadata.
# NOTE(swang): We put this task on the background thread to avoid tasks
# executing on the main thread blocking this task.
return src_actor.__ray_call__.options(concurrency_group="_ray_system").remote(
__ray_get_tensor_transport_metadata__, obj_id
)
@staticmethod
def get_communicator_metadata(
src_actor: "ray.actor.ActorHandle",
dst_actor: "ray.actor.ActorHandle",
backend: Optional[str] = None,
) -> NixlCommunicatorMetadata:
communicator_metadata = NixlCommunicatorMetadata(
communicator_name=NIXL_GROUP_NAME,
)
return communicator_metadata
@staticmethod
def recv_multiple_tensors(
tensors,
obj_id: str,
tensor_transport_metadata: NixlTransportMetadata,
communicator_metadata: NixlCommunicatorMetadata,
):
from ray.util.collective import types
from ray.util.collective.collective import get_group_handle
if tensors:
g = get_group_handle(communicator_metadata.communicator_name)
assert isinstance(
tensor_transport_metadata, types.NixlTransportMetadata
), "metadata must be a NixlTransportMetadata object for NIXL transport"
assert isinstance(
communicator_metadata, types.NixlCommunicatorMetadata
), "metadata must be a NixlCommunicatorMetadata object for NIXL transport"
g.recv(
tensors,
obj_id,
tensor_transport_metadata.nixl_serialized_descs,
tensor_transport_metadata.nixl_agent_meta,
)
@staticmethod
def send_multiple_tensors(
tensors: List["torch.Tensor"],
communicator_metadata: NixlCommunicatorMetadata,
device: "torch.device",
):
raise NotImplementedError(
"NIXL transport does not support send_multiple_tensors, since it is a one-sided transport."
)
@staticmethod
def garbage_collect(obj_id: str, tensor_transport_meta: NixlTransportMetadata):
from ray._private.worker import global_worker
from ray.util.collective.collective import get_group_handle
gpu_object_store = global_worker.gpu_object_manager.gpu_object_store
count = gpu_object_store.remove_managed_meta_nixl(obj_id)
if count == 0:
descs = tensor_transport_meta.nixl_reg_descs
if descs is not None:
nixl_backend = get_group_handle(NIXL_GROUP_NAME)
nixl_backend.deregister_memory(descs)
@staticmethod
def abort_transport(
obj_id: str,
communicator_metadata: NixlCommunicatorMetadata,
):
from ray.util.collective.collective import get_group_handle
g = get_group_handle(communicator_metadata.communicator_name)
if g:
g.abort(obj_id)
|
NixlTensorTransport
|
python
|
facebook__pyre-check
|
source/interprocedural_analyses/taint/test/integration/class_interval.py
|
{
"start": 3470,
"end": 3642
}
|
class ____:
def bar(self, x):
return x
def f(self, b: B7):
y = b.foo() # Interval: [1,2]
return y
"""
A8: [1,2]
B8: [3,4]
C8: [5,6]
"""
|
A7
|
python
|
readthedocs__readthedocs.org
|
readthedocs/search/admin.py
|
{
"start": 131,
"end": 468
}
|
class ____(admin.ModelAdmin):
raw_id_fields = ("project", "version")
list_filter = ("created",)
list_display = ("__str__", "created")
search_fields = ("project__slug", "version__slug", "query")
readonly_fields = ("created", "modified")
list_select_related = ("project", "version", "version__project")
|
SearchQueryAdmin
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/override_module_templates/package.py
|
{
"start": 217,
"end": 510
}
|
class ____(Package):
homepage = "http://www.fake-spack-example.org"
url = "http://www.fake-spack-example.org/downloads/fake-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
tcl_template = "override.txt"
lmod_template = "override.txt"
|
OverrideModuleTemplates
|
python
|
huggingface__transformers
|
tests/models/nllb_moe/test_modeling_nllb_moe.py
|
{
"start": 9478,
"end": 14369
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (NllbMoeModel, NllbMoeForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": NllbMoeModel,
"summarization": NllbMoeForConditionalGeneration,
"text2text-generation": NllbMoeForConditionalGeneration,
"translation": NllbMoeForConditionalGeneration,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = True
# TODO: Fix the failed tests when this model gets more usage
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
# Saving the slow tokenizer after saving the fast tokenizer causes the loading of the later hanging forever.
return True
def setUp(self):
self.model_tester = NllbMoeModelTester(self)
self.config_tester = ConfigTester(self, config_class=NllbMoeConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_decoder_model_past_with_large_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.decoder_sparse_step = 0
self.model_tester.create_and_check_decoder_model_past_large_inputs(config, inputs_dict)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (NllbMoeModel, NllbMoeForConditionalGeneration):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = NllbMoeForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def test_get_loss(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_dict["output_router_logits"] = True
input_dict["labels"] = input_dict["input_ids"]
model = NllbMoeForConditionalGeneration(config).eval().to(torch_device)
out = model(**input_dict)
self.assertIsNotNone(out.loss)
self.assertIsNotNone(model(**input_dict)["encoder_router_logits"])
self.assertIsNotNone(model(**input_dict)["decoder_router_logits"])
@unittest.skip(
reason="This architecture has tied weights by default and there is no way to remove it, check: https://github.com/huggingface/transformers/pull/31771#issuecomment-2210915245"
)
def test_load_save_without_tied_weights(self):
pass
@unittest.skip(reason="This is broken now, no idea why")
def test_generate_continue_from_past_key_values(self):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
|
NllbMoeModelTest
|
python
|
numba__numba
|
numba/cuda/tests/cudadrv/test_profiler.py
|
{
"start": 207,
"end": 508
}
|
class ____(ContextResettingTestCase):
def test_profiling(self):
with cuda.profiling():
a = cuda.device_array(10)
del a
with cuda.profiling():
a = cuda.device_array(100)
del a
if __name__ == '__main__':
unittest.main()
|
TestProfiler
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.