language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/utils/test_audio_utils.py | {
"start": 1050,
"end": 79942
} | class ____(unittest.TestCase):
# will be set in `def _load_datasamples`
_dataset = None
def test_hertz_to_mel(self):
self.assertEqual(hertz_to_mel(0.0), 0.0)
self.assertAlmostEqual(hertz_to_mel(100), 150.48910241)
inputs = np.array([100, 200])
expected = np.array([150.48910241, 283.22989816])
self.assertTrue(np.allclose(hertz_to_mel(inputs), expected))
self.assertEqual(hertz_to_mel(0.0, "slaney"), 0.0)
self.assertEqual(hertz_to_mel(100, "slaney"), 1.5)
inputs = np.array([60, 100, 200, 1000, 1001, 2000])
expected = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016])
self.assertTrue(np.allclose(hertz_to_mel(inputs, "slaney"), expected))
inputs = np.array([60, 100, 200, 1000, 1001, 2000])
expected = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674])
self.assertTrue(np.allclose(hertz_to_mel(inputs, "kaldi"), expected))
with pytest.raises(ValueError):
hertz_to_mel(100, mel_scale=None)
def test_mel_to_hertz(self):
self.assertEqual(mel_to_hertz(0.0), 0.0)
self.assertAlmostEqual(mel_to_hertz(150.48910241), 100)
inputs = np.array([150.48910241, 283.22989816])
expected = np.array([100, 200])
self.assertTrue(np.allclose(mel_to_hertz(inputs), expected))
self.assertEqual(mel_to_hertz(0.0, "slaney"), 0.0)
self.assertEqual(mel_to_hertz(1.5, "slaney"), 100)
inputs = np.array([0.9, 1.5, 3.0, 15.0, 15.01453781, 25.08188016])
expected = np.array([60, 100, 200, 1000, 1001, 2000])
self.assertTrue(np.allclose(mel_to_hertz(inputs, "slaney"), expected))
inputs = np.array([92.6824, 150.4899, 283.2313, 999.9907, 1000.6534, 1521.3674])
expected = np.array([60, 100, 200, 1000, 1001, 2000])
self.assertTrue(np.allclose(mel_to_hertz(inputs, "kaldi"), expected))
with pytest.raises(ValueError):
mel_to_hertz(100, mel_scale=None)
def test_mel_filter_bank_shape(self):
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm=None,
mel_scale="htk",
)
self.assertEqual(mel_filters.shape, (513, 13))
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm="slaney",
mel_scale="slaney",
)
self.assertEqual(mel_filters.shape, (513, 13))
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm="slaney",
mel_scale="slaney",
triangularize_in_mel_space=True,
)
self.assertEqual(mel_filters.shape, (513, 13))
    def test_mel_filter_bank_htk(self):
        """Compare a small htk-scale filter bank against precomputed reference weights."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm=None,
            mel_scale="htk",
        )
        # fmt: off
        # Reference triangular filter weights, shape (num_frequency_bins, num_mel_filters).
        expected = np.array([
            [0.0 , 0.0 , 0.0 , 0.0 ],
            [0.61454786, 0.0 , 0.0 , 0.0 ],
            [0.82511046, 0.17488954, 0.0 , 0.0 ],
            [0.35597035, 0.64402965, 0.0 , 0.0 ],
            [0.0 , 0.91360726, 0.08639274, 0.0 ],
            [0.0 , 0.55547007, 0.44452993, 0.0 ],
            [0.0 , 0.19733289, 0.80266711, 0.0 ],
            [0.0 , 0.0 , 0.87724349, 0.12275651],
            [0.0 , 0.0 , 0.6038449 , 0.3961551 ],
            [0.0 , 0.0 , 0.33044631, 0.66955369],
            [0.0 , 0.0 , 0.05704771, 0.94295229],
            [0.0 , 0.0 , 0.0 , 0.83483975],
            [0.0 , 0.0 , 0.0 , 0.62612982],
            [0.0 , 0.0 , 0.0 , 0.41741988],
            [0.0 , 0.0 , 0.0 , 0.20870994],
            [0.0 , 0.0 , 0.0 , 0.0 ]
        ])
        # fmt: on
        self.assertTrue(np.allclose(mel_filters, expected))
    def test_mel_filter_bank_slaney(self):
        """Compare a small slaney-scale (unnormalized) filter bank against precomputed reference weights."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm=None,
            mel_scale="slaney",
        )
        # fmt: off
        # Reference triangular filter weights, shape (num_frequency_bins, num_mel_filters).
        expected = np.array([
            [0.0 , 0.0 , 0.0 , 0.0 ],
            [0.39869419, 0.0 , 0.0 , 0.0 ],
            [0.79738839, 0.0 , 0.0 , 0.0 ],
            [0.80391742, 0.19608258, 0.0 , 0.0 ],
            [0.40522322, 0.59477678, 0.0 , 0.0 ],
            [0.00652903, 0.99347097, 0.0 , 0.0 ],
            [0.0 , 0.60796161, 0.39203839, 0.0 ],
            [0.0 , 0.20939631, 0.79060369, 0.0 ],
            [0.0 , 0.0 , 0.84685344, 0.15314656],
            [0.0 , 0.0 , 0.52418477, 0.47581523],
            [0.0 , 0.0 , 0.2015161 , 0.7984839 ],
            [0.0 , 0.0 , 0.0 , 0.9141874 ],
            [0.0 , 0.0 , 0.0 , 0.68564055],
            [0.0 , 0.0 , 0.0 , 0.4570937 ],
            [0.0 , 0.0 , 0.0 , 0.22854685],
            [0.0 , 0.0 , 0.0 , 0.0 ]
        ])
        # fmt: on
        self.assertTrue(np.allclose(mel_filters, expected))
    def test_mel_filter_bank_kaldi(self):
        """Compare a small kaldi-scale filter bank against torchaudio reference values."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm=None,
            mel_scale="kaldi",
            triangularize_in_mel_space=True,
        )
        # fmt: off
        # here the expected values from torchaudio.compliance.kaldi.get_mel_banks
        # note that we compute values in float64 while they do it in float32
        expected = np.array(
            [
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.0000000000000000],
                [0.6457883715629578, 0.0000000000000000, 0.0000000000000000, 0.0000000000000000],
                [0.8044781088829041, 0.1955219060182571, 0.0000000000000000, 0.0000000000000000],
                [0.3258901536464691, 0.6741098165512085, 0.0000000000000000, 0.0000000000000000],
                [0.0000000000000000, 0.9021250009536743, 0.0978749766945839, 0.0000000000000000],
                [0.0000000000000000, 0.5219038724899292, 0.4780961275100708, 0.0000000000000000],
                [0.0000000000000000, 0.1771058291196823, 0.8228941559791565, 0.0000000000000000],
                [0.0000000000000000, 0.0000000000000000, 0.8616894483566284, 0.1383105516433716],
                [0.0000000000000000, 0.0000000000000000, 0.5710380673408508, 0.4289619624614716],
                [0.0000000000000000, 0.0000000000000000, 0.3015440106391907, 0.6984559893608093],
                [0.0000000000000000, 0.0000000000000000, 0.0503356307744980, 0.9496643543243408],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.8150880336761475],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.5938932299613953],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.3851676583290100],
                [0.0000000000000000, 0.0000000000000000, 0.0000000000000000, 0.1875794380903244],
            ],
            dtype=np.float64,
        )
        # fmt: on
        # kaldi implementation does not compute values for last fft bin
        # indeed, they enforce max_frequency <= sampling_rate / 2 and
        # therefore they know that last fft bin filter bank values will be all 0
        # and pad after with zeros
        # to comply with our API for `mel_filter_bank`, we need to also pad here
        expected = np.pad(expected, ((0, 1), (0, 0)))
        self.assertTrue(np.allclose(mel_filters, expected))
    def test_mel_filter_bank_slaney_norm(self):
        """Compare a small slaney-scale filter bank with slaney (area) normalization against reference weights."""
        mel_filters = mel_filter_bank(
            num_frequency_bins=16,
            num_mel_filters=4,
            min_frequency=0,
            max_frequency=2000,
            sampling_rate=4000,
            norm="slaney",
            mel_scale="slaney",
        )
        # fmt: off
        # Same triangles as the unnormalized slaney test, scaled by 2 / filter bandwidth.
        expected = np.array([
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [1.19217795e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [2.38435591e-03, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
            [2.40387905e-03, 5.86232616e-04, 0.00000000e+00, 0.00000000e+00],
            [1.21170110e-03, 1.77821783e-03, 0.00000000e+00, 0.00000000e+00],
            [1.95231437e-05, 2.97020305e-03, 0.00000000e+00, 0.00000000e+00],
            [0.00000000e+00, 1.81763684e-03, 1.04857612e-03, 0.00000000e+00],
            [0.00000000e+00, 6.26036972e-04, 2.11460963e-03, 0.00000000e+00],
            [0.00000000e+00, 0.00000000e+00, 2.26505954e-03, 3.07332945e-04],
            [0.00000000e+00, 0.00000000e+00, 1.40202503e-03, 9.54861093e-04],
            [0.00000000e+00, 0.00000000e+00, 5.38990521e-04, 1.60238924e-03],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.83458185e-03],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.37593638e-03],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 9.17290923e-04],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 4.58645462e-04],
            [0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]
        ])
        # fmt: on
        self.assertTrue(np.allclose(mel_filters, expected))
    def test_window_function(self):
        """A 16-point Hann window matches the precomputed reference values."""
        window = window_function(16, "hann")
        self.assertEqual(len(window), 16)
        # fmt: off
        expected = np.array([
            0.0, 0.03806023, 0.14644661, 0.30865828, 0.5, 0.69134172, 0.85355339, 0.96193977,
            1.0, 0.96193977, 0.85355339, 0.69134172, 0.5, 0.30865828, 0.14644661, 0.03806023,
        ])
        # fmt: on
        self.assertTrue(np.allclose(window, expected))
def _load_datasamples(self, num_samples):
from datasets import load_dataset
if self._dataset is None:
self._dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
speech_samples = self._dataset.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_spectrogram_impulse(self):
waveform = np.zeros(40)
waveform[9] = 1.0 # impulse shifted in time
spec = spectrogram(
waveform,
window_function(12, "hann", frame_length=16),
frame_length=16,
hop_length=4,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (9, 11))
expected = np.array([[0.0, 0.0669873, 0.9330127, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
self.assertTrue(np.allclose(spec, expected))
def test_spectrogram_batch_impulse(self):
waveform1 = np.zeros(40)
waveform1[9] = 1.0
waveform2 = np.zeros(28)
waveform2[12] = 3.0
waveform3 = np.zeros(51)
waveform3[26] = 4.5
waveform_list = [waveform1, waveform2, waveform3]
spec_list = spectrogram_batch(
waveform_list,
window_function(12, "hann", frame_length=16),
frame_length=16,
hop_length=4,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec_list[0].shape, (9, 11))
self.assertEqual(spec_list[1].shape, (9, 8))
self.assertEqual(spec_list[2].shape, (9, 13))
expected1 = np.array([[0.0, 0.0669873, 0.9330127, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
expected2 = np.array([[0.0, 0.0, 0.75, 3.0, 0.75, 0.0, 0.0, 0.0]])
expected3 = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.375, 3.375, 0.0, 0.0, 0.0, 0.0, 0.0]])
self.assertTrue(np.allclose(spec_list[0], expected1))
self.assertTrue(np.allclose(spec_list[1], expected2))
self.assertTrue(np.allclose(spec_list[2], expected3))
    def test_spectrogram_integration_test(self):
        """End-to-end check of `spectrogram` on real audio against precomputed reference values."""
        waveform = self._load_datasamples(1)[0]
        # Magnitude spectrogram (power=1.0) with a 400-sample Hann window padded into a 512-sample frame.
        spec = spectrogram(
            waveform,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (257, 732))
        # fmt: off
        expected = np.array([
            0.02464888, 0.04648664, 0.05872392, 0.02311783, 0.0327175 ,
            0.02433643, 0.01198814, 0.02055709, 0.01559287, 0.01394357,
            0.01299037, 0.01728045, 0.0254554 , 0.02486533, 0.02011792,
            0.01755333, 0.02100457, 0.02337024, 0.01436963, 0.01464558,
            0.0211017 , 0.0193489 , 0.01272165, 0.01858462, 0.03722598,
            0.0456542 , 0.03281558, 0.00620586, 0.02226466, 0.03618042,
            0.03508182, 0.02271432, 0.01051649, 0.01225771, 0.02315293,
            0.02331886, 0.01417785, 0.0106844 , 0.01791214, 0.017177 ,
            0.02125114, 0.05028201, 0.06830665, 0.05216664, 0.01963666,
            0.06941418, 0.11513043, 0.12257859, 0.10948435, 0.08568069,
            0.05509328, 0.05047818, 0.047112 , 0.05060737, 0.02982424,
            0.02803827, 0.02933729, 0.01760491, 0.00587815, 0.02117637,
            0.0293578 , 0.03452379, 0.02194803, 0.01676056,
        ])
        # fmt: on
        # Spot-check the first 64 frequency bins of frame 400.
        self.assertTrue(np.allclose(spec[:64, 400], expected))
        # Same computation, but padding to 512 points via `fft_length` instead of the window/frame
        # length; must yield identical results.
        spec = spectrogram(
            waveform,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            fft_length=512,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec.shape, (257, 732))
        self.assertTrue(np.allclose(spec[:64, 400], expected))
        # Kaldi-style log-mel spectrogram: povey window, preemphasis, DC-offset removal, mel floor.
        mel_filters = mel_filter_bank(
            num_frequency_bins=257,
            num_mel_filters=400,
            min_frequency=20,
            max_frequency=8000,
            sampling_rate=16000,
            norm=None,
            mel_scale="kaldi",
            triangularize_in_mel_space=True,
        )
        spec = spectrogram(
            waveform,
            window_function(400, "povey", periodic=False),
            frame_length=400,
            hop_length=160,
            fft_length=512,
            power=2.0,
            center=False,
            pad_mode="reflect",
            onesided=True,
            preemphasis=0.97,
            mel_filters=mel_filters,
            log_mel="log",
            mel_floor=1.1920928955078125e-07,
            remove_dc_offset=True,
        )
        self.assertEqual(spec.shape, (400, 584))
        # fmt: off
        # -15.94238515 is log(mel_floor), i.e. bins whose mel energy hit the floor.
        expected = np.array([-15.94238515, -8.20712299, -8.22704352, -15.94238515,
               -15.94238515, -15.94238515, -15.94238515, -15.94238515,
                -6.52463769, -7.73677889, -15.94238515, -15.94238515,
               -15.94238515, -15.94238515, -4.18650018, -3.37195286,
               -15.94238515, -15.94238515, -15.94238515, -15.94238515,
                -4.70190154, -2.4217066 , -15.94238515, -15.94238515,
               -15.94238515, -15.94238515, -5.62755239, -3.53385194,
               -15.94238515, -15.94238515, -15.94238515, -15.94238515,
                -9.43303023, -8.77480925, -15.94238515, -15.94238515,
               -15.94238515, -15.94238515, -4.2951092 , -5.51585994,
               -15.94238515, -15.94238515, -15.94238515, -4.40151721,
                -3.95228878, -15.94238515, -15.94238515, -15.94238515,
                -6.10365415, -4.59494697, -15.94238515, -15.94238515,
               -15.94238515, -8.10727767, -6.2585298 , -15.94238515,
               -15.94238515, -15.94238515, -5.60161702, -4.47217004,
               -15.94238515, -15.94238515, -15.94238515, -5.91641988]
        )
        # fmt: on
        self.assertTrue(np.allclose(spec[:64, 400], expected, atol=1e-5))
    def test_spectrogram_batch_integration_test(self):
        """Batched counterpart of `test_spectrogram_integration_test`: three real audio samples,
        checked against precomputed per-sample reference values."""
        waveform_list = self._load_datasamples(3)
        # Magnitude spectrograms (power=1.0) with a 400-sample Hann window padded into a 512-sample frame.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        # Each sample keeps its own number of frames.
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[2].shape, (257, 1561))
        # fmt: off
        expected1 = np.array([
            0.02464888, 0.04648664, 0.05872392, 0.02311783, 0.0327175 ,
            0.02433643, 0.01198814, 0.02055709, 0.01559287, 0.01394357,
            0.01299037, 0.01728045, 0.0254554 , 0.02486533, 0.02011792,
            0.01755333, 0.02100457, 0.02337024, 0.01436963, 0.01464558,
            0.0211017 , 0.0193489 , 0.01272165, 0.01858462, 0.03722598,
            0.0456542 , 0.03281558, 0.00620586, 0.02226466, 0.03618042,
            0.03508182, 0.02271432, 0.01051649, 0.01225771, 0.02315293,
            0.02331886, 0.01417785, 0.0106844 , 0.01791214, 0.017177 ,
            0.02125114, 0.05028201, 0.06830665, 0.05216664, 0.01963666,
            0.06941418, 0.11513043, 0.12257859, 0.10948435, 0.08568069,
            0.05509328, 0.05047818, 0.047112 , 0.05060737, 0.02982424,
            0.02803827, 0.02933729, 0.01760491, 0.00587815, 0.02117637,
            0.0293578 , 0.03452379, 0.02194803, 0.01676056,
        ])
        expected2 = np.array([
            7.61983171e-02, 1.45338190e-01, 2.63903728e+00, 7.74429535e+00,
            9.61932980e+00, 5.40767686e+00, 1.08924884e+00, 3.40908262e+00,
            3.59484250e+00, 1.68451077e+00, 5.88405873e-01, 1.17042530e+00,
            9.94803324e-01, 3.53757065e-01, 5.47699239e-01, 9.48368581e-01,
            7.17770457e-01, 2.09396633e-01, 1.77574463e-01, 2.35644731e-01,
            1.31535991e-01, 1.53539552e-02, 4.34416305e-02, 5.32897267e-02,
            4.03567305e-02, 1.41842226e-02, 2.90514538e-02, 3.36549485e-02,
            1.53516624e-02, 2.37464225e-02, 4.60092464e-02, 4.05769324e-02,
            4.82633401e-03, 4.12675364e-02, 7.13859796e-02, 6.16866566e-02,
            2.55657822e-02, 1.68923281e-02, 1.91299946e-02, 1.60033798e-02,
            1.33405095e-02, 1.52065457e-02, 1.21833352e-02, 2.25786382e-03,
            6.15358376e-03, 1.07647616e-02, 1.23051018e-02, 6.75289378e-03,
            2.71127435e-03, 1.06515263e-02, 1.18463583e-02, 7.14347935e-03,
            1.87912782e-03, 4.44236027e-03, 5.19630243e-03, 2.46666998e-03,
            1.01598645e-03, 1.21589237e-03, 1.29095500e-03, 1.07447628e-03,
            1.40218156e-03, 3.65402623e-03, 4.00592755e-03, 4.20001841e-03
        ])
        expected3 = np.array([
            0.07805249, 0.34305022, 0.55617084, 1.22475182, 1.17040678,
            0.51540532, 0.23570016, 0.06630775, 0.09017777, 0.07693192,
            0.0333643 , 0.04873054, 0.04668559, 0.02384041, 0.02780435,
            0.0289717 , 0.01704903, 0.0201644 , 0.01700376, 0.02176975,
            0.02042491, 0.00732129, 0.00326042, 0.00245065, 0.00510645,
            0.00681892, 0.00739329, 0.00551437, 0.0070674 , 0.00630015,
            0.00379566, 0.0060098 , 0.00311543, 0.00902284, 0.01171038,
            0.01202166, 0.01759194, 0.01652899, 0.01201872, 0.01295351,
            0.00756432, 0.01415318, 0.02349972, 0.02296833, 0.02429341,
            0.02447459, 0.01835044, 0.01437871, 0.02262246, 0.02972324,
            0.03392252, 0.03037546, 0.01116927, 0.01555062, 0.02833379,
            0.02294212, 0.02069847, 0.02496927, 0.02273526, 0.01341643,
            0.00805407, 0.00624943, 0.01076262, 0.01876003
        ])
        # fmt: on
        # Spot-check the first 64 frequency bins of frame 400 for each sample.
        self.assertTrue(np.allclose(spec_list[0][:64, 400], expected1))
        self.assertTrue(np.allclose(spec_list[1][:64, 400], expected2))
        self.assertTrue(np.allclose(spec_list[2][:64, 400], expected3))
        # Same computation, but padding to 512 points via `fft_length`; results must be identical.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann"),
            frame_length=400,
            hop_length=128,
            fft_length=512,
            power=1.0,
            center=True,
            pad_mode="reflect",
            onesided=True,
        )
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[2].shape, (257, 1561))
        self.assertTrue(np.allclose(spec_list[0][:64, 400], expected1))
        self.assertTrue(np.allclose(spec_list[1][:64, 400], expected2))
        self.assertTrue(np.allclose(spec_list[2][:64, 400], expected3))
        # Kaldi-style log-mel spectrograms: povey window, preemphasis, DC-offset removal, mel floor.
        mel_filters = mel_filter_bank(
            num_frequency_bins=257,
            num_mel_filters=400,
            min_frequency=20,
            max_frequency=8000,
            sampling_rate=16000,
            norm=None,
            mel_scale="kaldi",
            triangularize_in_mel_space=True,
        )
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "povey", periodic=False),
            frame_length=400,
            hop_length=160,
            fft_length=512,
            power=2.0,
            center=False,
            pad_mode="reflect",
            onesided=True,
            preemphasis=0.97,
            mel_filters=mel_filters,
            log_mel="log",
            mel_floor=1.1920928955078125e-07,
            remove_dc_offset=True,
        )
        self.assertEqual(spec_list[0].shape, (400, 584))
        self.assertEqual(spec_list[1].shape, (400, 480))
        self.assertEqual(spec_list[2].shape, (400, 1247))
        # fmt: off
        # -15.942385... is log(mel_floor), i.e. bins whose mel energy hit the floor.
        expected1 = np.array([-15.94238515, -8.20712299, -8.22704352, -15.94238515,
               -15.94238515, -15.94238515, -15.94238515, -15.94238515,
                -6.52463769, -7.73677889, -15.94238515, -15.94238515,
               -15.94238515, -15.94238515, -4.18650018, -3.37195286,
               -15.94238515, -15.94238515, -15.94238515, -15.94238515,
                -4.70190154, -2.4217066 , -15.94238515, -15.94238515,
               -15.94238515, -15.94238515, -5.62755239, -3.53385194,
               -15.94238515, -15.94238515, -15.94238515, -15.94238515,
                -9.43303023, -8.77480925, -15.94238515, -15.94238515,
               -15.94238515, -15.94238515, -4.2951092 , -5.51585994,
               -15.94238515, -15.94238515, -15.94238515, -4.40151721,
                -3.95228878, -15.94238515, -15.94238515, -15.94238515,
                -6.10365415, -4.59494697, -15.94238515, -15.94238515,
               -15.94238515, -8.10727767, -6.2585298 , -15.94238515,
               -15.94238515, -15.94238515, -5.60161702, -4.47217004,
               -15.94238515, -15.94238515, -15.94238515, -5.91641988]
        )
        expected2 = np.array([-15.942385, -8.531508, -8.551396, -15.942385, -15.942385,
               -15.942385, -15.942385, -15.942385, -5.626043, -6.8381968,
               -15.942385, -15.942385, -15.942385, -15.942385, -3.3122184,
               -2.49764, -15.942385, -15.942385, -15.942385, -15.942385,
               -3.625868, -1.3457257, -15.942385, -15.942385, -15.942385,
               -15.942385, -4.2223063, -2.1285915, -15.942385, -15.942385,
               -15.942385, -15.942385, -8.611152, -7.952894, -15.942385,
               -15.942385, -15.942385, -15.942385, -2.7585578, -3.9793255,
               -15.942385, -15.942385, -15.942385, -2.5377562, -2.0885658,
               -15.942385, -15.942385, -15.942385, -3.8310733, -2.322393,
               -15.942385, -15.942385, -15.942385, -7.674944, -5.8261633,
               -15.942385, -15.942385, -15.942385, -3.5960004, -2.4665844,
               -15.942385, -15.942385, -15.942385, -1.7905309]
        )
        expected3 = np.array([-15.942385, -13.406995, -13.426883, -15.942385, -15.942385,
               -15.942385, -15.942385, -15.942385, -15.942385, -15.942385,
               -15.942385, -15.942385, -15.942385, -15.942385, -13.493383,
               -12.678805, -15.942385, -15.942385, -15.942385, -15.942385,
               -14.809377, -12.529235, -15.942385, -15.942385, -15.942385,
               -15.942385, -13.838827, -11.745112, -15.942385, -15.942385,
               -15.942385, -15.942385, -13.9336405, -13.275384, -15.942385,
               -15.942385, -15.942385, -15.942385, -13.043786, -14.264554,
               -15.942385, -15.942385, -15.942385, -13.060181, -12.610991,
               -15.942385, -15.942385, -15.942385, -14.152064, -12.643384,
               -15.942385, -15.942385, -15.942385, -14.48317, -12.634389,
               -15.942385, -15.942385, -15.942385, -14.627316, -13.4979,
               -15.942385, -15.942385, -15.942385, -12.6279955]
        )
        # fmt: on
        self.assertTrue(np.allclose(spec_list[0][:64, 400], expected1, atol=1e-5))
        self.assertTrue(np.allclose(spec_list[1][:64, 400], expected2, atol=1e-5))
        self.assertTrue(np.allclose(spec_list[2][:64, 400], expected3, atol=1e-5))
    def test_spectrogram_center_padding(self):
        """Check how `center` and `pad_mode` affect the first frame of the spectrogram."""
        waveform = self._load_datasamples(1)[0]
        # Centered frames with reflect padding at the signal edges.
        spec = spectrogram(
            waveform,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=128,
            center=True,
            pad_mode="reflect",
        )
        self.assertEqual(spec.shape, (257, 732))
        # fmt: off
        expected = np.array([
            0.1287945 , 0.12792738, 0.08311573, 0.03155122, 0.02470202,
            0.00727857, 0.00910694, 0.00686163, 0.01238981, 0.01473668,
            0.00336144, 0.00370314, 0.00600871, 0.01120164, 0.01942998,
            0.03132008, 0.0232842 , 0.01124642, 0.02754783, 0.02423725,
            0.00147893, 0.00038027, 0.00112299, 0.00596233, 0.00571529,
            0.02084235, 0.0231855 , 0.00810006, 0.01837943, 0.00651339,
            0.00093931, 0.00067426, 0.01058399, 0.01270507, 0.00151734,
            0.00331913, 0.00302416, 0.01081792, 0.00754549, 0.00148963,
            0.00111943, 0.00152573, 0.00608017, 0.01749986, 0.01205949,
            0.0143082 , 0.01910573, 0.00413786, 0.03916619, 0.09873404,
            0.08302026, 0.02673891, 0.00401255, 0.01397392, 0.00751862,
            0.01024884, 0.01544606, 0.00638907, 0.00623633, 0.0085103 ,
            0.00217659, 0.00276204, 0.00260835, 0.00299299,
        ])
        # fmt: on
        # Spot-check the first 64 frequency bins of the very first frame.
        self.assertTrue(np.allclose(spec[:64, 0], expected))
        # Same, but with zero (constant) padding: the first frame changes.
        spec = spectrogram(
            waveform,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=128,
            center=True,
            pad_mode="constant",
        )
        self.assertEqual(spec.shape, (257, 732))
        # fmt: off
        expected = np.array([
            0.06558744, 0.06889656, 0.06263352, 0.04264418, 0.03404115,
            0.03244197, 0.02279134, 0.01646339, 0.01452216, 0.00826055,
            0.00062093, 0.0031821 , 0.00419456, 0.00689327, 0.01106367,
            0.01712119, 0.01721762, 0.00977533, 0.01606626, 0.02275621,
            0.01727687, 0.00992739, 0.01217688, 0.01049927, 0.01022947,
            0.01302475, 0.01166873, 0.01081812, 0.01057327, 0.00767912,
            0.00429567, 0.00089625, 0.00654583, 0.00912084, 0.00700984,
            0.00225026, 0.00290545, 0.00667712, 0.00730663, 0.00410813,
            0.00073102, 0.00219296, 0.00527618, 0.00996585, 0.01123781,
            0.00872816, 0.01165121, 0.02047945, 0.03681747, 0.0514379 ,
            0.05137928, 0.03960042, 0.02821562, 0.01813349, 0.01201322,
            0.01260964, 0.00900654, 0.00207905, 0.00456714, 0.00850599,
            0.00788239, 0.00664407, 0.00824227, 0.00628301,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[:64, 0], expected))
        # No centering: fewer frames and a different first frame (no edge padding at all).
        spec = spectrogram(
            waveform,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=128,
            center=False,
        )
        self.assertEqual(spec.shape, (257, 728))
        # fmt: off
        expected = np.array([
            0.00250445, 0.02161521, 0.06232229, 0.04339567, 0.00937727,
            0.01080616, 0.00248685, 0.0095264 , 0.00727476, 0.0079152 ,
            0.00839946, 0.00254932, 0.00716622, 0.005559 , 0.00272623,
            0.00581774, 0.01896395, 0.01829788, 0.01020514, 0.01632692,
            0.00870888, 0.02065827, 0.0136022 , 0.0132382 , 0.011827 ,
            0.00194505, 0.0189979 , 0.026874 , 0.02194014, 0.01923883,
            0.01621437, 0.00661967, 0.00289517, 0.00470257, 0.00957801,
            0.00191455, 0.00431664, 0.00544359, 0.01126213, 0.00785778,
            0.00423469, 0.01322504, 0.02226548, 0.02318576, 0.03428908,
            0.03648811, 0.0202938 , 0.011902 , 0.03226198, 0.06347476,
            0.01306318, 0.05308729, 0.05474771, 0.03127991, 0.00998512,
            0.01449977, 0.01272741, 0.00868176, 0.00850386, 0.00313876,
            0.00811857, 0.00538216, 0.00685749, 0.00535275,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[:64, 0], expected))
    def test_spectrogram_batch_center_padding(self):
        """Batched counterpart of `test_spectrogram_center_padding`: check how `center` and
        `pad_mode` affect the first frame for each of three samples."""
        waveform_list = self._load_datasamples(3)
        # Centered frames with reflect padding at the signal edges.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=128,
            center=True,
            pad_mode="reflect",
        )
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[2].shape, (257, 1561))
        # fmt: off
        expected1 = np.array([
            0.1287945 , 0.12792738, 0.08311573, 0.03155122, 0.02470202,
            0.00727857, 0.00910694, 0.00686163, 0.01238981, 0.01473668,
            0.00336144, 0.00370314, 0.00600871, 0.01120164, 0.01942998,
            0.03132008, 0.0232842 , 0.01124642, 0.02754783, 0.02423725,
            0.00147893, 0.00038027, 0.00112299, 0.00596233, 0.00571529,
            0.02084235, 0.0231855 , 0.00810006, 0.01837943, 0.00651339,
            0.00093931, 0.00067426, 0.01058399, 0.01270507, 0.00151734,
            0.00331913, 0.00302416, 0.01081792, 0.00754549, 0.00148963,
            0.00111943, 0.00152573, 0.00608017, 0.01749986, 0.01205949,
            0.0143082 , 0.01910573, 0.00413786, 0.03916619, 0.09873404,
            0.08302026, 0.02673891, 0.00401255, 0.01397392, 0.00751862,
            0.01024884, 0.01544606, 0.00638907, 0.00623633, 0.0085103 ,
            0.00217659, 0.00276204, 0.00260835, 0.00299299,
        ])
        expected2 = np.array([
            1.89624839e-02, 1.23274978e-02, 3.69160250e-02, 4.76267971e-02,
            1.39258439e-02, 2.98370440e-02, 2.74845166e-03, 3.01934010e-03,
            1.18722776e-02, 9.70834121e-03, 2.06300567e-04, 6.32975250e-04,
            8.20603687e-03, 1.21864351e-02, 3.28791840e-03, 3.36801982e-04,
            2.79373326e-03, 5.00530424e-03, 8.46884679e-03, 1.14089288e-02,
            8.59052036e-03, 2.88538425e-03, 9.95071139e-03, 6.80431770e-03,
            2.95809377e-03, 1.46285209e-04, 3.36268265e-03, 4.80051298e-04,
            2.84506916e-03, 9.34222655e-04, 3.42161348e-03, 2.79612141e-03,
            3.38875921e-03, 2.85030343e-03, 5.39513239e-05, 2.72908504e-03,
            2.09591188e-03, 5.00271388e-04, 8.31917219e-04, 2.37967237e-03,
            1.75001193e-03, 1.31826295e-04, 8.83622793e-04, 1.54303256e-04,
            3.09544569e-03, 4.08527814e-03, 2.73566321e-03, 1.78805250e-03,
            9.53314066e-06, 1.74316950e-03, 1.51099428e-03, 8.65990878e-04,
            8.44859460e-04, 5.35220199e-04, 5.36562002e-04, 8.33181897e-04,
            8.22705682e-04, 1.81083288e-03, 9.75003233e-04, 6.73114730e-04,
            6.81665202e-04, 2.05180887e-03, 1.10151991e-03, 4.75923851e-04,
        ])
        expected3 = np.array([
            0.07079848, 0.04237922, 0.0220724, 0.04446052, 0.03598337,
            0.03327273, 0.02545774, 0.01319528, 0.00919659, 0.01376867,
            0.00361992, 0.00608425, 0.01105873, 0.0105565, 0.00744286,
            0.00244849, 0.00257317, 0.00749989, 0.01061386, 0.01525312,
            0.00656914, 0.01199581, 0.00487319, 0.00830956, 0.0046706,
            0.00588962, 0.00544486, 0.00565179, 0.00050112, 0.01108059,
            0.00217417, 0.00453234, 0.00537306, 0.00269329, 0.00342333,
            0.00095484, 0.00708934, 0.00660373, 0.00543686, 0.00217186,
            0.00431519, 0.00457764, 0.00503529, 0.01166454, 0.01375581,
            0.01467224, 0.00873404, 0.00534086, 0.00476848, 0.0226163,
            0.0314, 0.00151021, 0.01975221, 0.01637519, 0.00046068,
            0.0460544, 0.06285986, 0.03151625, 0.0013598, 0.004804,
            0.0073824, 0.02312599, 0.02613977, 0.01056851
        ])
        # fmt: on
        # Spot-check the first 64 frequency bins of the very first frame of each sample.
        self.assertTrue(np.allclose(spec_list[0][:64, 0], expected1))
        self.assertTrue(np.allclose(spec_list[1][:64, 0], expected2))
        self.assertTrue(np.allclose(spec_list[2][:64, 0], expected3))
        # Same, but with zero (constant) padding: first frames change.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=128,
            center=True,
            pad_mode="constant",
        )
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[2].shape, (257, 1561))
        # fmt: off
        expected1 = np.array([
            0.06558744, 0.06889656, 0.06263352, 0.04264418, 0.03404115,
            0.03244197, 0.02279134, 0.01646339, 0.01452216, 0.00826055,
            0.00062093, 0.0031821 , 0.00419456, 0.00689327, 0.01106367,
            0.01712119, 0.01721762, 0.00977533, 0.01606626, 0.02275621,
            0.01727687, 0.00992739, 0.01217688, 0.01049927, 0.01022947,
            0.01302475, 0.01166873, 0.01081812, 0.01057327, 0.00767912,
            0.00429567, 0.00089625, 0.00654583, 0.00912084, 0.00700984,
            0.00225026, 0.00290545, 0.00667712, 0.00730663, 0.00410813,
            0.00073102, 0.00219296, 0.00527618, 0.00996585, 0.01123781,
            0.00872816, 0.01165121, 0.02047945, 0.03681747, 0.0514379 ,
            0.05137928, 0.03960042, 0.02821562, 0.01813349, 0.01201322,
            0.01260964, 0.00900654, 0.00207905, 0.00456714, 0.00850599,
            0.00788239, 0.00664407, 0.00824227, 0.00628301,
        ])
        expected2 = np.array([
            0.00955754, 0.01445548, 0.02393902, 0.02903068, 0.02512844,
            0.01508297, 0.00474784, 0.00440362, 0.0073898, 0.00546519,
            0.00126077, 0.00240507, 0.00523254, 0.00632742, 0.00415215,
            0.00056628, 0.00161288, 0.0026956, 0.00431587, 0.00621471,
            0.00791291, 0.0079454, 0.00594525, 0.00334581, 0.00180047,
            0.00144485, 0.00175764, 0.00188037, 0.00134889, 0.00150253,
            0.00178821, 0.00158875, 0.00204339, 0.00266497, 0.00280556,
            0.00221949, 0.00108956, 0.000532, 0.00108454, 0.00129254,
            0.00089315, 0.00022803, 0.00038176, 0.0011302, 0.00189306,
            0.0021964, 0.00203576, 0.00207306, 0.00217727, 0.00174297,
            0.00103331, 0.00076695, 0.0007422, 0.00061986, 0.00081204,
            0.00079615, 0.00089417, 0.00105452, 0.00042615, 0.00066372,
            0.00132765, 0.00122087, 0.00054903, 0.00107945,
        ])
        expected3 = np.array([
            0.03573493, 0.03625983, 0.03341755, 0.02431477, 0.01770546,
            0.0169356 , 0.01579034, 0.01600499, 0.01329064, 0.00747957,
            0.00367372, 0.00403853, 0.00519597, 0.00551022, 0.00532757,
            0.00367569, 0.00130341, 0.00345149, 0.00520744, 0.00872308,
            0.01172503, 0.00948154, 0.00344236, 0.00387997, 0.00425455,
            0.00394357, 0.00711733, 0.00615654, 0.00055756, 0.00656414,
            0.00852001, 0.00666252, 0.00509767, 0.00246784, 0.00376049,
            0.00682879, 0.00641118, 0.00469685, 0.00358701, 0.0015552 ,
            0.00261458, 0.00701979, 0.00929578, 0.00894536, 0.00828491,
            0.00773528, 0.00552091, 0.00259871, 0.00933179, 0.01588626,
            0.01697887, 0.01268552, 0.00957255, 0.01204092, 0.02123362,
            0.03062669, 0.03215763, 0.02629963, 0.01769568, 0.01088869,
            0.01151334, 0.01378197, 0.01319263, 0.01066859,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec_list[0][:64, 0], expected1))
        self.assertTrue(np.allclose(spec_list[1][:64, 0], expected2))
        self.assertTrue(np.allclose(spec_list[2][:64, 0], expected3))
        # No centering: fewer frames per sample and no edge padding at all.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(512, "hann"),
            frame_length=512,
            hop_length=128,
            center=False,
        )
        self.assertEqual(spec_list[0].shape, (257, 728))
        self.assertEqual(spec_list[1].shape, (257, 598))
        self.assertEqual(spec_list[2].shape, (257, 1557))
        # fmt: off
        expected1 = np.array([
            0.00250445, 0.02161521, 0.06232229, 0.04339567, 0.00937727,
            0.01080616, 0.00248685, 0.0095264 , 0.00727476, 0.0079152 ,
            0.00839946, 0.00254932, 0.00716622, 0.005559 , 0.00272623,
            0.00581774, 0.01896395, 0.01829788, 0.01020514, 0.01632692,
            0.00870888, 0.02065827, 0.0136022 , 0.0132382 , 0.011827 ,
            0.00194505, 0.0189979 , 0.026874 , 0.02194014, 0.01923883,
            0.01621437, 0.00661967, 0.00289517, 0.00470257, 0.00957801,
            0.00191455, 0.00431664, 0.00544359, 0.01126213, 0.00785778,
            0.00423469, 0.01322504, 0.02226548, 0.02318576, 0.03428908,
            0.03648811, 0.0202938 , 0.011902 , 0.03226198, 0.06347476,
            0.01306318, 0.05308729, 0.05474771, 0.03127991, 0.00998512,
            0.01449977, 0.01272741, 0.00868176, 0.00850386, 0.00313876,
            0.00811857, 0.00538216, 0.00685749, 0.00535275,
        ])
        expected2 = np.array([
            0.01232908, 0.05980514, 0.08285419, 0.01850723, 0.02823627,
            0.00204369, 0.01372626, 0.00956435, 0.02267217, 0.00947112,
            0.00355174, 0.00418008, 0.00843608, 0.01559252, 0.01125505,
            0.00183573, 0.00765051, 0.0109983 , 0.00890545, 0.00583453,
            0.00115901, 0.00579039, 0.00151353, 0.00395812, 0.00231413,
            0.00384272, 0.00313914, 0.00072331, 0.00338935, 0.00383328,
            0.00218129, 0.00284516, 0.00228538, 0.00083603, 0.00111663,
            0.00235799, 0.00142748, 0.00092908, 0.0012966 , 0.0011403 ,
            0.0010619 , 0.00158732, 0.00289866, 0.00216709, 0.00313325,
            0.00361277, 0.00202507, 0.0009948 , 0.00114428, 0.00200851,
            0.0009234 , 0.00063468, 0.00018746, 0.00100463, 0.00053799,
            0.00080009, 0.00158291, 0.00172077, 0.00173586, 0.00197127,
            0.00107058, 0.00043486, 0.0009859 , 0.00215484,
        ])
        expected3 = np.array([
            0.01864123, 0.06131337, 0.08346292, 0.04936386, 0.02792609,
            0.01005205, 0.00884826, 0.02198604, 0.02421535, 0.00957573,
            0.00503561, 0.00241331, 0.00175652, 0.00195889, 0.00453299,
            0.0020317 , 0.00249264, 0.00517483, 0.01111943, 0.0150079 ,
            0.01977743, 0.01253825, 0.00517561, 0.01031712, 0.00579466,
            0.00783679, 0.0071415 , 0.00591847, 0.01510728, 0.01194921,
            0.00518072, 0.00125978, 0.00577552, 0.01050614, 0.0077644 ,
            0.0042905 , 0.00278469, 0.00166695, 0.00255013, 0.00578153,
            0.00586451, 0.00929514, 0.01501226, 0.00741419, 0.00310625,
            0.00086757, 0.00595618, 0.0053882 , 0.0116266 , 0.02504773,
            0.02889692, 0.03739442, 0.04730207, 0.03856638, 0.05700104,
            0.04299267, 0.02153366, 0.03740607, 0.03811468, 0.01575022,
            0.00676344, 0.01359865, 0.01769319, 0.00907966,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec_list[0][:64, 0], expected1))
        self.assertTrue(np.allclose(spec_list[1][:64, 0], expected2))
        self.assertTrue(np.allclose(spec_list[2][:64, 0], expected3))
def test_spectrogram_shapes(self):
waveform = self._load_datasamples(1)[0]
spec = spectrogram(
waveform,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (201, 732))
spec = spectrogram(
waveform,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
power=1.0,
center=False,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (201, 729))
spec = spectrogram(
waveform,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
fft_length=512,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec.shape, (257, 732))
spec = spectrogram(
waveform,
window_function(400, "hann", frame_length=512),
frame_length=512,
hop_length=64,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec.shape, (512, 1464))
spec = spectrogram(
waveform,
window_function(512, "hann"),
frame_length=512,
hop_length=64,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec.shape, (512, 1464))
spec = spectrogram(
waveform,
window_function(512, "hann"),
frame_length=512,
hop_length=512,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec.shape, (512, 183))
def test_spectrogram_batch_shapes(self):
waveform_list = self._load_datasamples(3)
spec_list = spectrogram_batch(
waveform_list,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec_list[0].shape, (201, 732))
self.assertEqual(spec_list[1].shape, (201, 602))
self.assertEqual(spec_list[2].shape, (201, 1561))
spec_list = spectrogram_batch(
waveform_list,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
power=1.0,
center=False,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec_list[0].shape, (201, 729))
self.assertEqual(spec_list[1].shape, (201, 599))
self.assertEqual(spec_list[2].shape, (201, 1558))
spec_list = spectrogram_batch(
waveform_list,
window_function(400, "hann"),
frame_length=400,
hop_length=128,
fft_length=512,
power=1.0,
center=True,
pad_mode="reflect",
onesided=True,
)
self.assertEqual(spec_list[0].shape, (257, 732))
self.assertEqual(spec_list[1].shape, (257, 602))
self.assertEqual(spec_list[2].shape, (257, 1561))
spec_list = spectrogram_batch(
waveform_list,
window_function(400, "hann", frame_length=512),
frame_length=512,
hop_length=64,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec_list[0].shape, (512, 1464))
self.assertEqual(spec_list[1].shape, (512, 1204))
self.assertEqual(spec_list[2].shape, (512, 3122))
spec_list = spectrogram_batch(
waveform_list,
window_function(512, "hann"),
frame_length=512,
hop_length=64,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec_list[0].shape, (512, 1464))
self.assertEqual(spec_list[1].shape, (512, 1204))
self.assertEqual(spec_list[2].shape, (512, 3122))
spec_list = spectrogram_batch(
waveform_list,
window_function(512, "hann"),
frame_length=512,
hop_length=512,
power=1.0,
center=True,
pad_mode="reflect",
onesided=False,
)
self.assertEqual(spec_list[0].shape, (512, 183))
self.assertEqual(spec_list[1].shape, (512, 151))
self.assertEqual(spec_list[2].shape, (512, 391))
def test_mel_spectrogram(self):
waveform = self._load_datasamples(1)[0]
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm=None,
mel_scale="htk",
)
self.assertEqual(mel_filters.shape, (513, 13))
spec = spectrogram(
waveform,
window_function(800, "hann", frame_length=1024),
frame_length=1024,
hop_length=128,
power=2.0,
)
self.assertEqual(spec.shape, (513, 732))
spec = spectrogram(
waveform,
window_function(800, "hann", frame_length=1024),
frame_length=1024,
hop_length=128,
power=2.0,
mel_filters=mel_filters,
)
self.assertEqual(spec.shape, (13, 732))
# fmt: off
expected = np.array([
1.08027889e+02, 1.48080673e+01, 7.70758213e+00, 9.57676639e-01,
8.81639061e-02, 5.26073833e-02, 1.52736155e-02, 9.95350117e-03,
7.95364356e-03, 1.01148004e-02, 4.29241020e-03, 9.90708797e-03,
9.44153646e-04
])
# fmt: on
self.assertTrue(np.allclose(spec[:, 300], expected))
def test_mel_spectrogram_batch(self):
waveform_list = self._load_datasamples(3)
mel_filters = mel_filter_bank(
num_frequency_bins=513,
num_mel_filters=13,
min_frequency=100,
max_frequency=4000,
sampling_rate=16000,
norm=None,
mel_scale="htk",
)
self.assertEqual(mel_filters.shape, (513, 13))
spec_list = spectrogram_batch(
waveform_list,
window_function(800, "hann", frame_length=1024),
frame_length=1024,
hop_length=128,
power=2.0,
)
self.assertEqual(spec_list[0].shape, (513, 732))
self.assertEqual(spec_list[1].shape, (513, 602))
self.assertEqual(spec_list[2].shape, (513, 1561))
spec_list = spectrogram_batch(
waveform_list,
window_function(800, "hann", frame_length=1024),
frame_length=1024,
hop_length=128,
power=2.0,
mel_filters=mel_filters,
)
self.assertEqual(spec_list[0].shape, (13, 732))
self.assertEqual(spec_list[1].shape, (13, 602))
self.assertEqual(spec_list[2].shape, (13, 1561))
# fmt: off
expected1 = np.array([
1.08027889e+02, 1.48080673e+01, 7.70758213e+00, 9.57676639e-01,
8.81639061e-02, 5.26073833e-02, 1.52736155e-02, 9.95350117e-03,
7.95364356e-03, 1.01148004e-02, 4.29241020e-03, 9.90708797e-03,
9.44153646e-04
])
expected2 = np.array([
71.82577165, 109.44693334, 272.4834194, 164.90450355,
16.54056349, 11.60810547, 24.87525946, 21.07317022,
1.26736284, 1.4583074, 1.36659061, 1.76305768,
2.03703503
])
expected3 = np.array([
5.22246749e+02, 6.92660728e+02, 2.65895922e+02, 2.06526565e+01,
2.28692104e+00, 1.19473622e+00, 8.43228216e-01, 3.20760592e+00,
1.33654151e+00, 1.51050684e-01, 2.78282477e-01, 9.25020981e-01,
2.29908841e-01
])
# fmt: on
self.assertTrue(np.allclose(spec_list[0][:, 300], expected1))
self.assertTrue(np.allclose(spec_list[1][:, 300], expected2))
self.assertTrue(np.allclose(spec_list[2][:, 300], expected3))
    def test_spectrogram_power(self):
        """Check the `power` argument of `spectrogram`.

        - ``power=None`` returns the raw complex STFT (complex64),
        - ``power=1.0`` returns the amplitude spectrogram (float64),
        - ``power=2.0`` returns the power spectrogram (float64),
        each compared against reference values for a slice of column 321.
        """
        waveform = self._load_datasamples(1)[0]
        # power=None: keep the complex STFT values.
        spec = spectrogram(
            waveform,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=None,
        )
        self.assertEqual(spec.shape, (257, 732))
        self.assertEqual(spec.dtype, np.complex64)
        # fmt: off
        expected = np.array([
            0.01452305+0.01820039j, -0.01737362-0.01641946j,
            0.0121028 +0.01565081j, -0.02794554-0.03021514j,
            0.04719803+0.04086519j, -0.04391563-0.02779365j,
            0.05682834+0.01571325j, -0.08604821-0.02023657j,
            0.07497991+0.0186641j , -0.06366091-0.00922475j,
            0.11003416+0.0114788j , -0.13677941-0.01523552j,
            0.10934535-0.00117226j, -0.11635598+0.02551187j,
            0.14708674-0.03469823j, -0.1328196 +0.06034218j,
            0.12667368-0.13973421j, -0.14764774+0.18912019j,
            0.10235471-0.12181523j, -0.00773012+0.04730498j,
            -0.01487191-0.07312611j, -0.02739162+0.09619419j,
            0.02895459-0.05398273j, 0.01198589+0.05276592j,
            -0.02117299-0.10123465j, 0.00666388+0.09526499j,
            -0.01672773-0.05649684j, 0.02723125+0.05939891j,
            -0.01879361-0.062954j , 0.03686557+0.04568823j,
            -0.07394181-0.07949649j, 0.06238583+0.13905765j,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[64:96, 321], expected))
        # power=1.0: amplitude spectrogram (magnitude of the STFT).
        spec = spectrogram(
            waveform,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=1.0,
        )
        self.assertEqual(spec.shape, (257, 732))
        self.assertEqual(spec.dtype, np.float64)
        # fmt: off
        expected = np.array([
            0.02328461, 0.02390484, 0.01978448, 0.04115711, 0.0624309 ,
            0.05197181, 0.05896072, 0.08839577, 0.07726794, 0.06432579,
            0.11063128, 0.13762532, 0.10935163, 0.11911998, 0.15112405,
            0.14588428, 0.18860507, 0.23992978, 0.15910825, 0.04793241,
            0.07462307, 0.10001811, 0.06125769, 0.05411011, 0.10342509,
            0.09549777, 0.05892122, 0.06534349, 0.06569936, 0.05870678,
            0.10856833, 0.1524107 , 0.11463385, 0.05766969, 0.12385171,
            0.14472842, 0.11978184, 0.10353675, 0.07244056, 0.03461861,
            0.02624896, 0.02227475, 0.01238363, 0.00885281, 0.0110049 ,
            0.00807005, 0.01033663, 0.01703181, 0.01445856, 0.00585615,
            0.0132431 , 0.02754132, 0.01524478, 0.0204908 , 0.07453328,
            0.10716327, 0.07195779, 0.08816078, 0.18340898, 0.16449876,
            0.12322842, 0.1621659 , 0.12334293, 0.06033659,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[64:128, 321], expected))
        # power=2.0: power spectrogram (squared magnitude).
        spec = spectrogram(
            waveform,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=2.0,
        )
        self.assertEqual(spec.shape, (257, 732))
        self.assertEqual(spec.dtype, np.float64)
        # fmt: off
        expected = np.array([
            5.42173162e-04, 5.71441371e-04, 3.91425507e-04, 1.69390778e-03,
            3.89761780e-03, 2.70106923e-03, 3.47636663e-03, 7.81381316e-03,
            5.97033510e-03, 4.13780799e-03, 1.22392802e-02, 1.89407300e-02,
            1.19577805e-02, 1.41895693e-02, 2.28384770e-02, 2.12822221e-02,
            3.55718732e-02, 5.75663000e-02, 2.53154356e-02, 2.29751552e-03,
            5.56860259e-03, 1.00036217e-02, 3.75250424e-03, 2.92790355e-03,
            1.06967501e-02, 9.11982451e-03, 3.47171025e-03, 4.26977174e-03,
            4.31640586e-03, 3.44648538e-03, 1.17870830e-02, 2.32290216e-02,
            1.31409196e-02, 3.32579296e-03, 1.53392460e-02, 2.09463164e-02,
            1.43476883e-02, 1.07198600e-02, 5.24763530e-03, 1.19844836e-03,
            6.89007982e-04, 4.96164430e-04, 1.53354369e-04, 7.83722571e-05,
            1.21107812e-04, 6.51257360e-05, 1.06845939e-04, 2.90082477e-04,
            2.09049831e-04, 3.42945241e-05, 1.75379610e-04, 7.58524227e-04,
            2.32403356e-04, 4.19872697e-04, 5.55520924e-03, 1.14839673e-02,
            5.17792348e-03, 7.77232368e-03, 3.36388536e-02, 2.70598419e-02,
            1.51852425e-02, 2.62977779e-02, 1.52134784e-02, 3.64050455e-03,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec[64:128, 321], expected))
    def test_spectrogram_batch_power(self):
        """Check the `power` argument of `spectrogram_batch`.

        For each of the three samples:
        - ``power=None`` returns the raw complex STFT (complex64),
        - ``power=1.0`` returns the amplitude spectrogram (float64),
        - ``power=2.0`` returns the power spectrogram (float64),
        with a slice of column 321 compared against per-example reference values.
        """
        waveform_list = self._load_datasamples(3)
        # power=None: keep the complex STFT values for every example.
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=None,
        )
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[0].dtype, np.complex64)
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[1].dtype, np.complex64)
        self.assertEqual(spec_list[2].shape, (257, 1561))
        self.assertEqual(spec_list[2].dtype, np.complex64)
        # fmt: off
        expected1 = np.array([
            0.01452305+0.01820039j, -0.01737362-0.01641946j,
            0.0121028 +0.01565081j, -0.02794554-0.03021514j,
            0.04719803+0.04086519j, -0.04391563-0.02779365j,
            0.05682834+0.01571325j, -0.08604821-0.02023657j,
            0.07497991+0.0186641j , -0.06366091-0.00922475j,
            0.11003416+0.0114788j , -0.13677941-0.01523552j,
            0.10934535-0.00117226j, -0.11635598+0.02551187j,
            0.14708674-0.03469823j, -0.1328196 +0.06034218j,
            0.12667368-0.13973421j, -0.14764774+0.18912019j,
            0.10235471-0.12181523j, -0.00773012+0.04730498j,
            -0.01487191-0.07312611j, -0.02739162+0.09619419j,
            0.02895459-0.05398273j, 0.01198589+0.05276592j,
            -0.02117299-0.10123465j, 0.00666388+0.09526499j,
            -0.01672773-0.05649684j, 0.02723125+0.05939891j,
            -0.01879361-0.062954j , 0.03686557+0.04568823j,
            -0.07394181-0.07949649j, 0.06238583+0.13905765j,
        ])
        expected2 = np.array([
            -0.01634146-7.0067253e-03j, -0.00068403+9.2661660e-03j,
            0.00571721-3.9035487e-03j, -0.00915086+1.5033451e-03j,
            0.01138636+5.4256055e-03j, -0.00294282-1.2016168e-02j,
            -0.00428711+7.3687937e-03j, -0.001002  -1.3972387e-03j,
            0.00622582+3.7551194e-03j, -0.00137886-7.0342086e-03j,
            -0.00824075+3.8430823e-03j,  0.0107349 +7.1450039e-03j,
            0.00363763-1.4242286e-02j, -0.01499857+1.7917662e-05j,
            -0.0046242 +1.2500680e-02j,  0.02180984+7.2047939e-03j,
            -0.00273568-1.6844695e-02j, -0.00178986-7.5209686e-03j,
            -0.01661806+1.2662713e-03j, -0.01045276+2.0611197e-02j,
            0.03252975+2.5592113e-02j,  0.03945662-6.7136563e-02j,
            -0.10622615+4.9393820e-03j,  0.06684612+6.4607985e-02j,
            -0.00753762-5.1637031e-02j, -0.00220644+1.8002450e-02j,
            -0.00357443-4.1291970e-03j,  0.01463647-1.4063751e-03j,
            -0.02252573-1.1189026e-02j,  0.00276293+1.9019062e-02j,
            0.01216721+1.2095908e-03j,  0.00034753-7.4386634e-03j
        ])
        expected3 = np.array([
            2.3276670e-02+0.0406534j, -2.4413882e-02-0.07868771j,
            1.0993068e-02+0.05550544j, -1.5825305e-02+0.00480187j,
            4.7617555e-02-0.04421869j, -7.1669750e-02+0.06317082j,
            5.9706111e-02-0.08369736j, -2.2317577e-02+0.08915959j,
            -2.3291381e-02-0.06601578j,  5.9362967e-02+0.03185856j,
            -6.5269925e-02+0.0030586j,  5.0898481e-02-0.04319243j,
            -4.0413942e-02+0.08051146j,  3.0059000e-02-0.09730332j,
            -1.2479190e-02+0.09703682j, -6.1806822e-03-0.09617531j,
            2.6907364e-02+0.08084074j, -4.1639723e-02-0.03391053j,
            3.1113219e-02-0.01497662j,  3.4023849e-03+0.03632669j,
            -4.9804080e-02-0.039231j,  8.9777440e-02+0.02577243j,
            -9.2947647e-02+0.01514865j,  6.2368069e-02-0.05954866j,
            -2.9966677e-02+0.06520324j, -8.2365885e-05-0.0440613j ,
            2.0203773e-02+0.04350767j, -8.9924788e-04-0.05406843j,
            -3.5951469e-02+0.03055602j,  3.3790238e-02+0.02182594j,
            1.0919777e-03-0.06437822j, -1.8534327e-02+0.07866792j
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec_list[0][64:96, 321], expected1))
        self.assertTrue(np.allclose(spec_list[1][64:96, 321], expected2))
        self.assertTrue(np.allclose(spec_list[2][64:96, 321], expected3))
        # power=1.0: amplitude spectrograms (magnitude of the STFT).
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=1.0,
        )
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[0].dtype, np.float64)
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[1].dtype, np.float64)
        self.assertEqual(spec_list[2].shape, (257, 1561))
        self.assertEqual(spec_list[2].dtype, np.float64)
        # fmt: off
        expected1 = np.array([
            0.02328461, 0.02390484, 0.01978448, 0.04115711, 0.0624309 ,
            0.05197181, 0.05896072, 0.08839577, 0.07726794, 0.06432579,
            0.11063128, 0.13762532, 0.10935163, 0.11911998, 0.15112405,
            0.14588428, 0.18860507, 0.23992978, 0.15910825, 0.04793241,
            0.07462307, 0.10001811, 0.06125769, 0.05411011, 0.10342509,
            0.09549777, 0.05892122, 0.06534349, 0.06569936, 0.05870678,
            0.10856833, 0.1524107 , 0.11463385, 0.05766969, 0.12385171,
            0.14472842, 0.11978184, 0.10353675, 0.07244056, 0.03461861,
            0.02624896, 0.02227475, 0.01238363, 0.00885281, 0.0110049 ,
            0.00807005, 0.01033663, 0.01703181, 0.01445856, 0.00585615,
            0.0132431 , 0.02754132, 0.01524478, 0.0204908 , 0.07453328,
            0.10716327, 0.07195779, 0.08816078, 0.18340898, 0.16449876,
            0.12322842, 0.1621659 , 0.12334293, 0.06033659,
        ])
        expected2 = np.array([
            0.01778026, 0.00929138, 0.00692273, 0.00927352, 0.01261294,
            0.01237128, 0.00852516, 0.00171938, 0.00727061, 0.00716808,
            0.00909281, 0.01289532, 0.01469949, 0.01499858, 0.01332855,
            0.02296907, 0.01706539, 0.00773101, 0.01666623, 0.02311021,
            0.0413901, 0.07787261, 0.10634092, 0.09296556, 0.05218428,
            0.01813716, 0.00546139, 0.01470388, 0.02515159, 0.0192187,
            0.01222719, 0.00744678, 0.01045674, 0.01923522, 0.01990819,
            0.01174323, 0.01535391, 0.02786647, 0.02904595, 0.0313408 ,
            0.0340503, 0.03118268, 0.02915136, 0.04200513, 0.05563153,
            0.05429446, 0.05021769, 0.05882667, 0.06668596, 0.06555867,
            0.04523559, 0.01489498, 0.01031892, 0.02134155, 0.01736669,
            0.0195216, 0.03971575, 0.03938636, 0.02052712, 0.03104931,
            0.0902727, 0.09022622, 0.03275532, 0.0172633,
        ])
        expected3 = np.array([
            0.04684551, 0.08238806, 0.05658358, 0.01653778, 0.06498249,
            0.09553589, 0.10281084, 0.09191031, 0.07000408, 0.06737158,
            0.06534155, 0.06675509, 0.09008541, 0.10184046, 0.09783596,
            0.0963737, 0.08520112, 0.05370093, 0.03453015, 0.03648568,
            0.06339967, 0.09340346, 0.09417402, 0.08623119, 0.07175977,
            0.04406138, 0.04796988, 0.05407591, 0.0471824 , 0.04022626,
            0.06438748, 0.0808218, 0.0745263, 0.06191467, 0.03116328,
            0.03206497, 0.05867718, 0.04424652, 0.04448404, 0.07032498,
            0.08300796, 0.07895744, 0.0816894, 0.09392357, 0.07571699,
            0.03967651, 0.07703795, 0.06464871, 0.08704693, 0.14085226,
            0.1350321, 0.18794712, 0.27043005, 0.26596246, 0.19948336,
            0.06545141, 0.13204652, 0.08554521, 0.2262849, 0.33900721,
            0.3970475, 0.3482436, 0.17134947, 0.46249565,
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec_list[0][64:128, 321], expected1))
        self.assertTrue(np.allclose(spec_list[1][64:128, 321], expected2))
        self.assertTrue(np.allclose(spec_list[2][64:128, 321], expected3))
        # power=2.0: power spectrograms (squared magnitude).
        spec_list = spectrogram_batch(
            waveform_list,
            window_function(400, "hann", frame_length=512),
            frame_length=512,
            hop_length=128,
            power=2.0,
        )
        self.assertEqual(spec_list[0].shape, (257, 732))
        self.assertEqual(spec_list[0].dtype, np.float64)
        self.assertEqual(spec_list[1].shape, (257, 602))
        self.assertEqual(spec_list[1].dtype, np.float64)
        self.assertEqual(spec_list[2].shape, (257, 1561))
        self.assertEqual(spec_list[2].dtype, np.float64)
        # fmt: off
        expected1 = np.array([
            5.42173162e-04, 5.71441371e-04, 3.91425507e-04, 1.69390778e-03,
            3.89761780e-03, 2.70106923e-03, 3.47636663e-03, 7.81381316e-03,
            5.97033510e-03, 4.13780799e-03, 1.22392802e-02, 1.89407300e-02,
            1.19577805e-02, 1.41895693e-02, 2.28384770e-02, 2.12822221e-02,
            3.55718732e-02, 5.75663000e-02, 2.53154356e-02, 2.29751552e-03,
            5.56860259e-03, 1.00036217e-02, 3.75250424e-03, 2.92790355e-03,
            1.06967501e-02, 9.11982451e-03, 3.47171025e-03, 4.26977174e-03,
            4.31640586e-03, 3.44648538e-03, 1.17870830e-02, 2.32290216e-02,
            1.31409196e-02, 3.32579296e-03, 1.53392460e-02, 2.09463164e-02,
            1.43476883e-02, 1.07198600e-02, 5.24763530e-03, 1.19844836e-03,
            6.89007982e-04, 4.96164430e-04, 1.53354369e-04, 7.83722571e-05,
            1.21107812e-04, 6.51257360e-05, 1.06845939e-04, 2.90082477e-04,
            2.09049831e-04, 3.42945241e-05, 1.75379610e-04, 7.58524227e-04,
            2.32403356e-04, 4.19872697e-04, 5.55520924e-03, 1.14839673e-02,
            5.17792348e-03, 7.77232368e-03, 3.36388536e-02, 2.70598419e-02,
            1.51852425e-02, 2.62977779e-02, 1.52134784e-02, 3.64050455e-03,
        ])
        expected2 = np.array([
            3.16137604e-04, 8.63297362e-05, 4.79241720e-05, 8.59982493e-05,
            1.59086326e-04, 1.53048476e-04, 7.26783945e-05, 2.95627100e-06,
            5.28617352e-05, 5.13813355e-05, 8.26792588e-05, 1.66289156e-04,
            2.16075069e-04, 2.24957314e-04, 1.77650211e-04, 5.27578282e-04,
            2.91227688e-04, 5.97685493e-05, 2.77763360e-04, 5.34081651e-04,
            1.71314057e-03, 6.06414277e-03, 1.13083916e-02, 8.64259617e-03,
            2.72319867e-03, 3.28956593e-04, 2.98268126e-05, 2.16204145e-04,
            6.32602626e-04, 3.69358508e-04, 1.49504171e-04, 5.54544917e-05,
            1.09343371e-04, 3.69993847e-04, 3.96335839e-04, 1.37903521e-04,
            2.35742483e-04, 7.76540114e-04, 8.43667068e-04, 9.82245923e-04,
            1.15942286e-03, 9.72359636e-04, 8.49801853e-04, 1.76443092e-03,
            3.09486753e-03, 2.94788822e-03, 2.52181630e-03, 3.46057723e-03,
            4.44701769e-03, 4.29793858e-03, 2.04625858e-03, 2.21860290e-04,
            1.06480179e-04, 4.55461892e-04, 3.01601836e-04, 3.81092892e-04,
            1.57734053e-03, 1.55128531e-03, 4.21362677e-04, 9.64059883e-04,
            8.14916019e-03, 8.14077014e-03, 1.07291131e-03, 2.98021545e-04,
        ])
        expected3 = np.array([
            0.0021945 , 0.00678779, 0.0032017 , 0.0002735 , 0.00422272,
            0.00912711, 0.01057007, 0.00844751, 0.00490057, 0.00453893,
            0.00426952, 0.00445624, 0.00811538, 0.01037148, 0.00957188,
            0.00928789, 0.00725923, 0.00288379, 0.00119233, 0.0013312 ,
            0.00401952, 0.00872421, 0.00886875, 0.00743582, 0.00514946,
            0.00194141, 0.00230111, 0.0029242 , 0.00222618, 0.00161815,
            0.00414575, 0.00653216, 0.00555417, 0.00383343, 0.00097115,
            0.00102816, 0.00344301, 0.00195775, 0.00197883, 0.0049456 ,
            0.00689032, 0.00623428, 0.00667316, 0.00882164, 0.00573306,
            0.00157423, 0.00593485, 0.00417946, 0.00757717, 0.01983936,
            0.01823367, 0.03532412, 0.07313241, 0.07073603, 0.03979361,
            0.00428389, 0.01743628, 0.00731798, 0.05120486, 0.11492589,
            0.15764671, 0.1212736 , 0.02936064, 0.21390222
        ])
        # fmt: on
        self.assertTrue(np.allclose(spec_list[0][64:128, 321], expected1))
        self.assertTrue(np.allclose(spec_list[1][64:128, 321], expected2))
        self.assertTrue(np.allclose(spec_list[2][64:128, 321], expected3))
def test_power_to_db(self):
spectrogram = np.zeros((2, 3))
spectrogram[0, 0] = 2.0
spectrogram[0, 1] = 0.5
spectrogram[0, 2] = 0.707
spectrogram[1, 1] = 1.0
output = power_to_db(spectrogram, reference=1.0)
expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-100.0, 0.0, -100.0]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, reference=2.0)
expected = np.array([[0.0, -6.02059991, -4.51610582], [-103.01029996, -3.01029996, -103.01029996]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, min_value=1e-6)
expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-60.0, 0.0, -60.0]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, db_range=80)
expected = np.array([[3.01029996, -3.01029996, -1.50580586], [-76.98970004, 0.0, -76.98970004]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, reference=2.0, db_range=80)
expected = np.array([[0.0, -6.02059991, -4.51610582], [-80.0, -3.01029996, -80.0]])
self.assertTrue(np.allclose(output, expected))
output = power_to_db(spectrogram, reference=2.0, min_value=1e-6, db_range=80)
expected = np.array([[0.0, -6.02059991, -4.51610582], [-63.01029996, -3.01029996, -63.01029996]])
self.assertTrue(np.allclose(output, expected))
with pytest.raises(ValueError):
power_to_db(spectrogram, reference=0.0)
with pytest.raises(ValueError):
power_to_db(spectrogram, min_value=0.0)
with pytest.raises(ValueError):
power_to_db(spectrogram, db_range=-80)
    def test_power_to_db_batch(self):
        """Exercise `power_to_db_batch` over its `reference`, `min_value` and
        `db_range` options, plus validation of invalid arguments.

        The expected arrays are what applying `power_to_db` per example yields,
        so this also checks batch/non-batch consistency.
        """
        # Setup a batch of spectrograms with varying values and lengths
        batch_spectrogram = np.zeros((3, 2, 3))
        batch_spectrogram[0, 0, 0] = 2.0
        batch_spectrogram[0, 0, 1] = 0.5
        batch_spectrogram[0, 0, 2] = 0.707
        batch_spectrogram[0, 1, 1] = 1.0
        batch_spectrogram[1, :, :2] = batch_spectrogram[0, :, :2] * 1.5
        batch_spectrogram[2, :, :1] = batch_spectrogram[0, :, :1] * 0.5
        # Expected values computed by applying `power_to_db` iteratively
        output = power_to_db_batch(batch_spectrogram, reference=1.0)
        expected = np.array(
            [
                [[3.01029996, -3.01029996, -1.50580586], [-100, 0, -100]],
                [[4.77121255, -1.24938737, -100], [-100, 1.76091259, -100]],
                [[0, -100, -100], [-100, -100, -100]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # A larger reference shifts every value down by 10*log10(reference).
        output = power_to_db_batch(batch_spectrogram, reference=2.0)
        expected = np.array(
            [
                [[0, -6.02059991, -4.51610582], [-103.01029996, -3.01029996, -103.01029996]],
                [[1.76091259, -4.25968732, -103.01029996], [-103.01029996, -1.24938737, -103.01029996]],
                [[-3.01029996, -103.01029996, -103.01029996], [-103.01029996, -103.01029996, -103.01029996]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # min_value raises the floor applied to zero entries (-60 dB here).
        output = power_to_db_batch(batch_spectrogram, min_value=1e-6)
        expected = np.array(
            [
                [[3.01029996, -3.01029996, -1.50580586], [-60, 0, -60]],
                [[4.77121255, -1.24938737, -60], [-60, 1.76091259, -60]],
                [[0, -60, -60], [-60, -60, -60]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # db_range clips each example relative to its own maximum.
        output = power_to_db_batch(batch_spectrogram, db_range=80)
        expected = np.array(
            [
                [[3.01029996, -3.01029996, -1.50580586], [-76.98970004, 0, -76.98970004]],
                [[4.77121255, -1.24938737, -75.22878745], [-75.22878745, 1.76091259, -75.22878745]],
                [[0, -80, -80], [-80, -80, -80]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        output = power_to_db_batch(batch_spectrogram, reference=2.0, db_range=80)
        expected = np.array(
            [
                [[0, -6.02059991, -4.51610582], [-80, -3.01029996, -80]],
                [[1.76091259, -4.25968732, -78.23908741], [-78.23908741, -1.24938737, -78.23908741]],
                [[-3.01029996, -83.01029996, -83.01029996], [-83.01029996, -83.01029996, -83.01029996]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        output = power_to_db_batch(batch_spectrogram, reference=2.0, min_value=1e-6, db_range=80)
        expected = np.array(
            [
                [[0, -6.02059991, -4.51610582], [-63.01029996, -3.01029996, -63.01029996]],
                [[1.76091259, -4.25968732, -63.01029996], [-63.01029996, -1.24938737, -63.01029996]],
                [[-3.01029996, -63.01029996, -63.01029996], [-63.01029996, -63.01029996, -63.01029996]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # Invalid arguments must be rejected.
        with pytest.raises(ValueError):
            power_to_db_batch(batch_spectrogram, reference=0.0)
        with pytest.raises(ValueError):
            power_to_db_batch(batch_spectrogram, min_value=0.0)
        with pytest.raises(ValueError):
            power_to_db_batch(batch_spectrogram, db_range=-80)
def test_amplitude_to_db(self):
spectrogram = np.zeros((2, 3))
spectrogram[0, 0] = 2.0
spectrogram[0, 1] = 0.5
spectrogram[0, 2] = 0.707
spectrogram[1, 1] = 1.0
output = amplitude_to_db(spectrogram, reference=1.0)
expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-100.0, 0.0, -100.0]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, reference=2.0)
expected = np.array([[0.0, -12.04119983, -9.03221164], [-106.02059991, -6.02059991, -106.02059991]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, min_value=1e-3)
expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-60.0, 0.0, -60.0]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, db_range=80)
expected = np.array([[6.02059991, -6.02059991, -3.01161172], [-73.97940009, 0.0, -73.97940009]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, reference=2.0, db_range=80)
expected = np.array([[0.0, -12.04119983, -9.03221164], [-80.0, -6.02059991, -80.0]])
self.assertTrue(np.allclose(output, expected))
output = amplitude_to_db(spectrogram, reference=2.0, min_value=1e-3, db_range=80)
expected = np.array([[0.0, -12.04119983, -9.03221164], [-66.02059991, -6.02059991, -66.02059991]])
self.assertTrue(np.allclose(output, expected))
with pytest.raises(ValueError):
amplitude_to_db(spectrogram, reference=0.0)
with pytest.raises(ValueError):
amplitude_to_db(spectrogram, min_value=0.0)
with pytest.raises(ValueError):
amplitude_to_db(spectrogram, db_range=-80)
    def test_amplitude_to_db_batch(self):
        """Exercise `amplitude_to_db_batch` over its `reference`, `min_value` and
        `db_range` options, plus validation of invalid arguments.

        The expected arrays are what applying `amplitude_to_db` per example
        yields, so this also checks batch/non-batch consistency.
        """
        # Setup a batch of spectrograms with varying values and lengths
        batch_spectrogram = np.zeros((3, 2, 3))
        batch_spectrogram[0, 0, 0] = 2.0
        batch_spectrogram[0, 0, 1] = 0.5
        batch_spectrogram[0, 0, 2] = 0.707
        batch_spectrogram[0, 1, 1] = 1.0
        batch_spectrogram[1, :, :2] = batch_spectrogram[0, :, :2] * 1.5
        batch_spectrogram[2, :, :1] = batch_spectrogram[0, :, :1] * 0.5
        # Expected values computed by applying `amplitude_to_db` iteratively
        output = amplitude_to_db_batch(batch_spectrogram, reference=1.0)
        expected = np.array(
            [
                [[6.02059991, -6.02059991, -3.01161172], [-100, 0, -100]],
                [[9.54242509, -2.49877473, -100], [-100, 3.52182518, -100]],
                [[0, -100, -100], [-100, -100, -100]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # A larger reference shifts every value down by 20*log10(reference).
        output = amplitude_to_db_batch(batch_spectrogram, reference=2.0)
        expected = np.array(
            [
                [[0, -12.04119983, -9.03221164], [-106.02059991, -6.02059991, -106.02059991]],
                [[3.52182518, -8.51937465, -106.02059991], [-106.02059991, -2.49877473, -106.02059991]],
                [[-6.02059991, -106.02059991, -106.02059991], [-106.02059991, -106.02059991, -106.02059991]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # min_value raises the floor applied to zero entries (-60 dB here).
        output = amplitude_to_db_batch(batch_spectrogram, min_value=1e-3)
        expected = np.array(
            [
                [[6.02059991, -6.02059991, -3.01161172], [-60, 0, -60]],
                [[9.54242509, -2.49877473, -60], [-60, 3.52182518, -60]],
                [[0, -60, -60], [-60, -60, -60]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # db_range clips each example relative to its own maximum.
        output = amplitude_to_db_batch(batch_spectrogram, db_range=80)
        expected = np.array(
            [
                [[6.02059991, -6.02059991, -3.01161172], [-73.97940009, 0, -73.97940009]],
                [[9.54242509, -2.49877473, -70.45757491], [-70.45757491, 3.52182518, -70.45757491]],
                [[0, -80, -80], [-80, -80, -80]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        output = amplitude_to_db_batch(batch_spectrogram, reference=2.0, db_range=80)
        expected = np.array(
            [
                [[0, -12.04119983, -9.03221164], [-80, -6.02059991, -80]],
                [[3.52182518, -8.51937465, -76.47817482], [-76.47817482, -2.49877473, -76.47817482]],
                [[-6.02059991, -86.02059991, -86.02059991], [-86.02059991, -86.02059991, -86.02059991]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        output = amplitude_to_db_batch(batch_spectrogram, reference=2.0, min_value=1e-3, db_range=80)
        expected = np.array(
            [
                [[0, -12.04119983, -9.03221164], [-66.02059991, -6.02059991, -66.02059991]],
                [[3.52182518, -8.51937465, -66.02059991], [-66.02059991, -2.49877473, -66.02059991]],
                [[-6.02059991, -66.02059991, -66.02059991], [-66.02059991, -66.02059991, -66.02059991]],
            ]
        )
        self.assertTrue(np.allclose(output, expected))
        # Invalid arguments must be rejected.
        with pytest.raises(ValueError):
            amplitude_to_db_batch(batch_spectrogram, reference=0.0)
        with pytest.raises(ValueError):
            amplitude_to_db_batch(batch_spectrogram, min_value=0.0)
        with pytest.raises(ValueError):
            amplitude_to_db_batch(batch_spectrogram, db_range=-80)
@require_librosa
def test_chroma_equivalence(self):
num_frequency_bins = 25
num_chroma = 6
sampling_rate = 24000
# test default parameters
original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins)
utils_chroma = chroma_filter_bank(
num_frequency_bins=num_frequency_bins, num_chroma=num_chroma, sampling_rate=sampling_rate
)
self.assertTrue(np.allclose(original_chroma, utils_chroma))
# test no weighting_parameters
original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins, octwidth=None)
utils_chroma = chroma_filter_bank(
num_frequency_bins=num_frequency_bins,
num_chroma=num_chroma,
sampling_rate=sampling_rate,
weighting_parameters=None,
)
self.assertTrue(np.allclose(original_chroma, utils_chroma))
# test with L1 norm
original_chroma = chroma(sr=sampling_rate, n_chroma=num_chroma, n_fft=num_frequency_bins, norm=1.0)
utils_chroma = chroma_filter_bank(
num_frequency_bins=num_frequency_bins, num_chroma=num_chroma, sampling_rate=sampling_rate, power=1.0
)
self.assertTrue(np.allclose(original_chroma, utils_chroma))
# test starting at 'A' chroma, power = None, tuning = 0, different weighting_parameters
original_chroma = chroma(
sr=sampling_rate,
n_chroma=num_chroma,
n_fft=num_frequency_bins,
norm=None,
base_c=None,
octwidth=1.0,
ctroct=4.0,
)
utils_chroma = chroma_filter_bank(
num_frequency_bins=num_frequency_bins,
num_chroma=num_chroma,
sampling_rate=sampling_rate,
power=None,
start_at_c_chroma=False,
weighting_parameters=(4.0, 1.0),
)
self.assertTrue(np.allclose(original_chroma, utils_chroma))
| AudioUtilsFunctionTester |
python | apache__airflow | providers/google/src/airflow/providers/google/ads/hooks/ads.py | {
"start": 1746,
"end": 14554
} | class ____(BaseHook):
"""
Interact with Google Ads API.
This hook offers two flows of authentication.
1. OAuth Service Account Flow (requires two connections)
- gcp_conn_id - provides service account details (like any other GCP connection)
- google_ads_conn_id - which contains information from Google Ads config.yaml file
in the ``extras``. Example of the ``extras``:
.. code-block:: json
{
"google_ads_client": {
"developer_token": "{{ INSERT_TOKEN }}",
"json_key_file_path": null,
"impersonated_email": "{{ INSERT_IMPERSONATED_EMAIL }}"
}
}
The ``json_key_file_path`` is resolved by the hook using credentials from gcp_conn_id.
https://developers.google.com/google-ads/api/docs/client-libs/python/oauth-service
.. seealso::
For more information on how Google Ads authentication flow works take a look at:
https://developers.google.com/google-ads/api/docs/client-libs/python/oauth-service
.. seealso::
For more information on the Google Ads API, take a look at the API docs:
https://developers.google.com/google-ads/api/docs/start
2. Developer token from API center flow (only requires google_ads_conn_id)
- google_ads_conn_id - which contains developer token, refresh token, client_id and client_secret
in the ``extras``. Example of the ``extras``:
.. code-block:: json
{
"google_ads_client": {
"developer_token": "{{ INSERT_DEVELOPER_TOKEN }}",
"refresh_token": "{{ INSERT_REFRESH_TOKEN }}",
"client_id": "{{ INSERT_CLIENT_ID }}",
"client_secret": "{{ INSERT_CLIENT_SECRET }}",
"use_proto_plus": "{{ True or False }}",
}
}
.. seealso::
For more information on how to obtain a developer token look at:
https://developers.google.com/google-ads/api/docs/get-started/dev-token
.. seealso::
For more information about use_proto_plus option see the Protobuf Messages guide:
https://developers.google.com/google-ads/api/docs/client-libs/python/protobuf-messages
:param gcp_conn_id: The connection ID with the service account details.
:param google_ads_conn_id: The connection ID with the details of Google Ads config.yaml file.
:param api_version: The Google Ads API version to use.
"""
conn_name_attr = "google_ads_conn_id"
default_conn_name = "google_ads_default"
conn_type = "google_ads"
hook_name = "Google Ads"
@classmethod
def get_connection_form_widgets(cls) -> dict[str, Any]:
    """Return connection widgets to add to Google Ads connection form."""
    # Imported lazily: these webserver-only packages are needed solely when
    # rendering the connection form, not when the hook runs in a task.
    from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
    from flask_babel import lazy_gettext
    from wtforms import PasswordField, StringField

    # Secrets (client secret, refresh token) use password widgets so they
    # are masked in the UI; the other fields are plain text inputs.
    return {
        "developer_token": StringField(lazy_gettext("Developer token"), widget=BS3TextFieldWidget()),
        "client_id": StringField(lazy_gettext("OAuth2 Client ID"), widget=BS3TextFieldWidget()),
        "client_secret": PasswordField(
            lazy_gettext("OAuth2 Client Secret"), widget=BS3PasswordFieldWidget()
        ),
        "refresh_token": PasswordField(
            lazy_gettext("OAuth2 Refresh Token"), widget=BS3PasswordFieldWidget()
        ),
    }
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
    """Return custom UI field behaviour for Google Ads connection."""
    # Standard connection fields that are meaningless for Google Ads are hidden.
    hidden = ["host", "login", "schema", "port"]
    placeholders = {"password": "Leave blank (optional)"}
    return {
        "hidden_fields": hidden,
        "relabeling": {},
        "placeholders": placeholders,
    }
def __init__(
    self,
    api_version: str | None = None,
    gcp_conn_id: str = "google_cloud_default",
    google_ads_conn_id: str = "google_ads_default",
    **kwargs,
) -> None:
    super().__init__(**kwargs)
    self.api_version = api_version
    self.gcp_conn_id = gcp_conn_id
    self.google_ads_conn_id = google_ads_conn_id
    # Populated lazily from the connection extras by _get_config().
    self.google_ads_config: dict[str, Any] = {}
    # Recomputed by _determine_authentication_method() once the config is read.
    self.authentication_method: Literal["service_account", "developer_token"] = "service_account"
def search(self, client_ids: list[str], query: str, **kwargs) -> list[GoogleAdsRow]:
    """
    Pull data from the Google Ads API.

    Native protobuf message instances are returned (those seen in versions
    prior to 10.0.0 of the google-ads library).

    This method is for backwards compatibility with older versions of the
    google_ads_hook.

    Check out the search_proto_plus method to get API results in the new
    default format of the google-ads library since v10.0.0 that behave
    more like conventional python object (using proto-plus-python).

    :param client_ids: Google Ads client ID(s) to query the API for.
    :param query: Google Ads Query Language query.
    :return: Google Ads API response, converted to Google Ads Row objects.
    """
    # Unwrap the proto-plus wrappers down to the raw protobuf messages.
    return [row._pb for row in self._search(client_ids, query, **kwargs)]
def search_proto_plus(self, client_ids: list[str], query: str, **kwargs) -> list[GoogleAdsRow]:
    """
    Pull data from the Google Ads API.

    Instances of proto-plus-python message are returned, which behave more
    like conventional Python objects.

    :param client_ids: Google Ads client ID(s) to query the API for.
    :param query: Google Ads Query Language query.
    :return: Google Ads API response, converted to Google Ads Row objects
    """
    # Thin wrapper: unlike search(), returns the proto-plus rows unchanged.
    return self._search(client_ids, query, **kwargs)
def list_accessible_customers(self) -> list[str]:
    """
    List resource names of customers.

    The resulting list of customers is based on your OAuth credentials. The
    request returns a list of all accounts that you are able to act upon
    directly given your current credentials. This will not necessarily
    include all accounts within the account hierarchy; rather, it will only
    include accounts where your authenticated user has been added with admin
    or other rights in the account.

    ..seealso::
        https://developers.google.com/google-ads/api/reference/rpc

    :return: List of names of customers
    """
    try:
        response = self._get_customer_service.list_accessible_customers()
        return list(response.resource_names)
    except GoogleAdsException as ex:
        # Log every reported error (and the offending field, when present)
        # before re-raising so the task log carries the full failure details.
        for error in ex.failure.errors:
            self.log.error('\tError with message "%s".', error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    self.log.error("\t\tOn field: %s", field_path_element.field_name)
        raise
@cached_property
def _get_service(self) -> GoogleAdsServiceClient:
    # NOTE: works for either authentication method, not only service accounts;
    # the auth flow is decided inside _get_client.
    """Connect and authenticate with the Google Ads API; return the GoogleAdsService client."""
    client = self._get_client
    return client.get_service("GoogleAdsService", version=self.api_version)
@cached_property
def _get_client(self) -> GoogleAdsClient:
    """Build an authenticated ``GoogleAdsClient`` from the connection config.

    The temporary file is only used by the service-account flow, where the
    GCP key must be handed to the Google Ads client as a file path; it lives
    just long enough for the client to be constructed.

    :raises GoogleAuthError: If authentication with the built config fails.
    """
    with NamedTemporaryFile("w", suffix=".json") as secrets_temp:
        self._get_config()
        self._determine_authentication_method()
        # The service-account flow needs the GCP key written to disk; the
        # developer-token flow authenticates from the extras alone.
        # (Replaces a conditional expression that was evaluated purely for
        # its side effect — same behavior, idiomatic `if` statement, and
        # consistent with _get_customer_service.)
        if self.authentication_method == "service_account":
            self._update_config_with_secret(secrets_temp)
        try:
            return GoogleAdsClient.load_from_dict(self.google_ads_config)
        except GoogleAuthError as e:
            self.log.error("Google Auth Error: %s", e)
            raise
@cached_property
def _get_customer_service(self) -> CustomerServiceClient:
    # NOTE: works for either authentication method, not only service accounts.
    """Connect and authenticate with the Google Ads API; return the CustomerService client."""
    with NamedTemporaryFile("w", suffix=".json") as secrets_temp:
        self._get_config()
        self._determine_authentication_method()
        # Only the service-account flow needs the GCP key written to a file path.
        if self.authentication_method == "service_account":
            self._update_config_with_secret(secrets_temp)
        try:
            client = GoogleAdsClient.load_from_dict(self.google_ads_config)
            return client.get_service("CustomerService", version=self.api_version)
        except GoogleAuthError as e:
            self.log.error("Google Auth Error: %s", e)
            raise
def _get_config(self) -> None:
    """
    Set up Google Ads config from Connection.

    This pulls the connections from db, and uses it to set up
    ``google_ads_config``.
    """
    conn = self.get_connection(self.google_ads_conn_id)
    extras = conn.extra_dejson
    if "google_ads_client" not in extras:
        raise AirflowException("google_ads_client not found in extra field")
    self.google_ads_config = extras["google_ads_client"]
def _determine_authentication_method(self) -> None:
"""Determine authentication method based on google_ads_config."""
if self.google_ads_config.get("json_key_file_path") and self.google_ads_config.get(
"impersonated_email"
):
self.authentication_method = "service_account"
elif (
self.google_ads_config.get("refresh_token")
and self.google_ads_config.get("client_id")
and self.google_ads_config.get("client_secret")
and self.google_ads_config.get("use_proto_plus")
):
self.authentication_method = "developer_token"
else:
raise AirflowException("Authentication method could not be determined")
def _update_config_with_secret(self, secrets_temp: IO[str]) -> None:
    """
    Set up Google Cloud config secret from Connection.

    This pulls the connection, saves the contents to a temp file, and point
    the config to the path containing the secret. Note that the secret must
    be passed as a file path for Google Ads API.

    :param secrets_temp: Open writable temp file; must outlive client creation.
    :raises KeyError: If the GCP connection extras lack ``keyfile_dict``.
    """
    extras = self.get_connection(self.gcp_conn_id).extra_dejson
    secret = get_field(extras, "keyfile_dict")
    if not secret:
        raise KeyError("secret_conn.extra_dejson does not contain keyfile_dict")
    # Persist the key material and flush so the Ads client can read it by path.
    secrets_temp.write(secret)
    secrets_temp.flush()
    self.google_ads_config["json_key_file_path"] = secrets_temp.name
def _search(self, client_ids: list[str], query: str, **kwargs) -> list[GoogleAdsRow]:
    """
    Pull data from the Google Ads API.

    :param client_ids: Google Ads client ID(s) to query the API for.
    :param query: Google Ads Query Language query.
    :return: Google Ads API response, converted to Google Ads Row objects
    """
    service = self._get_service
    # One search pager per client id; rows are flattened by _extract_rows.
    iterators = [
        service.search(request={"customer_id": client_id, "query": query})
        for client_id in client_ids
    ]
    self.log.info("Fetched Google Ads Iterators")
    return self._extract_rows(iterators)
def _extract_rows(self, iterators: list[SearchPager]) -> list[GoogleAdsRow]:
    """
    Convert Google Page Iterator (SearchPager) objects to Google Ads Rows.

    :param iterators: List of Google Page Iterator (SearchPager) objects
    :return: API response for all clients in the form of Google Ads Row object(s)
    """
    try:
        self.log.info("Extracting data from returned Google Ads Iterators")
        rows: list[GoogleAdsRow] = []
        for iterator in iterators:
            rows.extend(iterator)
        return rows
    except GoogleAdsException as e:
        # Surface the request id, status, and per-error details in the log
        # before re-raising to the caller.
        self.log.error(
            "Request ID %s failed with status %s and includes the following errors:",
            e.request_id,
            e.error.code().name,
        )
        for error in e.failure.errors:
            self.log.error("\tError with message: %s.", error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    self.log.error("\t\tOn field: %s", field_path_element.field_name)
        raise
| GoogleAdsHook |
python | allegroai__clearml | clearml/backend_api/services/v2_9/models.py | {
"start": 74474,
"end": 75603
} | class ____(Request):
"""
Convert company models to public
:param ids: IDs of the models to convert
:type ids: Sequence[str]
"""
_service = "models"
_action = "make_public"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Ids of the models to convert",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
    """Initialize the request; ``ids`` is validated/stored by the property setter."""
    super(MakePublicRequest, self).__init__(**kwargs)
    self.ids = ids
@schema_property("ids")
def ids(self) -> Optional[List[str]]:
return self._property_ids
@ids.setter
def ids(self, value: Optional[List[str]]) -> None:
    # None clears the field; otherwise require a list/tuple of strings.
    if value is None:
        self._property_ids = None
        return

    self.assert_isinstance(value, "ids", (list, tuple))
    self.assert_isinstance(value, "ids", six.string_types, is_array=True)
    self._property_ids = value
| MakePublicRequest |
python | FactoryBoy__factory_boy | tests/test_using.py | {
"start": 10457,
"end": 32259
} | class ____(unittest.TestCase):
def test_attribute(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 'one'
test_object = TestObjectFactory.build()
self.assertEqual(test_object.one, 'one')
def test_inheriting_model_class(self):
class TestObjectFactory(factory.Factory, TestObject):
class Meta:
model = TestObject
one = 'one'
test_object = TestObjectFactory.build()
self.assertEqual(test_object.one, 'one')
def test_abstract(self):
class SomeAbstractFactory(factory.Factory):
class Meta:
abstract = True
one = 'one'
class InheritedFactory(SomeAbstractFactory):
class Meta:
model = TestObject
test_object = InheritedFactory.build()
self.assertEqual(test_object.one, 'one')
def test_sequence(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Sequence(lambda n: 'one%d' % n)
two = factory.Sequence(lambda n: 'two%d' % n)
test_object0 = TestObjectFactory.build()
self.assertEqual(test_object0.one, 'one0')
self.assertEqual(test_object0.two, 'two0')
test_object1 = TestObjectFactory.build()
self.assertEqual(test_object1.one, 'one1')
self.assertEqual(test_object1.two, 'two1')
def test_sequence_custom_begin(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@classmethod
def _setup_next_sequence(cls):
return 42
one = factory.Sequence(lambda n: 'one%d' % n)
two = factory.Sequence(lambda n: 'two%d' % n)
test_object0 = TestObjectFactory.build()
self.assertEqual('one42', test_object0.one)
self.assertEqual('two42', test_object0.two)
test_object1 = TestObjectFactory.build()
self.assertEqual('one43', test_object1.one)
self.assertEqual('two43', test_object1.two)
def test_sequence_override(self):
    """A forced ``__sequence`` applies to that call only and does not advance the counter."""
    class TestObjectFactory(factory.Factory):
        class Meta:
            model = TestObject

        one = factory.Sequence(lambda n: 'one%d' % n)

    first = TestObjectFactory()
    second = TestObjectFactory()
    forced = TestObjectFactory(__sequence=42)
    third = TestObjectFactory()

    self.assertEqual('one0', first.one)
    self.assertEqual('one1', second.one)
    self.assertEqual('one42', forced.one)
    # The counter resumes from where it left off, ignoring the forced value.
    self.assertEqual('one2', third.one)
def test_custom_create(self):
class TestModelFactory(factory.Factory):
class Meta:
model = TestModel
two = 2
@classmethod
def _create(cls, model_class, *args, **kwargs):
obj = model_class.create(**kwargs)
obj.properly_created = True
return obj
obj = TestModelFactory.create(one=1)
self.assertEqual(1, obj.one)
self.assertEqual(2, obj.two)
self.assertEqual(1, obj.id)
self.assertTrue(obj.properly_created)
def test_non_django_create(self):
class NonDjango:
def __init__(self, x, y=2):
self.x = x
self.y = y
class NonDjangoFactory(factory.Factory):
class Meta:
model = NonDjango
x = 3
obj = NonDjangoFactory.create()
self.assertEqual(3, obj.x)
self.assertEqual(2, obj.y)
def test_sequence_batch(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Sequence(lambda n: 'one%d' % n)
two = factory.Sequence(lambda n: 'two%d' % n)
objs = TestObjectFactory.build_batch(20)
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual('one%d' % i, obj.one)
self.assertEqual('two%d' % i, obj.two)
def test_lazy_attribute(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.LazyAttribute(lambda a: 'abc')
two = factory.LazyAttribute(lambda a: a.one + ' xyz')
test_object = TestObjectFactory.build()
self.assertEqual(test_object.one, 'abc')
self.assertEqual(test_object.two, 'abc xyz')
def test_lazy_attribute_sequence(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.LazyAttributeSequence(lambda a, n: 'abc%d' % n)
two = factory.LazyAttributeSequence(lambda a, n: a.one + ' xyz%d' % n)
test_object0 = TestObjectFactory.build()
self.assertEqual(test_object0.one, 'abc0')
self.assertEqual(test_object0.two, 'abc0 xyz0')
test_object1 = TestObjectFactory.build()
self.assertEqual(test_object1.one, 'abc1')
self.assertEqual(test_object1.two, 'abc1 xyz1')
def test_lazy_attribute_decorator(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@factory.lazy_attribute
def one(a):
return 'one'
test_object = TestObjectFactory.build()
self.assertEqual(test_object.one, 'one')
def test_self_attribute(self):
    """SelfAttribute resolves dotted paths on the object being built, with an optional default."""
    class TmpObj:
        n = 3

    class TestObjectFactory(factory.Factory):
        class Meta:
            model = TestObject

        one = 'xx'
        two = factory.SelfAttribute('one')
        three = TmpObj()
        four = factory.SelfAttribute('three.n')
        # Missing attribute ('nnn') falls back to the provided default (5).
        five = factory.SelfAttribute('three.nnn', 5)

    test_object = TestObjectFactory.build(one=1)
    # 'two' follows the overridden value of 'one', not the declared default.
    self.assertEqual(1, test_object.two)
    self.assertEqual(3, test_object.three.n)
    self.assertEqual(3, test_object.four)
    self.assertEqual(5, test_object.five)
def test_self_attribute_parent(self):
class TestModel2(FakeModel):
pass
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 3
three = factory.SelfAttribute('..bar')
class TestModel2Factory(FakeModelFactory):
class Meta:
model = TestModel2
bar = 4
two = factory.SubFactory(TestModelFactory, one=1)
test_model = TestModel2Factory()
self.assertEqual(4, test_model.two.three)
def test_sequence_decorator(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@factory.sequence
def one(n):
return 'one%d' % n
test_object = TestObjectFactory.build()
self.assertEqual(test_object.one, 'one0')
def test_lazy_attribute_sequence_decorator(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@factory.lazy_attribute_sequence
def one(a, n):
return 'one%d' % n
@factory.lazy_attribute_sequence
def two(a, n):
return a.one + ' two%d' % n
test_object = TestObjectFactory.build()
self.assertEqual(test_object.one, 'one0')
self.assertEqual(test_object.two, 'one0 two0')
def test_build_with_parameters(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Sequence(lambda n: 'one%d' % n)
two = factory.Sequence(lambda n: 'two%d' % n)
test_object0 = TestObjectFactory.build(three='three')
self.assertEqual(test_object0.one, 'one0')
self.assertEqual(test_object0.two, 'two0')
self.assertEqual(test_object0.three, 'three')
test_object1 = TestObjectFactory.build(one='other')
self.assertEqual(test_object1.one, 'other')
self.assertEqual(test_object1.two, 'two1')
def test_create(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory.create()
self.assertEqual(test_model.one, 'one')
self.assertTrue(test_model.id)
def test_create_batch(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
objs = TestModelFactory.create_batch(20, two=factory.Sequence(int))
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual('one', obj.one)
self.assertEqual(i, obj.two)
self.assertTrue(obj.id)
def test_generate_build(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory.generate(factory.BUILD_STRATEGY)
self.assertEqual(test_model.one, 'one')
self.assertFalse(test_model.id)
def test_generate_create(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory.generate(factory.CREATE_STRATEGY)
self.assertEqual(test_model.one, 'one')
self.assertTrue(test_model.id)
def test_generate_stub(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory.generate(factory.STUB_STRATEGY)
self.assertEqual(test_model.one, 'one')
self.assertFalse(hasattr(test_model, 'id'))
def test_generate_batch_build(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
objs = TestModelFactory.generate_batch(factory.BUILD_STRATEGY, 20, two='two')
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual('one', obj.one)
self.assertEqual('two', obj.two)
self.assertFalse(obj.id)
def test_generate_batch_create(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
objs = TestModelFactory.generate_batch(factory.CREATE_STRATEGY, 20, two='two')
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual('one', obj.one)
self.assertEqual('two', obj.two)
self.assertTrue(obj.id)
def test_generate_batch_stub(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
objs = TestModelFactory.generate_batch(factory.STUB_STRATEGY, 20, two='two')
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual('one', obj.one)
self.assertEqual('two', obj.two)
self.assertFalse(hasattr(obj, 'id'))
def test_simple_generate_build(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory.simple_generate(False)
self.assertEqual(test_model.one, 'one')
self.assertFalse(test_model.id)
def test_simple_generate_create(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
test_model = TestModelFactory.simple_generate(True)
self.assertEqual(test_model.one, 'one')
self.assertTrue(test_model.id)
def test_simple_generate_batch_build(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
objs = TestModelFactory.simple_generate_batch(False, 20, two='two')
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual('one', obj.one)
self.assertEqual('two', obj.two)
self.assertFalse(obj.id)
def test_simple_generate_batch_create(self):
class TestModelFactory(FakeModelFactory):
class Meta:
model = TestModel
one = 'one'
objs = TestModelFactory.simple_generate_batch(True, 20, two='two')
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual('one', obj.one)
self.assertEqual('two', obj.two)
self.assertTrue(obj.id)
def test_stub_batch(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 'one'
two = factory.LazyAttribute(lambda a: a.one + ' two')
three = factory.Sequence(lambda n: int(n))
objs = TestObjectFactory.stub_batch(
20,
one=factory.Sequence(lambda n: str(n)),
)
self.assertEqual(20, len(objs))
self.assertEqual(20, len(set(objs)))
for i, obj in enumerate(objs):
self.assertEqual(str(i), obj.one)
self.assertEqual('%d two' % i, obj.two)
self.assertEqual(i, obj.three)
def test_inheritance(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 'one'
two = factory.LazyAttribute(lambda a: a.one + ' two')
class TestObjectFactory2(TestObjectFactory):
class Meta:
model = TestObject
three = 'three'
four = factory.LazyAttribute(lambda a: a.three + ' four')
test_object = TestObjectFactory2.build()
self.assertEqual(test_object.one, 'one')
self.assertEqual(test_object.two, 'one two')
self.assertEqual(test_object.three, 'three')
self.assertEqual(test_object.four, 'three four')
test_object_alt = TestObjectFactory.build()
self.assertEqual(None, test_object_alt.three)
def test_override_inherited(self):
"""Overriding inherited declarations"""
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 'one'
class TestObjectFactory2(TestObjectFactory):
one = 'two'
test_object = TestObjectFactory2.build()
self.assertEqual('two', test_object.one)
def test_override_inherited_deep(self):
"""Overriding inherited declarations"""
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 'one'
class TestObjectFactory2(TestObjectFactory):
one = 'two'
class TestObjectFactory3(TestObjectFactory2):
pass
test_object = TestObjectFactory3.build()
self.assertEqual('two', test_object.one)
def test_inheritance_and_sequences(self):
"""Sequence counters should be kept within an inheritance chain."""
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Sequence(lambda n: n)
class TestObjectFactory2(TestObjectFactory):
class Meta:
model = TestObject
to1a = TestObjectFactory()
self.assertEqual(0, to1a.one)
to2a = TestObjectFactory2()
self.assertEqual(1, to2a.one)
to1b = TestObjectFactory()
self.assertEqual(2, to1b.one)
to2b = TestObjectFactory2()
self.assertEqual(3, to2b.one)
def test_inheritance_sequence_inheriting_objects(self):
"""Sequence counters are kept with inheritance, incl. misc objects."""
class TestObject2(TestObject):
pass
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = factory.Sequence(lambda n: n)
class TestObjectFactory2(TestObjectFactory):
class Meta:
model = TestObject2
to1a = TestObjectFactory()
self.assertEqual(0, to1a.one)
to2a = TestObjectFactory2()
self.assertEqual(1, to2a.one)
to1b = TestObjectFactory()
self.assertEqual(2, to1b.one)
to2b = TestObjectFactory2()
self.assertEqual(3, to2b.one)
def test_inheritance_sequence_unrelated_objects(self):
    """Sequence counters are kept with inheritance, unrelated objects.

    See issue https://github.com/FactoryBoy/factory_boy/issues/93

    Problem: sequence counter is somewhat shared between factories
    until the subclassed factory has been called.
    """
    class TestObject2:
        def __init__(self, one):
            self.one = one

    class TestObjectFactory(factory.Factory):
        class Meta:
            model = TestObject

        one = factory.Sequence(lambda n: n)

    class TestObjectFactory2(TestObjectFactory):
        class Meta:
            model = TestObject2

    # Unrelated models: each factory keeps its own independent counter.
    to1a = TestObjectFactory()
    self.assertEqual(0, to1a.one)
    to2a = TestObjectFactory2()
    self.assertEqual(0, to2a.one)
    to1b = TestObjectFactory()
    self.assertEqual(1, to1b.one)
    to2b = TestObjectFactory2()
    self.assertEqual(1, to2b.one)
def test_inheritance_with_inherited_class(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 'one'
two = factory.LazyAttribute(lambda a: a.one + ' two')
class TestFactory(TestObjectFactory):
three = 'three'
four = factory.LazyAttribute(lambda a: a.three + ' four')
test_object = TestFactory.build()
self.assertEqual(test_object.one, 'one')
self.assertEqual(test_object.two, 'one two')
self.assertEqual(test_object.three, 'three')
self.assertEqual(test_object.four, 'three four')
def test_dual_inheritance(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
one = 'one'
class TestOtherFactory(factory.Factory):
class Meta:
model = TestObject
two = 'two'
four = 'four'
class TestFactory(TestObjectFactory, TestOtherFactory):
three = 'three'
obj = TestFactory.build(two=2)
self.assertEqual('one', obj.one)
self.assertEqual(2, obj.two)
self.assertEqual('three', obj.three)
self.assertEqual('four', obj.four)
def test_class_method_accessible(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@classmethod
def alt_create(cls, **kwargs):
return kwargs
self.assertEqual(TestObjectFactory.alt_create(foo=1), {"foo": 1})
def test_static_method_accessible(self):
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
@staticmethod
def alt_create(**kwargs):
return kwargs
self.assertEqual(TestObjectFactory.alt_create(foo=1), {"foo": 1})
def test_inline_args(self):
    """Meta.inline_args routes the named declarations as positional arguments."""
    class TestObject:
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs

    class TestObjectFactory(factory.Factory):
        class Meta:
            model = TestObject
            inline_args = ('x', 'y')

        x = 1
        y = 2
        z = 3
        t = 4

    obj = TestObjectFactory.build(x=42, z=5)
    # x/y become positional (with x overridden at build time);
    # the remaining declarations stay keyword arguments.
    self.assertEqual((42, 2), obj.args)
    self.assertEqual({'z': 5, 't': 4}, obj.kwargs)
def test_exclude(self):
    """Meta.exclude drops declarations from the model call even when overridden."""
    class TestObject:
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs

    class TestObjectFactory(factory.Factory):
        class Meta:
            model = TestObject
            exclude = ('x', 'z')

        x = 1
        y = 2
        z = 3
        t = 4

    obj = TestObjectFactory.build(x=42, z=5)
    # x and z are excluded: neither the defaults nor the build-time
    # overrides reach the model constructor.
    self.assertEqual((), obj.args)
    self.assertEqual({'y': 2, 't': 4}, obj.kwargs)
def test_exclude_and_inline_args(self):
class TestObject:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class TestObjectFactory(factory.Factory):
class Meta:
model = TestObject
exclude = ('x', 'z')
inline_args = ('y',)
x = 1
y = 2
z = 3
t = 4
obj = TestObjectFactory.build(x=42, z=5)
self.assertEqual((2,), obj.args)
self.assertEqual({'t': 4}, obj.kwargs)
| UsingFactoryTestCase |
python | great-expectations__great_expectations | contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_pair_values_lat_lng_matches_geohash.py | {
"start": 524,
"end": 927
} | class ____(ColumnPairMapMetricProvider):
condition_metric_name = "column_pair_values.lat_lng_matches_geohash"
# noinspection PyPep8Naming
@column_pair_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column_A, column_B, **kwargs):
    """Row-wise condition: feed each (lat/lng, geohash) row pair to ``compare``."""
    df = pd.DataFrame(column_A).join(column_B)
    # ``compare`` already takes the row as its argument, so pass it directly
    # instead of wrapping it in a redundant ``lambda x: compare(x)``.
    return df.apply(compare, axis=1)
| ColumnPairValuesLatLngMatchesGeohash |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 9238,
"end": 12031
class ____:
    """Descriptor for getting and setting outlines and borders along a single edge.

    For example "border-right", "outline-bottom", etc.
    """

    def __init__(self, default_color: Color) -> None:
        # Color reported by __get__ when no rule has been set for this edge.
        self._default_color = default_color

    def __set_name__(self, owner: StylesBase, name: str) -> None:
        # e.g. attribute "border_right" -> type "border", edge "right".
        self.name = name
        _type, edge = name.split("_")
        self._type = _type
        self.edge = edge

    def __get__(
        self, obj: StylesBase, objtype: type[StylesBase] | None = None
    ) -> tuple[EdgeType, Color]:
        """Get the box property.

        Args:
            obj: The ``Styles`` object.
            objtype: The ``Styles`` class.

        Returns:
            A ``tuple[EdgeType, Style]`` containing the string type of the box and
            its style. Example types are "round", "solid", and "dashed".
        """
        # Fall back to (no edge, default color) when the rule is unset.
        return obj.get_rule(self.name) or ("", self._default_color)  # type: ignore[return-value]

    def __set__(
        self,
        obj: StylesBase,
        border: tuple[EdgeType, str | Color] | Literal["none"] | None,
    ):
        """Set the box property.

        Args:
            obj: The ``Styles`` object.
            value: A 2-tuple containing the type of box to use,
                e.g. "dashed", and the ``Style`` to be used. You can supply the
                ``Style`` directly, or pass a ``str`` (e.g. ``"blue on #f0f0f0"``)
                or ``Color`` instead.

        Raises:
            StyleValueError: If the string supplied for the color is not a valid color.
        """
        if border is None:
            # Clearing the rule; refresh only if something was actually cleared.
            if obj.clear_rule(self.name):
                obj.refresh(layout=True)
        elif border == "none":
            # Remove the edge type but keep the currently stored color.
            obj.set_rule(self.name, ("", obj.get_rule(self.name)[1]))
        else:
            _type, color = border
            # "none"/"hidden" normalize to the empty edge type.
            if _type in ("none", "hidden"):
                _type = ""
            new_value = border
            if isinstance(color, str):
                try:
                    new_value = (_type, Color.parse(color))
                except ColorParseError as error:
                    raise StyleValueError(
                        str(error),
                        help_text=border_property_help_text(
                            self.name, context="inline"
                        ),
                    )
            elif isinstance(color, Color):
                new_value = (_type, color)
            current_value: tuple[str, Color] = cast(
                "tuple[str, Color]", obj.get_rule(self.name)
            )
            # Layout only needs recomputing when an edge appears or disappears.
            has_edge = bool(current_value and current_value[0])
            new_edge = bool(_type)
            if obj.set_rule(self.name, new_value):
                obj.refresh(layout=has_edge != new_edge)
@rich.repr.auto
| BoxProperty |
python | ray-project__ray | python/ray/serve/tests/test_deployment_scheduler.py | {
"start": 952,
"end": 5243
} | class ____:
@pytest.mark.parametrize(
    "placement_group_config",
    [
        {},
        {"bundles": [{"CPU": 3}]},
        {
            "bundles": [{"CPU": 1}, {"CPU": 1}, {"CPU": 1}],
            "strategy": "STRICT_PACK",
        },
    ],
)
def test_spread_deployment_scheduling_policy_upscale(
    self, ray_start_cluster, placement_group_config
):
    """Test to make sure replicas are spreaded."""
    # Two 3-CPU nodes so the two 1-CPU replicas can land on distinct nodes.
    cluster = ray_start_cluster
    cluster.add_node(num_cpus=3)
    cluster.add_node(num_cpus=3)
    cluster.wait_for_nodes()
    ray.init(address=cluster.address)
    cluster_node_info_cache = default_impl.create_cluster_node_info_cache(
        GcsClient(address=ray.get_runtime_context().gcs_address)
    )
    cluster_node_info_cache.update()

    scheduler = default_impl.create_deployment_scheduler(
        cluster_node_info_cache,
        get_head_node_id(),
    )
    dep_id = DeploymentID(name="deployment1")
    r1_id = ReplicaID(unique_id="replica1", deployment_id=dep_id)
    r2_id = ReplicaID(unique_id="replica2", deployment_id=dep_id)
    scheduler.on_deployment_created(dep_id, SpreadDeploymentSchedulingPolicy())
    replica_actor_handles = []
    replica_placement_groups = []

    # Collects scheduled actors/placement groups for the assertions below.
    def on_scheduled(actor_handle, placement_group):
        replica_actor_handles.append(actor_handle)
        replica_placement_groups.append(placement_group)

    deployment_to_replicas_to_stop = scheduler.schedule(
        upscales={
            dep_id: [
                ReplicaSchedulingRequest(
                    replica_id=r1_id,
                    actor_def=Replica,
                    actor_resources={"CPU": 1},
                    actor_options={"name": "deployment1_replica1"},
                    actor_init_args=(),
                    on_scheduled=on_scheduled,
                    placement_group_bundles=placement_group_config.get(
                        "bundles", None
                    ),
                    placement_group_strategy=placement_group_config.get(
                        "strategy", None
                    ),
                ),
                ReplicaSchedulingRequest(
                    replica_id=r2_id,
                    actor_def=Replica,
                    actor_resources={"CPU": 1},
                    actor_options={"name": "deployment1_replica2"},
                    actor_init_args=(),
                    on_scheduled=on_scheduled,
                    placement_group_bundles=placement_group_config.get(
                        "bundles", None
                    ),
                    placement_group_strategy=placement_group_config.get(
                        "strategy", None
                    ),
                ),
            ]
        },
        downscales={},
    )
    # Pure upscale: nothing should be scheduled for stopping.
    assert not deployment_to_replicas_to_stop
    assert len(replica_actor_handles) == 2
    assert len(replica_placement_groups) == 2
    assert not scheduler._pending_replicas[dep_id]
    assert len(scheduler._launching_replicas[dep_id]) == 2
    # Spread policy: the two replicas must end up on two distinct nodes.
    assert (
        len(
            {
                ray.get(replica_actor_handles[0].get_node_id.remote()),
                ray.get(replica_actor_handles[1].get_node_id.remote()),
            }
        )
        == 2
    )
    if "bundles" in placement_group_config:
        # When placement groups are requested, each replica gets its own PG.
        assert (
            len(
                {
                    ray.get(replica_actor_handles[0].get_placement_group.remote()),
                    ray.get(replica_actor_handles[1].get_placement_group.remote()),
                }
            )
            == 2
        )
    scheduler.on_replica_stopping(r1_id)
    scheduler.on_replica_stopping(r2_id)
    scheduler.on_deployment_deleted(dep_id)
@serve.deployment
def A():
return ray.get_runtime_context().get_node_id()
app_A = A.bind()
@pytest.mark.skipif(
not RAY_SERVE_USE_COMPACT_SCHEDULING_STRATEGY, reason="Needs compact strategy."
)
| TestSpreadScheduling |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py | {
"start": 18595,
"end": 24765
} | class ____:
def test_start(self, parent_job, grouped_jobs, api_limit):
    """Starting the parent starts every child job exactly once."""
    # should attempt to start every child (DummyAPILimit never limits)
    parent_job.start(api_limit)
    for job in grouped_jobs:
        job.start.assert_called_once_with(api_limit)
def test_completed(self, parent_job, grouped_jobs):
    """Parent reports completed only once every child job is completed."""
    assert not parent_job.completed, "initially not completed"

    # partially complete
    grouped_jobs[0].completed = True
    grouped_jobs[5].completed = True
    assert not parent_job.completed, "not completed until all jobs completed"

    # complete all
    for j in grouped_jobs:
        j.completed = True
    assert parent_job.completed, "completed because all jobs completed"
def test_update_job_forwards_batch(self, parent_job, grouped_jobs, batch, api_limit):
for job in grouped_jobs:
job.started = True
parent_job.update_job(batch=batch)
for j in grouped_jobs:
j.update_job.assert_called_once_with(batch=batch)
def test_update_job_splices_new_children(self, mocker, api, batch):
"""
If a child sets .new_jobs = [ParentAsyncJob(...)] then parent.update_job()
should replace that child with the inner children from the provided ParentAsyncJob.
"""
interval = DateInterval(date(2020, 1, 1), date(2020, 1, 2))
# Two original children
child0 = mocker.Mock(spec=InsightAsyncJob, started=True, completed=False, new_jobs=[])
child1 = mocker.Mock(spec=InsightAsyncJob, started=True, completed=False, new_jobs=[])
# Their update_job should accept batch
child0.update_job = mocker.Mock()
child1.update_job = mocker.Mock()
# New children that will replace child0
new_a = mocker.Mock(spec=InsightAsyncJob, started=False, completed=False, new_jobs=[])
new_b = mocker.Mock(spec=InsightAsyncJob, started=False, completed=False, new_jobs=[])
# child0 announces work split: one ParentAsyncJob with two kids
child0.new_jobs = [
ParentAsyncJob(api=api, jobs=[new_a, new_b], interval=interval) # minimal required args
]
parent = ParentAsyncJob(api=api, jobs=[child0, child1], interval=interval)
# run update to forward polling and splice children
parent.update_job(batch=batch)
# update forwarded to both original children
child0.update_job.assert_called_once_with(batch=batch)
child1.update_job.assert_called_once_with(batch=batch)
# child0 replaced by (new_a, new_b), child1 preserved → total 3
assert parent._jobs == [new_a, new_b, child1]
def test_get_result_streams_children(self, parent_job, grouped_jobs):
"""
With no primary key provided, get_result() yields results from children in order.
"""
for j in grouped_jobs:
j.get_result.return_value = []
grouped_jobs[0].get_result.return_value = range(3, 8)
grouped_jobs[6].get_result.return_value = range(4, 11)
out = list(parent_job.get_result())
assert out == list(range(3, 8)) + list(range(4, 11))
def test_get_result_merges_by_primary_key(self, mocker, api):
"""
With primary_key set, rows from children with the same PK should merge,
and later children overwrite non-PK fields.
"""
interval = DateInterval(date(2020, 1, 1), date(2020, 1, 1))
pk = ["id"]
c1 = mocker.Mock(spec=InsightAsyncJob)
c2 = mocker.Mock(spec=InsightAsyncJob)
# child1 provides base rows
c1.get_result.return_value = [
{"id": 1, "a": 1},
{"id": 2, "a": 2},
]
# child2 overwrites/extends id=1
c2.get_result.return_value = [
{"id": 1, "b": 10},
{"id": 1, "a": 100},
]
parent = ParentAsyncJob(api=api, jobs=[c1, c2], interval=interval, primary_key=pk)
rows = list(parent.get_result())
# Convert ParentAsyncJob._ExportableRow to dicts
as_dicts = [r.export_all_data() if hasattr(r, "export_all_data") else dict(r) for r in rows]
# Expect merged records for ids {1,2}; for id=1, 'a' overwritten to 100, 'b' added
# Ordering by insertion of dict values isn't guaranteed; normalize by PK
merged = {d["id"]: d for d in as_dicts}
assert set(merged.keys()) == {1, 2}
assert merged[1]["a"] == 100
assert merged[1]["b"] == 10
assert merged[2]["a"] == 2
def test_get_result_merges_with_object_breakdown_id_injection(self, mocker, api):
"""
When object_breakdowns is provided (e.g. {'image_asset': 'image_asset_id'}),
the parent should inject the *_id from nested objects before computing the PK
and merge accordingly.
"""
interval = DateInterval(date(2020, 1, 1), date(2020, 1, 1))
pk = ["image_asset_id"]
ob_map = {"image_asset": "image_asset_id"}
c1 = mocker.Mock(spec=InsightAsyncJob)
c2 = mocker.Mock(spec=InsightAsyncJob)
# c1: no explicit *_id, but has the object with an "id"
c1.get_result.return_value = [{"image_asset": {"id": "img-123"}, "metric": 1}]
# c2: explicit *_id for same asset, adds more fields
c2.get_result.return_value = [{"image_asset_id": "img-123", "metric2": 2}]
parent = ParentAsyncJob(
api=api,
jobs=[c1, c2],
interval=interval,
primary_key=pk,
object_breakdowns=ob_map,
)
rows = list(parent.get_result())
as_dicts = [r.export_all_data() if hasattr(r, "export_all_data") else dict(r) for r in rows]
assert len(as_dicts) == 1
merged = as_dicts[0]
# injected ID should exist
assert merged["image_asset_id"] == "img-123"
# both metrics present
assert merged["metric"] == 1
assert merged["metric2"] == 2
def test_str(self, parent_job, grouped_jobs):
assert str(parent_job) == f"ParentAsyncJob({grouped_jobs[0]} ... {len(grouped_jobs) - 1} jobs more)"
| TestParentAsyncJob |
python | huggingface__transformers | src/transformers/models/qwen3_moe/modeling_qwen3_moe.py | {
"start": 31948,
"end": 32059
} | class ____(GenericForSequenceClassification, Qwen3MoePreTrainedModel):
pass
| Qwen3MoeForSequenceClassification |
python | viewflow__viewflow | tests/json/test_json__json.py | {
"start": 240,
"end": 838
} | class ____(TestCase):
def test_crud(self):
model = JsonFieldModel(json_field={"test": "value"})
self.assertIsInstance(model._meta.get_field("json_field"), models.JSONField)
self.assertEqual(
model.data,
{
"json_field": {"test": "value"},
},
)
model.save()
model = JsonFieldModel.objects.get()
self.assertEqual(
model.data,
{
"json_field": {"test": "value"},
},
)
self.assertEqual(model.json_field, {"test": "value"})
| Test |
python | anthropics__anthropic-sdk-python | src/anthropic/types/metadata_param.py | {
"start": 222,
"end": 594
} | class ____(TypedDict, total=False):
user_id: Optional[str]
"""An external identifier for the user who is associated with the request.
This should be a uuid, hash value, or other opaque identifier. Anthropic may use
this id to help detect abuse. Do not include any identifying information such as
name, email address, or phone number.
"""
| MetadataParam |
python | getsentry__sentry | src/sentry/deletions/base.py | {
"start": 6043,
"end": 9343
} | class ____(BaseDeletionTask[ModelT]):
DEFAULT_QUERY_LIMIT: int | None = None
manager_name = "objects"
def __init__(
self,
manager: DeletionTaskManager,
model: type[ModelT],
query: Mapping[str, Any],
query_limit: int | None = None,
order_by: str | None = None,
**kwargs: Any,
):
super().__init__(manager, **kwargs)
self.model = model
self.query = query
self.query_limit = query_limit or self.DEFAULT_QUERY_LIMIT or self.chunk_size
self.order_by = order_by
def __repr__(self) -> str:
return "<{}: model={} query={} order_by={} transaction_id={} actor_id={}>".format(
type(self),
self.model,
self.query,
self.order_by,
self.transaction_id,
self.actor_id,
)
def get_query_filter(self) -> None | Q:
"""
Override this to add additional filters to the queryset.
Returns a Q object or None.
"""
return None
def chunk(self, apply_filter: bool = False) -> bool:
"""
Deletes a chunk of this instance's data. Return ``True`` if there is
more work, or ``False`` if all matching entities have been removed.
"""
query_limit = self.query_limit
remaining = self.chunk_size
while remaining >= 0:
queryset = getattr(self.model, self.manager_name).filter(**self.query)
if apply_filter:
query_filter = self.get_query_filter()
if query_filter is not None:
queryset = queryset.filter(query_filter)
if self.order_by:
queryset = queryset.order_by(self.order_by)
queryset = list(queryset[:query_limit])
# If there are no more rows we are all done.
if not queryset:
return False
self.delete_bulk(queryset)
remaining = remaining - len(queryset)
# We have more work to do as we didn't run out of rows to delete.
return True
def delete_instance(self, instance: ModelT) -> None:
instance_id = instance.id
try:
instance.delete()
finally:
# Don't log Group and Event child object deletions.
model_name = type(instance).__name__
if not _leaf_re.search(model_name):
self.logger.info(
f"object.delete.executed ({model_name})",
extra={
"object_id": instance_id,
"transaction_id": self.transaction_id,
"app_label": instance._meta.app_label,
"model": model_name,
},
)
def get_actor(self) -> RpcUser | None:
if self.actor_id:
return user_service.get_user(user_id=self.actor_id)
return None
def mark_deletion_in_progress(self, instance_list: Sequence[ModelT]) -> None:
for instance in instance_list:
status = getattr(instance, "status", None)
if status not in (ObjectStatus.DELETION_IN_PROGRESS, None):
instance.update(status=ObjectStatus.DELETION_IN_PROGRESS)
| ModelDeletionTask |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDict15.py | {
"start": 203,
"end": 290
} | class ____(Protocol):
def clear(self) -> None: ...
_T = TypeVar("_T")
| SupportsClear |
python | django__django | django/core/checks/registry.py | {
"start": 155,
"end": 613
} | class ____:
"""
Built-in tags for internal checks.
"""
admin = "admin"
async_support = "async_support"
caches = "caches"
commands = "commands"
compatibility = "compatibility"
database = "database"
files = "files"
models = "models"
security = "security"
signals = "signals"
sites = "sites"
staticfiles = "staticfiles"
templates = "templates"
translation = "translation"
urls = "urls"
| Tags |
python | pypa__pipenv | pipenv/vendor/dotenv/ipython.py | {
"start": 300,
"end": 1303
} | class ____(Magics):
@magic_arguments()
@argument(
'-o', '--override', action='store_true',
help="Indicate to override existing variables"
)
@argument(
'-v', '--verbose', action='store_true',
help="Indicate function calls to be verbose"
)
@argument('dotenv_path', nargs='?', type=str, default='.env',
help='Search in increasingly higher folders for the `dotenv_path`')
@line_magic
def dotenv(self, line):
args = parse_argstring(self.dotenv, line)
# Locate the .env file
dotenv_path = args.dotenv_path
try:
dotenv_path = find_dotenv(dotenv_path, True, True)
except IOError:
print("cannot find .env file")
return
# Load the .env file
load_dotenv(dotenv_path, verbose=args.verbose, override=args.override)
def load_ipython_extension(ipython):
"""Register the %dotenv magic."""
ipython.register_magics(IPythonDotEnv)
| IPythonDotEnv |
python | pypa__warehouse | tests/unit/manage/views/test_teams.py | {
"start": 19777,
"end": 23812
} | class ____:
def test_get(self, db_request, user_service):
team = TeamFactory.create()
older_event = TeamEventFactory.create(
source=team,
tag="fake:event",
time=datetime.datetime(2017, 2, 5, 17, 18, 18, 462_634),
)
newer_event = TeamEventFactory.create(
source=team,
tag="fake:event",
time=datetime.datetime(2018, 2, 5, 17, 18, 18, 462_634),
)
assert team_views.manage_team_history(team, db_request) == {
"events": [newer_event, older_event],
"get_user": user_service.get_user,
"team": team,
}
def test_raises_400_with_pagenum_type_str(self, monkeypatch, db_request):
params = MultiDict({"page": "abc"})
db_request.params = params
events_query = pretend.stub()
db_request.events_query = pretend.stub(
events_query=lambda *a, **kw: events_query
)
page_obj = pretend.stub(page_count=10, item_count=1000)
page_cls = pretend.call_recorder(lambda *a, **kw: page_obj)
monkeypatch.setattr(views, "SQLAlchemyORMPage", page_cls)
url_maker = pretend.stub()
url_maker_factory = pretend.call_recorder(lambda request: url_maker)
monkeypatch.setattr(views, "paginate_url_factory", url_maker_factory)
team = TeamFactory.create()
with pytest.raises(HTTPBadRequest):
team_views.manage_team_history(team, db_request)
assert page_cls.calls == []
def test_first_page(self, db_request, user_service):
page_number = 1
params = MultiDict({"page": page_number})
db_request.params = params
team = TeamFactory.create()
items_per_page = 25
total_items = items_per_page + 2
TeamEventFactory.create_batch(total_items, source=team, tag="fake:event")
events_query = (
db_request.db.query(Team.Event)
.join(Team.Event.source)
.filter(Team.Event.source_id == team.id)
.order_by(Team.Event.time.desc())
)
events_page = SQLAlchemyORMPage(
events_query,
page=page_number,
items_per_page=items_per_page,
item_count=total_items,
url_maker=paginate_url_factory(db_request),
)
assert team_views.manage_team_history(team, db_request) == {
"events": events_page,
"get_user": user_service.get_user,
"team": team,
}
def test_last_page(self, db_request, user_service):
page_number = 2
params = MultiDict({"page": page_number})
db_request.params = params
team = TeamFactory.create()
items_per_page = 25
total_items = items_per_page + 2
TeamEventFactory.create_batch(total_items, source=team, tag="fake:event")
events_query = (
db_request.db.query(Team.Event)
.join(Team.Event.source)
.filter(Team.Event.source_id == team.id)
.order_by(Team.Event.time.desc())
)
events_page = SQLAlchemyORMPage(
events_query,
page=page_number,
items_per_page=items_per_page,
item_count=total_items,
url_maker=paginate_url_factory(db_request),
)
assert team_views.manage_team_history(team, db_request) == {
"events": events_page,
"get_user": user_service.get_user,
"team": team,
}
def test_raises_404_with_out_of_range_page(self, db_request):
page_number = 3
params = MultiDict({"page": page_number})
db_request.params = params
team = TeamFactory.create()
items_per_page = 25
total_items = items_per_page + 2
TeamEventFactory.create_batch(total_items, source=team, tag="fake:event")
with pytest.raises(HTTPNotFound):
assert team_views.manage_team_history(team, db_request)
| TestManageTeamHistory |
python | MongoEngine__mongoengine | tests/test_datastructures.py | {
"start": 425,
"end": 5690
} | class ____:
@staticmethod
def _get_basedict(dict_items):
"""Get a BaseList bound to a fake document instance"""
fake_doc = DocumentStub()
base_list = BaseDict(dict_items, instance=None, name="my_name")
base_list._instance = (
fake_doc # hack to inject the mock, it does not work in the constructor
)
return base_list
def test___init___(self):
class MyDoc(Document):
pass
dict_items = {"k": "v"}
doc = MyDoc()
base_dict = BaseDict(dict_items, instance=doc, name="my_name")
assert isinstance(base_dict._instance, Document)
assert base_dict._name == "my_name"
assert base_dict == dict_items
def test_setdefault_calls_mark_as_changed(self):
base_dict = self._get_basedict({})
base_dict.setdefault("k", "v")
assert base_dict._instance._changed_fields == [base_dict._name]
def test_popitems_calls_mark_as_changed(self):
base_dict = self._get_basedict({"k": "v"})
assert base_dict.popitem() == ("k", "v")
assert base_dict._instance._changed_fields == [base_dict._name]
assert not base_dict
def test_pop_calls_mark_as_changed(self):
base_dict = self._get_basedict({"k": "v"})
assert base_dict.pop("k") == "v"
assert base_dict._instance._changed_fields == [base_dict._name]
assert not base_dict
def test_pop_calls_does_not_mark_as_changed_when_it_fails(self):
base_dict = self._get_basedict({"k": "v"})
with pytest.raises(KeyError):
base_dict.pop("X")
assert not base_dict._instance._changed_fields
def test_clear_calls_mark_as_changed(self):
base_dict = self._get_basedict({"k": "v"})
base_dict.clear()
assert base_dict._instance._changed_fields == ["my_name"]
assert base_dict == {}
def test___delitem___calls_mark_as_changed(self):
base_dict = self._get_basedict({"k": "v"})
del base_dict["k"]
assert base_dict._instance._changed_fields == ["my_name.k"]
assert base_dict == {}
def test___getitem____KeyError(self):
base_dict = self._get_basedict({})
with pytest.raises(KeyError):
base_dict["new"]
def test___getitem____simple_value(self):
base_dict = self._get_basedict({"k": "v"})
base_dict["k"] = "v"
def test___getitem____sublist_gets_converted_to_BaseList(self):
base_dict = self._get_basedict({"k": [0, 1, 2]})
sub_list = base_dict["k"]
assert sub_list == [0, 1, 2]
assert isinstance(sub_list, BaseList)
assert sub_list._instance is base_dict._instance
assert sub_list._name == "my_name.k"
assert base_dict._instance._changed_fields == []
# Challenge mark_as_changed from sublist
sub_list[1] = None
assert base_dict._instance._changed_fields == ["my_name.k.1"]
def test___getitem____subdict_gets_converted_to_BaseDict(self):
base_dict = self._get_basedict({"k": {"subk": "subv"}})
sub_dict = base_dict["k"]
assert sub_dict == {"subk": "subv"}
assert isinstance(sub_dict, BaseDict)
assert sub_dict._instance is base_dict._instance
assert sub_dict._name == "my_name.k"
assert base_dict._instance._changed_fields == []
# Challenge mark_as_changed from subdict
sub_dict["subk"] = None
assert base_dict._instance._changed_fields == ["my_name.k.subk"]
def test_get_sublist_gets_converted_to_BaseList_just_like__getitem__(self):
base_dict = self._get_basedict({"k": [0, 1, 2]})
sub_list = base_dict.get("k")
assert sub_list == [0, 1, 2]
assert isinstance(sub_list, BaseList)
def test_get_returns_the_same_as___getitem__(self):
base_dict = self._get_basedict({"k": [0, 1, 2]})
get_ = base_dict.get("k")
getitem_ = base_dict["k"]
assert get_ == getitem_
def test_get_default(self):
base_dict = self._get_basedict({})
sentinel = object()
assert base_dict.get("new") is None
assert base_dict.get("new", sentinel) is sentinel
def test___setitem___calls_mark_as_changed(self):
base_dict = self._get_basedict({})
base_dict["k"] = "v"
assert base_dict._instance._changed_fields == ["my_name.k"]
assert base_dict == {"k": "v"}
def test_update_calls_mark_as_changed(self):
base_dict = self._get_basedict({})
base_dict.update({"k": "v"})
assert base_dict._instance._changed_fields == ["my_name"]
def test___setattr____not_tracked_by_changes(self):
base_dict = self._get_basedict({})
base_dict.a_new_attr = "test"
assert base_dict._instance._changed_fields == []
def test___delattr____tracked_by_changes(self):
# This is probably a bug as __setattr__ is not tracked
# This is even bad because it could be that there is an attribute
# with the same name as a key
base_dict = self._get_basedict({})
base_dict.a_new_attr = "test"
del base_dict.a_new_attr
assert base_dict._instance._changed_fields == ["my_name.a_new_attr"]
| TestBaseDict |
python | pypa__pip | tests/unit/test_utils.py | {
"start": 17223,
"end": 19223
} | class ____:
@pytest.mark.skipif("sys.platform == 'win32'")
def test_glibc_version_string(self, monkeypatch: pytest.MonkeyPatch) -> None:
monkeypatch.setattr(
os,
"confstr",
lambda x: "glibc 2.20",
raising=False,
)
assert glibc_version_string() == "2.20"
@pytest.mark.skipif("sys.platform == 'win32'")
def test_glibc_version_string_confstr(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setattr(
os,
"confstr",
lambda x: "glibc 2.20",
raising=False,
)
assert glibc_version_string_confstr() == "2.20"
@pytest.mark.parametrize(
"failure",
[
lambda x: raises(ValueError),
lambda x: raises(OSError),
lambda x: "XXX",
],
)
def test_glibc_version_string_confstr_fail(
self, monkeypatch: pytest.MonkeyPatch, failure: Callable[[Any], Any]
) -> None:
monkeypatch.setattr(os, "confstr", failure, raising=False)
assert glibc_version_string_confstr() is None
def test_glibc_version_string_confstr_missing(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.delattr(os, "confstr", raising=False)
assert glibc_version_string_confstr() is None
def test_glibc_version_string_ctypes_missing(
self, monkeypatch: pytest.MonkeyPatch
) -> None:
monkeypatch.setitem(sys.modules, "ctypes", None)
assert glibc_version_string_ctypes() is None
@pytest.mark.parametrize(
"version_info, expected",
[
((), (0, 0, 0)),
((3,), (3, 0, 0)),
((3, 6), (3, 6, 0)),
((3, 6, 2), (3, 6, 2)),
((3, 6, 2, 4), (3, 6, 2)),
],
)
def test_normalize_version_info(
version_info: tuple[int, ...], expected: tuple[int, int, int]
) -> None:
actual = normalize_version_info(version_info)
assert actual == expected
| TestGlibc |
python | ray-project__ray | python/ray/autoscaler/_private/aliyun/node_provider.py | {
"start": 831,
"end": 12726
} | class ____(NodeProvider):
def __init__(self, provider_config, cluster_name):
NodeProvider.__init__(self, provider_config, cluster_name)
self.cache_stopped_nodes = provider_config.get("cache_stopped_nodes", True)
self.acs = AcsClient(
access_key=provider_config["access_key"],
access_key_secret=provider_config["access_key_secret"],
region_id=provider_config["region"],
max_retries=BOTO_MAX_RETRIES,
)
# Try availability zones round-robin, starting from random offset
self.subnet_idx = random.randint(0, 100)
# Tags that we believe to actually be on the node.
self.tag_cache = {}
# Tags that we will soon upload.
self.tag_cache_pending = defaultdict(dict)
# Number of threads waiting for a batched tag update.
self.batch_thread_count = 0
self.batch_update_done = threading.Event()
self.batch_update_done.set()
self.ready_for_new_batch = threading.Event()
self.ready_for_new_batch.set()
self.tag_cache_lock = threading.Lock()
self.count_lock = threading.Lock()
# Cache of node objects from the last nodes() call. This avoids
# excessive DescribeInstances requests.
self.cached_nodes = {}
def non_terminated_nodes(self, tag_filters: Dict[str, str]) -> List[str]:
tags = [
{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
},
]
for k, v in tag_filters.items():
tags.append(
{
"Key": k,
"Value": v,
}
)
instances = self.acs.describe_instances(tags=tags)
non_terminated_instance = []
for instance in instances:
if instance.get("Status") == RUNNING or instance.get("Status") == PENDING:
non_terminated_instance.append(instance.get("InstanceId"))
self.cached_nodes[instance.get("InstanceId")] = instance
return non_terminated_instance
def is_running(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
instance = instances[0]
return instance.get("Status") == "Running"
cli_logger.error("Invalid node id: %s", node_id)
return False
def is_terminated(self, node_id: str) -> bool:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
return instance.get("Status") == "Stopped"
cli_logger.error("Invalid node id: %s", node_id)
return False
def node_tags(self, node_id: str) -> Dict[str, str]:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if instance.get("Tags") is not None:
node_tags = dict()
for tag in instance.get("Tags").get("Tag"):
node_tags[tag.get("TagKey")] = tag.get("TagValue")
return node_tags
return dict()
def external_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances)
instance = instances[0]
if (
instance.get("PublicIpAddress") is not None
and instance.get("PublicIpAddress").get("IpAddress") is not None
):
if len(instance.get("PublicIpAddress").get("IpAddress")) > 0:
return instance.get("PublicIpAddress").get("IpAddress")[0]
cli_logger.error("PublicIpAddress attribute is not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def internal_ip(self, node_id: str) -> str:
while True:
instances = self.acs.describe_instances(instance_ids=[node_id])
if instances is not None:
assert len(instances) == 1
instance = instances[0]
if (
instance.get("VpcAttributes") is not None
and instance.get("VpcAttributes").get("PrivateIpAddress")
is not None
and len(
instance.get("VpcAttributes")
.get("PrivateIpAddress")
.get("IpAddress")
)
> 0
):
return (
instance.get("VpcAttributes")
.get("PrivateIpAddress")
.get("IpAddress")[0]
)
cli_logger.error("InnerIpAddress attribute is not exist. %s" % instance)
time.sleep(STOPPING_NODE_DELAY)
def set_node_tags(self, node_id: str, tags: Dict[str, str]) -> None:
is_batching_thread = False
with self.tag_cache_lock:
if not self.tag_cache_pending:
is_batching_thread = True
# Wait for threads in the last batch to exit
self.ready_for_new_batch.wait()
self.ready_for_new_batch.clear()
self.batch_update_done.clear()
self.tag_cache_pending[node_id].update(tags)
if is_batching_thread:
time.sleep(TAG_BATCH_DELAY)
with self.tag_cache_lock:
self._update_node_tags()
self.batch_update_done.set()
with self.count_lock:
self.batch_thread_count += 1
self.batch_update_done.wait()
with self.count_lock:
self.batch_thread_count -= 1
if self.batch_thread_count == 0:
self.ready_for_new_batch.set()
def _update_node_tags(self):
batch_updates = defaultdict(list)
for node_id, tags in self.tag_cache_pending.items():
for x in tags.items():
batch_updates[x].append(node_id)
self.tag_cache[node_id] = tags
self.tag_cache_pending = defaultdict(dict)
self._create_tags(batch_updates)
def _create_tags(self, batch_updates):
for (k, v), node_ids in batch_updates.items():
m = "Set tag {}={} on {}".format(k, v, node_ids)
with LogTimer("AliyunNodeProvider: {}".format(m)):
if k == TAG_RAY_NODE_NAME:
k = "Name"
self.acs.tag_resource(node_ids, [{"Key": k, "Value": v}])
def create_node(
self, node_config: Dict[str, Any], tags: Dict[str, str], count: int
) -> Optional[Dict[str, Any]]:
filter_tags = [
{
"Key": TAG_RAY_CLUSTER_NAME,
"Value": self.cluster_name,
},
{"Key": TAG_RAY_NODE_KIND, "Value": tags[TAG_RAY_NODE_KIND]},
{"Key": TAG_RAY_USER_NODE_TYPE, "Value": tags[TAG_RAY_USER_NODE_TYPE]},
{"Key": TAG_RAY_LAUNCH_CONFIG, "Value": tags[TAG_RAY_LAUNCH_CONFIG]},
{"Key": TAG_RAY_NODE_NAME, "Value": tags[TAG_RAY_NODE_NAME]},
]
reused_nodes_dict = {}
if self.cache_stopped_nodes:
reuse_nodes_candidate = self.acs.describe_instances(tags=filter_tags)
if reuse_nodes_candidate:
with cli_logger.group("Stopping instances to reuse"):
reuse_node_ids = []
for node in reuse_nodes_candidate:
node_id = node.get("InstanceId")
status = node.get("Status")
if status != STOPPING and status != STOPPED:
continue
if status == STOPPING:
# wait for node stopped
while (
self.acs.describe_instances(instance_ids=[node_id])[
0
].get("Status")
== STOPPING
):
logging.info("wait for %s stop" % node_id)
time.sleep(STOPPING_NODE_DELAY)
# logger.info("reuse %s" % node_id)
reuse_node_ids.append(node_id)
reused_nodes_dict[node.get("InstanceId")] = node
self.acs.start_instance(node_id)
self.tag_cache[node_id] = node.get("Tags")
self.set_node_tags(node_id, tags)
if len(reuse_node_ids) == count:
break
count -= len(reuse_node_ids)
created_nodes_dict = {}
if count > 0:
filter_tags.append(
{"Key": TAG_RAY_NODE_STATUS, "Value": tags[TAG_RAY_NODE_STATUS]}
)
instance_id_sets = self.acs.run_instances(
instance_type=node_config["InstanceType"],
image_id=node_config["ImageId"],
tags=filter_tags,
amount=count,
vswitch_id=self.provider_config["v_switch_id"],
security_group_id=self.provider_config["security_group_id"],
key_pair_name=self.provider_config["key_name"],
)
instances = self.acs.describe_instances(instance_ids=instance_id_sets)
if instances is not None:
for instance in instances:
created_nodes_dict[instance.get("InstanceId")] = instance
all_created_nodes = reused_nodes_dict
all_created_nodes.update(created_nodes_dict)
return all_created_nodes
def terminate_node(self, node_id: str) -> None:
logger.info("terminate node: %s" % node_id)
if self.cache_stopped_nodes:
logger.info(
"Stopping instance {} (to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)"
).format(node_id)
self.acs.stop_instance(node_id)
else:
self.acs.delete_instance(node_id)
def terminate_nodes(self, node_ids: List[str]) -> None:
if not node_ids:
return
if self.cache_stopped_nodes:
logger.info(
"Stopping instances {} (to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)".format(node_ids)
)
self.acs.stop_instances(node_ids)
else:
self.acs.delete_instances(node_ids)
def _get_node(self, node_id):
"""Refresh and get info for this node, updating the cache."""
self.non_terminated_nodes({}) # Side effect: updates cache
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
# Node not in {pending, running} -- retry with a point query. This
# usually means the node was recently preempted or terminated.
matches = self.acs.describe_instances(instance_ids=[node_id])
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0]
def _get_cached_node(self, node_id):
"""Return node info from cache if possible, otherwise fetches it."""
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
return self._get_node(node_id)
@staticmethod
def bootstrap_config(cluster_config):
return bootstrap_aliyun(cluster_config)
| AliyunNodeProvider |
python | catalyst-team__catalyst | catalyst/contrib/losses/dice.py | {
"start": 137,
"end": 1723
} | class ____(nn.Module):
"""The Dice loss.
DiceLoss = 1 - dice score
dice score = 2 * intersection / (intersection + union)) = \
= 2 * tp / (2 * tp + fp + fn)
"""
def __init__(
self,
class_dim: int = 1,
mode: str = "macro",
weights: List[float] = None,
eps: float = 1e-7,
):
"""
Args:
class_dim: indicates class dimention (K) for
``outputs`` and ``targets`` tensors (default = 1)
mode: class summation strategy. Must be one of ['micro', 'macro',
'weighted']. If mode='micro', classes are ignored, and metric
are calculated generally. If mode='macro', metric are
calculated per-class and than are averaged over all classes.
If mode='weighted', metric are calculated per-class and than
summed over all classes with weights.
weights: class weights(for mode="weighted")
eps: epsilon to avoid zero division
"""
super().__init__()
assert mode in ["micro", "macro", "weighted"]
self.loss_fn = partial(
dice,
eps=eps,
class_dim=class_dim,
threshold=None,
mode=mode,
weights=weights,
)
def forward(self, outputs: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
"""Calculates loss between ``logits`` and ``target`` tensors."""
dice_score = self.loss_fn(outputs, targets)
return 1 - dice_score
__all__ = ["DiceLoss"]
| DiceLoss |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_integration_channels.py | {
"start": 5921,
"end": 6608
} | class ____(OrganizationIntegrationChannelsTest):
def test_integration_not_found(self):
response = self.get_error_response(self.organization.slug, 9999, status_code=404)
assert response.status_code == 404
def test_unsupported_provider(self):
integration = self.create_integration(
organization=self.organization,
provider="github",
name="GitHub",
external_id="github:1",
)
response = self.get_success_response(self.organization.slug, integration.id)
assert response.data["results"] == []
assert "not supported" in response.data["warning"]
| OrganizationIntegrationChannelsErrorTest |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-diff-private-simple-dataset/examples/symptom_2_disease/event_handler.py | {
"start": 362,
"end": 2138
} | class ____(BaseEventHandler):
num_splits: int
t_max: int
synthetic_example_starts: int = 0
synthetic_example_ends: int = 0
llm_empty_responses: int = 0
empty_intersections: int = 0
critical_threshold: int = 0.025 # ~2.5% error rate with OpenAI API calls
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "MyEventHandler"
def compute_approximate_error_rate(self):
"""Returns an approximate error rate."""
return (self.llm_empty_responses + self.empty_intersections) / (
self.synthetic_example_starts * self.t_max * (self.num_splits + 1)
)
def handle(self, event) -> None:
"""Logic for handling event."""
if isinstance(event, SyntheticExampleStartEvent):
self.synthetic_example_starts += 1
elif isinstance(event, SyntheticExampleEndEvent):
self.synthetic_example_ends += 1
elif isinstance(event, LLMEmptyResponseEvent):
self.llm_empty_responses += 1
with open("error_report.json", "w") as f:
json.dump(self.dict(), f)
if self.compute_approximate_error_rate() > self.critical_threshold:
raise TooManyProblemsEncounteredError(
"There were too many errors encountered."
)
elif isinstance(event, EmptyIntersectionEvent):
self.empty_intersections += 1
with open("error_report.json", "w") as f:
json.dump(self.dict(), f)
if self.compute_approximate_error_rate() > self.critical_threshold:
raise TooManyProblemsEncounteredError(
"There were too many errors encountered."
)
| DiffPrivacyEventHandler |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_artifacts/client.py | {
"start": 1258,
"end": 1514
} | class ____(BaseArtifactReadParams, total=False):
artifact_filter: Annotated[
Optional["ArtifactCollectionFilter"], Field(default=None)
]
sort: Annotated[Optional["ArtifactCollectionSort"], Field(default=None)]
| ArtifactCollectionReadParams |
python | pyca__cryptography | tests/doubles.py | {
"start": 1200,
"end": 1298
} | class ____(
serialization.KeySerializationEncryption
):
pass
| DummyKeySerializationEncryption |
python | astropy__astropy | astropy/units/tests/test_quantity_info.py | {
"start": 3195,
"end": 3768
} | class ____:
@classmethod
def setup_class(cls):
value = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=[("p", "f8"), ("v", "f8")])
cls.q = u.Quantity(value, "m, m/s")
cls.q.info.name = "pv"
cls.q.info.description = "Location and speed"
def test_keying(self):
q_p = self.q["p"]
assert_no_info(q_p)
def test_slicing(self):
q = self.q[:1]
assert_info_equal(q, self.q)
def test_item(self):
# Scalars do not get info set.
q = self.q[1]
assert_no_info(q)
| TestStructuredQuantity |
python | tensorflow__tensorflow | tensorflow/python/compat/disable_v2_behavior_test.py | {
"start": 993,
"end": 1500
} | class ____(test.TestCase):
def test_basic(self):
t = constant_op.constant([1, 2, 3]) # creates a hidden context
self.assertTrue(isinstance(t, ops.EagerTensor))
t = _pywrap_tf2.is_enabled()
self.assertTrue(t)
v2_compat.disable_v2_behavior()
t = constant_op.constant([1, 2, 3])
self.assertFalse(isinstance(t, ops.EagerTensor))
t = _pywrap_tf2.is_enabled()
self.assertFalse(t)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
| DisableV2BehaviorTest |
python | ray-project__ray | ci/ray_ci/windows_container.py | {
"start": 148,
"end": 1812
} | class ____(Container):
def install_ray(
self, build_type: Optional[str] = None, mask: Optional[str] = None
) -> List[str]:
assert not build_type, f"Windows does not support build type: {build_type}"
assert not mask, f"Windows does not support install mask: {mask}"
bazel_cache = os.environ.get("BUILDKITE_BAZEL_CACHE_URL", "")
pipeline_id = os.environ.get("BUILDKITE_PIPELINE_ID", "")
cache_readonly = os.environ.get("BUILDKITE_CACHE_READONLY", "")
subprocess.check_call(
[
"docker",
"build",
"--build-arg",
f"BASE_IMAGE={self._get_docker_image()}",
"--build-arg",
f"BUILDKITE_BAZEL_CACHE_URL={bazel_cache}",
"--build-arg",
f"BUILDKITE_PIPELINE_ID={pipeline_id}",
"--build-arg",
f"BUILDKITE_CACHE_READONLY={cache_readonly}",
"-t",
self._get_docker_image(),
"-f",
"C:\\workdir\\ci\\ray_ci\\windows\\tests.env.Dockerfile",
"C:\\workdir",
],
stdout=sys.stdout,
stderr=sys.stderr,
)
def get_run_command_shell(self) -> List[str]:
return ["bash", "-c"]
def get_run_command_extra_args(
self,
gpu_ids: Optional[List[int]] = None,
) -> List[str]:
assert not gpu_ids, "Windows does not support gpu ids"
return ["--workdir", WORKDIR]
def get_artifact_mount(self) -> Tuple[str, str]:
return ("C:\\tmp\\artifacts", "C:\\artifact-mount")
| WindowsContainer |
python | scikit-learn__scikit-learn | sklearn/ensemble/_forest.py | {
"start": 35801,
"end": 41346
} | class ____(RegressorMixin, BaseForest, metaclass=ABCMeta):
"""
Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(
self,
estimator,
n_estimators=100,
*,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
max_samples=None,
):
super().__init__(
estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
max_samples=max_samples,
)
def predict(self, X):
"""
Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted values.
"""
check_is_fitted(self)
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# avoid storing the output of every estimator by summing them here
if self.n_outputs_ > 1:
y_hat = np.zeros((X.shape[0], self.n_outputs_), dtype=np.float64)
else:
y_hat = np.zeros((X.shape[0]), dtype=np.float64)
# Parallel loop
lock = threading.Lock()
Parallel(n_jobs=n_jobs, verbose=self.verbose, require="sharedmem")(
delayed(_accumulate_prediction)(e.predict, X, [y_hat], lock)
for e in self.estimators_
)
y_hat /= len(self.estimators_)
return y_hat
@staticmethod
def _get_oob_predictions(tree, X):
"""Compute the OOB predictions for an individual tree.
Parameters
----------
tree : DecisionTreeRegressor object
A single decision tree regressor.
X : ndarray of shape (n_samples, n_features)
The OOB samples.
Returns
-------
y_pred : ndarray of shape (n_samples, 1, n_outputs)
The OOB associated predictions.
"""
y_pred = tree.predict(X, check_input=False)
if y_pred.ndim == 1:
# single output regression
y_pred = y_pred[:, np.newaxis, np.newaxis]
else:
# multioutput regression
y_pred = y_pred[:, np.newaxis, :]
return y_pred
def _set_oob_score_and_attributes(self, X, y, scoring_function=None):
"""Compute and set the OOB score and attributes.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
y : ndarray of shape (n_samples, n_outputs)
The target matrix.
scoring_function : callable, default=None
Scoring function for OOB score. Defaults to `r2_score`.
"""
self.oob_prediction_ = super()._compute_oob_predictions(X, y).squeeze(axis=1)
if self.oob_prediction_.shape[-1] == 1:
# drop the n_outputs axis if there is a single output
self.oob_prediction_ = self.oob_prediction_.squeeze(axis=-1)
if scoring_function is None:
scoring_function = r2_score
self.oob_score_ = scoring_function(y, self.oob_prediction_)
def _compute_partial_dependence_recursion(self, grid, target_features):
"""Fast partial dependence computation.
Parameters
----------
grid : ndarray of shape (n_samples, n_target_features), dtype=DTYPE
The grid points on which the partial dependence should be
evaluated.
target_features : ndarray of shape (n_target_features), dtype=np.intp
The set of target features for which the partial dependence
should be evaluated.
Returns
-------
averaged_predictions : ndarray of shape (n_samples,)
The value of the partial dependence function on each grid point.
"""
grid = np.asarray(grid, dtype=DTYPE, order="C")
target_features = np.asarray(target_features, dtype=np.intp, order="C")
averaged_predictions = np.zeros(
shape=grid.shape[0], dtype=np.float64, order="C"
)
for tree in self.estimators_:
# Note: we don't sum in parallel because the GIL isn't released in
# the fast method.
tree.tree_.compute_partial_dependence(
grid, target_features, averaged_predictions
)
# Average over the forest
averaged_predictions /= len(self.estimators_)
return averaged_predictions
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.sparse = True
return tags
| ForestRegressor |
python | getsentry__sentry-python | sentry_sdk/integrations/google_genai/utils.py | {
"start": 885,
"end": 22499
} | class ____(TypedDict):
"""Structure for token usage data."""
input_tokens: int
input_tokens_cached: int
output_tokens: int
output_tokens_reasoning: int
total_tokens: int
def extract_usage_data(response):
# type: (Union[GenerateContentResponse, dict[str, Any]]) -> UsageData
"""Extract usage data from response into a structured format.
Args:
response: The GenerateContentResponse object or dictionary containing usage metadata
Returns:
UsageData: Dictionary with input_tokens, input_tokens_cached,
output_tokens, and output_tokens_reasoning fields
"""
usage_data = UsageData(
input_tokens=0,
input_tokens_cached=0,
output_tokens=0,
output_tokens_reasoning=0,
total_tokens=0,
)
# Handle dictionary response (from streaming)
if isinstance(response, dict):
usage = response.get("usage_metadata", {})
if not usage:
return usage_data
prompt_tokens = usage.get("prompt_token_count", 0) or 0
tool_use_prompt_tokens = usage.get("tool_use_prompt_token_count", 0) or 0
usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens
cached_tokens = usage.get("cached_content_token_count", 0) or 0
usage_data["input_tokens_cached"] = cached_tokens
reasoning_tokens = usage.get("thoughts_token_count", 0) or 0
usage_data["output_tokens_reasoning"] = reasoning_tokens
candidates_tokens = usage.get("candidates_token_count", 0) or 0
# python-genai reports output and reasoning tokens separately
# reasoning should be sub-category of output tokens
usage_data["output_tokens"] = candidates_tokens + reasoning_tokens
total_tokens = usage.get("total_token_count", 0) or 0
usage_data["total_tokens"] = total_tokens
return usage_data
if not hasattr(response, "usage_metadata"):
return usage_data
usage = response.usage_metadata
# Input tokens include both prompt and tool use prompt tokens
prompt_tokens = getattr(usage, "prompt_token_count", 0) or 0
tool_use_prompt_tokens = getattr(usage, "tool_use_prompt_token_count", 0) or 0
usage_data["input_tokens"] = prompt_tokens + tool_use_prompt_tokens
# Cached input tokens
cached_tokens = getattr(usage, "cached_content_token_count", 0) or 0
usage_data["input_tokens_cached"] = cached_tokens
# Reasoning tokens
reasoning_tokens = getattr(usage, "thoughts_token_count", 0) or 0
usage_data["output_tokens_reasoning"] = reasoning_tokens
# output_tokens = candidates_tokens + reasoning_tokens
# google-genai reports output and reasoning tokens separately
candidates_tokens = getattr(usage, "candidates_token_count", 0) or 0
usage_data["output_tokens"] = candidates_tokens + reasoning_tokens
total_tokens = getattr(usage, "total_token_count", 0) or 0
usage_data["total_tokens"] = total_tokens
return usage_data
def _capture_exception(exc):
# type: (Any) -> None
"""Capture exception with Google GenAI mechanism."""
event, hint = event_from_exception(
exc,
client_options=sentry_sdk.get_client().options,
mechanism={"type": "google_genai", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
def get_model_name(model):
# type: (Union[str, Model]) -> str
"""Extract model name from model parameter."""
if isinstance(model, str):
return model
# Handle case where model might be an object with a name attribute
if hasattr(model, "name"):
return str(model.name)
return str(model)
def extract_contents_text(contents):
# type: (ContentListUnion) -> Optional[str]
"""Extract text from contents parameter which can have various formats."""
if contents is None:
return None
# Simple string case
if isinstance(contents, str):
return contents
# List of contents or parts
if isinstance(contents, list):
texts = []
for item in contents:
# Recursively extract text from each item
extracted = extract_contents_text(item)
if extracted:
texts.append(extracted)
return " ".join(texts) if texts else None
# Dictionary case
if isinstance(contents, dict):
if "text" in contents:
return contents["text"]
# Try to extract from parts if present in dict
if "parts" in contents:
return extract_contents_text(contents["parts"])
# Content object with parts - recurse into parts
if getattr(contents, "parts", None):
return extract_contents_text(contents.parts)
# Direct text attribute
if hasattr(contents, "text"):
return contents.text
return None
def _format_tools_for_span(tools):
# type: (Iterable[Tool | Callable[..., Any]]) -> Optional[List[dict[str, Any]]]
"""Format tools parameter for span data."""
formatted_tools = []
for tool in tools:
if callable(tool):
# Handle callable functions passed directly
formatted_tools.append(
{
"name": getattr(tool, "__name__", "unknown"),
"description": getattr(tool, "__doc__", None),
}
)
elif (
hasattr(tool, "function_declarations")
and tool.function_declarations is not None
):
# Tool object with function declarations
for func_decl in tool.function_declarations:
formatted_tools.append(
{
"name": getattr(func_decl, "name", None),
"description": getattr(func_decl, "description", None),
}
)
else:
# Check for predefined tool attributes - each of these tools
# is an attribute of the tool object, by default set to None
for attr_name, description in TOOL_ATTRIBUTES_MAP.items():
if getattr(tool, attr_name, None):
formatted_tools.append(
{
"name": attr_name,
"description": description,
}
)
break
return formatted_tools if formatted_tools else None
def extract_tool_calls(response):
# type: (GenerateContentResponse) -> Optional[List[dict[str, Any]]]
"""Extract tool/function calls from response candidates and automatic function calling history."""
tool_calls = []
# Extract from candidates, sometimes tool calls are nested under the content.parts object
if getattr(response, "candidates", []):
for candidate in response.candidates:
if not hasattr(candidate, "content") or not getattr(
candidate.content, "parts", []
):
continue
for part in candidate.content.parts:
if getattr(part, "function_call", None):
function_call = part.function_call
tool_call = {
"name": getattr(function_call, "name", None),
"type": "function_call",
}
# Extract arguments if available
if getattr(function_call, "args", None):
tool_call["arguments"] = safe_serialize(function_call.args)
tool_calls.append(tool_call)
# Extract from automatic_function_calling_history
# This is the history of tool calls made by the model
if getattr(response, "automatic_function_calling_history", None):
for content in response.automatic_function_calling_history:
if not getattr(content, "parts", None):
continue
for part in getattr(content, "parts", []):
if getattr(part, "function_call", None):
function_call = part.function_call
tool_call = {
"name": getattr(function_call, "name", None),
"type": "function_call",
}
# Extract arguments if available
if hasattr(function_call, "args"):
tool_call["arguments"] = safe_serialize(function_call.args)
tool_calls.append(tool_call)
return tool_calls if tool_calls else None
def _capture_tool_input(args, kwargs, tool):
# type: (tuple[Any, ...], dict[str, Any], Tool) -> dict[str, Any]
"""Capture tool input from args and kwargs."""
tool_input = kwargs.copy() if kwargs else {}
# If we have positional args, try to map them to the function signature
if args:
try:
sig = inspect.signature(tool)
param_names = list(sig.parameters.keys())
for i, arg in enumerate(args):
if i < len(param_names):
tool_input[param_names[i]] = arg
except Exception:
# Fallback if we can't get the signature
tool_input["args"] = args
return tool_input
def _create_tool_span(tool_name, tool_doc):
# type: (str, Optional[str]) -> Span
"""Create a span for tool execution."""
span = sentry_sdk.start_span(
op=OP.GEN_AI_EXECUTE_TOOL,
name=f"execute_tool {tool_name}",
origin=ORIGIN,
)
span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name)
span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, "function")
if tool_doc:
span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_doc)
return span
def wrapped_tool(tool):
# type: (Tool | Callable[..., Any]) -> Tool | Callable[..., Any]
"""Wrap a tool to emit execute_tool spans when called."""
if not callable(tool):
# Not a callable function, return as-is (predefined tools)
return tool
tool_name = getattr(tool, "__name__", "unknown")
tool_doc = tool.__doc__
if inspect.iscoroutinefunction(tool):
# Async function
@wraps(tool)
async def async_wrapped(*args, **kwargs):
# type: (Any, Any) -> Any
with _create_tool_span(tool_name, tool_doc) as span:
# Capture tool input
tool_input = _capture_tool_input(args, kwargs, tool)
with capture_internal_exceptions():
span.set_data(
SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input)
)
try:
result = await tool(*args, **kwargs)
# Capture tool output
with capture_internal_exceptions():
span.set_data(
SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result)
)
return result
except Exception as exc:
_capture_exception(exc)
raise
return async_wrapped
else:
# Sync function
@wraps(tool)
def sync_wrapped(*args, **kwargs):
# type: (Any, Any) -> Any
with _create_tool_span(tool_name, tool_doc) as span:
# Capture tool input
tool_input = _capture_tool_input(args, kwargs, tool)
with capture_internal_exceptions():
span.set_data(
SPANDATA.GEN_AI_TOOL_INPUT, safe_serialize(tool_input)
)
try:
result = tool(*args, **kwargs)
# Capture tool output
with capture_internal_exceptions():
span.set_data(
SPANDATA.GEN_AI_TOOL_OUTPUT, safe_serialize(result)
)
return result
except Exception as exc:
_capture_exception(exc)
raise
return sync_wrapped
def wrapped_config_with_tools(config):
# type: (GenerateContentConfig) -> GenerateContentConfig
"""Wrap tools in config to emit execute_tool spans. Tools are sometimes passed directly as
callable functions as a part of the config object."""
if not config or not getattr(config, "tools", None):
return config
result = copy.copy(config)
result.tools = [wrapped_tool(tool) for tool in config.tools]
return result
def _extract_response_text(response):
# type: (GenerateContentResponse) -> Optional[List[str]]
"""Extract text from response candidates."""
if not response or not getattr(response, "candidates", []):
return None
texts = []
for candidate in response.candidates:
if not hasattr(candidate, "content") or not hasattr(candidate.content, "parts"):
continue
for part in candidate.content.parts:
if getattr(part, "text", None):
texts.append(part.text)
return texts if texts else None
def extract_finish_reasons(response):
# type: (GenerateContentResponse) -> Optional[List[str]]
"""Extract finish reasons from response candidates."""
if not response or not getattr(response, "candidates", []):
return None
finish_reasons = []
for candidate in response.candidates:
if getattr(candidate, "finish_reason", None):
# Convert enum value to string if necessary
reason = str(candidate.finish_reason)
# Remove enum prefix if present (e.g., "FinishReason.STOP" -> "STOP")
if "." in reason:
reason = reason.split(".")[-1]
finish_reasons.append(reason)
return finish_reasons if finish_reasons else None
def set_span_data_for_request(span, integration, model, contents, kwargs):
# type: (Span, Any, str, ContentListUnion, dict[str, Any]) -> None
"""Set span data for the request."""
span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
if kwargs.get("stream", False):
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
config = kwargs.get("config")
if config is None:
return
config = cast(GenerateContentConfig, config)
# Set input messages/prompts if PII is allowed
if should_send_default_pii() and integration.include_prompts:
messages = []
# Add system instruction if present
if hasattr(config, "system_instruction"):
system_instruction = config.system_instruction
if system_instruction:
system_text = extract_contents_text(system_instruction)
if system_text:
messages.append({"role": "system", "content": system_text})
# Add user message
contents_text = extract_contents_text(contents)
if contents_text:
messages.append({"role": "user", "content": contents_text})
if messages:
normalized_messages = normalize_message_roles(messages)
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
normalized_messages, span, scope
)
if messages_data is not None:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
messages_data,
unpack=False,
)
# Extract parameters directly from config (not nested under generation_config)
for param, span_key in [
("temperature", SPANDATA.GEN_AI_REQUEST_TEMPERATURE),
("top_p", SPANDATA.GEN_AI_REQUEST_TOP_P),
("top_k", SPANDATA.GEN_AI_REQUEST_TOP_K),
("max_output_tokens", SPANDATA.GEN_AI_REQUEST_MAX_TOKENS),
("presence_penalty", SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY),
("frequency_penalty", SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY),
("seed", SPANDATA.GEN_AI_REQUEST_SEED),
]:
if hasattr(config, param):
value = getattr(config, param)
if value is not None:
span.set_data(span_key, value)
# Set tools if available
if hasattr(config, "tools"):
tools = config.tools
if tools:
formatted_tools = _format_tools_for_span(tools)
if formatted_tools:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
formatted_tools,
unpack=False,
)
def set_span_data_for_response(span, integration, response):
# type: (Span, Any, GenerateContentResponse) -> None
"""Set span data for the response."""
if not response:
return
if should_send_default_pii() and integration.include_prompts:
response_texts = _extract_response_text(response)
if response_texts:
# Format as JSON string array as per documentation
span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, safe_serialize(response_texts))
tool_calls = extract_tool_calls(response)
if tool_calls:
# Tool calls should be JSON serialized
span.set_data(SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls))
finish_reasons = extract_finish_reasons(response)
if finish_reasons:
set_data_normalized(
span, SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reasons
)
if getattr(response, "response_id", None):
span.set_data(SPANDATA.GEN_AI_RESPONSE_ID, response.response_id)
if getattr(response, "model_version", None):
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response.model_version)
usage_data = extract_usage_data(response)
if usage_data["input_tokens"]:
span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage_data["input_tokens"])
if usage_data["input_tokens_cached"]:
span.set_data(
SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED,
usage_data["input_tokens_cached"],
)
if usage_data["output_tokens"]:
span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage_data["output_tokens"])
if usage_data["output_tokens_reasoning"]:
span.set_data(
SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING,
usage_data["output_tokens_reasoning"],
)
if usage_data["total_tokens"]:
span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage_data["total_tokens"])
def prepare_generate_content_args(args, kwargs):
# type: (tuple[Any, ...], dict[str, Any]) -> tuple[Any, Any, str]
"""Extract and prepare common arguments for generate_content methods."""
model = args[0] if args else kwargs.get("model", "unknown")
contents = args[1] if len(args) > 1 else kwargs.get("contents")
model_name = get_model_name(model)
config = kwargs.get("config")
wrapped_config = wrapped_config_with_tools(config)
if wrapped_config is not config:
kwargs["config"] = wrapped_config
return model, contents, model_name
def prepare_embed_content_args(args, kwargs):
# type: (tuple[Any, ...], dict[str, Any]) -> tuple[str, Any]
"""Extract and prepare common arguments for embed_content methods.
Returns:
tuple: (model_name, contents)
"""
model = kwargs.get("model", "unknown")
contents = kwargs.get("contents")
model_name = get_model_name(model)
return model_name, contents
def set_span_data_for_embed_request(span, integration, contents, kwargs):
# type: (Span, Any, Any, dict[str, Any]) -> None
"""Set span data for embedding request."""
# Include input contents if PII is allowed
if should_send_default_pii() and integration.include_prompts:
if contents:
# For embeddings, contents is typically a list of strings/texts
input_texts = []
# Handle various content formats
if isinstance(contents, str):
input_texts = [contents]
elif isinstance(contents, list):
for item in contents:
text = extract_contents_text(item)
if text:
input_texts.append(text)
else:
text = extract_contents_text(contents)
if text:
input_texts = [text]
if input_texts:
set_data_normalized(
span,
SPANDATA.GEN_AI_EMBEDDINGS_INPUT,
input_texts,
unpack=False,
)
def set_span_data_for_embed_response(span, integration, response):
# type: (Span, Any, EmbedContentResponse) -> None
"""Set span data for embedding response."""
if not response:
return
# Extract token counts from embeddings statistics (Vertex AI only)
# Each embedding has its own statistics with token_count
if hasattr(response, "embeddings") and response.embeddings:
total_tokens = 0
for embedding in response.embeddings:
if hasattr(embedding, "statistics") and embedding.statistics:
token_count = getattr(embedding.statistics, "token_count", None)
if token_count is not None:
total_tokens += int(token_count)
# Set token count if we found any
if total_tokens > 0:
span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, total_tokens)
| UsageData |
python | streamlit__streamlit | lib/tests/streamlit/web/server/oauth_authlib_routes_test.py | {
"start": 1044,
"end": 1619
} | class ____(dict):
def to_dict(self):
return self
SECRETS_MOCK = SecretMock(
{
"redirect_uri": "http://localhost:8501/oauth2callback",
"google": {
"client_id": "CLIENT_ID",
"client_secret": "CLIENT_SECRET",
"server_metadata_url": "https://accounts.google.com/.well-known/openid-configuration",
},
}
)
@patch(
"streamlit.auth_util.secrets_singleton",
MagicMock(
load_if_toml_exists=MagicMock(return_value=True),
get=MagicMock(return_value=SECRETS_MOCK),
),
)
| SecretMock |
python | huggingface__transformers | src/transformers/integrations/executorch.py | {
"start": 6637,
"end": 17536
} | class ____(torch.nn.Module):
"""
A recipe module designed to make a `PreTrainedModel` exportable with `torch.export`,
specifically for decoder-only LM with cache. This module ensures that the
exported model is compatible with further lowering and execution in `ExecuTorch`.
"""
def __init__(
self,
model: PreTrainedModel,
batch_size: int | None = None,
max_cache_len: int | None = None,
device: torch.device | None = None,
) -> None:
"""
Initializes the exportable module.
Args:
model (`PreTrainedModel`): The pretrained model to wrap.
Raises:
ValueError: If the model is configured with a unsupported cache implementation.
"""
super().__init__()
config = model.config.get_text_config()
if not hasattr(config, "use_cache") or config.use_cache is False:
raise ValueError("The model must have caching enabled to be performant.")
if hasattr(config, "layer_types") and getattr(config, "sliding_window", None) is not None:
self.model = TorchExportableModuleWithHybridCache(model, batch_size, max_cache_len, device)
else:
# If `layer_types` is not specified explicitly in the config or `sliding_window` is null,
# there is only 1 type of layers, so export will use `StaticCache` by default.
logging.info(
"Using `StaticCache` for export as `layer_types` is not specified or `sliding_window` is `null` in the config."
)
self.model = TorchExportableModuleWithStaticCache(model, batch_size, max_cache_len, device)
def forward(
self,
input_ids: torch.Tensor | None = None,
inputs_embeds: torch.Tensor | None = None,
cache_position: torch.Tensor | None = None,
) -> torch.Tensor:
"""
Forward pass of the module, which is compatible with the ExecuTorch llm runner.
Args:
input_ids (`torch.Tensor`): Tensor representing current input token id to the module.
inputs_embeds (`torch.Tensor`): Tensor representing current input embeddings to the module.
cache_position (`torch.Tensor`): Tensor representing current input position in the cache.
Returns:
torch.Tensor: Logits output from the model.
"""
return self.model.forward(
input_ids=input_ids,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
)
def export(
self,
input_ids: torch.Tensor | None = None,
inputs_embeds: torch.Tensor | None = None,
cache_position: torch.Tensor | None = None,
dynamic_shapes: dict | None = None,
strict: bool | None = None,
) -> torch.export.ExportedProgram:
"""
Export the wrapped module using `torch.export`.
Args:
input_ids (`Optional[torch.Tensor]`):
Tensor representing current input token id to the module. Must specify either this or inputs_embeds.
inputs_embeds (`Optional[torch.Tensor]`):
Tensor representing current input embeddings to the module. Must specify either this or input_ids.
cache_position (`Optional[torch.Tensor]`):
Tensor representing current input position in the cache. If not provided, a default tensor will be used.
dynamic_shapes (`Optional[dict]`):
Dynamic shapes to use for export if specified.
strict(`Optional[bool]`):
Flag to instruct `torch.export` to use `torchdynamo`.
Returns:
torch.export.ExportedProgram: The exported program that can be used for inference.
Examples:
Export with input_ids:
```python
# Prepare inputs
input_ids = torch.tensor([[1, 2, 3]], dtype=torch.long, device=model.device)
cache_position = torch.arange(input_ids.shape[-1], dtype=torch.long, device=model.device)
# Export
exported = exportable_module.export(
input_ids=input_ids,
cache_position=cache_position
)
```
Export with inputs_embeds:
```python
# Prepare embeddings
inputs_embeds = torch.randn(1, 3, 768, device=model.device) # batch_size=1, seq_len=3, hidden_size=768
cache_position = torch.arange(inputs_embeds.shape[1], dtype=torch.long, device=model.device)
# Export
exported = exportable_module.export(
inputs_embeds=inputs_embeds,
cache_position=cache_position
)
```
"""
if not (input_ids is None) ^ (inputs_embeds is None):
raise ValueError("Need to specify either input_ids or inputs_embeds.")
if hasattr(self.model, "base_model_prefix"):
base = getattr(self.model, self.model.base_model_prefix, self.model)
model_device = base.device
elif hasattr(self.model, "model"):
model_device = self.model.model.device
else:
model_device = "cpu"
logging.warning(
"TorchExportableModuleForDecoderOnlyLM.export Can't infer device from the model. Set to CPU by default."
)
if input_ids is not None:
input_kwargs = {
"input_ids": input_ids,
"cache_position": cache_position
if cache_position is not None
else torch.arange(input_ids.shape[-1], dtype=torch.long, device=model_device),
}
else: # inputs_embeds
input_kwargs = {
"inputs_embeds": inputs_embeds,
"cache_position": cache_position
if cache_position is not None
else torch.arange(inputs_embeds.shape[1], dtype=torch.long, device=model_device),
}
exported_program = torch.export.export(
self.model,
args=(),
kwargs=input_kwargs,
dynamic_shapes=dynamic_shapes,
strict=strict if strict is not None else True,
)
return exported_program
@staticmethod
def generate(
exported_program: torch.export.ExportedProgram,
tokenizer,
prompt: str,
max_new_tokens: int = 20,
do_sample: bool = False,
temperature: float = 1.0,
top_k: int = 50,
top_p: float = 1.0,
device: str = "cpu",
) -> str:
"""
Generate a sequence of tokens using an exported program.
Args:
exported_program (`torch.export.ExportedProgram`): The exported model being used for generate.
tokenizer: The tokenizer to use.
prompt (str): The input prompt.
max_new_tokens (int): Maximum number of new tokens to generate.
do_sample (bool): Whether to use sampling or greedy decoding.
temperature (float): The temperature for sampling.
top_k (int): The number of highest probability tokens to keep for top-k sampling.
top_p (float): The cumulative probability for nucleus sampling.
device (str): The device to use.
Returns:
str: The generated text.
"""
# Get the module from the exported program
exported_module = exported_program.module()
# Tokenize the prompt
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
# Initialize with the prompt
generated_ids = input_ids.clone()
# Process the prompt tokens first
curr_position = 0
for i in range(input_ids.shape[1]):
# Process one token at a time
curr_input_ids = input_ids[:, i : i + 1]
curr_cache_position = torch.tensor([curr_position], dtype=torch.long, device=device)
# Forward pass
_ = exported_module(input_ids=curr_input_ids, cache_position=curr_cache_position)
curr_position += 1
# Generate new tokens
for _ in range(max_new_tokens):
# Get the last token as input
curr_input_ids = generated_ids[:, -1:]
curr_cache_position = torch.tensor([curr_position], dtype=torch.long, device=device)
# Forward pass to get next token logits
outputs = exported_module(input_ids=curr_input_ids, cache_position=curr_cache_position)
# Get the next token ID
if do_sample:
# Apply temperature
if temperature > 0:
logits = outputs / temperature
else:
logits = outputs
# Apply top-k filtering
if top_k > 0:
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = float("-inf")
# Apply top-p (nucleus) filtering
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(-1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = float("-inf")
# Sample from the filtered distribution
probs = torch.softmax(logits, dim=-1)
next_token_id = torch.multinomial(probs, num_samples=1)
else:
# Greedy decoding
next_token_id = outputs.argmax(dim=-1, keepdim=True)
# Ensure next_token_id has the right shape before concatenation
if next_token_id.dim() > 2:
next_token_id = next_token_id.squeeze(-1)
# Append to the generated sequence
generated_ids = torch.cat([generated_ids, next_token_id], dim=-1)
curr_position += 1
# Stop if we generate an EOS token
if next_token_id.item() == tokenizer.eos_token_id:
break
# Decode the generated text
return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
| TorchExportableModuleForDecoderOnlyLM |
python | huggingface__transformers | src/transformers/models/swiftformer/modeling_swiftformer.py | {
"start": 7800,
"end": 8950
} | class ____(nn.Module):
"""
Local Representation module for SwiftFormer that is implemented by 3*3 depth-wise and point-wise convolutions.
Input: tensor of shape `[batch_size, channels, height, width]`
Output: tensor of shape `[batch_size, channels, height, width]`
"""
def __init__(self, config: SwiftFormerConfig, dim: int):
super().__init__()
self.depth_wise_conv = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim)
self.norm = nn.BatchNorm2d(dim, eps=config.batch_norm_eps)
self.point_wise_conv1 = nn.Conv2d(dim, dim, kernel_size=1)
self.act = nn.GELU()
self.point_wise_conv2 = nn.Conv2d(dim, dim, kernel_size=1)
self.drop_path = nn.Identity()
self.layer_scale = nn.Parameter(torch.ones(dim).unsqueeze(-1).unsqueeze(-1), requires_grad=True)
def forward(self, x):
input = x
x = self.depth_wise_conv(x)
x = self.norm(x)
x = self.point_wise_conv1(x)
x = self.act(x)
x = self.point_wise_conv2(x)
x = input + self.drop_path(self.layer_scale * x)
return x
| SwiftFormerLocalRepresentation |
python | bokeh__bokeh | src/bokeh/models/tools.py | {
"start": 66078,
"end": 66464
} | class ____(EditTool):
''' A base class for polygon draw/edit tools. '''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
vertex_renderer = Nullable(GlyphRendererOf(XYGlyph), help="""
The renderer used to render the vertices of a selected line or polygon.
""")
| PolyTool |
python | django__django | tests/template_tests/filter_tests/test_capfirst.py | {
"start": 879,
"end": 1010
} | class ____(SimpleTestCase):
def test_capfirst(self):
self.assertEqual(capfirst("hello world"), "Hello world")
| FunctionTests |
python | crytic__slither | slither/detectors/reentrancy/token.py | {
"start": 1747,
"end": 3692
} | class ____(AbstractDetector):
ARGUMENT = "token-reentrancy"
HELP = "Tokens that are reentrancies unsafe"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#token-reentrant"
WIKI_TITLE = "Token reentrant"
# region wiki_description
WIKI_DESCRIPTION = """
Tokens that allow arbitrary external call on transfer/transfer (such as ERC223/ERC777) can be exploited on third
party through a reentrancy."""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract MyToken{
function transferFrom(address from, address to, uint) public{
// do some stuff
from.call("..")
// do some stuff
}
}
contract MyDefi{
function convert(ERC token) public{
// do some stuff
token.transferFrom(..)
//
}
}
```
`MyDefi` has a reentrancy, but its developers did not think transferFrom could be reentrancy.
`MyToken` is used in MyDefi. As a result an attacker can exploit the reentrancy."""
# endregion wiki_exploit_scenario
# region wiki_recommendation
WIKI_RECOMMENDATION = """Avoid to have external calls in `transfer`/`transferFrom`.
If you do, ensure your users are aware of the potential issues."""
# endregion wiki_recommendation
def _detect(self) -> List[Output]:
results = []
for contract in self.compilation_unit.contracts_derived:
vulns = _detect_token_reentrant(contract)
for function, nodes in vulns.items():
info: DETECTOR_INFO = [function, " is an reentrancy unsafe token function:\n"]
for node in nodes:
info += ["\t-", node, "\n"]
json = self.generate_result(info)
results.append(json)
return results
| TokenReentrancy |
python | getsentry__sentry | src/sentry/workflow_engine/utils/log_context.py | {
"start": 1841,
"end": 4908
} | class ____(logging.LoggerAdapter[logging.Logger]):
def debug(self, msg: str, *args: Any, **kwargs: Any) -> None: # type: ignore[override]
if _log_context_state.get().verbose:
self.info(msg, *args, **kwargs)
else:
self.log(logging.DEBUG, msg, *args, **kwargs)
@override
def process(
self, msg: str, kwargs: MutableMapping[str, Any]
) -> tuple[str, MutableMapping[str, Any]]:
context = _log_context_state.get()
if context.extra:
if "extra" in kwargs:
kwargs["extra"] = {**context.extra, **kwargs["extra"]}
else:
kwargs["extra"] = context.extra
return msg, kwargs
def get_logger(name: str) -> logging.Logger:
"""
Returns a Logger that will be annotated based on the current context.
"""
# We need to fake the type here because we want callers to be able to treat it as
# a Logger, which it is for nearly any practical purpose.
return _Adapter(logging.getLogger(name)) # type: ignore[return-value]
def set_verbose(verbose: bool) -> None:
"""
Set the verbose flag for the current context.
When set, DEBUG logs will be promoted to INFO level.
"""
_log_context_state.get().verbose = verbose
def add_extras(**extras: Any) -> None:
"""
Add extra data for the current context.
This data will be included in all log records for the current context.
"""
_log_context_state.get().extra.update(extras)
@contextmanager
def new_context(verbose: bool | None = None, **extras: Any) -> Generator[LogContextData]:
"""
Create a new sub-context.
The sub-context will be cleaned up when the context manager exits.
"""
current = _log_context_state.get()
new_extra = dict(current.extra)
new_extra.update(extras)
new_ctx = LogContextData(verbose if verbose is not None else current.verbose, new_extra)
token = _log_context_state.set(new_ctx)
try:
yield new_ctx
finally:
_log_context_state.reset(token)
T = TypeVar("T")
def root(add_context_id: bool = True) -> Callable[[Callable[..., T]], Callable[..., T]]:
"""Decorator defines a function as the root of a log context.
When it executes, it will start with a fresh context, and any
modifications to the context will be discarded when it returns.
Additionally, it will add a unique context ID to log records, allowing you
to easily filter logs to a specific context and exclude logs from
other threads or processes in the same file.
"""
def decorator(func: Callable[..., T]) -> Callable[..., T]:
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> T:
data = LogContextData()
if add_context_id:
data.extra["context_id"] = str(uuid.uuid4())
token = _log_context_state.set(data)
try:
return func(*args, **kwargs)
finally:
_log_context_state.reset(token)
return wrapper
return decorator
| _Adapter |
python | getsentry__sentry | src/sentry/models/eventattachment.py | {
"start": 1514,
"end": 1931
} | class ____:
content_type: str
size: int
sha1: str
blob_path: str | None = None
def can_store_inline(data: bytes) -> bool:
"""
Determines whether `data` can be stored inline
That is the case when it is shorter than 192 bytes,
and all the bytes are non-NULL ASCII.
"""
return len(data) < 192 and all(byte > 0x00 and byte < 0x7F for byte in data)
@region_silo_model
| PutfileResult |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 95561,
"end": 95855
} | class ____(Qwen3Attention):
def __init__(self, config: Qwen3OmniMoeCode2WavConfig, layer_idx):
super().__init__(config, layer_idx)
self.q_norm = nn.Identity()
self.k_norm = nn.Identity()
self.sliding_window = config.sliding_window
| Qwen3OmniMoeCode2WavAttention |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_project_forms.py | {
"start": 44688,
"end": 48835
} | class ____(TestCase):
def setUp(self):
self.project = get(Project)
def test_addonsconfig_form(self):
data = {
"enabled": True,
"options_root_selector": "main",
"analytics_enabled": False,
"doc_diff_enabled": False,
"filetreediff_enabled": True,
# Empty lines, lines with trailing spaces or lines full of spaces are ignored
"filetreediff_ignored_files": "user/index.html\n \n\n\n changelog.html \n/normalized.html",
"flyout_enabled": True,
"flyout_sorting": ADDONS_FLYOUT_SORTING_CALVER,
"flyout_sorting_latest_stable_at_beginning": True,
"flyout_sorting_custom_pattern": None,
"flyout_position": "bottom-left",
"hotkeys_enabled": False,
"search_enabled": False,
"linkpreviews_enabled": True,
"notifications_enabled": True,
"notifications_show_on_latest": True,
"notifications_show_on_non_stable": True,
"notifications_show_on_external": True,
}
form = AddonsConfigForm(data=data, project=self.project)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(self.project.addons.enabled, True)
self.assertEqual(self.project.addons.options_root_selector, "main")
self.assertEqual(self.project.addons.analytics_enabled, False)
self.assertEqual(self.project.addons.doc_diff_enabled, False)
self.assertEqual(self.project.addons.filetreediff_enabled, True)
self.assertEqual(
self.project.addons.filetreediff_ignored_files,
[
"user/index.html",
"changelog.html",
"normalized.html",
],
)
self.assertEqual(self.project.addons.notifications_enabled, True)
self.assertEqual(self.project.addons.notifications_show_on_latest, True)
self.assertEqual(self.project.addons.notifications_show_on_non_stable, True)
self.assertEqual(self.project.addons.notifications_show_on_external, True)
self.assertEqual(self.project.addons.flyout_enabled, True)
self.assertEqual(
self.project.addons.flyout_sorting,
ADDONS_FLYOUT_SORTING_CALVER,
)
self.assertEqual(
self.project.addons.flyout_sorting_latest_stable_at_beginning,
True,
)
self.assertEqual(self.project.addons.flyout_sorting_custom_pattern, None)
self.assertEqual(self.project.addons.flyout_position, "bottom-left")
self.assertEqual(self.project.addons.hotkeys_enabled, False)
self.assertEqual(self.project.addons.search_enabled, False)
self.assertEqual(self.project.addons.linkpreviews_enabled, True)
self.assertEqual(self.project.addons.notifications_enabled, True)
self.assertEqual(self.project.addons.notifications_show_on_latest, True)
self.assertEqual(self.project.addons.notifications_show_on_non_stable, True)
self.assertEqual(self.project.addons.notifications_show_on_external, True)
def test_addonsconfig_form_invalid_sorting_custom_pattern(self):
data = {
"enabled": True,
"analytics_enabled": False,
"doc_diff_enabled": False,
"flyout_enabled": True,
"flyout_sorting": ADDONS_FLYOUT_SORTING_CUSTOM_PATTERN,
"flyout_sorting_latest_stable_at_beginning": True,
"flyout_sorting_custom_pattern": None,
"hotkeys_enabled": False,
"search_enabled": False,
"notifications_enabled": True,
"notifications_show_on_latest": True,
"notifications_show_on_non_stable": True,
"notifications_show_on_external": True,
}
form = AddonsConfigForm(data=data, project=self.project)
self.assertFalse(form.is_valid())
self.assertEqual(
"The flyout sorting custom pattern is required when selecting a custom pattern.",
form.errors["__all__"][0],
)
| TestAddonsConfigForm |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/tests/test_incremental.py | {
"start": 6013,
"end": 27302
} | class ____(BaseTest):
async def test_two_sequential_reads(
self,
connector_config: SecretDict,
configured_catalog_for_incremental: ConfiguredAirbyteCatalog,
docker_runner: ConnectorRunner,
client_container: Optional[dagger.Container],
client_container_config: Optional[ClientContainerConfig],
detailed_logger: Logger,
):
"""
This test makes two calls to the read method and verifies that the records returned are different.
Important!
Assert only that the reads are different. Nothing else.
This is because there is only a small subset of assertions we can make
in the absense of enforcing that all connectors return 3 or more state messages
during the first read.
To learn more: https://github.com/airbytehq/airbyte/issues/29926
"""
output_1 = await docker_runner.call_read(connector_config, configured_catalog_for_incremental)
records_1 = filter_output(output_1, type_=Type.RECORD)
states_1 = filter_output(output_1, type_=Type.STATE)
assert states_1, "First Read should produce at least one state"
assert records_1, "First Read should produce at least one record"
# For legacy state format, the final state message contains the final state of all streams. For per-stream state format,
# the complete final state of streams must be assembled by going through all prior state messages received
if is_per_stream_state(states_1[-1]):
latest_state = construct_latest_state_from_messages(states_1)
state_input = []
for stream_name, stream_state in latest_state.items():
stream_descriptor = {"name": stream_name}
if "stream_namespace" in stream_state:
stream_descriptor["namespace"] = stream_state["stream_namespace"]
state_input.append(
{
"type": "STREAM",
"stream": {"stream_descriptor": stream_descriptor, "stream_state": stream_state},
}
)
elif is_global_state(states_1[-1]):
# TODO: DB sources to fill out this case
state_input = states_1[-1].state.data
else:
state_input = states_1[-1].state.data
# READ #2
if client_container and client_container_config.between_syncs_command:
detailed_logger.info(
await client_container.with_env_variable("CACHEBUSTER", str(uuid4()))
.with_exec(client_container_config.between_syncs_command)
.stdout()
)
output_2 = await docker_runner.call_read_with_state(connector_config, configured_catalog_for_incremental, state=state_input)
records_2 = filter_output(output_2, type_=Type.RECORD)
diff = naive_diff_records(records_1, records_2)
assert (
diff
), f"Records should change between reads but did not.\n\n records_1: {records_1} \n\n state: {state_input} \n\n records_2: {records_2} \n\n diff: {diff}"
async def test_read_sequential_slices(
self,
inputs: IncrementalConfig,
connector_config,
configured_catalog_for_incremental,
docker_runner: ConnectorRunner,
client_container: Optional[dagger.Container],
client_container_config: Optional[ClientContainerConfig],
detailed_logger: Logger,
):
"""
Incremental test that makes calls to the read method without a state checkpoint. Then we partition the results by stream and
slice checkpoints.
Then we make additional read method calls using the state message and verify the correctness of the
messages in the response.
"""
if inputs.skip_comprehensive_incremental_tests:
pytest.skip("Skipping new incremental test based on acceptance-test-config.yml")
return
for stream in configured_catalog_for_incremental.streams:
configured_catalog_for_incremental_per_stream = ConfiguredAirbyteCatalog(streams=[stream])
output_1 = await docker_runner.call_read(connector_config, configured_catalog_for_incremental_per_stream)
records_1 = filter_output(output_1, type_=Type.RECORD)
# If the output of a full read is empty, there is no reason to iterate over its state.
# So, reading from any checkpoint of an empty stream will also produce nothing.
if len(records_1) == 0:
continue
states_1 = filter_output(output_1, type_=Type.STATE)
# To learn more: https://github.com/airbytehq/airbyte/issues/29926
if len(states_1) == 0:
continue
states_with_expected_record_count = self._state_messages_selector(states_1)
if not states_with_expected_record_count:
pytest.fail(
"Unable to test because there is no suitable state checkpoint, likely due to a zero record count in the states."
)
mutating_stream_name_to_per_stream_state = dict()
for idx, state_message_data in enumerate(states_with_expected_record_count):
state_message, expected_records_count = state_message_data
assert state_message.type == Type.STATE
state_input, mutating_stream_name_to_per_stream_state = self.get_next_state_input(
state_message, mutating_stream_name_to_per_stream_state
)
output_N = await docker_runner.call_read_with_state(
connector_config, configured_catalog_for_incremental_per_stream, state=state_input
)
records_N = filter_output(output_N, type_=Type.RECORD)
assert (
# We assume that the output may be empty when we read the latest state, or it must produce some data if we are in the middle of our progression
len(records_N)
>= expected_records_count
), f"Read {idx + 1} of {len(states_with_expected_record_count)} should produce at least one record.\n\n state: {state_input} \n\n records_{idx + 1}: {records_N}"
# Temporary comment this to avoid fake failures while handling corner cases such as:
# - start date is equal to the latest state checkpoint date and date compare condition is >=, so we have two equal sets of data
# - ...
# See this issue for more details: https://github.com/airbytehq/airbyte-internal-issues/issues/8056
# diff = naive_diff_records(records_1, records_N)
# assert (
# diff
# ), f"Records for subsequent reads with new state should be different.\n\n records_1: {records_1} \n\n state: {state_input} \n\n records_{idx + 1}: {records_N} \n\n diff: {diff}"
async def test_state_with_abnormally_large_values(
self, inputs: IncrementalConfig, connector_config, configured_catalog, future_state, docker_runner: ConnectorRunner
):
configured_catalog = incremental_only_catalog(configured_catalog)
output = await docker_runner.call_read_with_state(config=connector_config, catalog=configured_catalog, state=future_state)
records = filter_output(output, type_=Type.RECORD)
states = filter_output(output, type_=Type.STATE)
assert (
not records
), f"The sync should produce no records when run with the state with abnormally large values {records[0].record.stream}"
assert states, "The sync should produce at least one STATE message"
if states and is_global_state(states[0]):
# TODO: DB sources to fill out this case. Also, can we assume all states will be global if the first one is?
pass
# TODO: else:
cursor_fields_per_stream = {
stream.stream.name: self._get_cursor_field(stream)
for stream in configured_catalog.streams
if stream.sync_mode == SyncMode.incremental
}
actual_state_cursor_values_per_stream = {
state.state.stream.stream_descriptor.name: self._get_cursor_values_from_states_by_cursor(
state.state.stream.stream_state.dict(), cursor_fields_per_stream[state.state.stream.stream_descriptor.name]
)
for state in states
}
future_state_cursor_values_per_stream = {
state["stream"]["stream_descriptor"]["name"]: self._get_cursor_values_from_states_by_cursor(
state["stream"]["stream_state"], cursor_fields_per_stream[state["stream"]["stream_descriptor"]["name"]]
)
for state in future_state
if state["stream"]["stream_descriptor"]["name"] in cursor_fields_per_stream
}
assert all(future_state_cursor_values_per_stream.values()), "Future state must be set up for all given streams"
expected_cursor_value_schema_per_stream = {
# TODO: Check if cursor value may be a nested property. If so, then should I use ._get_cursor_values_from_states ?
stream.stream.name: stream.stream.json_schema["properties"][cursor_fields_per_stream[stream.stream.name]]
for stream in configured_catalog.streams
}
future_state_formatrs_per_stream = {stream.name: stream for stream in inputs.future_state.cursor_format.streams}
for stream in configured_catalog.streams:
pattern = future_state_formatrs_per_stream.get(stream.stream.name, inputs.future_state.cursor_format).format
# All streams must be defined in the abnormal_state.json file due to the high test strictness level rule.
# However, a state may not be present in the output if a stream was unavailable during sync.
# Ideally, this should not be the case, but in reality, it often happens.
# It is not the purpose of this test to check for this, so we just skip it here.
if stream.stream.name not in actual_state_cursor_values_per_stream:
continue
actual_cursor_values = actual_state_cursor_values_per_stream[stream.stream.name]
future_state_cursor_values = future_state_cursor_values_per_stream[stream.stream.name]
expected_types = self._get_cursor_value_types(expected_cursor_value_schema_per_stream[stream.stream.name]["type"])
for actual_cursor_value, future_state_cursor_value in zip(actual_cursor_values, future_state_cursor_values):
for _type in expected_types:
if actual_cursor_value:
assert isinstance(
actual_cursor_value, _type
), f"Cursor value {actual_cursor_value} is not of type {_type}. Expected {_type}, got {type(actual_cursor_value)}"
if future_state_cursor_value:
assert isinstance(
future_state_cursor_value, _type
), f"Cursor value {future_state_cursor_value} is not of type {_type}. Expected {_type}, got {type(future_state_cursor_value)}"
if not (actual_cursor_value and future_state_cursor_value):
continue
# If the cursor value is numeric and the type check has passed, it means the format is correct
if isinstance(actual_cursor_value, (int, float)):
continue
# When the data is of string type, we need to ensure the format is correct for both cursor values
if pattern:
assert self._check_cursor_by_regex_match(
actual_cursor_value, pattern
), f"Actual cursor value {actual_cursor_value} does not match pattern: {pattern}"
assert self._check_cursor_by_regex_match(
future_state_cursor_value, pattern
), f"Future cursor value {future_state_cursor_value} does not match pattern: {pattern}"
else:
assert self._check_cursor_by_char_types(
actual_cursor_value, future_state_cursor_value
), f"Actual and future state formats do not match. Actual cursor value: {actual_cursor_value}, future cursor value: {future_state_cursor_value}"
def get_next_state_input(
self, state_message: AirbyteStateMessage, stream_name_to_per_stream_state: MutableMapping
) -> Tuple[Union[List[MutableMapping], MutableMapping], MutableMapping]:
# Including all the latest state values from previous batches, update the combined stream state
# with the current batch's stream state and then use it in the following read() request
current_state = state_message.state
if current_state and current_state.type == AirbyteStateType.STREAM:
per_stream = current_state.stream
if per_stream.stream_state:
stream_name_to_per_stream_state[per_stream.stream_descriptor.name] = (
per_stream.stream_state.dict() if per_stream.stream_state else {}
)
elif current_state and current_state.type == AirbyteStateType.GLOBAL:
# TODO: DB Sources to fill in this case
pass
state_input = [
{"type": "STREAM", "stream": {"stream_descriptor": {"name": stream_name}, "stream_state": stream_state}}
for stream_name, stream_state in stream_name_to_per_stream_state.items()
]
return state_input, stream_name_to_per_stream_state
@staticmethod
def _get_cursor_values_from_states_by_cursor(states: Union[list, dict], cursor_field: str) -> List[Union[str, int]]:
values = []
nodes_to_visit = [states]
while nodes_to_visit:
current_node = nodes_to_visit.pop()
if isinstance(current_node, dict):
for key, value in current_node.items():
# DB sources use a hardcoded field `cursor` to denote cursor value.
if key == cursor_field or ("cursor_field" in current_node and key == "cursor"):
values.append(value)
nodes_to_visit.append(value)
elif isinstance(current_node, list):
nodes_to_visit.extend(current_node)
return values
@staticmethod
def _check_cursor_by_char_types(actual_cursor: str, expected_cursor: str) -> bool:
if len(actual_cursor) != len(expected_cursor):
return False
for char1, char2 in zip(actual_cursor, expected_cursor):
if char1.isalpha() and char2.isalpha():
continue
elif char1.isdigit() and char2.isdigit():
continue
elif not char1.isalnum() and not char2.isalnum() and char1 == char2:
continue
else:
return False
return True
@staticmethod
def _check_cursor_by_regex_match(cursor: str, pattern: str) -> bool:
return bool(re.match(pattern, cursor))
@staticmethod
def _get_cursor_field(stream: ConfiguredAirbyteStream) -> Optional[str]:
cursor_field = stream.cursor_field or stream.stream.default_cursor_field
if cursor_field:
return next(iter(cursor_field))
@staticmethod
def _get_cursor_value_types(schema_type: Union[list, str]) -> List[Callable[..., Any]]:
if isinstance(schema_type, str):
schema_type = [schema_type]
types = []
for _type in schema_type:
if _type == "null":
continue
if _type not in SCHEMA_TYPES_MAPPING:
pytest.fail(f"Unsupported type: {_type}. Update SCHEMA_TYPES_MAPPING with the {_type} and its corresponding function")
types.append(SCHEMA_TYPES_MAPPING[_type])
return types
@staticmethod
def _get_state(airbyte_message: AirbyteMessage) -> AirbyteStateMessage:
if not airbyte_message.state.stream:
return airbyte_message.state
return airbyte_message.state.stream.stream_state
@staticmethod
def _get_record_count(airbyte_message: AirbyteMessage) -> float:
return airbyte_message.state.sourceStats.recordCount
def _get_unique_state_messages_with_record_count(self, states: List[AirbyteMessage]) -> List[Tuple[AirbyteMessage, float]]:
"""
Validates a list of state messages to ensure that consecutive messages with the same stream state are represented by only the first message, while subsequent duplicates are ignored.
"""
if len(states) <= 1:
return [(state, 0.0) for state in states if self._get_record_count(state)]
current_idx = 0
unique_state_messages = []
# Iterate through the list of state messages
while current_idx < len(states) - 1:
next_idx = current_idx + 1
# Check if consecutive messages have the same stream state
while self._get_state(states[current_idx]) == self._get_state(states[next_idx]) and next_idx < len(states) - 1:
next_idx += 1
states[current_idx].state.sourceStats = AirbyteStateStats(
recordCount=sum(map(self._get_record_count, states[current_idx:next_idx]))
)
# Append the first message with a unique stream state to the result list
unique_state_messages.append(states[current_idx])
# If the last message has a different stream state than the previous one, append it to the result list
if next_idx == len(states) - 1 and self._get_state(states[current_idx]) != self._get_state(states[next_idx]):
unique_state_messages.append(states[next_idx])
current_idx = next_idx
# Drop all states with a record count of 0.0
unique_non_zero_state_messages = list(filter(self._get_record_count, unique_state_messages))
total_record_count = sum(map(self._get_record_count, unique_non_zero_state_messages))
# Calculates the expected record count per state based on the total record count and distribution across states.
# The expected record count is the number of records we expect to receive when applying a specific state checkpoint.
unique_non_zero_state_messages_with_record_count = zip(
unique_non_zero_state_messages,
[
total_record_count - sum(map(self._get_record_count, unique_non_zero_state_messages[: idx + 1]))
for idx in range(len(unique_non_zero_state_messages))
],
)
return list(unique_non_zero_state_messages_with_record_count)
def _states_with_expected_record_count_batch_selector(
self, unique_state_messages_with_record_count: List[Tuple[AirbyteMessage, float]]
) -> List[Tuple[AirbyteMessage, float]]:
# Important!
# There is only a small subset of assertions we can make
# in the absense of enforcing that all connectors return 3 or more state messages
# during the first read.
if len(unique_state_messages_with_record_count) < 3:
return unique_state_messages_with_record_count[-1:]
# To avoid spamming APIs we only test a fraction of batches (4 or 5 states by default)
sample_rate = (len(unique_state_messages_with_record_count) // MIN_BATCHES_TO_TEST) or 1
states_with_expected_record_count_batch = []
for idx, state_message_data in enumerate(unique_state_messages_with_record_count):
# if first state message, skip
# this is because we cannot assert if the first state message will result in new records
# as in this case it is possible for a connector to return an empty state message when it first starts.
# e.g. if the connector decides it wants to let the caller know that it has started with an empty state.
if idx == 0:
continue
# if batching required, and not a sample, skip
if idx % sample_rate != 0:
continue
# if last state message, skip
# this is because we cannot assert if the last state message will result in new records
# as in this case it is possible for a connector to return a previous state message.
# e.g. if the connector is using pagination and the last page is only partially full
if idx == len(unique_state_messages_with_record_count) - 1:
continue
states_with_expected_record_count_batch.append(state_message_data)
return states_with_expected_record_count_batch
def _state_messages_selector(self, state_messages: List[AirbyteMessage]) -> List[Tuple[AirbyteMessage, float]]:
unique_state_messages_with_record_count = self._get_unique_state_messages_with_record_count(state_messages)
return self._states_with_expected_record_count_batch_selector(unique_state_messages_with_record_count)
| TestIncremental |
python | ray-project__ray | rllib/utils/metrics/window_stat.py | {
"start": 87,
"end": 2553
} | class ____:
"""Handles/stores incoming dataset and provides window-based statistics.
.. testcode::
:skipif: True
win_stats = WindowStat("level", 3)
win_stats.push(5.0)
win_stats.push(7.0)
win_stats.push(7.0)
win_stats.push(10.0)
# Expect 8.0 as the mean of the last 3 values: (7+7+10)/3=8.0
print(win_stats.mean())
.. testoutput::
8.0
"""
def __init__(self, name: str, n: int):
"""Initializes a WindowStat instance.
Args:
name: The name of the stats to collect and return stats for.
n: The window size. Statistics will be computed for the last n
items received from the stream.
"""
# The window-size.
self.window_size = n
# The name of the data (used for `self.stats()`).
self.name = name
# List of items to do calculations over (len=self.n).
self.items = [None] * self.window_size
# The current index to insert the next item into `self.items`.
self.idx = 0
# How many items have been added over the lifetime of this object.
self.count = 0
def push(self, obj) -> None:
"""Pushes a new value/object into the data buffer."""
# Insert object at current index.
self.items[self.idx] = obj
# Increase insertion index by 1.
self.idx += 1
# Increase lifetime count by 1.
self.count += 1
# Fix index in case of rollover.
self.idx %= len(self.items)
def mean(self) -> float:
"""Returns the (NaN-)mean of the last `self.window_size` items."""
return float(np.nanmean(self.items[: self.count]))
def std(self) -> float:
"""Returns the (NaN)-stddev of the last `self.window_size` items."""
return float(np.nanstd(self.items[: self.count]))
def quantiles(self) -> np.ndarray:
"""Returns ndarray with 0, 10, 50, 90, and 100 percentiles."""
if not self.count:
return np.ndarray([], dtype=np.float32)
else:
return np.nanpercentile(
self.items[: self.count], [0, 10, 50, 90, 100]
).tolist()
def stats(self):
return {
self.name + "_count": int(self.count),
self.name + "_mean": self.mean(),
self.name + "_std": self.std(),
self.name + "_quantiles": self.quantiles(),
}
| WindowStat |
python | eth-brownie__brownie | brownie/utils/docopt.py | {
"start": 4090,
"end": 4193
} | class ____(Exception):
"""Error in construction of usage-message by developer."""
| DocoptLanguageError |
python | django__django | django/db/backends/ddl_references.py | {
"start": 4311,
"end": 5815
} | class ____(TableColumns):
"""Hold a reference to a foreign key name."""
def __init__(
self,
from_table,
from_columns,
to_table,
to_columns,
suffix_template,
create_fk_name,
):
self.to_reference = TableColumns(to_table, to_columns)
self.suffix_template = suffix_template
self.create_fk_name = create_fk_name
super().__init__(
from_table,
from_columns,
)
def references_table(self, table):
return super().references_table(table) or self.to_reference.references_table(
table
)
def references_column(self, table, column):
return super().references_column(
table, column
) or self.to_reference.references_column(table, column)
def rename_table_references(self, old_table, new_table):
super().rename_table_references(old_table, new_table)
self.to_reference.rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
super().rename_column_references(table, old_column, new_column)
self.to_reference.rename_column_references(table, old_column, new_column)
def __str__(self):
suffix = self.suffix_template % {
"to_table": self.to_reference.table,
"to_column": self.to_reference.columns[0],
}
return self.create_fk_name(self.table, self.columns, suffix)
| ForeignKeyName |
python | sqlalchemy__sqlalchemy | test/ext/asyncio/test_session.py | {
"start": 28913,
"end": 34149
} | class ____(AsyncFixture):
@async_test
async def test_get_connection_engine_bound(self, async_session):
c1 = await async_session.connection()
c2 = await async_session.connection()
is_(c1, c2)
is_(c1.engine, c2.engine)
@async_test
async def test_get_connection_kws(self, async_session):
c1 = await async_session.connection(
execution_options={"isolation_level": "AUTOCOMMIT"}
)
eq_(
c1.sync_connection._execution_options,
{"isolation_level": "AUTOCOMMIT"},
)
@async_test
async def test_get_connection_connection_bound(self, async_engine):
async with async_engine.begin() as conn:
async_session = AsyncSession(conn)
c1 = await async_session.connection()
is_(c1, conn)
is_(c1.engine, conn.engine)
@async_test
async def test_get_transaction(self, async_session):
is_(async_session.get_transaction(), None)
is_(async_session.get_nested_transaction(), None)
t1 = await async_session.begin()
is_(async_session.get_transaction(), t1)
is_(async_session.get_nested_transaction(), None)
n1 = await async_session.begin_nested()
is_(async_session.get_transaction(), t1)
is_(async_session.get_nested_transaction(), n1)
await n1.commit()
is_(async_session.get_transaction(), t1)
is_(async_session.get_nested_transaction(), None)
await t1.commit()
is_(async_session.get_transaction(), None)
is_(async_session.get_nested_transaction(), None)
@async_test
async def test_get_transaction_gced(self, async_session):
"""test #12471
this tests that the AsyncSessionTransaction is regenerated if
we don't have any reference to it beforehand.
"""
is_(async_session.get_transaction(), None)
is_(async_session.get_nested_transaction(), None)
await async_session.begin()
trans = async_session.get_transaction()
is_not(trans, None)
is_(trans.session, async_session)
is_false(trans.nested)
is_(
trans.sync_transaction,
async_session.sync_session.get_transaction(),
)
await async_session.begin_nested()
nested = async_session.get_nested_transaction()
is_not(nested, None)
is_true(nested.nested)
is_(nested.session, async_session)
is_(
nested.sync_transaction,
async_session.sync_session.get_nested_transaction(),
)
@async_test
async def test_async_object_session(self, async_engine):
User = self.classes.User
s1 = AsyncSession(async_engine)
s2 = AsyncSession(async_engine)
u1 = await s1.get(User, 7)
u2 = User(name="n1")
s2.add(u2)
u3 = User(name="n2")
is_(async_object_session(u1), s1)
is_(async_object_session(u2), s2)
is_(async_object_session(u3), None)
await s2.reset()
is_(async_object_session(u2), None)
s2.add(u2)
is_(async_object_session(u2), s2)
await s2.close()
is_(async_object_session(u2), None)
@async_test
async def test_async_object_session_custom(self, async_engine):
User = self.classes.User
class MyCustomAsync(AsyncSession):
pass
s1 = MyCustomAsync(async_engine)
u1 = await s1.get(User, 7)
assert isinstance(async_object_session(u1), MyCustomAsync)
@testing.requires.predictable_gc
@async_test
async def test_async_object_session_del(self, async_engine):
User = self.classes.User
s1 = AsyncSession(async_engine)
u1 = await s1.get(User, 7)
is_(async_object_session(u1), s1)
await s1.rollback()
del s1
is_(async_object_session(u1), None)
@async_test
async def test_inspect_session(self, async_engine):
User = self.classes.User
s1 = AsyncSession(async_engine)
s2 = AsyncSession(async_engine)
u1 = await s1.get(User, 7)
u2 = User(name="n1")
s2.add(u2)
u3 = User(name="n2")
is_(inspect(u1).async_session, s1)
is_(inspect(u2).async_session, s2)
is_(inspect(u3).async_session, None)
def test_inspect_session_no_asyncio_used(self):
User = self.classes.User
s1 = Session(testing.db)
u1 = s1.get(User, 7)
is_(inspect(u1).async_session, None)
def test_inspect_session_no_asyncio_imported(self):
with mock.patch("sqlalchemy.orm.state._async_provider", None):
User = self.classes.User
s1 = Session(testing.db)
u1 = s1.get(User, 7)
is_(inspect(u1).async_session, None)
@testing.requires.predictable_gc
def test_gc(self, async_engine):
ReversibleProxy._proxy_objects.clear()
eq_(len(ReversibleProxy._proxy_objects), 0)
async_session = AsyncSession(async_engine)
eq_(len(ReversibleProxy._proxy_objects), 1)
del async_session
eq_(len(ReversibleProxy._proxy_objects), 0)
| AsyncProxyTest |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/test_scalar_compat.py | {
"start": 516,
"end": 16187
} | class ____:
def test_dti_no_millisecond_field(self):
msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex.millisecond
msg = "'DatetimeIndex' object has no attribute 'millisecond'"
with pytest.raises(AttributeError, match=msg):
DatetimeIndex([]).millisecond
def test_dti_time(self):
rng = date_range("1/1/2000", freq="12min", periods=10)
result = Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range("1/1/2000", freq="12h", periods=10)
result = Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_dti_date2(self, dtype):
# Regression test for GH#21230
expected = np.array([date(2018, 6, 4), NaT])
index = DatetimeIndex(["2018-06-04 10:00:00", NaT], dtype=dtype)
result = index.date
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_dti_time2(self, dtype):
# Regression test for GH#21267
expected = np.array([time(10, 20, 30), NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", NaT], dtype=dtype)
result = index.time
tm.assert_numpy_array_equal(result, expected)
def test_dti_timetz(self, tz_naive_fixture):
# GH#21358
tz = timezones.maybe_get_tz(tz_naive_fixture)
expected = np.array([time(10, 20, 30, tzinfo=tz), NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", NaT], tz=tz)
result = index.timetz
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"field",
[
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
],
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = date_range("2020-01-01", periods=10)
expected = getattr(idx, field)[-1]
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_nanosecond(self):
dti = DatetimeIndex(np.arange(10))
expected = Index(np.arange(10, dtype=np.int32))
tm.assert_index_equal(dti.nanosecond, expected)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_hour_tzaware(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
assert (rng.hour == 0).all()
# a more unusual time zone, GH#1946
dr = date_range(
"2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan"
)
expected = Index(np.arange(10, dtype=np.int32))
tm.assert_index_equal(dr.hour, expected)
# GH#12806
# error: Unsupported operand types for + ("List[None]" and "List[str]")
@pytest.mark.parametrize(
"time_locale",
[None] + tm.get_locales(), # type: ignore[operator]
)
def test_day_name_month_name(self, time_locale):
# Test Monday -> Sunday and January -> December, in that sequence
if time_locale is None:
# If the time_locale is None, day-name and month_name should
# return the english attributes
expected_days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
expected_months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
else:
with tm.set_locale(time_locale, locale.LC_TIME):
expected_days = calendar.day_name[:]
expected_months = calendar.month_name[1:]
# GH#11128
dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)
english_days = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday",
]
for day, name, eng_name in zip(range(4, 11), expected_days, english_days):
name = name.capitalize()
assert dti.day_name(locale=time_locale)[day] == name
assert dti.day_name(locale=None)[day] == eng_name
ts = Timestamp(datetime(2016, 4, day))
assert ts.day_name(locale=time_locale) == name
dti = dti.append(DatetimeIndex([NaT]))
assert np.isnan(dti.day_name(locale=time_locale)[-1])
ts = Timestamp(NaT)
assert np.isnan(ts.day_name(locale=time_locale))
# GH#12805
dti = date_range(freq="ME", start="2012", end="2013")
result = dti.month_name(locale=time_locale)
expected = Index([month.capitalize() for month in expected_months])
# work around different normalization schemes GH#22342
result = result.str.normalize("NFD")
expected = expected.str.normalize("NFD")
tm.assert_index_equal(result, expected)
for item, expected in zip(dti, expected_months):
result = item.month_name(locale=time_locale)
expected = expected.capitalize()
result = unicodedata.normalize("NFD", result)
expected = unicodedata.normalize("NFD", result)
assert result == expected
dti = dti.append(DatetimeIndex([NaT]))
assert np.isnan(dti.month_name(locale=time_locale)[-1])
def test_dti_week(self):
# GH#6538: Check that DatetimeIndex and its TimeStamp elements
# return the same weekofyear accessor close to new year w/ tz
dates = ["2013/12/29", "2013/12/30", "2013/12/31"]
dates = DatetimeIndex(dates, tz="Europe/Brussels")
expected = [52, 1, 1]
assert dates.isocalendar().week.tolist() == expected
assert [d.weekofyear for d in dates] == expected
@pytest.mark.parametrize("tz", [None, "US/Eastern"])
def test_dti_fields(self, tz):
# GH#13303
dti = date_range(
freq="D", start=datetime(1998, 1, 1), periods=365, tz=tz, unit="ns"
)
assert dti.year[0] == 1998
assert dti.month[0] == 1
assert dti.day[0] == 1
assert dti.hour[0] == 0
assert dti.minute[0] == 0
assert dti.second[0] == 0
assert dti.microsecond[0] == 0
assert dti.dayofweek[0] == 3
assert dti.dayofyear[0] == 1
assert dti.dayofyear[120] == 121
assert dti.isocalendar().week.iloc[0] == 1
assert dti.isocalendar().week.iloc[120] == 18
assert dti.quarter[0] == 1
assert dti.quarter[120] == 2
assert dti.days_in_month[0] == 31
assert dti.days_in_month[90] == 30
assert dti.is_month_start[0]
assert not dti.is_month_start[1]
assert dti.is_month_start[31]
assert dti.is_quarter_start[0]
assert dti.is_quarter_start[90]
assert dti.is_year_start[0]
assert not dti.is_year_start[364]
assert not dti.is_month_end[0]
assert dti.is_month_end[30]
assert not dti.is_month_end[31]
assert dti.is_month_end[364]
assert not dti.is_quarter_end[0]
assert not dti.is_quarter_end[30]
assert dti.is_quarter_end[89]
assert dti.is_quarter_end[364]
assert not dti.is_year_end[0]
assert dti.is_year_end[364]
assert len(dti.year) == 365
assert len(dti.month) == 365
assert len(dti.day) == 365
assert len(dti.hour) == 365
assert len(dti.minute) == 365
assert len(dti.second) == 365
assert len(dti.microsecond) == 365
assert len(dti.dayofweek) == 365
assert len(dti.dayofyear) == 365
assert len(dti.isocalendar()) == 365
assert len(dti.quarter) == 365
assert len(dti.is_month_start) == 365
assert len(dti.is_month_end) == 365
assert len(dti.is_quarter_start) == 365
assert len(dti.is_quarter_end) == 365
assert len(dti.is_year_start) == 365
assert len(dti.is_year_end) == 365
dti.name = "name"
# non boolean accessors -> return Index
for accessor in DatetimeArray._field_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, Index)
assert res.name == "name"
# boolean accessors -> return array
for accessor in DatetimeArray._bool_ops:
res = getattr(dti, accessor)
assert len(res) == 365
assert isinstance(res, np.ndarray)
# test boolean indexing
res = dti[dti.is_quarter_start]
exp = dti[[0, 90, 181, 273]]
tm.assert_index_equal(res, exp)
res = dti[dti.is_leap_year]
exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name").as_unit("ns")
tm.assert_index_equal(res, exp)
def test_dti_is_year_quarter_start(self):
dti = date_range(freq="BQE-FEB", start=datetime(1998, 1, 1), periods=4)
assert sum(dti.is_quarter_start) == 0
assert sum(dti.is_quarter_end) == 4
assert sum(dti.is_year_start) == 0
assert sum(dti.is_year_end) == 1
def test_dti_is_month_start(self):
dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])
assert dti.is_month_start[0] == 1
def test_dti_is_month_start_custom(self):
# Ensure is_start/end accessors throw ValueError for CustomBusinessDay,
bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")
dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)
msg = "Custom business days is not supported by is_month_start"
with pytest.raises(ValueError, match=msg):
dti.is_month_start
@pytest.mark.parametrize(
"timestamp, freq, periods, expected_values",
[
("2017-12-01", "MS", 3, np.array([False, True, False])),
("2017-12-01", "QS", 3, np.array([True, False, False])),
("2017-12-01", "YS", 3, np.array([True, True, True])),
],
)
def test_dti_dr_is_year_start(self, timestamp, freq, periods, expected_values):
# GH57377
result = date_range(timestamp, freq=freq, periods=periods).is_year_start
tm.assert_numpy_array_equal(result, expected_values)
@pytest.mark.parametrize(
"timestamp, freq, periods, expected_values",
[
("2017-12-01", "ME", 3, np.array([True, False, False])),
("2017-12-01", "QE", 3, np.array([True, False, False])),
("2017-12-01", "YE", 3, np.array([True, True, True])),
],
)
def test_dti_dr_is_year_end(self, timestamp, freq, periods, expected_values):
# GH57377
result = date_range(timestamp, freq=freq, periods=periods).is_year_end
tm.assert_numpy_array_equal(result, expected_values)
@pytest.mark.parametrize(
"timestamp, freq, periods, expected_values",
[
("2017-12-01", "MS", 3, np.array([False, True, False])),
("2017-12-01", "QS", 3, np.array([True, True, True])),
("2017-12-01", "YS", 3, np.array([True, True, True])),
],
)
def test_dti_dr_is_quarter_start(self, timestamp, freq, periods, expected_values):
# GH57377
result = date_range(timestamp, freq=freq, periods=periods).is_quarter_start
tm.assert_numpy_array_equal(result, expected_values)
@pytest.mark.parametrize(
"timestamp, freq, periods, expected_values",
[
("2017-12-01", "ME", 3, np.array([True, False, False])),
("2017-12-01", "QE", 3, np.array([True, True, True])),
("2017-12-01", "YE", 3, np.array([True, True, True])),
],
)
def test_dti_dr_is_quarter_end(self, timestamp, freq, periods, expected_values):
# GH57377
result = date_range(timestamp, freq=freq, periods=periods).is_quarter_end
tm.assert_numpy_array_equal(result, expected_values)
@pytest.mark.parametrize(
"timestamp, freq, periods, expected_values",
[
("2017-12-01", "MS", 3, np.array([True, True, True])),
("2017-12-01", "QS", 3, np.array([True, True, True])),
("2017-12-01", "YS", 3, np.array([True, True, True])),
],
)
def test_dti_dr_is_month_start(self, timestamp, freq, periods, expected_values):
# GH57377
result = date_range(timestamp, freq=freq, periods=periods).is_month_start
tm.assert_numpy_array_equal(result, expected_values)
@pytest.mark.parametrize(
"timestamp, freq, periods, expected_values",
[
("2017-12-01", "ME", 3, np.array([True, True, True])),
("2017-12-01", "QE", 3, np.array([True, True, True])),
("2017-12-01", "YE", 3, np.array([True, True, True])),
],
)
def test_dti_dr_is_month_end(self, timestamp, freq, periods, expected_values):
# GH57377
result = date_range(timestamp, freq=freq, periods=periods).is_month_end
tm.assert_numpy_array_equal(result, expected_values)
def test_dti_is_year_quarter_start_doubledigit_freq(self):
# GH#58523
dr = date_range("2017-01-01", periods=2, freq="10YS")
assert all(dr.is_year_start)
dr = date_range("2017-01-01", periods=2, freq="10QS")
assert all(dr.is_quarter_start)
def test_dti_is_year_start_freq_custom_business_day_with_digit(self):
# GH#58664
dr = date_range("2020-01-01", periods=2, freq="2C")
msg = "Custom business days is not supported by is_year_start"
with pytest.raises(ValueError, match=msg):
dr.is_year_start
@pytest.mark.parametrize("freq", ["3BMS", offsets.BusinessMonthBegin(3)])
def test_dti_is_year_quarter_start_freq_business_month_begin(self, freq):
# GH#58729
dr = date_range("2020-01-01", periods=5, freq=freq)
result = [x.is_year_start for x in dr]
assert result == [True, False, False, False, True]
dr = date_range("2020-01-01", periods=4, freq=freq)
result = [x.is_quarter_start for x in dr]
assert all(dr.is_quarter_start)
@given(
dt=st.datetimes(min_value=datetime(1960, 1, 1), max_value=datetime(1980, 1, 1)),
n=st.integers(min_value=1, max_value=10),
freq=st.sampled_from(["MS", "QS", "YS"]),
)
@pytest.mark.slow
def test_against_scalar_parametric(freq, dt, n):
# https://github.com/pandas-dev/pandas/issues/49606
freq = f"{n}{freq}"
d = date_range(dt, periods=3, freq=freq)
result = list(d.is_year_start)
expected = [x.is_year_start for x in d]
assert result == expected
| TestDatetimeIndexOps |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 8053,
"end": 8601
} | class ____(HTTPError, httplib_IncompleteRead):
"""Invalid chunk length in a chunked response."""
def __init__(self, response: HTTPResponse, length: bytes) -> None:
self.partial: int = response.tell() # type: ignore[assignment]
self.expected: int | None = response.length_remaining
self.response = response
self.length = length
def __repr__(self) -> str:
return "InvalidChunkLength(got length %r, %i bytes read)" % (
self.length,
self.partial,
)
| InvalidChunkLength |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/asset_daemon.py | {
"start": 14283,
"end": 58282
} | class ____(DagsterDaemon):
def __init__(self, settings: Mapping[str, Any], pre_sensor_interval_seconds: int):
self._pre_sensor_interval_seconds = pre_sensor_interval_seconds
self._last_pre_sensor_submit_time = None
self._checked_migrations = False
self._settings = settings
super().__init__()
@classmethod
def daemon_type(cls) -> str:
return "ASSET"
def instrument_elapsed(
self, sensor: Optional[RemoteSensor], elapsed: Optional[float], min_interval: int
) -> None:
pass
def _get_print_sensor_name(self, sensor: Optional[RemoteSensor]) -> str:
if not sensor:
return ""
repo_origin = sensor.get_remote_origin().repository_origin
repo_name = repo_origin.repository_name
location_name = repo_origin.code_location_origin.location_name
repo_name = (
location_name
if repo_name == SINGLETON_REPOSITORY_NAME
else f"{repo_name}@{location_name}"
)
return f" for {sensor.name} in {repo_name}"
def core_loop(
self,
workspace_process_context: IWorkspaceProcessContext,
shutdown_event: threading.Event,
) -> DaemonIterator:
instance: DagsterInstance = workspace_process_context.instance
schedule_storage = check.not_none(
instance.schedule_storage,
"Auto materialization requires schedule storage to be configured",
)
if not schedule_storage.supports_auto_materialize_asset_evaluations:
self._logger.warning(
"Auto materialize evaluations are not getting logged. Run `dagster instance"
" migrate` to enable."
)
amp_tick_futures: dict[Optional[str], Future] = {}
threadpool_executor = None
with ExitStack() as stack:
if self._settings.get("use_threads"):
threadpool_executor = stack.enter_context(
InheritContextThreadPoolExecutor(
max_workers=self._settings.get("num_workers"),
thread_name_prefix="asset_daemon_worker",
)
)
while True:
start_time = get_current_timestamp()
yield SpanMarker.START_SPAN
try:
self._run_iteration_impl(
workspace_process_context,
threadpool_executor=threadpool_executor,
amp_tick_futures=amp_tick_futures,
debug_crash_flags={},
)
except Exception:
error_info = DaemonErrorCapture.process_exception(
exc_info=sys.exc_info(),
logger=self._logger,
log_message="AssetDaemon caught an error",
)
yield error_info
yield SpanMarker.END_SPAN
end_time = get_current_timestamp()
loop_duration = end_time - start_time
sleep_time = max(0, MIN_INTERVAL_LOOP_SECONDS - loop_duration)
shutdown_event.wait(sleep_time)
yield None
def _run_iteration_impl(
self,
workspace_process_context: IWorkspaceProcessContext,
threadpool_executor: Optional[ThreadPoolExecutor],
amp_tick_futures: dict[Optional[str], Future],
debug_crash_flags: SingleInstigatorDebugCrashFlags,
):
instance: DagsterInstance = workspace_process_context.instance
use_auto_materialize_sensors = instance.auto_materialize_use_sensors
if get_auto_materialize_paused(instance) and not use_auto_materialize_sensors:
return
with workspace_process_context.create_request_context() as workspace_request_context:
self._run_iteration_impl_with_request_context(
workspace_process_context,
workspace_request_context,
instance,
threadpool_executor,
amp_tick_futures,
use_auto_materialize_sensors,
debug_crash_flags,
)
def _run_iteration_impl_with_request_context(
self,
workspace_process_context: IWorkspaceProcessContext,
workspace_request_context: BaseWorkspaceRequestContext,
instance: DagsterInstance,
threadpool_executor: Optional[ThreadPoolExecutor],
amp_tick_futures: dict[Optional[str], Future],
use_auto_materialize_sensors: bool,
debug_crash_flags: SingleInstigatorDebugCrashFlags,
):
now = get_current_timestamp()
sensors_and_repos: Sequence[tuple[Optional[RemoteSensor], Optional[RemoteRepository]]] = []
if use_auto_materialize_sensors:
current_workspace = {
location_entry.origin.location_name: location_entry
for location_entry in workspace_request_context.get_code_location_entries().values()
}
eligible_sensors_and_repos = []
for location_entry in current_workspace.values():
code_location = location_entry.code_location
if code_location:
for repo in code_location.get_repositories().values():
for sensor in repo.get_sensors():
if sensor.sensor_type.is_handled_by_asset_daemon:
eligible_sensors_and_repos.append((sensor, repo))
if not eligible_sensors_and_repos:
return
all_sensor_states = {
sensor_state.selector_id: sensor_state
for sensor_state in instance.all_instigator_state(
instigator_type=InstigatorType.SENSOR
)
}
if not self._checked_migrations:
if not get_has_migrated_to_sensors(instance):
# Do a one-time migration to create the cursors for each sensor, based on the
# existing cursor for the legacy AMP tick
asset_graph = workspace_request_context.asset_graph
pre_sensor_cursor = _get_pre_sensor_auto_materialize_cursor(
instance, asset_graph
)
if pre_sensor_cursor != AssetDaemonCursor.empty():
self._logger.info(
"Translating legacy cursor into a new cursor for each new automation policy sensor"
)
all_sensor_states = self._create_initial_sensor_cursors_from_raw_cursor(
instance,
eligible_sensors_and_repos,
all_sensor_states,
pre_sensor_cursor,
)
set_has_migrated_to_sensors(instance)
if not get_has_migrated_sensor_names(instance):
# Do a one-time migration to copy state from sensors with the legacy default
# name to the new default name
if all_sensor_states:
self._logger.info(
"Renaming any states corresponding to the legacy default name"
)
all_sensor_states = self._copy_default_auto_materialize_sensor_states(
instance, all_sensor_states
)
set_has_migrated_sensor_names(instance)
self._checked_migrations = True
for sensor, repo in eligible_sensors_and_repos:
selector_id = sensor.selector_id
if sensor.get_current_instigator_state(
all_sensor_states.get(selector_id)
).is_running:
sensors_and_repos.append((sensor, repo))
else:
sensors_and_repos.append(
(
None,
None,
) # Represents that there's a single set of ticks with no underlying sensor
)
all_sensor_states = {}
for sensor, repo in sensors_and_repos:
if sensor:
selector_id = sensor.selector.get_id()
auto_materialize_state = all_sensor_states.get(selector_id)
else:
selector_id = None
auto_materialize_state = None
if not sensor:
# make sure we are only running every pre_sensor_interval_seconds
if (
self._last_pre_sensor_submit_time
and now - self._last_pre_sensor_submit_time < self._pre_sensor_interval_seconds
):
continue
self._last_pre_sensor_submit_time = now
elif not auto_materialize_state:
assert sensor.default_status == DefaultSensorStatus.RUNNING
auto_materialize_state = InstigatorState(
sensor.get_remote_origin(),
InstigatorType.SENSOR,
InstigatorStatus.DECLARED_IN_CODE,
SensorInstigatorData(
min_interval=sensor.min_interval_seconds,
cursor=None,
last_sensor_start_timestamp=get_current_timestamp(),
sensor_type=sensor.sensor_type,
),
)
instance.add_instigator_state(auto_materialize_state)
elif is_under_min_interval(
auto_materialize_state,
sensor,
minimum_allowed_min_interval=_get_minimum_allowed_asset_daemon_interval(),
):
continue
self.instrument_elapsed(
sensor,
get_elapsed(auto_materialize_state) if auto_materialize_state else None,
sensor.min_interval_seconds if sensor else self._pre_sensor_interval_seconds,
)
if threadpool_executor:
# only one tick per sensor can be in flight
if selector_id in amp_tick_futures and not amp_tick_futures[selector_id].done():
continue
future = threadpool_executor.submit(
self._process_auto_materialize_tick,
workspace_process_context,
workspace_request_context,
repo,
sensor,
debug_crash_flags,
)
amp_tick_futures[selector_id] = future
else:
self._process_auto_materialize_tick(
workspace_process_context,
workspace_request_context,
repo,
sensor,
debug_crash_flags,
)
def _create_initial_sensor_cursors_from_raw_cursor(
self,
instance: DagsterInstance,
sensors_and_repos: Sequence[tuple[RemoteSensor, RemoteRepository]],
all_sensor_states: Mapping[str, InstigatorState],
pre_sensor_cursor: AssetDaemonCursor,
) -> Mapping[str, InstigatorState]:
start_status = (
InstigatorStatus.STOPPED
if get_auto_materialize_paused(instance)
else InstigatorStatus.RUNNING
)
result = {}
for sensor, repo in sensors_and_repos:
selection = sensor.asset_selection
if not selection:
continue
repo_asset_graph = repo.asset_graph
resolved_keys = selection.resolve(repo_asset_graph) | selection.resolve_checks(
repo_asset_graph
)
serialized_cursor = None
if len(resolved_keys) > 0:
# filter down the cursor to just the keys targeted by the sensor
condition_cursors = [
condition_cursor
for condition_cursor in (pre_sensor_cursor.previous_condition_cursors or [])
if condition_cursor.key in resolved_keys
]
cursor_to_use = dataclasses.replace(
pre_sensor_cursor,
previous_condition_cursors=condition_cursors,
)
serialized_cursor = asset_daemon_cursor_to_instigator_serialized_cursor(
cursor_to_use
)
new_auto_materialize_state = InstigatorState(
sensor.get_remote_origin(),
InstigatorType.SENSOR,
(
InstigatorStatus.DECLARED_IN_CODE
if sensor.default_status == DefaultSensorStatus.RUNNING
else start_status
),
SensorInstigatorData(
min_interval=sensor.min_interval_seconds,
cursor=serialized_cursor,
last_sensor_start_timestamp=get_current_timestamp(),
sensor_type=sensor.sensor_type,
),
)
if all_sensor_states.get(sensor.selector_id):
instance.update_instigator_state(new_auto_materialize_state)
else:
instance.add_instigator_state(new_auto_materialize_state)
result[sensor.selector_id] = new_auto_materialize_state
return result
def _copy_default_auto_materialize_sensor_states(
self,
instance: DagsterInstance,
all_sensor_states: Mapping[str, InstigatorState],
) -> Mapping[str, InstigatorState]:
"""Searches for sensors named `default_auto_materialize_sensor` and copies their state
to a sensor in the same repository named `default_automation_condition_sensor`.
"""
result = dict(all_sensor_states)
for instigator_state in all_sensor_states.values():
# only migrate instigators with the name "default_auto_materialize_sensor" and are
# handled by the asset daemon
if instigator_state.origin.instigator_name != "default_auto_materialize_sensor" and (
instigator_state.sensor_instigator_data
and instigator_state.sensor_instigator_data.sensor_type
and instigator_state.sensor_instigator_data.sensor_type.is_handled_by_asset_daemon
):
continue
new_sensor_origin = instigator_state.origin._replace(
instigator_name="default_automation_condition_sensor"
)
new_auto_materialize_state = InstigatorState(
new_sensor_origin,
InstigatorType.SENSOR,
instigator_state.status,
instigator_state.instigator_data,
)
new_sensor_selector_id = new_sensor_origin.get_selector().get_id()
result[new_sensor_selector_id] = new_auto_materialize_state
if all_sensor_states.get(new_sensor_selector_id):
instance.update_instigator_state(new_auto_materialize_state)
else:
instance.add_instigator_state(new_auto_materialize_state)
return result
def _process_auto_materialize_tick(
self,
workspace_process_context: IWorkspaceProcessContext,
workspace: BaseWorkspaceRequestContext,
repository: Optional[RemoteRepository],
sensor: Optional[RemoteSensor],
debug_crash_flags: SingleInstigatorDebugCrashFlags, # TODO No longer single instigator
):
asyncio.run(
self._async_process_auto_materialize_tick(
workspace_process_context,
workspace.asset_graph,
repository,
sensor,
debug_crash_flags,
)
)
async def _async_process_auto_materialize_tick(
self,
workspace_process_context: IWorkspaceProcessContext,
workspace_asset_graph: RemoteWorkspaceAssetGraph,
repository: Optional[RemoteRepository],
sensor: Optional[RemoteSensor],
debug_crash_flags: SingleInstigatorDebugCrashFlags, # TODO No longer single instigator
):
evaluation_time = get_current_datetime()
workspace = workspace_process_context.create_request_context()
instance: DagsterInstance = workspace_process_context.instance
if sensor:
auto_materialize_instigator_state = check.not_none(
instance.get_instigator_state(sensor.get_remote_origin_id(), sensor.selector_id)
)
if is_under_min_interval(
auto_materialize_instigator_state,
sensor,
minimum_allowed_min_interval=_get_minimum_allowed_asset_daemon_interval(),
):
# check the since we might have been queued before processing
return
else:
mark_sensor_state_for_tick(
instance, sensor, auto_materialize_instigator_state, evaluation_time
)
else:
auto_materialize_instigator_state = None
print_group_name = self._get_print_sensor_name(sensor)
try:
if sensor:
selection = check.not_none(sensor.asset_selection)
repository_origin = check.not_none(repository).get_remote_origin()
# resolve the selection against just the assets in the sensor's repository
repo_asset_graph = check.not_none(repository).asset_graph
resolved_keys = selection.resolve(repo_asset_graph) | selection.resolve_checks(
repo_asset_graph
)
eligibility_graph = repo_asset_graph
# Ensure that if there are two identical asset keys defined in different code
# locations with automation conditions, only one of them actually launches runs
eligible_keys = {
key
for key in resolved_keys
if (
workspace_asset_graph.get_repository_handle(key).get_remote_origin()
== repository_origin
)
}
else:
eligible_keys = workspace_asset_graph.get_all_asset_keys()
eligibility_graph = workspace_asset_graph
auto_materialize_entity_keys = {
target_key
for target_key in eligible_keys
if eligibility_graph.get(target_key).automation_condition is not None
}
num_target_entities = len(auto_materialize_entity_keys)
auto_observe_asset_keys = {
key
for key in eligible_keys
if isinstance(key, AssetKey)
and eligibility_graph.get(key).auto_observe_interval_minutes is not None
}
num_auto_observe_assets = len(auto_observe_asset_keys)
if not auto_materialize_entity_keys and not auto_observe_asset_keys:
self._logger.debug(f"No assets/checks that require evaluation{print_group_name}")
return
self._logger.info(
f"Checking {num_target_entities} assets/checks and"
f" {num_auto_observe_assets} observable source"
f" asset{'' if num_auto_observe_assets == 1 else 's'}{print_group_name}"
f" in thread {threading.current_thread().name}"
)
if sensor:
stored_cursor = asset_daemon_cursor_from_instigator_serialized_cursor(
cast(
"SensorInstigatorData",
check.not_none(auto_materialize_instigator_state).instigator_data,
).cursor,
workspace_asset_graph,
)
instigator_origin_id = sensor.get_remote_origin().get_id()
instigator_selector_id = sensor.get_remote_origin().get_selector().get_id()
instigator_name = sensor.name
else:
stored_cursor = _get_pre_sensor_auto_materialize_cursor(
instance, workspace_asset_graph
)
instigator_origin_id = _PRE_SENSOR_AUTO_MATERIALIZE_ORIGIN_ID
instigator_selector_id = _PRE_SENSOR_AUTO_MATERIALIZE_SELECTOR_ID
instigator_name = _PRE_SENSOR_AUTO_MATERIALIZE_INSTIGATOR_NAME
tick_retention_settings = instance.get_tick_retention_settings(
InstigatorType.SENSOR if sensor else InstigatorType.AUTO_MATERIALIZE
)
ticks = instance.get_ticks(instigator_origin_id, instigator_selector_id, limit=1)
latest_tick = ticks[0] if ticks else None
max_retries = instance.auto_materialize_max_tick_retries
# Determine if the most recent tick requires retrying
retry_tick: Optional[InstigatorTick] = None
override_evaluation_id: Optional[int] = None
consecutive_failure_count: int = 0
if latest_tick:
can_resume = (
get_current_timestamp() - latest_tick.timestamp
) <= MAX_TIME_TO_RESUME_TICK_SECONDS
if latest_tick.status in {TickStatus.FAILURE, TickStatus.STARTED}:
consecutive_failure_count = (
latest_tick.consecutive_failure_count or latest_tick.failure_count
)
# the evaluation ids not matching indicates that the tick failed or crashed before
# the cursor could be written, so no new runs could have been launched and it's
# safe to re-evaluate things from scratch in a new tick without retrying anything
previous_cursor_written = (
latest_tick.automation_condition_evaluation_id == stored_cursor.evaluation_id
)
if can_resume and not previous_cursor_written:
# if the tick failed before writing a cursor, we don't want to advance the
# evaluation id yet
override_evaluation_id = latest_tick.automation_condition_evaluation_id
# If the previous tick matches the stored cursor's evaluation ID, check if it failed
# or crashed partway through execution and needs to be resumed
# Don't resume very old ticks though in case the daemon crashed for a long time and
# then restarted
if can_resume and previous_cursor_written:
if latest_tick.status == TickStatus.STARTED:
self._logger.warn(
f"Tick for evaluation {stored_cursor.evaluation_id}{print_group_name} was interrupted part-way through, resuming"
)
retry_tick = latest_tick
elif (
latest_tick.status == TickStatus.FAILURE
and latest_tick.tick_data.failure_count <= max_retries
):
self._logger.info(
f"Retrying failed tick for evaluation {stored_cursor.evaluation_id}{print_group_name}"
)
retry_tick = instance.create_tick(
latest_tick.tick_data.with_status(
TickStatus.STARTED,
error=None,
timestamp=evaluation_time.timestamp(),
end_timestamp=None,
)._replace(
# make sure to override the evaluation id to stay on the previous value
auto_materialize_evaluation_id=latest_tick.automation_condition_evaluation_id
)
)
# otherwise, tick completed normally, no need to do anything
else:
if latest_tick.status == TickStatus.STARTED:
# Old tick that won't be resumed - move it into a SKIPPED state so it isn't
# left dangling in STARTED
self._logger.warn(
f"Moving dangling STARTED tick from evaluation {latest_tick.automation_condition_evaluation_id}{print_group_name} into SKIPPED"
)
latest_tick = latest_tick.with_status(status=TickStatus.SKIPPED)
instance.update_tick(latest_tick)
if retry_tick:
tick = retry_tick
else:
tick = instance.create_tick(
TickData(
instigator_origin_id=instigator_origin_id,
instigator_name=instigator_name,
instigator_type=(
InstigatorType.SENSOR if sensor else InstigatorType.AUTO_MATERIALIZE
),
status=TickStatus.STARTED,
timestamp=evaluation_time.timestamp(),
selector_id=instigator_selector_id,
consecutive_failure_count=consecutive_failure_count,
# we only set the auto_materialize_evaluation_id if it is not equal to the
# current tick id
auto_materialize_evaluation_id=override_evaluation_id,
)
)
with (
AutoMaterializeLaunchContext(
tick,
sensor,
instance,
self._logger,
tick_retention_settings,
) as tick_context,
workspace,
):
await self._evaluate_auto_materialize_tick(
tick_context,
tick,
sensor,
workspace_process_context,
workspace,
workspace_asset_graph,
auto_materialize_entity_keys,
stored_cursor,
auto_observe_asset_keys,
debug_crash_flags,
is_retry=(retry_tick is not None),
)
except Exception:
DaemonErrorCapture.process_exception(
exc_info=sys.exc_info(),
logger=self._logger,
log_message="Automation condition daemon caught an error",
)
async def _evaluate_auto_materialize_tick(
self,
tick_context: AutoMaterializeLaunchContext,
tick: InstigatorTick,
sensor: Optional[RemoteSensor],
workspace_process_context: IWorkspaceProcessContext,
workspace: BaseWorkspaceRequestContext,
asset_graph: RemoteWorkspaceAssetGraph,
auto_materialize_entity_keys: set[EntityKey],
stored_cursor: AssetDaemonCursor,
auto_observe_asset_keys: set[AssetKey],
debug_crash_flags: SingleInstigatorDebugCrashFlags,
is_retry: bool,
):
evaluation_id = tick.automation_condition_evaluation_id
instance = workspace_process_context.instance
schedule_storage = check.not_none(instance.schedule_storage)
run_request_execution_data_cache = {}
print_group_name = self._get_print_sensor_name(sensor)
if is_retry:
# Unfinished or retried tick already generated evaluations and run requests and cursor, now
# need to finish it
run_requests = tick.tick_data.run_requests or []
reserved_run_ids = tick.tick_data.reserved_run_ids or []
if schedule_storage.supports_auto_materialize_asset_evaluations:
evaluation_records = (
schedule_storage.get_auto_materialize_evaluations_for_evaluation_id(
evaluation_id
)
)
evaluations_by_key = {
evaluation_record.key: evaluation_record.get_evaluation_with_run_ids()
for evaluation_record in evaluation_records
}
else:
evaluations_by_key = {}
else:
sensor_tags = {SENSOR_NAME_TAG: sensor.name, **sensor.run_tags} if sensor else {}
skip_key_env_var = os.getenv(SKIP_DECLARATIVE_AUTOMATION_KEYS_ENV_VAR)
if skip_key_env_var:
skip_keys = skip_key_env_var.split(",")
skip_keys = {AssetKey.from_user_string(key) for key in skip_keys}
auto_materialize_entity_keys = {
key for key in auto_materialize_entity_keys if key not in skip_keys
}
# mold this into a shape AutomationTickEvaluationContext expects
asset_selection = AssetSelection.keys(
*{key for key in auto_materialize_entity_keys if isinstance(key, AssetKey)}
).without_checks() | AssetSelection.checks(
*{key for key in auto_materialize_entity_keys if isinstance(key, AssetCheckKey)}
)
run_requests, new_cursor, evaluations = await AutomationTickEvaluationContext(
evaluation_id=evaluation_id,
asset_graph=asset_graph,
asset_selection=asset_selection,
instance=instance,
cursor=stored_cursor,
materialize_run_tags={
**instance.auto_materialize_run_tags,
**DagsterRun.tags_for_tick_id(
str(tick.tick_id),
),
**sensor_tags,
},
observe_run_tags={AUTO_OBSERVE_TAG: "true", **sensor_tags},
emit_backfills=bool(
sensor
and sensor.metadata
and sensor.metadata.standard_metadata
and EMIT_BACKFILLS_METADATA_KEY in sensor.metadata.standard_metadata
),
auto_observe_asset_keys=auto_observe_asset_keys,
logger=self._logger,
).async_evaluate()
check.invariant(new_cursor.evaluation_id == evaluation_id)
check_for_debug_crash(debug_crash_flags, "EVALUATIONS_FINISHED")
evaluations_by_key = {
evaluation.key: evaluation.with_run_ids(set()) for evaluation in evaluations
}
# Write the asset evaluations without run IDs first
if schedule_storage.supports_auto_materialize_asset_evaluations:
schedule_storage.add_auto_materialize_asset_evaluations(
evaluation_id,
list(evaluations_by_key.values()),
)
check_for_debug_crash(debug_crash_flags, "ASSET_EVALUATIONS_ADDED")
reserved_run_ids = [
make_new_backfill_id() if rr.requires_backfill_daemon() else make_new_run_id()
for rr in run_requests
]
self._logger.info(
"Tick produced"
f" {len(run_requests)} run{'s' if len(run_requests) != 1 else ''} and"
f" {len(evaluations_by_key)} asset"
f" evaluation{'s' if len(evaluations_by_key) != 1 else ''} for evaluation ID"
f" {evaluation_id}{print_group_name}"
)
# Fetch all data that requires the code server before writing the cursor, to minimize
# the chances that changes to code servers after the cursor is written (e.g. a
# code server moving into an error state or an asset being renamed) causes problems
async_code_server_tasks = []
for run_request_index, run_request in enumerate(run_requests):
if not run_request.requires_backfill_daemon():
async_code_server_tasks.append(
get_job_execution_data_from_run_request(
asset_graph,
run_request,
instance,
workspace=workspace,
run_request_execution_data_cache=run_request_execution_data_cache,
)
)
check_for_debug_crash(debug_crash_flags, "EXECUTION_PLAN_CACHED")
check_for_debug_crash(
debug_crash_flags, f"EXECUTION_PLAN_CACHED_{run_request_index}"
)
# Use semaphore to limit concurrency to ensure code servers don't get overloaded
batch_size = int(os.getenv("DAGSTER_ASSET_DAEMON_CODE_SERVER_CONCURRENCY", "4"))
code_server_semaphore = asyncio.Semaphore(batch_size)
async def run_with_semaphore(task):
async with code_server_semaphore:
return await task
await asyncio.gather(*[run_with_semaphore(task) for task in async_code_server_tasks])
# Write out the in-progress tick data, which ensures that if the tick crashes or raises an exception, it will retry
tick = tick_context.set_run_requests(
run_requests=run_requests,
reserved_run_ids=reserved_run_ids,
)
tick_context.write()
check_for_debug_crash(debug_crash_flags, "RUN_REQUESTS_CREATED")
# Write out the persistent cursor, which ensures that future ticks will move on once
# they determine that nothing needs to be retried
if sensor:
state = instance.get_instigator_state(
sensor.get_remote_origin_id(), sensor.selector_id
)
instance.update_instigator_state(
check.not_none(state).with_data(
SensorInstigatorData(
last_tick_timestamp=tick.timestamp,
min_interval=sensor.min_interval_seconds,
cursor=asset_daemon_cursor_to_instigator_serialized_cursor(new_cursor),
sensor_type=sensor.sensor_type,
)
)
)
else:
instance.daemon_cursor_storage.set_cursor_values(
{_PRE_SENSOR_AUTO_MATERIALIZE_CURSOR_KEY: serialize_value(new_cursor)}
)
check_for_debug_crash(debug_crash_flags, "CURSOR_UPDATED")
check.invariant(len(run_requests) == len(reserved_run_ids))
await self._submit_run_requests_and_update_evaluations(
instance=instance,
tick_context=tick_context,
workspace_process_context=workspace_process_context,
workspace=workspace,
evaluations_by_key=evaluations_by_key,
evaluation_id=evaluation_id,
run_requests=run_requests,
reserved_run_ids=reserved_run_ids,
debug_crash_flags=debug_crash_flags,
remote_sensor=sensor,
run_request_execution_data_cache=run_request_execution_data_cache,
)
if schedule_storage.supports_auto_materialize_asset_evaluations:
schedule_storage.purge_asset_evaluations(
before=(
get_current_datetime() - datetime.timedelta(days=EVALUATIONS_TTL_DAYS)
).timestamp(),
)
self._logger.info(f"Finished auto-materialization tick{print_group_name}")
async def _submit_run_request(
self,
i: int,
instance: DagsterInstance,
workspace_process_context: IWorkspaceProcessContext,
workspace: BaseWorkspaceRequestContext,
evaluation_id: int,
run_request: RunRequest,
reserved_run_id: str,
run_request_execution_data_cache: dict[JobSubsetSelector, RunRequestExecutionData],
debug_crash_flags: SingleInstigatorDebugCrashFlags,
) -> tuple[str, AbstractSet[EntityKey]]:
# check that the run_request requires the backfill daemon rather than if the setting is enabled to
# account for the setting changing between tick retries
if run_request.requires_backfill_daemon():
asset_graph_subset = check.not_none(run_request.asset_graph_subset)
if instance.get_backfill(reserved_run_id):
self._logger.warn(
f"Run {reserved_run_id} already submitted on a previously interrupted tick, skipping"
)
else:
instance.add_backfill(
PartitionBackfill.from_asset_graph_subset(
backfill_id=reserved_run_id,
dynamic_partitions_store=instance,
backfill_timestamp=get_current_timestamp(),
asset_graph_subset=asset_graph_subset,
tags={
**run_request.tags,
AUTO_MATERIALIZE_TAG: "true",
AUTOMATION_CONDITION_TAG: "true",
ASSET_EVALUATION_ID_TAG: str(evaluation_id),
},
title=f"Run for Declarative Automation evaluation ID {evaluation_id}",
description=None,
run_config=run_request.run_config,
)
)
return reserved_run_id, check.not_none(asset_graph_subset.asset_keys)
else:
submitted_run = await submit_asset_run(
run_id=reserved_run_id,
run_request=run_request._replace(
tags={
**run_request.tags,
AUTO_MATERIALIZE_TAG: "true",
AUTOMATION_CONDITION_TAG: "true",
ASSET_EVALUATION_ID_TAG: str(evaluation_id),
}
),
run_request_index=i,
instance=instance,
workspace_process_context=workspace_process_context,
workspace=workspace,
run_request_execution_data_cache=run_request_execution_data_cache,
debug_crash_flags=debug_crash_flags,
logger=self._logger,
)
entity_keys = {
*(run_request.asset_selection or []),
*(run_request.asset_check_keys or []),
}
return submitted_run.run_id, entity_keys
async def _submit_run_requests_and_update_evaluations(
self,
instance: DagsterInstance,
tick_context: AutoMaterializeLaunchContext,
workspace_process_context: IWorkspaceProcessContext,
workspace: BaseWorkspaceRequestContext,
evaluations_by_key: dict[EntityKey, AutomationConditionEvaluationWithRunIds],
evaluation_id: int,
run_requests: Sequence[RunRequest],
reserved_run_ids: Sequence[str],
debug_crash_flags: SingleInstigatorDebugCrashFlags,
remote_sensor: Optional[RemoteSensor],
run_request_execution_data_cache: dict[JobSubsetSelector, RunRequestExecutionData],
):
updated_evaluation_keys = set()
check_after_runs_num = instance.get_tick_termination_check_interval()
check.invariant(len(run_requests) == len(reserved_run_ids))
to_submit = enumerate(tick_context.tick.reserved_run_ids_with_requests)
async def submit_run_request(
run_id_with_run_request: tuple[int, tuple[str, RunRequest]],
) -> tuple[str, AbstractSet[EntityKey]]:
i, (run_id, run_request) = run_id_with_run_request
return await self._submit_run_request(
i=i,
instance=instance,
run_request=run_request,
reserved_run_id=run_id,
evaluation_id=evaluation_id,
run_request_execution_data_cache=run_request_execution_data_cache,
workspace_process_context=workspace_process_context,
workspace=workspace,
debug_crash_flags=debug_crash_flags,
)
gen_run_request_results = [submit_run_request(item) for item in to_submit]
for i, generator in enumerate(gen_run_request_results):
submitted_run_id, entity_keys = await generator
tick_context.add_run_info(run_id=submitted_run_id)
# write the submitted run ID to any evaluations
for entity_key in entity_keys:
# asset keys for observation runs don't have evaluations
if entity_key in evaluations_by_key:
evaluation = evaluations_by_key[entity_key]
evaluations_by_key[entity_key] = dataclasses.replace(
evaluation, run_ids=evaluation.run_ids | {submitted_run_id}
)
updated_evaluation_keys.add(entity_key)
# check if the sensor is still enabled:
if check_after_runs_num is not None and i % check_after_runs_num == 0:
if not self._sensor_is_enabled(instance, remote_sensor):
# The user has manually stopped the sensor mid-iteration. In this case we assume
# the user has a good reason for stopping the sensor (e.g. the sensor is submitting
# many unintentional runs) so we stop submitting runs and will mark the tick as
# skipped so that when the sensor is turned back on we don't detect this tick as incomplete
# and try to submit the same runs again.
self._logger.info(
"Sensor has been manually stopped while submitted runs. No more runs will be submitted."
)
tick_context.set_user_interrupted(True)
break
evaluations_to_update = [
evaluations_by_key[asset_key] for asset_key in updated_evaluation_keys
]
if evaluations_to_update:
schedule_storage = check.not_none(instance.schedule_storage)
schedule_storage.add_auto_materialize_asset_evaluations(
evaluation_id, evaluations_to_update
)
check_for_debug_crash(debug_crash_flags, "RUN_IDS_ADDED_TO_EVALUATIONS")
if tick_context.tick.tick_data.user_interrupted:
# mark as skipped so that we don't request any remaining runs when the sensor is started again
tick_context.update_state(TickStatus.SKIPPED)
tick_context.set_skip_reason("Sensor manually stopped mid-iteration.")
else:
tick_context.update_state(
TickStatus.SUCCESS if len(run_requests) > 0 else TickStatus.SKIPPED,
)
def _sensor_is_enabled(self, instance: DagsterInstance, remote_sensor: Optional[RemoteSensor]):
use_auto_materialize_sensors = instance.auto_materialize_use_sensors
if (not use_auto_materialize_sensors) and get_auto_materialize_paused(instance):
return False
if use_auto_materialize_sensors and remote_sensor:
instigator_state = instance.get_instigator_state(
remote_sensor.get_remote_origin_id(), remote_sensor.selector_id
)
if instigator_state and not instigator_state.is_running:
return False
return True
| AssetDaemon |
python | apache__airflow | airflow-core/src/airflow/utils/log/logging_mixin.py | {
"start": 7507,
"end": 10183
} | class ____(StreamHandler):
"""
Custom StreamHandler that uses current sys.stderr/stdout as the stream for logging.
This class is like a StreamHandler using sys.stderr/stdout, but uses
whatever sys.stderr/stdout is currently set to rather than the value of
sys.stderr/stdout at handler construction time, except when running a
task in a kubernetes executor pod.
"""
def __init__(self, stream):
if not isinstance(stream, str):
raise TypeError(
"Cannot use file like objects. Use 'stdout' or 'stderr' as a str and without 'ext://'."
)
self._use_stderr = True
if "stdout" in stream:
self._use_stderr = False
self._orig_stream = sys.stdout
else:
self._orig_stream = sys.stderr
# StreamHandler tries to set self.stream
Handler.__init__(self)
@property
def stream(self):
"""Returns current stream."""
from airflow.settings import IS_EXECUTOR_CONTAINER, IS_K8S_EXECUTOR_POD
if IS_K8S_EXECUTOR_POD or IS_EXECUTOR_CONTAINER:
return self._orig_stream
if self._use_stderr:
return sys.stderr
return sys.stdout
def set_context(logger, value):
"""
Walk the tree of loggers and try to set the context for each handler.
:param logger: logger
:param value: value to set
"""
if not isinstance(logger, logging.Logger):
# This fn doesn't make sense for structlog based handlers
return
while logger:
orig_propagate = logger.propagate
for handler in logger.handlers:
# Not all handlers need to have context passed in so we ignore
# the error when handlers do not have set_context defined.
# Don't use getatrr so we have type checking. And we don't care if handler is actually a
# FileTaskHandler, it just needs to have a set_context function!
if hasattr(handler, "set_context"):
from airflow.utils.log.file_task_handler import FileTaskHandler # noqa: TC001
flag = cast("FileTaskHandler", handler).set_context(value)
# By default we disable propagate once we have configured the logger, unless that handler
# explicitly asks us to keep it on.
if flag is not SetContextPropagate.MAINTAIN_PROPAGATE:
logger.propagate = False
if orig_propagate is True:
# If we were set to propagate before we turned if off, then keep passing set_context up
logger = logger.parent
else:
break
| RedirectStdHandler |
python | django__django | tests/migrations/test_migrations_squashed_complex_multi_apps/app1/2_auto.py | {
"start": 35,
"end": 182
} | class ____(migrations.Migration):
dependencies = [("app1", "1_auto")]
operations = [migrations.RunPython(migrations.RunPython.noop)]
| Migration |
python | django-guardian__django-guardian | example_project/articles/admin.py | {
"start": 89,
"end": 243
} | class ____(admin.ModelAdmin):
list_display = ("title", "slug", "created_at")
list_filter = ("created_at",)
search_fields = ("title",)
| ArticleAdmin |
python | getsentry__sentry | src/sentry/tasks/email.py | {
"start": 1757,
"end": 3022
} | class ____(Exception):
"""
SMTPDataError with a 4xx code, and thus is temporary and retriable.
"""
def __init__(self, code: int, msg: str | bytes) -> None:
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
def _send_email(message: dict[str, Any]) -> None:
try:
send_messages([message_from_dict(message)])
except SMTPDataError as e:
# 4xx means temporary and retriable; See RFC 5321, §4.2.1
if 400 <= e.smtp_code < 500:
raise TemporaryEmailError(e.smtp_code, e.smtp_error)
raise
@instrumented_task(
name="sentry.tasks.email.send_email",
namespace=notifications_tasks,
processing_deadline_duration=90,
retry=Retry(times=2, delay=60 * 5),
silo_mode=SiloMode.REGION,
)
@retry(on=(TemporaryEmailError,))
def send_email(message: dict[str, Any]) -> None:
_send_email(message)
@instrumented_task(
name="sentry.tasks.email.send_email_control",
namespace=notifications_control_tasks,
processing_deadline_duration=90,
retry=Retry(times=2, delay=60 * 5),
silo_mode=SiloMode.CONTROL,
)
@retry(on=(TemporaryEmailError,))
def send_email_control(message: dict[str, Any]) -> None:
_send_email(message)
| TemporaryEmailError |
python | getsentry__sentry | src/sentry/api/endpoints/project_statistical_detectors.py | {
"start": 573,
"end": 2148
} | class ____(ProjectEndpoint):
owner = ApiOwner.PROFILING
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
enforce_rate_limit = True
def get(self, request: Request, project: Project) -> Response:
try:
timestamp = parse_datetime_string(request.GET["end"])
except KeyError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={"details": "Missing required argument: end"},
)
except Exception:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={"details": "Invalid value for end"},
)
transaction = request.GET.get("transaction")
if transaction is not None:
_detect_transaction_change_points([(project.id, transaction)], timestamp)
return Response(status=status.HTTP_202_ACCEPTED)
fingerprint = request.GET.get("function")
if fingerprint is not None:
try:
function = int(fingerprint)
except ValueError:
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={"details": "Invalid fingerprint"},
)
_detect_function_change_points([(project.id, function)], timestamp)
return Response(status=status.HTTP_202_ACCEPTED)
return Response(
status=status.HTTP_400_BAD_REQUEST,
data={"details": "Missing transaction or fingerprint"},
)
| ProjectStatisticalDetectors |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 211277,
"end": 211747
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteDeployment"""
__schema__ = github_schema
__field_names__ = ("id", "client_mutation_id")
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
"""The Node ID of the deployment to be deleted."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteDeploymentInput |
python | ansible__ansible | test/units/_internal/templating/fixtures/valid_collection/ansible_collections/valid/also_valid/plugins/lookup/also_also_valid.py | {
"start": 84,
"end": 194
} | class ____(LookupBase):
def run(self, terms, variables=None, **kwargs) -> list:
return []
| LookupModule |
python | sqlalchemy__sqlalchemy | test/orm/test_composites.py | {
"start": 58709,
"end": 62188
} | class ____(fixtures.TestBase):
@testing.fixture
def edge_point_fixture(self, decl_base):
@dataclasses.dataclass
class Point:
x: Optional[int]
y: Optional[int]
def go(return_none_on):
class Edge(decl_base):
__tablename__ = "edge"
id: Mapped[int] = mapped_column(primary_key=True)
start = composite(Point, return_none_on=return_none_on)
return Point, Edge
return go
@testing.fixture
def edge_point_persist_fixture(self, edge_point_fixture, decl_base):
def go(return_none_on):
Point, Edge = edge_point_fixture(return_none_on)
decl_base.metadata.create_all(testing.db)
with Session(testing.db) as sess:
sess.add(Edge(x=None, y=None))
sess.commit()
return Point, Edge
return go
def test_special_rule(self, edge_point_fixture):
Point, Edge = edge_point_fixture(lambda x, y: y is None)
obj = Edge()
eq_(obj.start, None)
obj = Edge(y=5)
eq_(obj.start, Point(x=None, y=5))
obj = Edge(y=5, x=7)
eq_(obj.start, Point(x=7, y=5))
obj = Edge(y=None, x=7)
eq_(obj.start, None)
@testing.variation("return_none_on", [True, False])
def test_pending_object_no_return_none(
self, edge_point_fixture, return_none_on
):
Point, Edge = edge_point_fixture(
(lambda *args: all(arg is None for arg in args))
if return_none_on
else None
)
obj = Edge()
if return_none_on:
eq_(obj.start, None)
else:
eq_(obj.start, Point(x=None, y=None))
# object stays in place since it was assigned. this is to support
# in-place mutation of the object
obj.x = 5
if return_none_on:
eq_(obj.start, None)
else:
eq_(obj.start, Point(x=None, y=None))
# only if we pop from the dict can we change that
obj.__dict__.pop("start")
eq_(obj.start, Point(x=5, y=None))
obj.x = None
obj.__dict__.pop("start")
if return_none_on:
eq_(obj.start, None)
else:
eq_(obj.start, Point(x=None, y=None))
@testing.variation("return_none_on", [True, False])
def test_query_from_composite_directly(
self, edge_point_persist_fixture, return_none_on
):
Point, Edge = edge_point_persist_fixture(
(lambda *args: all(arg is None for arg in args))
if return_none_on
else None
)
with Session(testing.db) as sess:
value = sess.scalar(select(Edge.start))
if return_none_on:
eq_(value, None)
else:
eq_(value, Point(x=None, y=None))
@testing.variation("return_none_on", [True, False])
def test_access_on_persistent(
self, edge_point_persist_fixture, return_none_on
):
Point, Edge = edge_point_persist_fixture(
(lambda *args: all(arg is None for arg in args))
if return_none_on
else None
)
with Session(testing.db) as sess:
edge = sess.scalars(select(Edge)).one()
if return_none_on:
eq_(edge.start, None)
else:
eq_(edge.start, Point(x=None, y=None))
| NoneReturnTest |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/config.py | {
"start": 12116,
"end": 13207
} | class ____(BaseConfig):
"""
Config that is used to verify that a connector and its streams uphold certain behavior and features that are
required to maintain enterprise-level standard of quality.
Attributes:
streams_without_primary_key: A list of streams where a primary key is not available from the API or is not relevant to the record
"""
timeout_seconds: int = timeout_seconds
config_path: str = config_path
streams_without_primary_key: Optional[List[NoPrimaryKeyConfiguration]] = Field(
description="Streams that do not support a primary key such as reports streams"
)
allowed_hosts: Optional[AllowedHostsConfiguration] = Field(
description="Used to bypass checking the `allowedHosts` field in a source's `metadata.yaml` when all external hosts should be reachable."
)
suggested_streams: Optional[SuggestedStreamsConfiguration] = Field(
description="Used to bypass checking the `suggestedStreams` field in a source's `metadata.yaml` when certified source doesn't have any."
)
| ConnectorAttributesConfig |
python | spack__spack | lib/spack/spack/compilers/libraries.py | {
"start": 12121,
"end": 12985
} | class ____:
"""Deserialized cache entry for a compiler"""
__slots__ = ("c_compiler_output",)
def __init__(self, c_compiler_output: Optional[str]):
self.c_compiler_output = c_compiler_output
@property
def empty(self) -> bool:
"""Sometimes the compiler is temporarily broken, preventing us from getting output. The
call site determines if that is a problem."""
return self.c_compiler_output is None
@classmethod
def from_dict(cls, data: Dict[str, Optional[str]]):
if not isinstance(data, dict):
raise ValueError(f"Invalid {cls.__name__} data")
c_compiler_output = data.get("c_compiler_output")
if not isinstance(c_compiler_output, (str, type(None))):
raise ValueError(f"Invalid {cls.__name__} data")
return cls(c_compiler_output)
| CompilerCacheEntry |
python | astropy__astropy | astropy/table/meta.py | {
"start": 857,
"end": 12238
} | class ____(dict):
"""
Specialized dict subclass to represent attributes of a Column
and return items() in a preferred order. This is only for use
in generating a YAML map representation that has a fixed order.
"""
def items(self):
"""
Return items as a ColumnOrderList, which sorts in the preferred
way for column attributes.
"""
return ColumnOrderList(super().items())
def _construct_odict(load, node):
"""
Construct dict from !!omap in yaml safe load.
See ``get_header_from_yaml()`` for usage.
Source: https://gist.github.com/weaver/317164
License: Unspecified
This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop
"""
omap = {}
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
f"expected a sequence, but found {node.id}",
node.start_mark,
)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
f"expected a mapping of length 1, but found {subnode.id}",
subnode.start_mark,
)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
f"expected a single mapping item, but found {len(subnode.value)} items",
subnode.start_mark,
)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
def _repr_pairs(dump, tag, sequence, flow_style=None):
"""
This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple.
Source: https://gist.github.com/weaver/317164
License: Unspecified
"""
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for key, val in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def _repr_odict(dumper, data):
"""
Represent OrderedDict in yaml dump.
Source: https://gist.github.com/weaver/317164
License: Unspecified
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False) # doctest: +SKIP
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True) # doctest: +SKIP
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
return _repr_pairs(dumper, "tag:yaml.org,2002:omap", data.items())
def _repr_column_dict(dumper, data):
"""
Represent ColumnDict in yaml dump.
This is the same as an ordinary mapping except that the keys
are written in a fixed order that makes sense for astropy table
columns.
"""
return dumper.represent_mapping("tag:yaml.org,2002:map", data)
def _get_variable_length_array_shape(col):
"""Check if object-type ``col`` is really a variable length list.
That is true if the object consists purely of list of nested lists, where
the shape of every item can be represented as (m, n, ..., *) where the (m,
n, ...) are constant and only the lists in the last axis have variable
shape. If so the returned value of shape will be a tuple in the form (m, n,
..., None).
If ``col`` is a variable length array then the return ``dtype`` corresponds
to the type found by numpy for all the individual values. Otherwise it will
be ``np.dtype(object)``.
Parameters
----------
col : column-like
Input table column, assumed to be object-type
Returns
-------
shape : tuple
Inferred variable length shape or None
dtype : np.dtype
Numpy dtype that applies to col
"""
class ConvertError(ValueError):
"""Local conversion error used below."""
# Numpy types supported as variable-length arrays
np_classes = (np.floating, np.integer, np.bool_, np.str_)
try:
if len(col) == 0 or not all(isinstance(val, np.ndarray) for val in col):
raise ConvertError
dtype = col[0].dtype
shape = col[0].shape[:-1]
for val in col:
if not issubclass(val.dtype.type, np_classes) or val.shape[:-1] != shape:
raise ConvertError
dtype = np.promote_types(dtype, val.dtype)
shape = shape + (None,)
except ConvertError:
# `col` is not a variable length array, return shape and dtype to
# the original. Note that this function is only called if
# col.shape[1:] was () and col.info.dtype is object.
dtype = col.info.dtype
shape = ()
return shape, dtype
def _get_datatype_from_dtype(dtype):
"""Return string version of ``dtype`` for writing to ECSV ``datatype``."""
datatype = dtype.name
if datatype.startswith(("bytes", "str")):
datatype = "string"
datatype = datatype.removesuffix("_") # string_ and bool_ lose the final _ for ECSV
return datatype
def _get_col_attributes(col):
"""
Extract information from a column (apart from the values) that is required
to fully serialize the column.
Parameters
----------
col : column-like
Input Table column
Returns
-------
attrs : dict
Dict of ECSV attributes for ``col``
"""
dtype = col.info.dtype # Type of column values that get written
subtype = None # Type of data for object columns serialized with JSON
shape = col.shape[1:] # Shape of multidim / variable length columns
if dtype.name == "object":
if shape == ():
# 1-d object type column might be a variable length array
dtype = np.dtype(str)
shape, subtype = _get_variable_length_array_shape(col)
else:
# N-d object column is subtype object but serialized as JSON string
dtype = np.dtype(str)
subtype = np.dtype(object)
elif shape:
# N-d column which is not object is serialized as JSON string
dtype = np.dtype(str)
subtype = col.info.dtype
datatype = _get_datatype_from_dtype(dtype)
# Set the output attributes
attrs = ColumnDict()
attrs["name"] = col.info.name
attrs["datatype"] = datatype
for attr, nontrivial, xform in (
("unit", lambda x: x is not None, str),
("format", lambda x: x is not None, None),
("description", lambda x: x is not None, None),
("meta", lambda x: x, OrderedDict),
):
col_attr = getattr(col.info, attr)
if nontrivial(col_attr):
attrs[attr] = xform(col_attr) if xform else col_attr
if subtype:
attrs["subtype"] = _get_datatype_from_dtype(subtype)
# Numpy 'object' maps to 'subtype' of 'json' in ECSV
if attrs["subtype"] == "object":
attrs["subtype"] = "json"
if shape:
attrs["subtype"] += json.dumps(list(shape), separators=(",", ":"))
return attrs
def get_yaml_from_table(table):
"""
Return lines with a YAML representation of header content from the ``table``.
Parameters
----------
table : `~astropy.table.Table` object
Table for which header content is output
Returns
-------
lines : list
List of text lines with YAML header content
"""
header = {"cols": list(table.columns.values())}
if table.meta:
header["meta"] = OrderedDict(table.meta)
return get_yaml_from_header(header)
def get_yaml_from_header(header):
"""
Return lines with a YAML representation of header content from a Table.
The ``header`` dict must contain these keys:
- 'cols' : list of table column objects (required)
- 'meta' : table 'meta' attribute (optional)
Other keys included in ``header`` will be serialized in the output YAML
representation.
Parameters
----------
header : dict
Table header content
Returns
-------
lines : list
List of text lines with YAML header content
"""
from astropy.io.misc.yaml import AstropyDumper
class TableDumper(AstropyDumper):
"""
Custom Dumper that represents OrderedDict as an !!omap object.
"""
def represent_mapping(self, tag, mapping, flow_style=None):
"""
This is a combination of the Python 2 and 3 versions of this method
in the PyYAML library to allow the required key ordering via the
ColumnOrderList object. The Python 3 version insists on turning the
items() mapping into a list object and sorting, which results in
alphabetical order for the column keys.
"""
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, "items"):
mapping = mapping.items()
if hasattr(mapping, "sort"):
mapping.sort()
else:
mapping = list(mapping)
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
best_style = False
if not (
isinstance(node_value, yaml.ScalarNode) and not node_value.style
):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
TableDumper.add_representer(OrderedDict, _repr_odict)
TableDumper.add_representer(ColumnDict, _repr_column_dict)
header = copy.copy(header) # Don't overwrite original
header["datatype"] = [_get_col_attributes(col) for col in header["cols"]]
del header["cols"]
lines = yaml.dump(
header, default_flow_style=None, Dumper=TableDumper, width=130
).splitlines()
return lines
| ColumnDict |
python | dagster-io__dagster | python_modules/libraries/dagster-mlflow/dagster_mlflow/resources.py | {
"start": 2352,
"end": 12206
class ____(metaclass=MlflowMeta):
    """Class for setting up an mlflow resource for dagster runs.
    This takes care of all the configuration required to use mlflow tracking and the complexities of
    mlflow tracking dagster parallel runs.
    """

    def __init__(self, context):
        # Context associated attributes
        self.log = context.log
        self.run_name = context.dagster_run.job_name
        self.dagster_run_id = context.run_id

        # resource config attributes
        resource_config = context.resource_config
        self.tracking_uri = resource_config.get("mlflow_tracking_uri")
        if self.tracking_uri:
            mlflow.set_tracking_uri(self.tracking_uri)
        self.parent_run_id = resource_config.get("parent_run_id")
        self.mlflow_run_id = resource_config.get("mlflow_run_id")
        self.experiment_name = resource_config["experiment_name"]
        self.env_tags_to_log = resource_config.get("env_to_tag") or []
        self.extra_tags = resource_config.get("extra_tags")

        # Update env variables if any are given
        self.env_vars = resource_config.get("env", {})
        if self.env_vars:
            environ.update(self.env_vars)

        # If the experiment exists then the set won't do anything
        mlflow.set_experiment(self.experiment_name)
        self.experiment = mlflow.get_experiment_by_name(self.experiment_name)

        # Get the client object
        self.tracking_client = mlflow.tracking.MlflowClient()

        # Set up the active run and tags
        self._setup()

    def _setup(self):
        """Sets the active run and tags. If an Mlflow run_id exists then the
        active run is set to it. This way a single Dagster run outputs data
        to the same Mlflow run, even when multiprocess executors are used.
        """
        # If already set in self then use that mlflow_run_id, else search for the run
        if self.mlflow_run_id is None:
            run_id = self._get_current_run_id()
            self.mlflow_run_id = run_id
        else:
            run_id = self.mlflow_run_id
        self._set_active_run(run_id=run_id)
        self._set_all_tags()

        # hack needed to stop mlflow from marking run as finished when
        # a process exits in parallel runs
        atexit.unregister(mlflow.end_run)

    def _get_current_run_id(
        self, experiment: Optional[Any] = None, dagster_run_id: Optional[str] = None
    ):
        """Gets the run id of a specific dagster run and experiment id.
        If it doesn't exist then it returns a None.

        Args:
            experiment (optional): Mlflow experiment.
                When none is passed it fetches the experiment object set in
                the constructor. Defaults to None.
            dagster_run_id (optional): The Dagster run id.
                When none is passed it fetches the dagster_run_id object set in
                the constructor. Defaults to None.

        Returns:
            run_id (str or None): run_id if it is found else None
        """
        experiment = experiment or self.experiment
        dagster_run_id = dagster_run_id or self.dagster_run_id
        if experiment:
            # Check if a run with this dagster run id has already been started
            # in mlflow, will get an empty dataframe if not.
            # Note: Search requests have a lower rate limit than others, so we
            # need to limit/retry searches where possible.
            current_run_df = backoff(
                mlflow.search_runs,
                retry_on=(MlflowException,),
                kwargs={
                    "experiment_ids": [experiment.experiment_id],
                    "filter_string": f"tags.dagster_run_id='{dagster_run_id}'",
                },
                max_retries=3,
            )
            if not current_run_df.empty:  # type: ignore
                return current_run_df.run_id.values[0]  # type: ignore
        # Implicitly returns None when no experiment or no matching run exists.

    def _set_active_run(self, run_id=None):
        """This method sets the active run to be that of the specified
        run_id. If None is passed then a new run is started. The new run also
        takes care of nested runs.

        Args:
            run_id (str, optional): Mlflow run_id. Defaults to None.
        """
        nested_run = False
        if self.parent_run_id is not None:
            # Activate the parent first so the next start_run nests under it.
            self._start_run(run_id=self.parent_run_id, run_name=self.run_name)
            nested_run = True
        self._start_run(run_id=run_id, run_name=self.run_name, nested=nested_run)

    def _start_run(self, **kwargs):
        """Catches the Mlflow exception if a run is already active."""
        try:
            # If a run_id is passed, mlflow will generally not start a new run
            # and instead, it will just be set as the active run
            run = mlflow.start_run(**kwargs)
            self.log.info(
                f"Starting a new mlflow run with id {run.info.run_id} "
                f"in experiment {self.experiment_name}"
            )
        except Exception as ex:
            run = mlflow.active_run()
            # Only "already active" errors are tolerated; anything else re-raises.
            if "is already active" not in str(ex):
                raise (ex)
            self.log.info(f"Run with id {run.info.run_id} is already active.")  # type: ignore

    def _set_all_tags(self):
        """Method collects dagster_run_id plus all env variables/tags that have been
        specified by the user in the config_schema and logs them as tags in mlflow.

        Returns:
            tags [dict]: Dictionary of all the tags
        """
        tags = {tag: environ.get(tag) for tag in self.env_tags_to_log}
        tags["dagster_run_id"] = self.dagster_run_id
        if self.extra_tags:
            tags.update(self.extra_tags)
        mlflow.set_tags(tags)

    def cleanup_on_error(self):
        """Method ends mlflow run with correct exit status for failed runs. Note that
        this method does not work when a job running in the webserver fails, it seems
        that in this case a different process runs the job and when it fails
        the stack trace is therefore not available. For this case we can use the
        cleanup_on_failure hook defined below.
        """
        # sys.exc_info() is non-empty only while an exception is propagating.
        any_error = sys.exc_info()
        if any_error[1]:
            if isinstance(any_error[1], KeyboardInterrupt):
                mlflow.end_run(status=RunStatus.to_string(RunStatus.KILLED))
            else:
                mlflow.end_run(status=RunStatus.to_string(RunStatus.FAILED))

    @staticmethod
    def log_params(params: dict):
        """Overload of the mlflow.log_params. If len(params) >100 then
        params is sent to mlflow in chunks.

        Args:
            params (dict): Parameters to be logged
        """
        for param_chunk in MlFlow.chunks(params, 100):
            mlflow.log_params(param_chunk)

    @staticmethod
    def chunks(params: dict, size: int = 100):
        """Method that chunks a dictionary into batches of size.

        Args:
            params (dict): Dictionary set to be batched
            size (int, optional): Maximum number of items per batch. Defaults to 100.

        Yields:
            (dict): Batch of dictionary
        """
        it = iter(params)
        for _ in range(0, len(params), size):
            yield {k: params[k] for k in islice(it, size)}
@beta
@dagster_maintained_resource
@resource(config_schema=CONFIG_SCHEMA)
def mlflow_tracking(context):
    """This resource initializes an MLflow run that's used for all steps within a Dagster run.

    This resource provides access to all of mlflow's methods as well as the mlflow tracking client's
    methods.

    Usage:

    1. Add the mlflow resource to any ops in which you want to invoke mlflow tracking APIs.
    2. Add the `end_mlflow_on_run_finished` hook to your job to end the MLflow run
       when the Dagster run is finished.

    Examples:
        .. code-block:: python

            from dagster_mlflow import end_mlflow_on_run_finished, mlflow_tracking

            @op(required_resource_keys={"mlflow"})
            def mlflow_op(context):
                mlflow.log_params(some_params)
                mlflow.tracking.MlflowClient().create_registered_model(some_model_name)

            @end_mlflow_on_run_finished
            @job(resource_defs={"mlflow": mlflow_tracking})
            def mlf_example():
                mlflow_op()

            # example using an mlflow instance with s3 storage
            mlf_example.execute_in_process(run_config={
                "resources": {
                    "mlflow": {
                        "config": {
                            "experiment_name": my_experiment,
                            "mlflow_tracking_uri": "http://localhost:5000",

                            # if want to run a nested run, provide parent_run_id
                            "parent_run_id": an_existing_mlflow_run_id,

                            # if you want to resume a run or avoid creating a new run in the resource init,
                            # provide mlflow_run_id
                            "mlflow_run_id": an_existing_mlflow_run_id,

                            # env variables to pass to mlflow
                            "env": {
                                "MLFLOW_S3_ENDPOINT_URL": my_s3_endpoint,
                                "AWS_ACCESS_KEY_ID": my_aws_key_id,
                                "AWS_SECRET_ACCESS_KEY": my_secret,
                            },

                            # env variables you want to log as mlflow tags
                            "env_to_tag": ["DOCKER_IMAGE_TAG"],

                            # key-value tags to add to your experiment
                            "extra_tags": {"super": "experiment"},
                        }
                    }
                }
            })
    """
    mlf = MlFlow(context)
    yield mlf
    # Runs after the resource scope exits; flags the mlflow run as
    # FAILED/KILLED if an exception is currently propagating.
    mlf.cleanup_on_error()
| MlFlow |
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 3573,
"end": 3664
class ____(Protocol):
    """Structural type for a replacement callback: any callable taking
    arbitrary positional/keyword arguments and returning anything."""

    def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
| ReplaceFn |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_base.py | {
"start": 9210,
"end": 10490
class ____(_ORMClassConfigurator):
    """Configurator that configures a class that's potentially going to be
    mapped, and optionally turned into a dataclass as well."""

    __slots__ = (
        "properties",
        "declared_attr_reg",
    )

    # attribute name -> column(s) / MapperProperty collected during the
    # declarative scan, in declaration order
    properties: util.OrderedDict[
        str,
        Union[
            Sequence[NamedColumn[Any]], NamedColumn[Any], MapperProperty[Any]
        ],
    ]
    # cache of evaluated declared_attr results for this class
    declared_attr_reg: Dict[declared_attr[Any], Any]

    def __init__(
        self,
        registry: _RegistryType,
        cls_: Type[Any],
    ):
        super().__init__(cls_)
        self.properties = util.OrderedDict()
        self.declared_attr_reg = {}

        # Register with the instrumentation system immediately, but defer
        # finalization until mapping actually proceeds.
        instrumentation.register_class(
            self.cls,
            finalize=False,
            registry=registry,
            declarative_scan=self,
            init_method=registry.constructor,
        )

    def set_cls_attribute(self, attrname: str, value: _T) -> _T:
        # Install via the class manager so instrumentation stays consistent.
        manager = instrumentation.manager_of_class(self.cls)
        manager.install_member(attrname, value)
        return value

    def map(self, mapper_kw: _MapperKwArgs) -> Mapper[Any]:
        # Subclasses implement the actual Mapper construction.
        raise NotImplementedError()

    def _early_mapping(self, mapper_kw: _MapperKwArgs) -> None:
        # Hook for configurations that map as soon as scanning completes.
        self.map(mapper_kw)
| _MapperConfig |
python | Pylons__pyramid | tests/test_url.py | {
"start": 47629,
"end": 48383
} | class ____(unittest.TestCase):
def _callFUT(self, request, *elements, **kw):
from pyramid.url import current_route_path
return current_route_path(request, *elements, **kw)
def _makeRequest(self):
class Request:
def current_route_path(self, *elements, **kw):
self.elements = elements
self.kw = kw
return 'current route path'
return Request()
def test_it(self):
request = self._makeRequest()
result = self._callFUT(request, 'abc', _anchor='abc')
self.assertEqual(result, 'current route path')
self.assertEqual(request.elements, ('abc',))
self.assertEqual(request.kw, {'_anchor': 'abc'})
| Test_current_route_path |
python | getsentry__sentry | src/sentry/integrations/pagerduty/client.py | {
"start": 1140,
"end": 3335
class ____(ApiClient):
    """Thin client for the PagerDuty Events API v2 enqueue endpoint."""

    # Do not follow redirects from the Events API.
    allow_redirects = False
    integration_name = IntegrationProviderSlug.PAGERDUTY.value
    base_url = "https://events.pagerduty.com/v2/enqueue"

    def __init__(self, integration_key: str, integration_id: int | None) -> None:
        self.integration_key = integration_key
        super().__init__(integration_id=integration_id)

    def request(self, *args: Any, **kwargs: Any) -> Any:
        # Default to JSON unless the caller supplied its own headers.
        kwargs.setdefault("headers", {"Content-Type": "application/json"})
        return self._request(*args, **kwargs)

    def send_trigger(self, data: PagerDutyEventPayload) -> Response:
        """POST a "trigger" event payload, recording the on-call interaction."""
        with record_event(OnCallInteractionType.CREATE).capture():
            return self.post("/", data=data)
def build_pagerduty_event_payload(
    *,
    routing_key: str,
    event: Event | GroupEvent,
    notification_uuid: str | None,
    severity: PagerdutySeverity,
) -> PagerDutyEventPayload:
    """Build a PagerDuty Events-API-v2 "trigger" payload from a Sentry event.

    When the event belongs to a group, issue links and a dedup key (the
    group's qualified short id) are attached so PagerDuty folds repeats.
    """
    source = event.transaction or event.culprit or "<unknown>"
    group = event.group
    level = event.get_tag("level") or "error"
    custom_details = serialize(event, None, ExternalEventSerializer())
    # Prefer the (truncated) message; fall back to the title when empty.
    summary = custom_details["message"][:PAGERDUTY_SUMMARY_MAX_LENGTH] or custom_details["title"]

    link_params = {"referrer": "pagerduty_integration"}
    if notification_uuid:
        link_params["notification_uuid"] = notification_uuid

    # "default" severity defers to a severity derived from the event level.
    if severity == PAGERDUTY_DEFAULT_SEVERITY:
        severity = LEVEL_SEVERITY_MAP[level]

    payload: PagerDutyEventPayload = {
        "routing_key": routing_key,
        "event_action": "trigger",
        "payload": {
            "summary": summary,
            "severity": severity,
            "source": source,
            "custom_details": custom_details,
        },
        "client": "sentry",
    }

    if group:
        client_url = group.get_absolute_url(params=link_params)
        payload["client_url"] = client_url
        payload["dedup_key"] = group.qualified_short_id
        payload["payload"]["component"] = group.project.slug
        payload["links"] = [
            {
                "href": client_url,
                "text": "View Sentry Issue Details",
            }
        ]
    return payload
| PagerDutyClient |
python | mlflow__mlflow | tests/pyfunc/test_chat_model.py | {
"start": 4915,
"end": 21901
class ____(mlflow.pyfunc.ChatModel):
    """Test chat model that "calls" the first supplied tool, filling every
    required parameter with a fixed placeholder value."""

    def predict(
        self, context, messages: list[ChatMessage], params: ChatParams
    ) -> ChatCompletionResponse:
        tools = params.tools

        # call the first tool with some value for all the required params
        tool_name = tools[0].function.name
        tool_params = tools[0].function.parameters
        arguments = {}
        for param in tool_params.required:
            param_type = tool_params.properties[param].type
            if param_type == "string":
                arguments[param] = "some_value"
            elif param_type == "number":
                arguments[param] = 123
            elif param_type == "boolean":
                arguments[param] = True
            else:
                # keep the test example simple
                raise ValueError(f"Unsupported param type: {param_type}")

        tool_call = FunctionToolCallArguments(
            name=tool_name,
            arguments=json.dumps(arguments),
        ).to_tool_call(id=uuid.uuid4().hex)

        tool_message = ChatMessage(
            role="assistant",
            tool_calls=[tool_call],
        )
        return ChatCompletionResponse(choices=[ChatChoice(index=0, message=tool_message)])
def test_chat_model_save_load(tmp_path):
    """Saving then loading a ChatModel preserves the chat I/O schemas."""
    model = SimpleChatModel()
    mlflow.pyfunc.save_model(python_model=model, path=tmp_path)

    loaded_model = mlflow.pyfunc.load_model(tmp_path)
    assert isinstance(loaded_model._model_impl, _ChatModelPyfuncWrapper)
    input_schema = loaded_model.metadata.get_input_schema()
    output_schema = loaded_model.metadata.get_output_schema()
    assert input_schema == CHAT_MODEL_INPUT_SCHEMA
    assert output_schema == CHAT_MODEL_OUTPUT_SCHEMA


def test_chat_model_with_trace(tmp_path):
    """predict() produces exactly one trace with the request recorded."""
    model = ChatModelWithTrace()
    mlflow.pyfunc.save_model(python_model=model, path=tmp_path)

    # predict() call during saving chat model should not generate a trace
    assert len(get_traces()) == 0

    loaded_model = mlflow.pyfunc.load_model(tmp_path)
    messages = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello!"},
    ]
    loaded_model.predict({"messages": messages})

    traces = get_traces()
    assert len(traces) == 1
    assert traces[0].info.tags[TraceTagKey.TRACE_NAME] == "predict"
    request = json.loads(traces[0].data.request)
    assert request["messages"] == [asdict(ChatMessage.from_dict(msg)) for msg in messages]


def test_chat_model_save_throws_with_signature(tmp_path):
    """Explicit signatures are rejected; ChatModel fixes its own signature."""
    model = SimpleChatModel()

    with pytest.raises(MlflowException, match="Please remove the `signature` parameter"):
        mlflow.pyfunc.save_model(
            python_model=model,
            path=tmp_path,
            signature=ModelSignature(
                Schema([ColSpec(name="test", type=DataType.string)]),
                Schema([ColSpec(name="test", type=DataType.string)]),
            ),
        )


def mock_predict():
    # Stand-in predict function pickled as a model artifact below.
    return "hello"


def test_chat_model_with_context_saves_successfully(tmp_path):
    """A ChatModel that loads artifacts via its context round-trips."""
    model_path = tmp_path / "model"
    predict_path = tmp_path / "predict.pkl"
    predict_path.write_bytes(pickle.dumps(mock_predict))

    model = ChatModelWithContext()
    mlflow.pyfunc.save_model(
        python_model=model,
        path=model_path,
        artifacts={"predict_fn": str(predict_path)},
    )

    loaded_model = mlflow.pyfunc.load_model(model_path)
    messages = [{"role": "user", "content": "test"}]
    response = loaded_model.predict({"messages": messages})
    expected_response = json.dumps([{"role": "assistant", "content": "hello"}])
    assert response["choices"][0]["message"]["content"] == expected_response
@pytest.mark.parametrize(
    "ret",
    [
        "not a ChatCompletionResponse",
        {"dict": "with", "bad": "keys"},
        {
            "id": "1",
            "created": 1,
            "model": "m",
            "choices": [{"bad": "choice"}],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 10,
                "total_tokens": 20,
            },
        },
    ],
)
def test_save_throws_on_invalid_output(tmp_path, ret):
    """Saving fails when predict() does not return a ChatCompletionResponse."""

    class BadChatModel(mlflow.pyfunc.ChatModel):
        def predict(self, context, messages, params) -> ChatCompletionResponse:
            return ret

    model = BadChatModel()
    with pytest.raises(
        MlflowException,
        match=(
            "Failed to save ChatModel. Please ensure that the model's "
            r"predict\(\) method returns a ChatCompletionResponse object"
        ),
    ):
        mlflow.pyfunc.save_model(python_model=model, path=tmp_path)


# test that we can predict with the model
def test_chat_model_predict(tmp_path):
    """predict() echoes messages and merges default/override params."""
    model = SimpleChatModel()
    mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
    loaded_model = mlflow.pyfunc.load_model(tmp_path)

    messages = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello!"},
    ]
    response = loaded_model.predict({"messages": messages})
    assert response["choices"][0]["message"]["content"] == json.dumps(messages)
    assert json.loads(response["choices"][1]["message"]["content"]) == DEFAULT_PARAMS

    # override all params
    params_override = {
        "temperature": 0.5,
        "max_tokens": 10,
        "stop": ["\n"],
        "n": 2,
        "stream": True,
        "top_p": 0.1,
        "top_k": 20,
        "frequency_penalty": 0.5,
        "presence_penalty": -0.5,
    }
    response = loaded_model.predict({"messages": messages, **params_override})
    assert response["choices"][0]["message"]["content"] == json.dumps(messages)
    assert json.loads(response["choices"][1]["message"]["content"]) == params_override

    # override a subset of params
    params_subset = {
        "max_tokens": 100,
    }
    response = loaded_model.predict({"messages": messages, **params_subset})
    assert response["choices"][0]["message"]["content"] == json.dumps(messages)
    assert json.loads(response["choices"][1]["message"]["content"]) == {
        **DEFAULT_PARAMS,
        **params_subset,
    }
def test_chat_model_works_in_serving():
    """The logged model scores correctly through the serving endpoint."""
    model = SimpleChatModel()
    messages = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello!"},
    ]
    params_subset = {
        "max_tokens": 100,
    }

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model",
            python_model=model,
            input_example=(messages, params_subset),
        )

    inference_payload = load_serving_example(model_info.model_uri)
    response = pyfunc_serve_and_score_model(
        model_uri=model_info.model_uri,
        data=inference_payload,
        content_type="application/json",
        extra_args=["--env-manager", "local"],
    )
    expect_status_code(response, 200)

    choices = json.loads(response.content)["choices"]
    assert choices[0]["message"]["content"] == json.dumps(messages)
    assert json.loads(choices[1]["message"]["content"]) == {
        **DEFAULT_PARAMS,
        **params_subset,
    }


def test_chat_model_works_with_infer_signature_input_example(tmp_path):
    """A dict input example infers the chat schemas and round-trips in serving."""
    model = SimpleChatModel()
    params_subset = {
        "max_tokens": 100,
    }
    input_example = {
        "messages": [
            {
                "role": "user",
                "content": "What is Retrieval-augmented Generation?",
            }
        ],
        **params_subset,
    }

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model", python_model=model, input_example=input_example
        )
    assert model_info.signature.inputs == CHAT_MODEL_INPUT_SCHEMA
    assert model_info.signature.outputs == CHAT_MODEL_OUTPUT_SCHEMA

    mlflow_model = Model.load(model_info.model_uri)
    local_path = _download_artifact_from_uri(model_info.model_uri)
    assert mlflow_model.load_input_example(local_path) == {
        "messages": input_example["messages"],
        **params_subset,
    }

    inference_payload = load_serving_example(model_info.model_uri)
    response = pyfunc_serve_and_score_model(
        model_uri=model_info.model_uri,
        data=inference_payload,
        content_type="application/json",
        extra_args=["--env-manager", "local"],
    )
    expect_status_code(response, 200)

    choices = json.loads(response.content)["choices"]
    assert choices[0]["message"]["content"] == json.dumps(input_example["messages"])
    assert json.loads(choices[1]["message"]["content"]) == {
        **DEFAULT_PARAMS,
        **params_subset,
    }
def test_chat_model_logs_default_metadata_task(tmp_path):
    """ChatModel defaults metadata task to "agent/v1/chat"; overridable."""
    model = SimpleChatModel()
    params_subset = {
        "max_tokens": 100,
    }
    input_example = {
        "messages": [
            {
                "role": "user",
                "content": "What is Retrieval-augmented Generation?",
            }
        ],
        **params_subset,
    }

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model", python_model=model, input_example=input_example
        )
    assert model_info.signature.inputs == CHAT_MODEL_INPUT_SCHEMA
    assert model_info.signature.outputs == CHAT_MODEL_OUTPUT_SCHEMA
    assert model_info.metadata["task"] == "agent/v1/chat"

    with mlflow.start_run():
        model_info_with_override = mlflow.pyfunc.log_model(
            name="model", python_model=model, input_example=input_example, metadata={"task": None}
        )
    assert model_info_with_override.metadata["task"] is None


def test_chat_model_works_with_chat_message_input_example(tmp_path):
    """A list of ChatMessage objects is accepted as the input example."""
    model = SimpleChatModel()
    input_example = [
        ChatMessage(role="user", content="What is Retrieval-augmented Generation?", name="chat")
    ]
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model", python_model=model, input_example=input_example
        )
    assert model_info.signature.inputs == CHAT_MODEL_INPUT_SCHEMA
    assert model_info.signature.outputs == CHAT_MODEL_OUTPUT_SCHEMA

    mlflow_model = Model.load(model_info.model_uri)
    local_path = _download_artifact_from_uri(model_info.model_uri)
    assert mlflow_model.load_input_example(local_path) == {
        "messages": [message.to_dict() for message in input_example],
    }

    inference_payload = load_serving_example(model_info.model_uri)
    response = pyfunc_serve_and_score_model(
        model_uri=model_info.model_uri,
        data=inference_payload,
        content_type="application/json",
        extra_args=["--env-manager", "local"],
    )
    expect_status_code(response, 200)
    choices = json.loads(response.content)["choices"]
    assert choices[0]["message"]["content"] == json.dumps(json.loads(inference_payload)["messages"])


def test_chat_model_works_with_infer_signature_multi_input_example(tmp_path):
    """Multi-message input examples infer schemas and serve correctly."""
    model = SimpleChatModel()
    params_subset = {
        "max_tokens": 100,
    }
    input_example = {
        "messages": [
            {
                "role": "assistant",
                "content": "You are in helpful assistant!",
            },
            {
                "role": "user",
                "content": "What is Retrieval-augmented Generation?",
            },
        ],
        **params_subset,
    }
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model", python_model=model, input_example=input_example
        )
    assert model_info.signature.inputs == CHAT_MODEL_INPUT_SCHEMA
    assert model_info.signature.outputs == CHAT_MODEL_OUTPUT_SCHEMA

    mlflow_model = Model.load(model_info.model_uri)
    local_path = _download_artifact_from_uri(model_info.model_uri)
    assert mlflow_model.load_input_example(local_path) == {
        "messages": input_example["messages"],
        **params_subset,
    }

    inference_payload = load_serving_example(model_info.model_uri)
    response = pyfunc_serve_and_score_model(
        model_uri=model_info.model_uri,
        data=inference_payload,
        content_type="application/json",
        extra_args=["--env-manager", "local"],
    )
    expect_status_code(response, 200)
    choices = json.loads(response.content)["choices"]
    assert choices[0]["message"]["content"] == json.dumps(input_example["messages"])
    assert json.loads(choices[1]["message"]["content"]) == {
        **DEFAULT_PARAMS,
        **params_subset,
    }
def test_chat_model_predict_stream(tmp_path):
    """predict_stream yields incremental deltas ending with an empty delta."""
    model = SimpleChatModel()
    mlflow.pyfunc.save_model(python_model=model, path=tmp_path)
    loaded_model = mlflow.pyfunc.load_model(tmp_path)

    messages = [
        {"role": "system", "content": "You are a helpful assistant"},
        {"role": "user", "content": "Hello!"},
    ]
    responses = list(loaded_model.predict_stream({"messages": messages}))
    for i, resp in enumerate(responses[:-1]):
        assert resp["choices"][0]["delta"]["content"] == f"message {i}"
    assert responses[-1]["choices"][0]["delta"] == {}


def test_chat_model_can_receive_and_return_metadata():
    """custom_inputs flow through to custom_outputs, locally and in serving."""
    messages = [{"role": "user", "content": "Hello!"}]
    params = {
        "custom_inputs": {"image_url": "example", "detail": "high", "other_dict": {"key": "value"}},
    }
    input_example = {
        "messages": messages,
        **params,
    }

    model = ChatModelWithMetadata()
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model",
            python_model=model,
            input_example=input_example,
        )
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)

    # test that it works for normal pyfunc predict
    response = loaded_model.predict({"messages": messages, **params})
    assert response["custom_outputs"] == params["custom_inputs"]

    # test that it works in serving
    inference_payload = load_serving_example(model_info.model_uri)
    response = pyfunc_serve_and_score_model(
        model_uri=model_info.model_uri,
        data=inference_payload,
        content_type="application/json",
        extra_args=["--env-manager", "local"],
    )
    serving_response = json.loads(response.content)
    assert serving_response["custom_outputs"] == params["custom_inputs"]


def test_chat_model_can_use_tool_calls():
    """A tool-calling model emits arguments for every required tool param."""
    messages = [{"role": "user", "content": "What's the weather?"}]
    weather_tool = (
        FunctionToolDefinition(
            name="get_weather",
            description="Get the weather for your current location",
            parameters=ToolParamsSchema(
                {
                    "city": {
                        "type": "string",
                        "description": "The city to get the weather for",
                    },
                    "unit": {"type": "string", "enum": ["F", "C"]},
                },
                required=["city", "unit"],
            ),
        )
        .to_tool_definition()
        .to_dict()
    )
    example = {
        "messages": messages,
        "tools": [weather_tool],
    }

    model = ChatModelWithToolCalling()
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model",
            python_model=model,
            input_example=example,
        )
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)

    response = loaded_model.predict(example)
    model_tool_calls = response["choices"][0]["message"]["tool_calls"]
    assert json.loads(model_tool_calls[0]["function"]["arguments"]) == {
        "city": "some_value",
        "unit": "some_value",
    }


def test_chat_model_without_context_in_predict():
    """predict/predict_stream signatures without `context` are supported."""
    response = ChatCompletionResponse(
        choices=[ChatChoice(message=ChatMessage(role="assistant", content="hi"))]
    )
    chunk_response = ChatCompletionChunk(
        choices=[ChatChunkChoice(delta=ChatChoiceDelta(role="assistant", content="hi"))]
    )

    class Model(mlflow.pyfunc.ChatModel):
        def predict(self, messages: list[ChatMessage], params: ChatParams):
            return response

        def predict_stream(self, messages: list[ChatMessage], params: ChatParams):
            yield chunk_response

    model = Model()
    messages = [ChatMessage(role="user", content="hello?", name="chat")]
    assert model.predict(messages, ChatParams()) == response
    assert next(iter(model.predict_stream(messages, ChatParams()))) == chunk_response

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="model", python_model=model, input_example=messages
        )
    pyfunc_model = mlflow.pyfunc.load_model(model_info.model_uri)

    input_data = {"messages": [{"role": "user", "content": "hello"}]}
    assert pyfunc_model.predict(input_data) == response.to_dict()
    assert next(iter(pyfunc_model.predict_stream(input_data))) == chunk_response.to_dict()
| ChatModelWithToolCalling |
python | kamyu104__LeetCode-Solutions | Python/graph-valid-tree.py | {
"start": 118,
"end": 947
} | class ____(object):
# @param {integer} n
# @param {integer[][]} edges
# @return {boolean}
def validTree(self, n, edges):
if len(edges) != n - 1: # Check number of edges.
return False
# init node's neighbors in dict
neighbors = collections.defaultdict(list)
for u, v in edges:
neighbors[u].append(v)
neighbors[v].append(u)
# BFS to check whether the graph is valid tree.
q = collections.deque([0])
visited = set([0])
while q:
curr = q.popleft()
for node in neighbors[curr]:
if node not in visited:
visited.add(node)
q.append(node)
return len(visited) == n
# Time: O(|V| + |E|)
# Space: O(|V| + |E|)
# BFS solution.
| Solution |
python | PyCQA__pylint | pylint/testutils/checker_test_case.py | {
"start": 590,
"end": 3325
class ____:
    """A base testcase class for unit testing individual checker classes."""

    # TODO: Figure out way to type this as type[BaseChecker] while also
    # setting self.checker correctly.
    CHECKER_CLASS: Any
    CONFIG: dict[str, Any] = {}

    def setup_method(self) -> None:
        # Fresh linter + checker per test; CONFIG entries are applied as
        # linter config attributes before the checker is opened.
        self.linter = UnittestLinter()
        self.checker = self.CHECKER_CLASS(self.linter)
        for key, value in self.CONFIG.items():
            setattr(self.checker.linter.config, key, value)
        self.checker.open()

    @contextlib.contextmanager
    def assertNoMessages(self) -> Iterator[None]:
        """Assert that no messages are added by the given method."""
        with self.assertAddsMessages():
            yield

    @contextlib.contextmanager
    def assertAddsMessages(
        self, *messages: MessageTest, ignore_position: bool = False
    ) -> Generator[None]:
        """Assert that exactly the given method adds the given messages.

        The list of messages must exactly match *all* the messages added by the
        method. Additionally, we check to see whether the args in each message can
        actually be substituted into the message string.

        Using the keyword argument `ignore_position`, all checks for position
        arguments (line, col_offset, ...) will be skipped. This can be used to
        just test messages for the correct node.
        """
        yield
        got = self.linter.release_messages()
        no_msg = "No message."
        expected = "\n".join(repr(m) for m in messages) or no_msg
        got_str = "\n".join(repr(m) for m in got) or no_msg
        msg = (
            "Expected messages did not match actual.\n"
            f"\nExpected:\n{expected}\n\nGot:\n{got_str}\n"
        )
        assert len(messages) == len(got), msg
        for expected_msg, gotten_msg in zip(messages, got):
            assert expected_msg.msg_id == gotten_msg.msg_id, msg
            assert expected_msg.node == gotten_msg.node, msg
            assert expected_msg.args == gotten_msg.args, msg
            assert expected_msg.confidence == gotten_msg.confidence, msg
            if ignore_position:
                # Do not check for line, col_offset etc...
                continue
            assert expected_msg.line == gotten_msg.line, msg
            assert expected_msg.col_offset == gotten_msg.col_offset, msg
            assert expected_msg.end_line == gotten_msg.end_line, msg
            assert expected_msg.end_col_offset == gotten_msg.end_col_offset, msg

    def walk(self, node: nodes.NodeNG) -> None:
        """Recursive walk on the given node."""
        # NOTE(review): `linter` is a module-level name (not self.linter) —
        # presumably the shared global test linter; confirm against imports.
        walker = ASTWalker(linter)
        walker.add_checker(self.checker)
        walker.walk(node)
| CheckerTestCase |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/mapping/partition_mapping.py | {
"start": 572,
"end": 1384
class ____:
    """Represents the result of mapping a PartitionsSubset to the corresponding
    partitions in another PartitionsDefinition.

    partitions_subset (PartitionsSubset): The resulting partitions subset that was
        mapped to. Only contains partitions for existent partitions, filtering out
        nonexistent partitions.
    required_but_nonexistent_subset (PartitionsSubset): A set of invalid partition
        keys in to_partitions_def that partitions in from_partitions_subset were
        mapped to.
    """

    partitions_subset: PartitionsSubset
    required_but_nonexistent_subset: PartitionsSubset

    @cached_property
    def required_but_nonexistent_partition_keys(self) -> Sequence[str]:
        # Materialize the invalid keys once; cached for repeated access.
        return list(self.required_but_nonexistent_subset.get_partition_keys())
@public
| UpstreamPartitionsResult |
python | python-visualization__folium | folium/plugins/draw.py | {
"start": 119,
"end": 6337
} | class ____(JSCSSMixin, MacroElement):
'''
Vector drawing and editing plugin for Leaflet.
Parameters
----------
export : bool, default False
Add a small button that exports the drawn shapes as a geojson file.
feature_group : FeatureGroup, optional
The FeatureGroup object that will hold the editable figures. This can
be used to initialize the Draw plugin with predefined Layer objects.
filename : string, default 'data.geojson'
Name of geojson file
position : {'topleft', 'toprigth', 'bottomleft', 'bottomright'}
Position of control.
See https://leafletjs.com/reference.html#control
show_geometry_on_click : bool, default True
When True, opens an alert with the geometry description on click.
draw_options : dict, optional
The options used to configure the draw toolbar. See
http://leaflet.github.io/Leaflet.draw/docs/leaflet-draw-latest.html#drawoptions
edit_options : dict, optional
The options used to configure the edit toolbar. See
https://leaflet.github.io/Leaflet.draw/docs/leaflet-draw-latest.html#editpolyoptions
on : dict, optional
Event handlers to attach to the created layer. Pass a mapping from the
names of the events to their `JsCode` handlers.
Examples
--------
>>> m = folium.Map()
>>> Draw(
... export=True,
... filename="my_data.geojson",
... show_geometry_on_click=False,
... position="topleft",
... draw_options={"polyline": {"allowIntersection": False}},
... edit_options={"poly": {"allowIntersection": False}},
... on={
... "click": JsCode(
... """
... function(event) {
... alert(JSON.stringify(this.toGeoJSON()));
... }
... """
... )
... },
... ).add_to(m)
For more info please check
https://leaflet.github.io/Leaflet.draw/docs/leaflet-draw-latest.html
'''
_template = Template(
"""
{% macro html(this, kwargs) %}
{% if this.export %}
<style>
#export {
position: absolute;
top: 5px;
right: 10px;
z-index: 999;
background: white;
color: black;
padding: 6px;
border-radius: 4px;
font-family: 'Helvetica Neue';
cursor: pointer;
font-size: 12px;
text-decoration: none;
top: 90px;
}
</style>
<a href='#' id='export'>Export</a>
{% endif %}
{% endmacro %}
{% macro script(this, kwargs) %}
var options = {
position: {{ this.position|tojson }},
draw: {{ this.draw_options|tojson }},
edit: {{ this.edit_options|tojson }},
}
{%- if this.feature_group %}
var drawnItems_{{ this.get_name() }} =
{{ this.feature_group.get_name() }};
{%- else %}
// FeatureGroup is to store editable layers.
var drawnItems_{{ this.get_name() }} =
new L.featureGroup().addTo(
{{ this._parent.get_name() }}
);
{%- endif %}
options.edit.featureGroup = drawnItems_{{ this.get_name() }};
var {{ this.get_name() }} = new L.Control.Draw(
options
).addTo( {{this._parent.get_name()}} );
{{ this._parent.get_name() }}.on(L.Draw.Event.CREATED, function(e) {
var layer = e.layer,
type = e.layerType;
var coords = JSON.stringify(layer.toGeoJSON());
{%- if this.show_geometry_on_click %}
layer.on('click', function() {
alert(coords);
console.log(coords);
});
{%- endif %}
{%- for event, handler in this.on.items() %}
layer.on(
"{{event}}",
{{handler}}
);
{%- endfor %}
drawnItems_{{ this.get_name() }}.addLayer(layer);
});
{{ this._parent.get_name() }}.on('draw:created', function(e) {
drawnItems_{{ this.get_name() }}.addLayer(e.layer);
});
{% if this.export %}
document.getElementById('export').onclick = function(e) {
var data = drawnItems_{{ this.get_name() }}.toGeoJSON();
var convertedData = 'text/json;charset=utf-8,'
+ encodeURIComponent(JSON.stringify(data));
document.getElementById('export').setAttribute(
'href', 'data:' + convertedData
);
document.getElementById('export').setAttribute(
'download', {{ this.filename|tojson }}
);
}
{% endif %}
{% endmacro %}
"""
)
default_js = [
(
"leaflet_draw_js",
"https://cdnjs.cloudflare.com/ajax/libs/leaflet.draw/1.0.2/leaflet.draw.js",
)
]
default_css = [
(
"leaflet_draw_css",
"https://cdnjs.cloudflare.com/ajax/libs/leaflet.draw/1.0.2/leaflet.draw.css",
)
]
def __init__(
self,
export=False,
feature_group=None,
filename="data.geojson",
position="topleft",
show_geometry_on_click=True,
draw_options=None,
edit_options=None,
on=None,
):
super().__init__()
self._name = "DrawControl"
self.export = export
self.feature_group = feature_group
self.filename = filename
self.position = position
self.show_geometry_on_click = show_geometry_on_click
self.draw_options = draw_options or {}
self.edit_options = edit_options or {}
self.on = on or {}
| Draw |
python | numba__numba | numba/core/types/iterators.py | {
"start": 1771,
"end": 2326
} | class ____(SimpleIteratorType):
"""
Type class for `enumerate` objects.
Type instances are parametered with the underlying source type.
"""
def __init__(self, iterable_type):
from numba.core.types import Tuple, intp
self.source_type = iterable_type.iterator_type
yield_type = Tuple([intp, self.source_type.yield_type])
name = 'enumerate(%s)' % (self.source_type)
super(EnumerateType, self).__init__(name, yield_type)
@property
def key(self):
return self.source_type
| EnumerateType |
python | scipy__scipy | scipy/interpolate/tests/test_fitpack.py | {
"start": 7923,
"end": 10551
} | class ____:
def setup_method(self):
# non-uniform grid, just to make it sure
x = np.linspace(0, 1, 100)**3
y = np.sin(20 * x)
self.spl = splrep(x, y)
# double check that knots are non-uniform
assert np.ptp(np.diff(self.spl[0])) > 0
def test_inverse(self):
# Check that antiderivative + derivative is identity.
for n in range(5):
spl2 = splantider(self.spl, n)
spl3 = splder(spl2, n)
xp_assert_close(self.spl[0], spl3[0])
xp_assert_close(self.spl[1], spl3[1])
assert self.spl[2] == spl3[2]
def test_splder_vs_splev(self):
# Check derivative vs. FITPACK
for n in range(3+1):
# Also extrapolation!
xx = np.linspace(-1, 2, 2000)
if n == 3:
# ... except that FITPACK extrapolates strangely for
# order 0, so let's not check that.
xx = xx[(xx >= 0) & (xx <= 1)]
dy = splev(xx, self.spl, n)
spl2 = splder(self.spl, n)
dy2 = splev(xx, spl2)
if n == 1:
xp_assert_close(dy, dy2, rtol=2e-6)
else:
xp_assert_close(dy, dy2)
def test_splantider_vs_splint(self):
# Check antiderivative vs. FITPACK
spl2 = splantider(self.spl)
# no extrapolation, splint assumes function is zero outside
# range
xx = np.linspace(0, 1, 20)
for x1 in xx:
for x2 in xx:
y1 = splint(x1, x2, self.spl)
y2 = splev(x2, spl2) - splev(x1, spl2)
xp_assert_close(np.asarray(y1), np.asarray(y2))
def test_order0_diff(self):
assert_raises(ValueError, splder, self.spl, 4)
def test_kink(self):
# Should refuse to differentiate splines with kinks
spl2 = insert(0.5, self.spl, m=2)
splder(spl2, 2) # Should work
assert_raises(ValueError, splder, spl2, 3)
spl2 = insert(0.5, self.spl, m=3)
splder(spl2, 1) # Should work
assert_raises(ValueError, splder, spl2, 2)
spl2 = insert(0.5, self.spl, m=4)
assert_raises(ValueError, splder, spl2, 1)
def test_multidim(self):
# c can have trailing dims
for n in range(3):
t, c, k = self.spl
c2 = np.c_[c, c, c]
c2 = np.dstack((c2, c2))
spl2 = splantider((t, c2, k), n)
spl3 = splder(spl2, n)
xp_assert_close(t, spl3[0])
xp_assert_close(c2, spl3[1])
assert k == spl3[2]
| TestSplder |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingLiteralMember1.py | {
"start": 2644,
"end": 2864
} | class ____:
@property
def type(self) -> Literal[1]:
return 1
def test(x: E | F) -> None:
if x.type == 1:
reveal_type(x, expected_type="F")
else:
reveal_type(x, expected_type="E")
| F |
python | numba__numba | numba/tests/test_datamodel.py | {
"start": 694,
"end": 757
} | class ____(test_factory()):
fe_type = types.uint16
| TestUInt16 |
python | PyCQA__pylint | tests/functional/u/unnecessary/unnecessary_dunder_call.py | {
"start": 1214,
"end": 1282
} | class ____:
def __init__(self):
super().__init__(self)
| Foo2 |
python | django__django | tests/forms_tests/tests/test_input_formats.py | {
"start": 4644,
"end": 8759
} | class ____(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.enterClassContext(translation.override(None))
super().setUpClass()
def test_timeField(self):
"TimeFields can parse dates in the default format"
f = forms.TimeField()
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("1:30:05 PM")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid, but non-default format, get a parsed result
result = f.clean("1:30 PM")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField(self):
"Localized TimeFields act as unlocalized widgets"
f = forms.TimeField(localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("1:30:05 PM")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean("01:30 PM")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_timeField_with_inputformat(self):
"""
TimeFields with manually specified input formats can accept those
formats
"""
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"])
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("1:30:05 PM")
with self.assertRaises(ValidationError):
f.clean("13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("13.30.05")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean("13.30")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
def test_localized_timeField_with_inputformat(self):
"""
Localized TimeFields with manually specified input formats can accept
those formats.
"""
f = forms.TimeField(input_formats=["%H.%M.%S", "%H.%M"], localize=True)
# Parse a time in an unaccepted format; get an error
with self.assertRaises(ValidationError):
f.clean("1:30:05 PM")
with self.assertRaises(ValidationError):
f.clean("13:30:05")
# Parse a time in a valid format, get a parsed result
result = f.clean("13.30.05")
self.assertEqual(result, time(13, 30, 5))
# The parsed result does a round trip to the same format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:05 PM")
# Parse a time in a valid format, get a parsed result
result = f.clean("13.30")
self.assertEqual(result, time(13, 30, 0))
# The parsed result does a round trip to default format
text = f.widget.format_value(result)
self.assertEqual(text, "01:30:00 PM")
| CustomTimeInputFormatsTests |
python | ray-project__ray | release/ray_release/tests/test_state_machine.py | {
"start": 2762,
"end": 11158
} | class ____:
def builds(self):
return MockBuildkiteBuild()
def jobs(self):
return MockBuildkiteJob()
TestStateMachine.ray_repo = MockRepo()
TestStateMachine.ray_buildkite = MockBuildkite()
def test_ci_empty_results():
test = Test(name="w00t", team="ci", state=TestState.FLAKY)
test.test_results = []
CITestStateMachine(test).move()
# do not change the state
assert test.get_state() == TestState.FLAKY
def test_ci_move_from_passing_to_flaky():
"""
Test the entire lifecycle of a CI test when it moves from passing to flaky.
"""
test = Test(name="w00t", team="ci")
# start from passing
assert test.get_state() == TestState.PASSING
# passing to flaky
test.test_results = [
TestResult.from_result(Result(status=ResultStatus.SUCCESS.value)),
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
] * 10
CITestStateMachine(test).move()
assert test.get_state() == TestState.FLAKY
issue = MockIssueDB.issue_db[test.get(Test.KEY_GITHUB_ISSUE_NUMBER)]
assert issue.state == "open"
assert issue.title == "CI test w00t is flaky"
# flaky to jail
issue.edit(labels=[MockLabel(JAILED_TAG)])
CITestStateMachine(test).move()
assert test.get_state() == TestState.JAILED
assert issue.comments[-1] == JAILED_MESSAGE
def test_ci_move_from_passing_to_failing_to_flaky():
"""
Test the entire lifecycle of a CI test when it moves from passing to failing.
Check that the conditions are met for each state transition. Also check that
gihub issues are created and closed correctly.
"""
test = Test(name="test", team="ci")
# start from passing
assert test.get_state() == TestState.PASSING
# passing to failing
test.test_results = [
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
]
CITestStateMachine(test).move()
assert test.get_state() == TestState.FAILING
# failing to consistently failing
test.test_results.extend(
[
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
]
)
CITestStateMachine(test).move()
assert test.get_state() == TestState.CONSITENTLY_FAILING
issue = MockIssueDB.issue_db[test.get(Test.KEY_GITHUB_ISSUE_NUMBER)]
assert issue.state == "open"
assert "ci-test" in [label.name for label in issue.labels]
# move from consistently failing to flaky
test.test_results.extend(
[TestResult.from_result(Result(status=ResultStatus.ERROR.value))]
* CONTINUOUS_FAILURE_TO_FLAKY
)
CITestStateMachine(test).move()
assert test.get_state() == TestState.FLAKY
assert issue.comments[-1] == FAILING_TO_FLAKY_MESSAGE
# go back to passing
test.test_results = [
TestResult.from_result(Result(status=ResultStatus.SUCCESS.value)),
] * CONTINUOUS_PASSING_TO_PASSING
CITestStateMachine(test).move()
assert test.get_state() == TestState.PASSING
assert test.get(Test.KEY_GITHUB_ISSUE_NUMBER) == issue.number
assert issue.state == "closed"
# go back to failing and reuse the github issue
test.test_results = 3 * [
TestResult.from_result(Result(status=ResultStatus.ERROR.value))
]
CITestStateMachine(test).move()
assert test.get_state() == TestState.CONSITENTLY_FAILING
assert test.get(Test.KEY_GITHUB_ISSUE_NUMBER) == issue.number
assert issue.state == "open"
def test_release_move_from_passing_to_failing():
test = Test(name="test", team="ci")
# Test original state
test.test_results = [
TestResult.from_result(Result(status=ResultStatus.SUCCESS.value)),
]
assert test.get_state() == TestState.PASSING
# Test moving from passing to failing
test.test_results.insert(
0,
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
)
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.FAILING
assert test[Test.KEY_BISECT_BUILD_NUMBER] == 1
# Test moving from failing to consistently failing
test.test_results.insert(
0,
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
)
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.CONSITENTLY_FAILING
assert test[Test.KEY_GITHUB_ISSUE_NUMBER] == MockIssueDB.issue_id - 1
def test_release_move_from_failing_to_consisently_failing():
test = Test(name="test", team="ci", stable=False)
test[Test.KEY_BISECT_BUILD_NUMBER] = 1
test.test_results = [
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
]
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.FAILING
test[Test.KEY_BISECT_BLAMED_COMMIT] = "1234567890"
sm = ReleaseTestStateMachine(test)
sm.move()
sm.comment_blamed_commit_on_github_issue()
issue = MockIssueDB.issue_db[test.get(Test.KEY_GITHUB_ISSUE_NUMBER)]
assert test.get_state() == TestState.CONSITENTLY_FAILING
assert "Blamed commit: 1234567890" in issue.comments[0]
labels = [label.name for label in issue.get_labels()]
assert "ci" in labels
assert "unstable-release-test" in labels
def test_release_move_from_failing_to_passing():
test = Test(name="test", team="ci")
test.test_results = [
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
]
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.CONSITENTLY_FAILING
assert test[Test.KEY_GITHUB_ISSUE_NUMBER] == MockIssueDB.issue_id - 1
test.test_results.insert(
0,
TestResult.from_result(Result(status=ResultStatus.SUCCESS.value)),
)
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.PASSING
assert test.get(Test.KEY_BISECT_BUILD_NUMBER) is None
assert test.get(Test.KEY_BISECT_BLAMED_COMMIT) is None
def test_release_move_from_failing_to_jailed():
test = Test(name="test", team="ci")
test.test_results = [
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
]
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.CONSITENTLY_FAILING
test.test_results.insert(
0,
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
)
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.JAILED
# Test moving from jailed to jailed
issue = MockIssueDB.issue_db[test.get(Test.KEY_GITHUB_ISSUE_NUMBER)]
issue.edit(state="closed")
test.test_results.insert(
0,
TestResult.from_result(Result(status=ResultStatus.ERROR.value)),
)
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.JAILED
assert issue.state == "open"
# Test moving from jailed to passing
test.test_results.insert(
0,
TestResult.from_result(Result(status=ResultStatus.SUCCESS.value)),
)
sm = ReleaseTestStateMachine(test)
sm.move()
assert test.get_state() == TestState.PASSING
assert issue.state == "closed"
def test_get_release_blockers() -> None:
MockIssueDB.issue_id = 1
MockIssueDB.issue_db = {}
TestStateMachine.ray_repo.create_issue(labels=["non-blocker"], title="non-blocker")
TestStateMachine.ray_repo.create_issue(
labels=[WEEKLY_RELEASE_BLOCKER_TAG], title="blocker"
)
issues = TestStateMachine.get_release_blockers()
assert len(issues) == 1
assert issues[0].title == "blocker"
def test_get_issue_owner() -> None:
issue = TestStateMachine.ray_repo.create_issue(labels=["core"], title="hi")
assert TestStateMachine.get_issue_owner(issue) == "core"
issue = TestStateMachine.ray_repo.create_issue(labels=["w00t"], title="bye")
assert TestStateMachine.get_issue_owner(issue) == NO_TEAM
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| MockBuildkite |
python | huggingface__transformers | src/transformers/models/granitemoeshared/modeling_granitemoeshared.py | {
"start": 4775,
"end": 6466
} | class ____(nn.Module):
def __init__(self, num_experts: int, input_size: int, output_size: int) -> None:
"""
Initialize the GraniteMoeSharedParallelExperts module.
The experts weights are stored in [num_experts, output_size, input_size] format. Such that it's compatible with
many MoE libraries, such as [Megablock](https://github.com/databricks/megablocks) and
[ScatterMoE](https://github.com/shawntan/scattermoe), as well as the
[MoE kernel](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/layers/fused_moe/fused_moe.py)
used in vllm.
Args:
num_experts (int):
Number of experts.
input_size (int):
Size of the input.
output_size (int):
Size of the output.
"""
super().__init__()
self.weight = nn.Parameter(torch.empty(num_experts, output_size, input_size))
self.num_experts = num_experts
self.input_size = input_size
self.output_size = output_size
def forward(self, inputs, expert_size):
"""
Forward pass of the GraniteMoeSharedParallelExperts module.
Args:
inputs (Tensor):
Input tensor.
expert_size:
Expert size information.
Returns:
Tensor: Output tensor.
"""
input_list = inputs.split(expert_size, dim=0)
output_list = []
for i in range(self.num_experts):
output_list.append(F.linear(input_list[i], self.weight[i]))
results = torch.cat(output_list, dim=0)
return results
| GraniteMoeSharedParallelExperts |
python | django-haystack__django-haystack | test_haystack/test_indexes.py | {
"start": 2973,
"end": 3349
} | class ____(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True, index_fieldname="more_content"
)
author = indexes.CharField(model_attr="author", index_fieldname="name_s")
hello = indexes.CharField(model_attr="hello")
def get_model(self):
return MockModel
| GoodOverriddenFieldNameMockSearchIndex |
python | mlflow__mlflow | mlflow/deployments/__init__.py | {
"start": 1550,
"end": 4763
} | class ____(dict):
"""
Represents the predictions and metadata returned in response to a scoring request, such as a
REST API request sent to the ``/invocations`` endpoint of an MLflow Model Server.
"""
def get_predictions(self, predictions_format="dataframe", dtype=None):
"""Get the predictions returned from the MLflow Model Server in the specified format.
Args:
predictions_format: The format in which to return the predictions. Either
``"dataframe"`` or ``"ndarray"``.
dtype: The NumPy datatype to which to coerce the predictions. Only used when
the "ndarray" predictions_format is specified.
Raises:
Exception: If the predictions cannot be represented in the specified format.
Returns:
The predictions, represented in the specified format.
"""
import numpy as np
import pandas as pd
from pandas.core.dtypes.common import is_list_like
if predictions_format == "dataframe":
predictions = self["predictions"]
if isinstance(predictions, str):
return pd.DataFrame(data=[predictions])
if isinstance(predictions, dict) and not any(
is_list_like(p) and getattr(p, "ndim", 1) == 1 for p in predictions.values()
):
return pd.DataFrame(data=predictions, index=[0])
return pd.DataFrame(data=predictions)
elif predictions_format == "ndarray":
return np.array(self["predictions"], dtype)
else:
raise MlflowException(
f"Unrecognized predictions format: '{predictions_format}'",
INVALID_PARAMETER_VALUE,
)
def to_json(self, path=None):
"""Get the JSON representation of the MLflow Predictions Response.
Args:
path: If specified, the JSON representation is written to this file path.
Returns:
If ``path`` is unspecified, the JSON representation of the MLflow Predictions
Response. Else, None.
"""
if path is not None:
with open(path, "w") as f:
json.dump(dict(self), f)
else:
return json.dumps(dict(self))
@classmethod
def from_json(cls, json_str):
try:
parsed_response = json.loads(json_str)
except Exception as e:
raise MlflowException("Predictions response contents are not valid JSON") from e
if not isinstance(parsed_response, dict) or "predictions" not in parsed_response:
raise MlflowException(
f"Invalid response. Predictions response contents must be a dictionary"
f" containing a 'predictions' field. Instead, received: {parsed_response}"
)
return PredictionsResponse(parsed_response)
__all__ = [
"get_deploy_client",
"run_local",
"BaseDeploymentClient",
"DatabricksDeploymentClient",
"OpenAIDeploymentClient",
"DatabricksEndpoint",
"MlflowDeploymentClient",
"PredictionsResponse",
"get_deployments_target",
"set_deployments_target",
]
| PredictionsResponse |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 40092,
"end": 40643
} | class ____(DelegatingLexer):
"""
Subclass of the `SmartyLexer` that highlights unlexed data with the
`XmlLexer`.
"""
name = 'XML+Smarty'
aliases = ['xml+smarty']
alias_filenames = ['*.xml', '*.tpl']
mimetypes = ['application/xml+smarty']
def __init__(self, **options):
super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)
def analyse_text(text):
rv = SmartyLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
| XmlSmartyLexer |
python | pytorch__pytorch | torch/serialization.py | {
"start": 25219,
"end": 25460
} | class ____(_opener[IO[bytes]]):
def __init__(self, name: Union[str, os.PathLike[str]], mode: str) -> None:
super().__init__(open(name, mode)) # noqa: SIM115
def __exit__(self, *args):
self.file_like.close()
| _open_file |
python | getsentry__sentry | tests/sentry/integrations/test_base.py | {
"start": 560,
"end": 2531
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user()
self.organization = self.create_organization()
self.project = self.create_project()
(
self.model,
self.org_integration,
self.identity,
identity_provider,
) = self.create_identity_integration(
user=self.user,
organization=self.organization,
integration_params={
"provider": "integrations:base",
"external_id": "base_external_id",
"name": "base_name",
},
identity_params={"external_id": "base_id", "data": {"access_token": "11234567"}},
)
def test_with_context(self) -> None:
integration = ExampleIntegration(self.model, self.organization.id)
assert integration.model.id == self.model.id
assert integration.org_integration is not None
assert integration.org_integration.id == self.org_integration.id
assert integration.default_identity == serialize_identity(self.identity)
def test_missing_org_integration(self) -> None:
with pytest.raises(OrganizationIntegrationNotFound):
ExampleIntegration(self.model, -1).org_integration
with pytest.raises(Identity.DoesNotExist):
ExampleIntegration(self.model, -1).default_identity
def test_model_default_fields(self) -> None:
# These fields are added through the DefaultFieldsModel
# and date_updated should get automatically updated any
# time the model is saved
assert self.model.date_added
assert self.model.date_updated
initial_value = self.model.date_updated
with assume_test_silo_mode(SiloMode.CONTROL):
self.model.name = "cooler_name"
self.model.save()
self.model.refresh_from_db()
assert initial_value < self.model.date_updated
| IntegrationTestCase |
python | bokeh__bokeh | tests/unit/bokeh/application/handlers/test_script.py | {
"start": 1440,
"end": 2980
} | class ____:
# Public methods ----------------------------------------------------------
def test_runner_uses_source_from_filename(self) -> None:
doc = Document()
source = "# Test contents for script"
result = {}
def load(filename):
handler = bahs.ScriptHandler(filename=filename)
handler.modify_document(doc)
result['handler'] = handler
result['filename'] = filename
with_file_contents(source, load)
assert result['handler']._runner.path == result['filename']
assert result['handler']._runner.source == source
assert not doc.roots
def test_runner_script_with_encoding(self) -> None:
doc = Document()
source = "# -*- coding: utf-8 -*-\nimport os"
result = {}
def load(filename):
handler = bahs.ScriptHandler(filename=filename)
handler.modify_document(doc)
result['handler'] = handler
result['filename'] = filename
with_file_contents(source, load)
assert result['handler'].error is None
assert result['handler'].failed is False
assert not doc.roots
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Test_ScriptHandler |
python | pydata__xarray | xarray/core/common.py | {
"start": 8361,
"end": 12725
} | class ____:
"""Mixin class that allows getting keys with attribute access"""
__slots__ = ()
def __init_subclass__(cls, **kwargs):
"""Verify that all subclasses explicitly define ``__slots__``. If they don't,
raise error in the core xarray module and a FutureWarning in third-party
extensions.
"""
if not hasattr(object.__new__(cls), "__dict__"):
pass
elif cls.__module__.startswith("xarray."):
raise AttributeError(f"{cls.__name__} must explicitly define __slots__")
else:
cls.__setattr__ = cls._setattr_dict
warnings.warn(
f"xarray subclass {cls.__name__} should explicitly define __slots__",
FutureWarning,
stacklevel=2,
)
super().__init_subclass__(**kwargs)
@property
def _attr_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for attribute-style access"""
yield from ()
@property
def _item_sources(self) -> Iterable[Mapping[Hashable, Any]]:
"""Places to look-up items for key-autocompletion"""
yield from ()
def __getattr__(self, name: str) -> Any:
if name not in {"__dict__", "__setstate__"}:
# this avoids an infinite loop when pickle looks for the
# __setstate__ attribute before the xarray object is initialized
for source in self._attr_sources:
with suppress(KeyError):
return source[name]
raise AttributeError(
f"{type(self).__name__!r} object has no attribute {name!r}"
)
# This complicated two-method design boosts overall performance of simple operations
# - particularly DataArray methods that perform a _to_temp_dataset() round-trip - by
# a whopping 8% compared to a single method that checks hasattr(self, "__dict__") at
# runtime before every single assignment. All of this is just temporary until the
# FutureWarning can be changed into a hard crash.
def _setattr_dict(self, name: str, value: Any) -> None:
"""Deprecated third party subclass (see ``__init_subclass__`` above)"""
object.__setattr__(self, name, value)
if name in self.__dict__:
# Custom, non-slotted attr, or improperly assigned variable?
warnings.warn(
f"Setting attribute {name!r} on a {type(self).__name__!r} object. Explicitly define __slots__ "
"to suppress this warning for legitimate custom attributes and "
"raise an error when attempting variables assignments.",
FutureWarning,
stacklevel=2,
)
def __setattr__(self, name: str, value: Any) -> None:
"""Objects with ``__slots__`` raise AttributeError if you try setting an
undeclared attribute. This is desirable, but the error message could use some
improvement.
"""
try:
object.__setattr__(self, name, value)
except AttributeError as e:
# Don't accidentally shadow custom AttributeErrors, e.g.
# DataArray.dims.setter
if str(e) != f"{type(self).__name__!r} object has no attribute {name!r}":
raise
raise AttributeError(
f"cannot set attribute {name!r} on a {type(self).__name__!r} object. Use __setitem__ style"
"assignment (e.g., `ds['name'] = ...`) instead of assigning variables."
) from e
def __dir__(self) -> list[str]:
"""Provide method name lookup and completion. Only provide 'public'
methods.
"""
extra_attrs = {
item
for source in self._attr_sources
for item in source
if isinstance(item, str)
}
return sorted(set(dir(type(self))) | extra_attrs)
def _ipython_key_completions_(self) -> list[str]:
"""Provide method for the key-autocompletions in IPython.
See https://ipython.readthedocs.io/en/stable/config/integrating.html#tab-completion
For the details.
"""
items = {
item
for source in self._item_sources
for item in source
if isinstance(item, str)
}
return list(items)
| AttrAccessMixin |
python | django__django | tests/schema/models.py | {
"start": 4749,
"end": 4942
} | class ____(models.Model):
year = models.IntegerField()
slug = models.SlugField(unique=False)
class Meta:
apps = new_apps
unique_together = ["year", "slug"]
| UniqueTest |
python | lazyprogrammer__machine_learning_examples | rl/linear_rl_trader.py | {
"start": 6206,
"end": 9829
} | class ____(object):
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.model = LinearModel(state_size, action_size)
def act(self, state):
if np.random.rand() <= self.epsilon:
return np.random.choice(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def train(self, state, action, reward, next_state, done):
if done:
target = reward
else:
target = reward + self.gamma * np.amax(self.model.predict(next_state), axis=1)
target_full = self.model.predict(state)
target_full[0, action] = target
# Run one training step
self.model.sgd(state, target_full)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
def play_one_episode(agent, env, is_train):
# note: after transforming states are already 1xD
state = env.reset()
state = scaler.transform([state])
done = False
while not done:
action = agent.act(state)
next_state, reward, done, info = env.step(action)
next_state = scaler.transform([next_state])
if is_train == 'train':
agent.train(state, action, reward, next_state, done)
state = next_state
return info['cur_val']
if __name__ == '__main__':
# config
models_folder = 'linear_rl_trader_models'
rewards_folder = 'linear_rl_trader_rewards'
num_episodes = 2000
batch_size = 32
initial_investment = 20000
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', type=str, required=True,
help='either "train" or "test"')
args = parser.parse_args()
maybe_make_dir(models_folder)
maybe_make_dir(rewards_folder)
data = get_data()
n_timesteps, n_stocks = data.shape
n_train = n_timesteps // 2
train_data = data[:n_train]
test_data = data[n_train:]
env = MultiStockEnv(train_data, initial_investment)
state_size = env.state_dim
action_size = len(env.action_space)
agent = DQNAgent(state_size, action_size)
scaler = get_scaler(env)
# store the final value of the portfolio (end of episode)
portfolio_value = []
if args.mode == 'test':
# then load the previous scaler
with open(f'{models_folder}/scaler.pkl', 'rb') as f:
scaler = pickle.load(f)
# remake the env with test data
env = MultiStockEnv(test_data, initial_investment)
# make sure epsilon is not 1!
# no need to run multiple episodes if epsilon = 0, it's deterministic
agent.epsilon = 0.01
# load trained weights
agent.load(f'{models_folder}/linear.npz')
# play the game num_episodes times
for e in range(num_episodes):
t0 = datetime.now()
val = play_one_episode(agent, env, args.mode)
dt = datetime.now() - t0
print(f"episode: {e + 1}/{num_episodes}, episode end value: {val:.2f}, duration: {dt}")
portfolio_value.append(val) # append episode end portfolio value
# save the weights when we are done
if args.mode == 'train':
# save the DQN
agent.save(f'{models_folder}/linear.npz')
# save the scaler
with open(f'{models_folder}/scaler.pkl', 'wb') as f:
pickle.dump(scaler, f)
# plot losses
plt.plot(agent.model.losses)
plt.show()
# save portfolio value for each episode
np.save(f'{rewards_folder}/{args.mode}.npy', portfolio_value)
| DQNAgent |
python | scipy__scipy | benchmarks/benchmarks/integrate.py | {
"start": 7767,
"end": 9941
} | class ____(Benchmark):
params = (
# rule
[
"genz-malik",
"gk15",
"gk21",
],
# input dimension of integrand (ndim)
[1, 3, 5],
# output dimension of integrand (fdim)
[1, 8],
# rtol
[1e-10, 1e-11],
)
param_names = ["rule", "ndim", "fdim", "rtol"]
def setup(self, rule, ndim, fdim, rtol):
self.ndim = ndim
self.fdim = fdim
self.rtol = rtol
self.atol = 0
self.a = np.zeros(self.ndim)
self.b = np.repeat(1, self.ndim)
self.rule = rule
self.pool = ThreadPoolExecutor(2)
if rule == "genz-malik" and ndim == 1:
raise SkipNotImplemented(f"{rule} not defined for 1D integrals")
if (rule == "gk-15" or rule == "gk-21") and ndim > 5:
raise SkipNotImplemented(f"{rule} uses too much memory for ndim > 5")
if rule == "gk-21" and ndim >= 5 and fdim == 8 and not is_xslow():
raise SkipNotImplemented("Takes too long to run in CI")
def f(self, x):
npoints, ndim = x.shape[0], x.shape[-1]
r = np.repeat(0.5, self.fdim)
alphas = np.repeat(0.1, self.fdim * ndim).reshape(self.fdim, ndim)
x_reshaped = x.reshape(npoints, *([1]*(len(alphas.shape) - 1)), ndim)
return np.cos(2*np.pi*r + np.sum(alphas * x_reshaped, axis=-1))
def time_plain(self, rule, ndim, fdim, rtol):
cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
)
def time_threads(self, rule, ndim, fdim, rtol):
cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
workers=self.pool.map,
)
def track_subdivisions(self, rule, ndim, fdim, rtol):
return cubature(
f=self.f,
a=self.a,
b=self.b,
rule=self.rule,
rtol=self.rtol,
atol=self.atol,
).subdivisions
| CubatureOscillatory |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 29224,
"end": 29613
} | class ____(BaseModel):
"""
Variable serializer for responses.
"""
key: Annotated[str, Field(title="Key")]
value: Annotated[str, Field(title="Value")]
description: Annotated[str | None, Field(title="Description")] = None
is_encrypted: Annotated[bool, Field(title="Is Encrypted")]
team_id: Annotated[UUID | None, Field(title="Team Id")] = None
| VariableResponse |
python | django-extensions__django-extensions | tests/test_runscript.py | {
"start": 1556,
"end": 2595
} | class ____(RunScriptTests):
def test_prints_error_on_nonexistent_script(self):
cmd = self.get_command()
with self.assertRaises(CommandError):
call_command(cmd, "non_existent_script", verbosity=2)
self.assertIn(
"No (valid) module for script 'non_existent_script' found",
sys.stdout.getvalue(),
)
self.assertEqual(cmd.last_exit_code, 1)
def test_prints_nothing_for_nonexistent_script_when_silent(self):
cmd = self.get_command()
call_command(cmd, "non_existent_script", silent=True)
self.assertEqual("", sys.stdout.getvalue())
self.assertEqual(cmd.last_exit_code, 1)
def test_doesnt_print_exception_for_nonexistent_script_when_no_traceback(self):
cmd = self.get_command()
with self.assertRaises(CommandError):
call_command(cmd, "non_existent_script", no_traceback=True)
self.assertEqual("", sys.stderr.getvalue())
self.assertEqual(cmd.last_exit_code, 1)
| NonExistentScriptsTests |
python | crytic__slither | slither/detectors/reentrancy/reentrancy_benign.py | {
"start": 583,
"end": 7600
} | class ____(Reentrancy):
ARGUMENT = "reentrancy-benign"
HELP = "Benign reentrancy vulnerabilities"
IMPACT = DetectorClassification.LOW
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#reentrancy-vulnerabilities-2"
)
WIKI_TITLE = "Reentrancy vulnerabilities"
# region wiki_description
WIKI_DESCRIPTION = """
Detection of the [reentrancy bug](https://github.com/trailofbits/not-so-smart-contracts/tree/master/reentrancy).
Only report reentrancy that acts as a double call (see `reentrancy-eth`, `reentrancy-no-eth`)."""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
function callme(){
if( ! (msg.sender.call()() ) ){
throw;
}
counter += 1
}
```
`callme` contains a reentrancy. The reentrancy is benign because it's exploitation would have the same effect as two consecutive calls."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Apply the [`check-effects-interactions` pattern](http://solidity.readthedocs.io/en/v0.4.21/security-considerations.html#re-entrancy)."
STANDARD_JSON = False
def find_reentrancies(self) -> DefaultDict[FindingKey, Set[FindingValue]]:
result = defaultdict(set)
for contract in self.contracts:
for f in contract.functions_and_modifiers_declared:
for node in f.nodes:
# dead code
if self.KEY not in node.context:
continue
if node.context[self.KEY].calls:
if not any(n != node for n in node.context[self.KEY].calls):
continue
read_then_written = []
for c in node.context[self.KEY].calls:
read_then_written += [
v
for v in node.context[self.KEY].written
if v in node.context[self.KEY].reads_prior_calls[c]
]
not_read_then_written = {
FindingValue(
v,
node,
tuple(sorted(nodes, key=lambda x: x.node_id)),
)
for (v, nodes) in node.context[self.KEY].written.items()
if v not in read_then_written
}
if not_read_then_written:
# calls are ordered
finding_key = FindingKey(
function=node.function,
calls=to_hashable(node.context[self.KEY].calls),
send_eth=to_hashable(node.context[self.KEY].send_eth),
)
result[finding_key] |= not_read_then_written
return result
def _detect(self) -> List[Output]: # pylint: disable=too-many-branches
""""""
super()._detect()
reentrancies = self.find_reentrancies()
results = []
result_sorted = sorted(list(reentrancies.items()), key=lambda x: x[0].function.name)
varsWritten: List[FindingValue]
for (func, calls, send_eth), varsWritten in result_sorted:
calls = sorted(list(set(calls)), key=lambda x: x[0].node_id)
send_eth = sorted(list(set(send_eth)), key=lambda x: x[0].node_id)
varsWritten = sorted(varsWritten, key=lambda x: (x.variable.name, x.node.node_id))
info = ["Reentrancy in ", func, ":\n"]
info += ["\tExternal calls:\n"]
for (call_info, calls_list) in calls:
info += ["\t- ", call_info, "\n"]
for call_list_info in calls_list:
if call_list_info != call_info:
info += ["\t\t- ", call_list_info, "\n"]
if calls != send_eth and send_eth:
info += ["\tExternal calls sending eth:\n"]
for (call_info, calls_list) in send_eth:
info += ["\t- ", call_info, "\n"]
for call_list_info in calls_list:
if call_list_info != call_info:
info += ["\t\t- ", call_list_info, "\n"]
info += ["\tState variables written after the call(s):\n"]
for finding_value in varsWritten:
info += ["\t- ", finding_value.node, "\n"]
for other_node in finding_value.nodes:
if other_node != finding_value.node:
info += ["\t\t- ", other_node, "\n"]
# Create our JSON result
res = self.generate_result(info)
# Add the function with the re-entrancy first
res.add(func)
# Add all underlying calls in the function which are potentially problematic.
for (call_info, calls_list) in calls:
res.add(call_info, {"underlying_type": "external_calls"})
for call_list_info in calls_list:
if call_list_info != call_info:
res.add(
call_list_info,
{"underlying_type": "external_calls_sending_eth"},
)
#
# If the calls are not the same ones that send eth, add the eth sending nodes.
if calls != send_eth:
for (call_info, calls_list) in calls:
res.add(call_info, {"underlying_type": "external_calls_sending_eth"})
for call_list_info in calls_list:
if call_list_info != call_info:
res.add(
call_list_info,
{"underlying_type": "external_calls_sending_eth"},
)
# Add all variables written via nodes which write them.
for finding_value in varsWritten:
res.add(
finding_value.node,
{
"underlying_type": "variables_written",
"variable_name": finding_value.variable.name,
},
)
for other_node in finding_value.nodes:
if other_node != finding_value.node:
res.add(
other_node,
{
"underlying_type": "variables_written",
"variable_name": finding_value.variable.name,
},
)
# Append our result
results.append(res)
return results
| ReentrancyBenign |
python | keon__algorithms | algorithms/stack/ordered_stack.py | {
"start": 110,
"end": 991
} | class ____:
def __init__(self):
self.items = []
def is_empty(self):
return self.items == []
def push_t(self, item):
self.items.append(item)
# push method to maintain order when pushing new elements
def push(self, item):
temp_stack = OrderedStack()
if self.is_empty() or item > self.peek():
self.push_t(item)
else:
while item < self.peek() and not self.is_empty():
temp_stack.push_t(self.pop())
self.push_t(item)
while not temp_stack.is_empty():
self.push_t(temp_stack.pop())
def pop(self):
if self.is_empty():
raise IndexError("Stack is empty")
return self.items.pop()
def peek(self):
return self.items[len(self.items) - 1]
def size(self):
return len(self.items)
| OrderedStack |
python | kamyu104__LeetCode-Solutions | Python/maximum-depth-of-n-ary-tree.py | {
"start": 29,
"end": 146
} | class ____(object):
def __init__(self, val, children):
self.val = val
self.children = children
| Node |
python | realpython__materials | python-type-checking/game_003.py | {
"start": 806,
"end": 1139
} | class ____:
def __init__(self, name, hand):
self.name = name
self.hand = hand
def play_card(self):
"""Play a card from the player's hand"""
card = random.choice(self.hand.cards)
self.hand.cards.remove(card)
print(f"{self.name}: {card!r:<3} ", end="")
return card
| Player |
python | Textualize__textual | src/textual/widgets/_tree.py | {
"start": 1389,
"end": 1533
} | class ____(Exception):
"""Exception raised when trying to remove the root of a [`TreeNode`][textual.widgets.tree.TreeNode]."""
| RemoveRootError |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_basic.py | {
"start": 13070,
"end": 13229
} | class ____(_TestIRFFTBase):
def setup_method(self):
self.cdt = np.complex128
self.rdt = np.float64
self.ndec = 14
| TestIRFFTLongDouble |
python | scikit-image__scikit-image | src/skimage/util/_backends.py | {
"start": 5067,
"end": 5195
} | class ____(RuntimeWarning):
"""Notification issued when a function is dispatched to a backend."""
pass
| DispatchNotification |
python | fastapi__sqlmodel | docs_src/tutorial/select/tutorial004.py | {
"start": 100,
"end": 1085
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str
secret_name: str
age: Optional[int] = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.commit()
def select_heroes():
with Session(engine) as session:
heroes = session.exec(select(Hero)).all()
print(heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 11142,
"end": 11218
} | class ____:
field_names: Annotated[list[str], 10]
@dataclass
| NamedTupleDef |
python | readthedocs__readthedocs.org | readthedocs/audit/filters.py | {
"start": 247,
"end": 1416
} | class ____(FilterSet):
"""Filter for user security logs."""
allowed_actions = [
(AuditLog.AUTHN, AuditLog.AUTHN_TEXT),
(AuditLog.AUTHN_FAILURE, AuditLog.AUTHN_FAILURE_TEXT),
(AuditLog.LOGOUT, AuditLog.LOGOUT_TEXT),
(AuditLog.INVITATION_SENT, AuditLog.INVITATION_SENT_TEXT),
(AuditLog.INVITATION_REVOKED, AuditLog.INVITATION_REVOKED_TEXT),
(AuditLog.INVITATION_ACCEPTED, AuditLog.INVITATION_ACCEPTED_TEXT),
(AuditLog.INVITATION_DECLINED, AuditLog.INVITATION_DECLINED_TEXT),
]
ip = CharFilter(field_name="ip", lookup_expr="exact")
project = CharFilter(field_name="log_project_slug", lookup_expr="exact")
action = ChoiceFilter(
field_name="action",
lookup_expr="exact",
# Choices are filled at runtime,
# using the list from `allowed_actions`.
choices=[],
)
date = DateFromToRangeFilter(field_name="created")
class Meta:
model = AuditLog
fields = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.filters["action"].field.choices = self.allowed_actions
| UserSecurityLogFilter |
python | django__django | tests/lookup/tests.py | {
"start": 964,
"end": 67259
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
# Create a few Authors.
cls.au1 = Author.objects.create(name="Author 1", alias="a1", bio="x" * 4001)
cls.au2 = Author.objects.create(name="Author 2", alias="a2")
# Create a few Articles.
cls.a1 = Article.objects.create(
headline="Article 1",
pub_date=datetime(2005, 7, 26),
author=cls.au1,
slug="a1",
)
cls.a2 = Article.objects.create(
headline="Article 2",
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug="a2",
)
cls.a3 = Article.objects.create(
headline="Article 3",
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug="a3",
)
cls.a4 = Article.objects.create(
headline="Article 4",
pub_date=datetime(2005, 7, 28),
author=cls.au1,
slug="a4",
)
cls.a5 = Article.objects.create(
headline="Article 5",
pub_date=datetime(2005, 8, 1, 9, 0),
author=cls.au2,
slug="a5",
)
cls.a6 = Article.objects.create(
headline="Article 6",
pub_date=datetime(2005, 8, 1, 8, 0),
author=cls.au2,
slug="a6",
)
cls.a7 = Article.objects.create(
headline="Article 7",
pub_date=datetime(2005, 7, 27),
author=cls.au2,
slug="a7",
)
# Create a few Tags.
cls.t1 = Tag.objects.create(name="Tag 1")
cls.t1.articles.add(cls.a1, cls.a2, cls.a3)
cls.t2 = Tag.objects.create(name="Tag 2")
cls.t2.articles.add(cls.a3, cls.a4, cls.a5)
cls.t3 = Tag.objects.create(name="Tag 3")
cls.t3.articles.add(cls.a5, cls.a6, cls.a7)
def test_exists(self):
# We can use .exists() to check that there are some
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertSequenceEqual(
Article.objects.filter(id__iexact=str(self.a1.id)),
[self.a1],
)
@skipUnlessDBFeature("supports_date_lookup_using_string")
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertSequenceEqual(
Article.objects.filter(pub_date__startswith="2005"),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.abc.Iterator)
self.assertQuerySetEqual(
Article.objects.iterator(),
[
"Article 5",
"Article 6",
"Article 4",
"Article 2",
"Article 3",
"Article 7",
"Article 1",
],
transform=attrgetter("headline"),
)
# iterator() can be used on any QuerySet.
self.assertQuerySetEqual(
Article.objects.filter(headline__endswith="4").iterator(),
["Article 4"],
transform=attrgetter("headline"),
)
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3
)
self.assertEqual(
Article.objects.filter(headline__startswith="Blah blah").count(), 0
)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(
Article.objects.filter(pub_date__exact="2005-07-27 00:00:00").count(), 3
)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to
# objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
},
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(
Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3}
)
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(
Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1}
)
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith="Blah")
def test_in_bulk_lots_of_ids(self):
test_range = 2000
max_query_params = connection.features.max_query_params
expected_num_queries = (
ceil(test_range / max_query_params) if max_query_params else 1
)
Author.objects.bulk_create(
[Author() for i in range(test_range - Author.objects.count())]
)
authors = {author.pk: author for author in Author.objects.all()}
with self.assertNumQueries(expected_num_queries):
self.assertEqual(Author.objects.in_bulk(authors), authors)
def test_in_bulk_with_field(self):
self.assertEqual(
Article.objects.in_bulk(
[self.a1.slug, self.a2.slug, self.a3.slug], field_name="slug"
),
{
self.a1.slug: self.a1,
self.a2.slug: self.a2,
self.a3.slug: self.a3,
},
)
def test_in_bulk_meta_constraint(self):
season_2011 = Season.objects.create(year=2011)
season_2012 = Season.objects.create(year=2012)
Season.objects.create(year=2013)
self.assertEqual(
Season.objects.in_bulk(
[season_2011.year, season_2012.year],
field_name="year",
),
{season_2011.year: season_2011, season_2012.year: season_2012},
)
def test_in_bulk_non_unique_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'author' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.in_bulk([self.au1], field_name="author")
def test_in_bulk_preserve_ordering(self):
self.assertEqual(
list(Article.objects.in_bulk([self.a2.id, self.a1.id])),
[self.a2.id, self.a1.id],
)
def test_in_bulk_preserve_ordering_with_batch_size(self):
qs = Article.objects.all()
with (
mock.patch.object(connection.ops, "bulk_batch_size", return_value=2),
self.assertNumQueries(2),
):
self.assertEqual(
list(qs.in_bulk([self.a4.id, self.a3.id, self.a2.id, self.a1.id])),
[self.a4.id, self.a3.id, self.a2.id, self.a1.id],
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_in_bulk_distinct_field(self):
self.assertEqual(
Article.objects.order_by("headline")
.distinct("headline")
.in_bulk(
[self.a1.headline, self.a5.headline],
field_name="headline",
),
{self.a1.headline: self.a1, self.a5.headline: self.a5},
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_in_bulk_multiple_distinct_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'pub_date' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.order_by("headline", "pub_date").distinct(
"headline",
"pub_date",
).in_bulk(field_name="pub_date")
@isolate_apps("lookup")
def test_in_bulk_non_unique_meta_constaint(self):
class Model(models.Model):
ean = models.CharField(max_length=100)
brand = models.CharField(max_length=100)
name = models.CharField(max_length=80)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["ean"],
name="partial_ean_unique",
condition=models.Q(is_active=True),
),
models.UniqueConstraint(
fields=["brand", "name"],
name="together_brand_name_unique",
),
]
msg = "in_bulk()'s field_name must be a unique field but '%s' isn't."
for field_name in ["brand", "ean"]:
with self.subTest(field_name=field_name):
with self.assertRaisesMessage(ValueError, msg % field_name):
Model.objects.in_bulk(field_name=field_name)
def test_in_bulk_sliced_queryset(self):
msg = "Cannot use 'limit' or 'offset' with in_bulk()."
with self.assertRaisesMessage(TypeError, msg):
Article.objects.all()[0:5].in_bulk([self.a1.id, self.a2.id])
def test_in_bulk_values_empty(self):
arts = Article.objects.values().in_bulk([])
self.assertEqual(arts, {})
def test_in_bulk_values_all(self):
Article.objects.exclude(pk__in=[self.a1.pk, self.a2.pk]).delete()
arts = Article.objects.values().in_bulk()
self.assertEqual(
arts,
{
self.a1.pk: {
"id": self.a1.pk,
"author_id": self.au1.pk,
"headline": "Article 1",
"pub_date": self.a1.pub_date,
"slug": "a1",
},
self.a2.pk: {
"id": self.a2.pk,
"author_id": self.au1.pk,
"headline": "Article 2",
"pub_date": self.a2.pub_date,
"slug": "a2",
},
},
)
def test_in_bulk_values_pks(self):
arts = Article.objects.values().in_bulk([self.a1.pk])
self.assertEqual(
arts,
{
self.a1.pk: {
"id": self.a1.pk,
"author_id": self.au1.pk,
"headline": "Article 1",
"pub_date": self.a1.pub_date,
"slug": "a1",
}
},
)
def test_in_bulk_values_fields(self):
arts = Article.objects.values("headline").in_bulk([self.a1.pk])
self.assertEqual(
arts,
{self.a1.pk: {"headline": "Article 1"}},
)
def test_in_bulk_values_fields_including_pk(self):
arts = Article.objects.values("pk", "headline").in_bulk([self.a1.pk])
self.assertEqual(
arts,
{self.a1.pk: {"pk": self.a1.pk, "headline": "Article 1"}},
)
def test_in_bulk_values_fields_pk(self):
arts = Article.objects.values("pk").in_bulk([self.a1.pk])
self.assertEqual(
arts,
{self.a1.pk: {"pk": self.a1.pk}},
)
def test_in_bulk_values_fields_id(self):
arts = Article.objects.values("id").in_bulk([self.a1.pk])
self.assertEqual(
arts,
{self.a1.pk: {"id": self.a1.pk}},
)
def test_in_bulk_values_alternative_field_name(self):
arts = Article.objects.values("headline").in_bulk(
[self.a1.slug], field_name="slug"
)
self.assertEqual(
arts,
{self.a1.slug: {"headline": "Article 1"}},
)
def test_in_bulk_values_list_empty(self):
arts = Article.objects.values_list().in_bulk([])
self.assertEqual(arts, {})
def test_in_bulk_values_list_all(self):
Article.objects.exclude(pk__in=[self.a1.pk, self.a2.pk]).delete()
arts = Article.objects.values_list().in_bulk()
self.assertEqual(
arts,
{
self.a1.pk: (
self.a1.pk,
"Article 1",
self.a1.pub_date,
self.au1.pk,
"a1",
),
self.a2.pk: (
self.a2.pk,
"Article 2",
self.a2.pub_date,
self.au1.pk,
"a2",
),
},
)
def test_in_bulk_values_list_fields(self):
arts = Article.objects.values_list("headline").in_bulk([self.a1.pk, self.a2.pk])
self.assertEqual(
arts,
{
self.a1.pk: ("Article 1",),
self.a2.pk: ("Article 2",),
},
)
def test_in_bulk_values_list_fields_including_pk(self):
arts = Article.objects.values_list("pk", "headline").in_bulk(
[self.a1.pk, self.a2.pk]
)
self.assertEqual(
arts,
{
self.a1.pk: (self.a1.pk, "Article 1"),
self.a2.pk: (self.a2.pk, "Article 2"),
},
)
def test_in_bulk_values_list_fields_pk(self):
arts = Article.objects.values_list("pk").in_bulk([self.a1.pk, self.a2.pk])
self.assertEqual(
arts,
{
self.a1.pk: (self.a1.pk,),
self.a2.pk: (self.a2.pk,),
},
)
def test_in_bulk_values_list_fields_id(self):
arts = Article.objects.values_list("id").in_bulk([self.a1.pk, self.a2.pk])
self.assertEqual(
arts,
{
self.a1.pk: (self.a1.pk,),
self.a2.pk: (self.a2.pk,),
},
)
def test_in_bulk_values_list_named(self):
arts = Article.objects.values_list(named=True).in_bulk([self.a1.pk, self.a2.pk])
self.assertIsInstance(arts, dict)
self.assertEqual(len(arts), 2)
arts1 = arts[self.a1.pk]
self.assertEqual(
arts1._fields, ("pk", "id", "headline", "pub_date", "author_id", "slug")
)
self.assertEqual(arts1.pk, self.a1.pk)
self.assertEqual(arts1.headline, "Article 1")
self.assertEqual(arts1.pub_date, self.a1.pub_date)
self.assertEqual(arts1.author_id, self.au1.pk)
self.assertEqual(arts1.slug, "a1")
def test_in_bulk_values_list_named_fields(self):
arts = Article.objects.values_list("pk", "headline", named=True).in_bulk(
[self.a1.pk, self.a2.pk]
)
self.assertIsInstance(arts, dict)
self.assertEqual(len(arts), 2)
arts1 = arts[self.a1.pk]
self.assertEqual(arts1._fields, ("pk", "headline"))
self.assertEqual(arts1.pk, self.a1.pk)
self.assertEqual(arts1.headline, "Article 1")
def test_in_bulk_values_list_named_fields_alternative_field(self):
arts = Article.objects.values_list("headline", named=True).in_bulk(
[self.a1.slug, self.a2.slug], field_name="slug"
)
self.assertEqual(len(arts), 2)
arts1 = arts[self.a1.slug]
self.assertEqual(arts1._fields, ("slug", "headline"))
self.assertEqual(arts1.slug, "a1")
self.assertEqual(arts1.headline, "Article 1")
# RemovedInDjango70Warning: When the deprecation ends, remove this
# test.
def test_in_bulk_values_list_flat_empty(self):
with ignore_warnings(category=RemovedInDjango70Warning):
arts = Article.objects.values_list(flat=True).in_bulk([])
self.assertEqual(arts, {})
# RemovedInDjango70Warning: When the deprecation ends, remove this
# test.
def test_in_bulk_values_list_flat_all(self):
Article.objects.exclude(pk__in=[self.a1.pk, self.a2.pk]).delete()
with ignore_warnings(category=RemovedInDjango70Warning):
arts = Article.objects.values_list(flat=True).in_bulk()
self.assertEqual(
arts,
{
self.a1.pk: self.a1.pk,
self.a2.pk: self.a2.pk,
},
)
# RemovedInDjango70Warning: When the deprecation ends, remove this
# test.
def test_in_bulk_values_list_flat_pks(self):
with ignore_warnings(category=RemovedInDjango70Warning):
arts = Article.objects.values_list(flat=True).in_bulk(
[self.a1.pk, self.a2.pk]
)
self.assertEqual(
arts,
{
self.a1.pk: self.a1.pk,
self.a2.pk: self.a2.pk,
},
)
def test_in_bulk_values_list_flat_field(self):
arts = Article.objects.values_list("headline", flat=True).in_bulk(
[self.a1.pk, self.a2.pk]
)
self.assertEqual(
arts,
{self.a1.pk: "Article 1", self.a2.pk: "Article 2"},
)
def test_in_bulk_values_list_flat_field_pk(self):
arts = Article.objects.values_list("pk", flat=True).in_bulk(
[self.a1.pk, self.a2.pk]
)
self.assertEqual(
arts,
{
self.a1.pk: self.a1.pk,
self.a2.pk: self.a2.pk,
},
)
def test_in_bulk_values_list_flat_field_id(self):
arts = Article.objects.values_list("id", flat=True).in_bulk(
[self.a1.pk, self.a2.pk]
)
self.assertEqual(
arts,
{
self.a1.pk: self.a1.pk,
self.a2.pk: self.a2.pk,
},
)
def test_values(self):
# values() returns a list of dictionaries instead of object instances,
# and you can specify which fields you want to retrieve.
self.assertSequenceEqual(
Article.objects.filter(id__in=(self.a5.id, self.a6.id)).values(),
[
{
"id": self.a5.id,
"headline": "Article 5",
"pub_date": datetime(2005, 8, 1, 9, 0),
"author_id": self.au2.id,
"slug": "a5",
},
{
"id": self.a6.id,
"headline": "Article 6",
"pub_date": datetime(2005, 8, 1, 8, 0),
"author_id": self.au2.id,
"slug": "a6",
},
],
)
self.assertSequenceEqual(
Article.objects.values("headline"),
[
{"headline": "Article 5"},
{"headline": "Article 6"},
{"headline": "Article 4"},
{"headline": "Article 2"},
{"headline": "Article 3"},
{"headline": "Article 7"},
{"headline": "Article 1"},
],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values("id"),
[{"id": self.a2.id}, {"id": self.a3.id}, {"id": self.a7.id}],
)
self.assertSequenceEqual(
Article.objects.values("id", "headline"),
[
{"id": self.a5.id, "headline": "Article 5"},
{"id": self.a6.id, "headline": "Article 6"},
{"id": self.a4.id, "headline": "Article 4"},
{"id": self.a2.id, "headline": "Article 2"},
{"id": self.a3.id, "headline": "Article 3"},
{"id": self.a7.id, "headline": "Article 7"},
{"id": self.a1.id, "headline": "Article 1"},
],
)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertSequenceEqual(
list(Article.objects.values("id", "headline").iterator()),
[
{"headline": "Article 5", "id": self.a5.id},
{"headline": "Article 6", "id": self.a6.id},
{"headline": "Article 4", "id": self.a4.id},
{"headline": "Article 2", "id": self.a2.id},
{"headline": "Article 3", "id": self.a3.id},
{"headline": "Article 7", "id": self.a7.id},
{"headline": "Article 1", "id": self.a1.id},
],
)
# The values() method works with "extra" fields specified in
# extra(select).
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id + 1"}).values(
"id", "id_plus_one"
),
[
{"id": self.a5.id, "id_plus_one": self.a5.id + 1},
{"id": self.a6.id, "id_plus_one": self.a6.id + 1},
{"id": self.a4.id, "id_plus_one": self.a4.id + 1},
{"id": self.a2.id, "id_plus_one": self.a2.id + 1},
{"id": self.a3.id, "id_plus_one": self.a3.id + 1},
{"id": self.a7.id, "id_plus_one": self.a7.id + 1},
{"id": self.a1.id, "id_plus_one": self.a1.id + 1},
],
)
data = {
"id_plus_one": "id+1",
"id_plus_two": "id+2",
"id_plus_three": "id+3",
"id_plus_four": "id+4",
"id_plus_five": "id+5",
"id_plus_six": "id+6",
"id_plus_seven": "id+7",
"id_plus_eight": "id+8",
}
self.assertSequenceEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data),
[
{
"id_plus_one": self.a1.id + 1,
"id_plus_two": self.a1.id + 2,
"id_plus_three": self.a1.id + 3,
"id_plus_four": self.a1.id + 4,
"id_plus_five": self.a1.id + 5,
"id_plus_six": self.a1.id + 6,
"id_plus_seven": self.a1.id + 7,
"id_plus_eight": self.a1.id + 8,
}
],
)
# You can specify fields from forward and reverse relations, just like
# filter().
self.assertSequenceEqual(
Article.objects.values("headline", "author__name"),
[
{"headline": self.a5.headline, "author__name": self.au2.name},
{"headline": self.a6.headline, "author__name": self.au2.name},
{"headline": self.a4.headline, "author__name": self.au1.name},
{"headline": self.a2.headline, "author__name": self.au1.name},
{"headline": self.a3.headline, "author__name": self.au1.name},
{"headline": self.a7.headline, "author__name": self.au2.name},
{"headline": self.a1.headline, "author__name": self.au1.name},
],
)
self.assertSequenceEqual(
Author.objects.values("name", "article__headline").order_by(
"name", "article__headline"
),
[
{"name": self.au1.name, "article__headline": self.a1.headline},
{"name": self.au1.name, "article__headline": self.a2.headline},
{"name": self.au1.name, "article__headline": self.a3.headline},
{"name": self.au1.name, "article__headline": self.a4.headline},
{"name": self.au2.name, "article__headline": self.a5.headline},
{"name": self.au2.name, "article__headline": self.a6.headline},
{"name": self.au2.name, "article__headline": self.a7.headline},
],
)
self.assertSequenceEqual(
(
Author.objects.values(
"name", "article__headline", "article__tag__name"
).order_by("name", "article__headline", "article__tag__name")
),
[
{
"name": self.au1.name,
"article__headline": self.a1.headline,
"article__tag__name": self.t1.name,
},
{
"name": self.au1.name,
"article__headline": self.a2.headline,
"article__tag__name": self.t1.name,
},
{
"name": self.au1.name,
"article__headline": self.a3.headline,
"article__tag__name": self.t1.name,
},
{
"name": self.au1.name,
"article__headline": self.a3.headline,
"article__tag__name": self.t2.name,
},
{
"name": self.au1.name,
"article__headline": self.a4.headline,
"article__tag__name": self.t2.name,
},
{
"name": self.au2.name,
"article__headline": self.a5.headline,
"article__tag__name": self.t2.name,
},
{
"name": self.au2.name,
"article__headline": self.a5.headline,
"article__tag__name": self.t3.name,
},
{
"name": self.au2.name,
"article__headline": self.a6.headline,
"article__tag__name": self.t3.name,
},
{
"name": self.au2.name,
"article__headline": self.a7.headline,
"article__tag__name": self.t3.name,
},
],
)
# However, an exception FieldDoesNotExist will be thrown if you specify
# a nonexistent field name in values() (a field that is neither in the
# model nor in extra(select)).
msg = (
"Cannot resolve keyword 'id_plus_two' into field. Choices are: "
"author, author_id, headline, id, id_plus_one, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.extra(select={"id_plus_one": "id + 1"}).values(
"id", "id_plus_two"
)
# If you don't specify field names to values(), all are returned.
self.assertSequenceEqual(
Article.objects.filter(id=self.a5.id).values(),
[
{
"id": self.a5.id,
"author_id": self.au2.id,
"headline": "Article 5",
"pub_date": datetime(2005, 8, 1, 9, 0),
"slug": "a5",
}
],
)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
self.assertSequenceEqual(
Article.objects.filter(id__in=(self.a5.id, self.a6.id)).values_list(),
[
(
self.a5.id,
"Article 5",
datetime(2005, 8, 1, 9, 0),
self.au2.id,
"a5",
),
(
self.a6.id,
"Article 6",
datetime(2005, 8, 1, 8, 0),
self.au2.id,
"a6",
),
],
)
# RemovedInDjango70Warning: When the deprecation ends, remove this
# assertion.
with ignore_warnings(category=RemovedInDjango70Warning):
qs = Article.objects.values_list(flat=True)
self.assertSequenceEqual(
qs,
[
self.a5.id,
self.a6.id,
self.a4.id,
self.a2.id,
self.a3.id,
self.a7.id,
self.a1.id,
],
)
self.assertSequenceEqual(
Article.objects.values_list("headline"),
[
("Article 5",),
("Article 6",),
("Article 4",),
("Article 2",),
("Article 3",),
("Article 7",),
("Article 1",),
],
)
self.assertSequenceEqual(
Article.objects.values_list("id").order_by("id"),
[
(self.a1.id,),
(self.a2.id,),
(self.a3.id,),
(self.a4.id,),
(self.a5.id,),
(self.a6.id,),
(self.a7.id,),
],
)
self.assertSequenceEqual(
Article.objects.values_list("id", flat=True).order_by("id"),
[
self.a1.id,
self.a2.id,
self.a3.id,
self.a4.id,
self.a5.id,
self.a6.id,
self.a7.id,
],
)
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id+1"})
.order_by("id")
.values_list("id"),
[
(self.a1.id,),
(self.a2.id,),
(self.a3.id,),
(self.a4.id,),
(self.a5.id,),
(self.a6.id,),
(self.a7.id,),
],
)
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id+1"})
.order_by("id")
.values_list("id_plus_one", "id"),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id),
],
)
self.assertSequenceEqual(
Article.objects.extra(select={"id_plus_one": "id+1"})
.order_by("id")
.values_list("id", "id_plus_one"),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1),
],
)
args = ("name", "article__headline", "article__tag__name")
self.assertSequenceEqual(
Author.objects.values_list(*args).order_by(*args),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
],
)
with self.assertRaises(TypeError):
Article.objects.values_list("id", "headline", flat=True)
# RemovedInDjango70Warning: When the deprecation ends, replace with:
# def test_values_list_flat_empty_error(self):
# msg = (
# "'flat' is not valid when values_list is called with no fields."
# )
# with self.assertRaisesMessage(TypeError, msg):
# Article.objects.values_list(flat=True)
def test_values_list_flat_empty_warning(self):
msg = (
"Calling values_list() with no field name and flat=True "
"is deprecated. Pass an explicit field name instead, like "
"'pk'."
)
with self.assertRaisesMessage(RemovedInDjango70Warning, msg):
Article.objects.values_list(flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()), "<Article: Article 2>")
self.assertEqual(repr(self.a2.get_next_by_pub_date()), "<Article: Article 3>")
self.assertEqual(
repr(self.a2.get_next_by_pub_date(headline__endswith="6")),
"<Article: Article 6>",
)
self.assertEqual(repr(self.a3.get_next_by_pub_date()), "<Article: Article 7>")
self.assertEqual(repr(self.a4.get_next_by_pub_date()), "<Article: Article 6>")
with self.assertRaises(Article.DoesNotExist):
self.a5.get_next_by_pub_date()
self.assertEqual(repr(self.a6.get_next_by_pub_date()), "<Article: Article 5>")
self.assertEqual(repr(self.a7.get_next_by_pub_date()), "<Article: Article 4>")
self.assertEqual(
repr(self.a7.get_previous_by_pub_date()), "<Article: Article 3>"
)
self.assertEqual(
repr(self.a6.get_previous_by_pub_date()), "<Article: Article 4>"
)
self.assertEqual(
repr(self.a5.get_previous_by_pub_date()), "<Article: Article 6>"
)
self.assertEqual(
repr(self.a4.get_previous_by_pub_date()), "<Article: Article 7>"
)
self.assertEqual(
repr(self.a3.get_previous_by_pub_date()), "<Article: Article 2>"
)
self.assertEqual(
repr(self.a2.get_previous_by_pub_date()), "<Article: Article 1>"
)
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in
# the underlying SQL code, but Django handles the quoting of them
# automatically.
a8 = Article.objects.create(
headline="Article_ with underscore", pub_date=datetime(2005, 11, 20)
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article"),
[a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article_"),
[a8],
)
a9 = Article.objects.create(
headline="Article% with percent sign", pub_date=datetime(2005, 11, 21)
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article"),
[a9, a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article%"),
[a9],
)
a10 = Article.objects.create(
headline="Article with \\ backslash", pub_date=datetime(2005, 11, 22)
)
self.assertSequenceEqual(
Article.objects.filter(headline__contains="\\"),
[a10],
)
def test_exclude(self):
a8 = Article.objects.create(
headline="Article_ with underscore", pub_date=datetime(2005, 11, 20)
)
a9 = Article.objects.create(
headline="Article% with percent sign", pub_date=datetime(2005, 11, 21)
)
a10 = Article.objects.create(
headline="Article with \\ backslash", pub_date=datetime(2005, 11, 22)
)
# exclude() is the opposite of filter() when doing lookups:
self.assertSequenceEqual(
Article.objects.filter(headline__contains="Article").exclude(
headline__contains="with"
),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.exclude(headline__startswith="Article_"),
[a10, a9, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.exclude(headline="Article 7"),
[a10, a9, a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a1],
)
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertSequenceEqual(Article.objects.none(), [])
self.assertSequenceEqual(
Article.objects.none().filter(headline__startswith="Article"), []
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith="Article").none(), []
)
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(
Article.objects.none().update(headline="This should not take effect"), 0
)
self.assertSequenceEqual(list(Article.objects.none().iterator()), [])
def test_in(self):
self.assertSequenceEqual(
Article.objects.exclude(id__in=[]),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
def test_in_empty_list(self):
self.assertSequenceEqual(Article.objects.filter(id__in=[]), [])
def test_in_different_database(self):
with self.assertRaisesMessage(
ValueError,
"Subqueries aren't allowed across different databases. Force the "
"inner query to be evaluated using `list(inner_query)`.",
):
list(Article.objects.filter(id__in=Article.objects.using("other").all()))
def test_in_keeps_value_ordering(self):
query = (
Article.objects.filter(slug__in=["a%d" % i for i in range(1, 8)])
.values("pk")
.query
)
self.assertIn(" IN (a1, a2, a3, a4, a5, a6, a7) ", str(query))
def test_in_ignore_none(self):
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
Article.objects.filter(id__in=[None, self.a1.id]),
[self.a1],
)
sql = ctx.captured_queries[0]["sql"]
self.assertIn("IN (%s)" % self.a1.pk, sql)
def test_in_ignore_solo_none(self):
with self.assertNumQueries(0):
self.assertSequenceEqual(Article.objects.filter(id__in=[None]), [])
def test_in_ignore_none_with_unhashable_items(self):
class UnhashableInt(int):
__hash__ = None
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
Article.objects.filter(id__in=[None, UnhashableInt(self.a1.id)]),
[self.a1],
)
sql = ctx.captured_queries[0]["sql"]
self.assertIn("IN (%s)" % self.a1.pk, sql)
def test_in_select_mismatch(self):
msg = (
"The QuerySet value for the 'in' lookup must have 1 "
"selected fields (received 2)"
)
with self.assertRaisesMessage(ValueError, msg):
Article.objects.filter(id__in=Article.objects.values("id", "headline"))
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
with self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'pub_date_year' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag",
):
Article.objects.filter(pub_date_year="2005").count()
def test_unsupported_lookups(self):
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'starts' for CharField or join on the field "
"not permitted, perhaps you meant startswith or istartswith?",
):
Article.objects.filter(headline__starts="Article")
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'is_null' for DateTimeField or join on the field "
"not permitted, perhaps you meant isnull?",
):
Article.objects.filter(pub_date__is_null=True)
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gobbledygook' for DateTimeField or join on the field "
"not permitted.",
):
Article.objects.filter(pub_date__gobbledygook="blahblah")
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gt__foo' for DateTimeField or join on the field "
"not permitted, perhaps you meant gt or gte?",
):
Article.objects.filter(pub_date__gt__foo="blahblah")
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gt__' for DateTimeField or join on the field "
"not permitted, perhaps you meant gt or gte?",
):
Article.objects.filter(pub_date__gt__="blahblah")
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gt__lt' for DateTimeField or join on the field "
"not permitted, perhaps you meant gt or gte?",
):
Article.objects.filter(pub_date__gt__lt="blahblah")
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gt__lt__foo' for DateTimeField or join"
" on the field not permitted, perhaps you meant gt or gte?",
):
Article.objects.filter(pub_date__gt__lt__foo="blahblah")
def test_unsupported_lookups_custom_lookups(self):
slug_field = Article._meta.get_field("slug")
msg = (
"Unsupported lookup 'lengtp' for SlugField or join on the field not "
"permitted, perhaps you meant length?"
)
with self.assertRaisesMessage(FieldError, msg):
with register_lookup(slug_field, Length):
Article.objects.filter(slug__lengtp=20)
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = (
"Unsupported lookup 'editor__name' for ForeignKey or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name="James")
msg = (
"Unsupported lookup 'foo' for ForeignKey or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(articles__foo="bar")
def test_unsupported_lookup_reverse_foreign_key(self):
msg = (
"Unsupported lookup 'title' for ManyToOneRel or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.filter(article__title="Article 1")
def test_unsupported_lookup_reverse_foreign_key_custom_lookups(self):
msg = (
"Unsupported lookup 'abspl' for ManyToOneRel or join on the field not "
"permitted, perhaps you meant abspk?"
)
fk_field = Article._meta.get_field("author")
with self.assertRaisesMessage(FieldError, msg):
with register_lookup(fk_field, Abs, lookup_name="abspk"):
Author.objects.filter(article__abspl=2)
def test_filter_by_reverse_related_field_transform(self):
fk_field = Article._meta.get_field("author")
with register_lookup(fk_field, Abs):
self.assertSequenceEqual(
Author.objects.filter(article__abs=self.a1.pk), [self.au1]
)
def test_regex(self):
# Create some articles with a bit more interesting headlines for
# testing field lookups.
Article.objects.all().delete()
now = datetime.now()
Article.objects.bulk_create(
[
Article(pub_date=now, headline="f"),
Article(pub_date=now, headline="fo"),
Article(pub_date=now, headline="foo"),
Article(pub_date=now, headline="fooo"),
Article(pub_date=now, headline="hey-Foo"),
Article(pub_date=now, headline="bar"),
Article(pub_date=now, headline="AbBa"),
Article(pub_date=now, headline="baz"),
Article(pub_date=now, headline="baxZ"),
]
)
# zero-or-more
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"fo*"),
Article.objects.filter(headline__in=["f", "fo", "foo", "fooo"]),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__iregex=r"fo*"),
Article.objects.filter(headline__in=["f", "fo", "foo", "fooo", "hey-Foo"]),
)
# one-or-more
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"fo+"),
Article.objects.filter(headline__in=["fo", "foo", "fooo"]),
)
# wildcard
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"fooo?"),
Article.objects.filter(headline__in=["foo", "fooo"]),
)
# leading anchor
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"^b"),
Article.objects.filter(headline__in=["bar", "baxZ", "baz"]),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__iregex=r"^a"),
Article.objects.filter(headline="AbBa"),
)
# trailing anchor
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"z$"),
Article.objects.filter(headline="baz"),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__iregex=r"z$"),
Article.objects.filter(headline__in=["baxZ", "baz"]),
)
# character sets
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"ba[rz]"),
Article.objects.filter(headline__in=["bar", "baz"]),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"ba.[RxZ]"),
Article.objects.filter(headline="baxZ"),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__iregex=r"ba[RxZ]"),
Article.objects.filter(headline__in=["bar", "baxZ", "baz"]),
)
# and more articles:
Article.objects.bulk_create(
[
Article(pub_date=now, headline="foobar"),
Article(pub_date=now, headline="foobaz"),
Article(pub_date=now, headline="ooF"),
Article(pub_date=now, headline="foobarbaz"),
Article(pub_date=now, headline="zoocarfaz"),
Article(pub_date=now, headline="barfoobaz"),
Article(pub_date=now, headline="bazbaRFOO"),
]
)
# alternation
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"oo(f|b)"),
Article.objects.filter(
headline__in=[
"barfoobaz",
"foobar",
"foobarbaz",
"foobaz",
]
),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__iregex=r"oo(f|b)"),
Article.objects.filter(
headline__in=[
"barfoobaz",
"foobar",
"foobarbaz",
"foobaz",
"ooF",
]
),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"^foo(f|b)"),
Article.objects.filter(headline__in=["foobar", "foobarbaz", "foobaz"]),
)
# greedy matching
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"b.*az"),
Article.objects.filter(
headline__in=[
"barfoobaz",
"baz",
"bazbaRFOO",
"foobarbaz",
"foobaz",
]
),
)
self.assertQuerySetEqual(
Article.objects.filter(headline__iregex=r"b.*ar"),
Article.objects.filter(
headline__in=[
"bar",
"barfoobaz",
"bazbaRFOO",
"foobar",
"foobarbaz",
]
),
)
@skipUnlessDBFeature("supports_regex_backreferencing")
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
Article.objects.bulk_create(
[
Article(pub_date=now, headline="foobar"),
Article(pub_date=now, headline="foobaz"),
Article(pub_date=now, headline="ooF"),
Article(pub_date=now, headline="foobarbaz"),
Article(pub_date=now, headline="zoocarfaz"),
Article(pub_date=now, headline="barfoobaz"),
Article(pub_date=now, headline="bazbaRFOO"),
]
)
self.assertQuerySetEqual(
Article.objects.filter(headline__regex=r"b(.).*b\1").values_list(
"headline", flat=True
),
["barfoobaz", "bazbaRFOO", "foobarbaz"],
)
def test_regex_null(self):
"""
A regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerySetEqual(Season.objects.filter(gt__regex=r"^$"), [])
def test_textfield_exact_null(self):
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(Author.objects.filter(bio=None), [self.au2])
# Columns with IS NULL condition are not wrapped (except PostgreSQL).
bio_column = connection.ops.quote_name(Author._meta.get_field("bio").column)
self.assertIn(f"{bio_column} IS NULL", ctx.captured_queries[0]["sql"])
def test_regex_non_string(self):
"""
A regex lookup does not fail on non-string fields
"""
s = Season.objects.create(year=2013, gt=444)
self.assertQuerySetEqual(Season.objects.filter(gt__regex=r"^444$"), [s])
def test_regex_non_ascii(self):
"""
A regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name="\u2660")
Player.objects.get(name__regex="\u2660")
def test_nonfield_lookups(self):
"""
A lookup query containing non-fields raises the proper exception.
"""
msg = (
"Unsupported lookup 'blahblah' for CharField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah=99)
msg = (
"Unsupported lookup 'blahblah__exact' for CharField or join "
"on the field not permitted."
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah__exact=99)
msg = (
"Cannot resolve keyword 'blahblah' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Genuine field names don't collide with built-in lookup types
('year', 'gt', 'range', 'in' etc.) (#11670).
"""
# 'gt' is used as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games.set(Game.objects.filter(season__year=2009))
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
johnson = Player.objects.create(name="Johnson")
johnson.games.set(Game.objects.filter(season__year__in=[2011]))
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(
Player.objects.filter(games__season__year=2009).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt=111).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2
)
# Players who played in 2010
self.assertEqual(
Player.objects.filter(games__season__year=2010).distinct().count(), 1
)
self.assertEqual(
Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1
)
self.assertEqual(
Player.objects.filter(games__season__gt=222).distinct().count(), 1
)
self.assertEqual(
Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1
)
# Players who played in 2011
self.assertEqual(
Player.objects.filter(games__season__year=2011).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt=333).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2
)
self.assertEqual(
Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2
)
def test_chain_date_time_lookups(self):
self.assertCountEqual(
Article.objects.filter(pub_date__month__gt=7),
[self.a5, self.a6],
)
self.assertCountEqual(
Article.objects.filter(pub_date__day__gte=27),
[self.a2, self.a3, self.a4, self.a7],
)
self.assertCountEqual(
Article.objects.filter(pub_date__hour__lt=8),
[self.a1, self.a2, self.a3, self.a4, self.a7],
)
self.assertCountEqual(
Article.objects.filter(pub_date__minute__lte=0),
[self.a1, self.a2, self.a3, self.a4, self.a5, self.a6, self.a7],
)
def test_exact_none_transform(self):
"""Transforms are used for __exact=None."""
Season.objects.create(year=1, nulled_text_field="not null")
self.assertFalse(Season.objects.filter(nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__exact=None))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled=None))
def test_exact_sliced_queryset_limit_one(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[:1]),
[self.a1, self.a2, self.a3, self.a4],
)
def test_exact_sliced_queryset_limit_one_offset(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[1:2]),
[self.a5, self.a6, self.a7],
)
def test_exact_sliced_queryset_not_limited_to_one(self):
msg = (
"The QuerySet value for an exact lookup must be limited to one "
"result using slicing."
)
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[:2]))
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[1:]))
@skipUnless(connection.vendor == "mysql", "MySQL-specific workaround.")
def test_exact_booleanfield(self):
# MySQL ignores indexes with boolean fields unless they're compared
# directly to a boolean value.
product = Product.objects.create(name="Paper", qty_target=5000)
Stock.objects.create(product=product, short=False, qty_available=5100)
stock_1 = Stock.objects.create(product=product, short=True, qty_available=180)
qs = Stock.objects.filter(short=True)
self.assertSequenceEqual(qs, [stock_1])
self.assertIn(
"%s = True" % connection.ops.quote_name("short"),
str(qs.query),
)
@skipUnless(connection.vendor == "mysql", "MySQL-specific workaround.")
def test_exact_booleanfield_annotation(self):
# MySQL ignores indexes with boolean fields unless they're compared
# directly to a boolean value.
qs = Author.objects.annotate(
case=Case(
When(alias="a1", then=True),
default=False,
output_field=BooleanField(),
)
).filter(case=True)
self.assertSequenceEqual(qs, [self.au1])
self.assertIn(" = True", str(qs.query))
qs = Author.objects.annotate(
wrapped=ExpressionWrapper(Q(alias="a1"), output_field=BooleanField()),
).filter(wrapped=True)
self.assertSequenceEqual(qs, [self.au1])
self.assertIn(" = True", str(qs.query))
# EXISTS(...) shouldn't be compared to a boolean value.
qs = Author.objects.annotate(
exists=Exists(Author.objects.filter(alias="a1", pk=OuterRef("pk"))),
).filter(exists=True)
self.assertSequenceEqual(qs, [self.au1])
self.assertNotIn(" = True", str(qs.query))
def test_custom_field_none_rhs(self):
"""
__exact=value is transformed to __isnull=True if Field.get_prep_value()
converts value to None.
"""
season = Season.objects.create(year=2012, nulled_text_field=None)
self.assertTrue(
Season.objects.filter(pk=season.pk, nulled_text_field__isnull=True)
)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field=""))
def test_pattern_lookups_with_substr(self):
a = Author.objects.create(name="John Smith", alias="Johx")
b = Author.objects.create(name="Rhonda Simpson", alias="sonx")
tests = (
("startswith", [a]),
("istartswith", [a]),
("contains", [a, b]),
("icontains", [a, b]),
("endswith", [b]),
("iendswith", [b]),
)
for lookup, result in tests:
with self.subTest(lookup=lookup):
authors = Author.objects.filter(
**{"name__%s" % lookup: Substr("alias", 1, 3)}
)
self.assertCountEqual(authors, result)
def test_custom_lookup_none_rhs(self):
"""Lookup.can_use_none_as_rhs=True allows None as a lookup value."""
season = Season.objects.create(year=2012, nulled_text_field=None)
query = Season.objects.get_queryset().query
field = query.model._meta.get_field("nulled_text_field")
self.assertIsInstance(
query.build_lookup(["isnull_none_rhs"], field, None), IsNullWithNoneAsRHS
)
self.assertTrue(
Season.objects.filter(pk=season.pk, nulled_text_field__isnull_none_rhs=True)
)
def test_exact_exists(self):
qs = Article.objects.filter(pk=OuterRef("pk"))
seasons = Season.objects.annotate(pk_exists=Exists(qs)).filter(
pk_exists=Exists(qs),
)
self.assertCountEqual(seasons, Season.objects.all())
def test_nested_outerref_lhs(self):
tag = Tag.objects.create(name=self.au1.alias)
tag.articles.add(self.a1)
qs = Tag.objects.annotate(
has_author_alias_match=Exists(
Article.objects.annotate(
author_exists=Exists(
Author.objects.filter(alias=OuterRef(OuterRef("name")))
),
).filter(author_exists=True)
),
)
self.assertEqual(qs.get(has_author_alias_match=True), tag)
def test_exact_query_rhs_with_selected_columns(self):
newest_author = Author.objects.create(name="Author 2")
authors_max_ids = (
Author.objects.filter(
name="Author 2",
)
.values(
"name",
)
.annotate(
max_id=Max("id"),
)
.values("max_id")
)
authors = Author.objects.filter(id=authors_max_ids[:1])
self.assertEqual(authors.get(), newest_author)
def test_exact_query_rhs_with_selected_columns_mismatch(self):
msg = (
"The QuerySet value for the exact lookup must have 1 "
"selected fields (received 2)"
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.filter(id=Author.objects.values("id", "name")[:1])
def test_isnull_non_boolean_value(self):
msg = "The QuerySet value for an isnull lookup must be True or False."
tests = [
Author.objects.filter(alias__isnull=1),
Article.objects.filter(author__isnull=1),
Season.objects.filter(games__isnull=1),
Freebie.objects.filter(stock__isnull=1),
]
for qs in tests:
with self.subTest(qs=qs):
with self.assertRaisesMessage(ValueError, msg):
qs.exists()
def test_isnull_textfield(self):
self.assertSequenceEqual(
Author.objects.filter(bio__isnull=True),
[self.au2],
)
self.assertSequenceEqual(
Author.objects.filter(bio__isnull=False),
[self.au1],
)
def test_lookup_rhs(self):
product = Product.objects.create(name="GME", qty_target=5000)
stock_1 = Stock.objects.create(product=product, short=True, qty_available=180)
stock_2 = Stock.objects.create(product=product, short=False, qty_available=5100)
Stock.objects.create(product=product, short=False, qty_available=4000)
self.assertCountEqual(
Stock.objects.filter(short=Q(qty_available__lt=F("product__qty_target"))),
[stock_1, stock_2],
)
self.assertCountEqual(
Stock.objects.filter(
short=ExpressionWrapper(
Q(qty_available__lt=F("product__qty_target")),
output_field=BooleanField(),
)
),
[stock_1, stock_2],
)
def test_lookup_direct_value_rhs_unwrapped(self):
with self.assertNumQueries(1) as ctx:
self.assertIs(Author.objects.filter(GreaterThan(2, 1)).exists(), True)
# Direct values on RHS are not wrapped.
self.assertIn("2 > 1", ctx.captured_queries[0]["sql"])
| LookupTests |
python | Lightning-AI__lightning | examples/fabric/kfold_cv/train_fabric.py | {
"start": 1071,
"end": 7504
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train_dataloader(model, data_loader, optimizer, fabric, epoch, hparams, fold):
# TRAINING LOOP
model.train()
for batch_idx, (data, target) in enumerate(data_loader):
# NOTE: no need to call `.to(device)` on the data, target
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
fabric.backward(loss) # instead of loss.backward()
optimizer.step()
if (batch_idx == 0) or ((batch_idx + 1) % hparams.log_interval == 0):
print(
f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(data_loader.dataset)}"
f" ({100.0 * batch_idx / len(data_loader):.0f}%)]\tLoss: {loss.item():.6f}"
)
if hparams.dry_run:
break
def validate_dataloader(model, data_loader, fabric, hparams, fold, acc_metric):
model.eval()
loss = 0
with torch.no_grad():
for data, target in data_loader:
# NOTE: no need to call `.to(device)` on the data, target
output = model(data)
loss += F.nll_loss(output, target, reduction="sum").item()
# Accuracy with torchmetrics
acc_metric.update(output, target)
if hparams.dry_run:
break
# all_gather is used to aggregate the value across processes
loss = fabric.all_gather(loss).sum() / len(data_loader.dataset)
# compute acc
acc = acc_metric.compute() * 100
print(f"\nFor fold: {fold} Validation set: Average loss: {loss:.4f}, Accuracy: ({acc:.0f}%)\n")
return acc
def run(hparams):
    """Train and validate ``Net`` with k-fold cross validation under Fabric.

    Accelerator, strategy, devices etc. are provided on the command line
    (see all options with ``fabric run --help``); this function only wires
    up the data, one (model, optimizer) pair per fold, and the loops.
    """
    fabric = Fabric()
    seed_everything(hparams.seed)  # instead of torch.manual_seed(...)

    transform = T.Compose([T.ToTensor(), T.Normalize((0.1307,), (0.3081,))])

    # Rank 0 downloads the data first, then everyone loads MNIST. Set
    # `local=True` if your filesystem is not shared between machines.
    with fabric.rank_zero_first(local=False):
        dataset = MNIST(DATASETS_PATH, train=True, download=True, transform=transform)

    # KFold does not shuffle by default, so the splits are reproducible.
    kfold = model_selection.KFold(n_splits=hparams.folds)
    n_splits = kfold.n_splits

    # One model/optimizer pair per fold, each wrapped by fabric.setup.
    models = [Net() for _ in range(n_splits)]
    optimizers = [optim.Adadelta(m.parameters(), lr=hparams.lr) for m in models]
    for i in range(n_splits):
        models[i], optimizers[i] = fabric.setup(models[i], optimizers[i])

    # Accuracy via torchmetrics, shared (and reset) across folds.
    acc_metric = Accuracy(task="multiclass", num_classes=10).to(fabric.device)

    for epoch in range(1, hparams.epochs + 1):
        epoch_acc = 0
        for fold, (train_ids, val_ids) in enumerate(kfold.split(dataset)):
            print(f"Working on fold {fold}")

            # Fold-specific loaders over the shared dataset.
            train_loader = DataLoader(
                dataset, batch_size=hparams.batch_size, sampler=SubsetRandomSampler(train_ids)
            )
            val_loader = DataLoader(
                dataset, batch_size=hparams.batch_size, sampler=SubsetRandomSampler(val_ids)
            )
            # Let fabric move batches to the correct device automatically.
            train_loader, val_loader = fabric.setup_dataloaders(train_loader, val_loader)

            model, optimizer = models[fold], optimizers[fold]
            train_dataloader(model, train_loader, optimizer, fabric, epoch, hparams, fold)
            epoch_acc += validate_dataloader(model, val_loader, fabric, hparams, fold, acc_metric)
            acc_metric.reset()

        print(f"Epoch {epoch} - Average acc: {epoch_acc / n_splits}")
        if hparams.dry_run:
            break

    # `fabric.save` ensures only the process allowed to write a checkpoint does.
    if hparams.save_model:
        fabric.save(path="mnist_cnn.pt", state=model.state_dict())
if __name__ == "__main__":
    # Arguments can be passed in through the CLI as normal and will be parsed here
    # Example:
    # fabric run image_classifier.py accelerator=cuda --epochs=3
    # NOTE(review): `--gamma` is parsed but never read by `run` — presumably a
    # leftover from an LR-scheduler variant of this example; confirm or remove.
    parser = argparse.ArgumentParser(description="Fabric MNIST K-Fold Cross Validation Example")
    parser.add_argument(
        "--batch-size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)"
    )
    parser.add_argument("--epochs", type=int, default=14, metavar="N", help="number of epochs to train (default: 14)")
    parser.add_argument("--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)")
    parser.add_argument("--gamma", type=float, default=0.7, metavar="M", help="Learning rate step gamma (default: 0.7)")
    parser.add_argument("--dry-run", action="store_true", default=False, help="quickly check a single pass")
    parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
    parser.add_argument(
        "--log-interval",
        type=int,
        default=10,
        metavar="N",
        help="how many batches to wait before logging training status",
    )
    parser.add_argument("--folds", type=int, default=5, help="number of folds for k-fold cross validation")
    parser.add_argument("--save-model", action="store_true", default=False, help="For Saving the current Model")
    hparams = parser.parse_args()
    run(hparams)
| Net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.