language | repo | path | class_span | source | target
|---|---|---|---|---|---|
python | PrefectHQ__prefect | src/prefect/server/api/concurrency_limits.py | {
"start": 15770,
"end": 15862
} | class ____(Exception):
def __init__(self, reason: str):
self.reason = reason
| Abort |
python | joke2k__faker | faker/providers/person/en_IN/__init__.py | {
"start": 44,
"end": 19865
} | class ____(PersonProvider):
formats_male = ("{{first_name_male}} {{last_name}}",)
formats_female = ("{{first_name_female}} {{last_name}}",)
formats = ("{{first_name}} {{last_name}}",)
# https://www.in.pampers.com/pregnancy/baby-names/article/indian-baby-boys-names
first_names_male = (
"Aadi",
"Aarav",
"Aarnav",
"Aarush",
"Aayush",
"Abdul",
"Abeer",
"Abhimanyu",
"Abhiram",
"Aditya",
"Advaith",
"Advay",
"Advik",
"Agastya",
"Akshay",
"Alexander",
"Amol",
"Anay",
"Andrew",
"Anirudh",
"Anmol",
"Ansh",
"Anthony",
"Arin",
"Arjun",
"Aryan",
"Atharv",
"Avi",
"Ayaan",
"Ayush",
"Ayushman",
"Azaan",
"Azad",
"Bachittar",
"Bahadurjit",
"Bakhshi",
"Balendra",
"Balhaar",
"Baljiwan",
"Balvan",
"Balveer",
"Banjeet",
"Benjamin",
"Brijesh",
"Caleb",
"Chaitanya",
"Chakradev",
"Chakradhar",
"Champak",
"Chanakya",
"Chandran",
"Chandresh",
"Charan",
"Charles",
"Chatresh",
"Chatura",
"Christopher",
"Daksh",
"Dakshesh",
"Dalbir",
"Daniel",
"Darpan",
"Darsh",
"David",
"Dev",
"Devansh",
"Dhruv",
"Dominic",
"Ekalinga",
"Ekansh",
"Ekapad",
"Ekaraj",
"Ekavir",
"Ekbal",
"Elijah",
"Ethan",
"Falan",
"Faqid",
"Faraj",
"Faras",
"Farhan",
"Fariq",
"Faris",
"Finn",
"Fitan",
"Fiyaz",
"Frado",
"Frederick",
"Gabriel",
"Gagan",
"Gaurang",
"Gaurav",
"Gautam",
"Gavin",
"George",
"Girik",
"Girindra",
"Girish",
"Gopal",
"Gunbir",
"Guneet",
"Hardik",
"Harish",
"Harrison",
"Harsh",
"Harshil",
"Hemang",
"Henry",
"Hitesh",
"Hredhaan",
"Hritik",
"Ikbal",
"Imaran",
"Indrajit",
"Isaac",
"Isaiah",
"Ishaan",
"Ishwar",
"Jack",
"Jackson",
"Jacob",
"Jagat",
"Jagdish",
"Jai",
"Jairaj",
"Jason",
"Jatin",
"Jeet",
"Jeremiah",
"Jonathan",
"Joshua",
"Kabir",
"Kai",
"Kalpit",
"Karan",
"Kevin",
"Kiaan",
"Krish",
"Krishna",
"Laban",
"Laksh",
"Lakshit",
"Liam",
"Logan",
"Lohit",
"Lucky",
"Luke",
"Maanas",
"Maanav",
"Madhav",
"Manan",
"Manbir",
"Manthan",
"Mason",
"Matthew",
"Max",
"Michael",
"Mitesh",
"Mohammed",
"Nachiket",
"Naksh",
"Nakul",
"Nathan",
"Nathaniel",
"Naveen",
"Neel",
"Nicholas",
"Nihal",
"Nitesh",
"Noah",
"Ojas",
"Oliver",
"Om",
"Omkaar",
"Onkar",
"Onveer",
"Orinder",
"Oscar",
"Owen",
"Parth",
"Patrick",
"Peter",
"Pranav",
"Praneel",
"Pranit",
"Pratyush",
"Qabil",
"Qadim",
"Qarin",
"Qasim",
"Quincy",
"Rachit",
"Raghav",
"Ranbir",
"Ranveer",
"Rayaan",
"Rehaan",
"Reyansh",
"Rishi",
"Robert",
"Rohan",
"Ronith",
"Rudra",
"Rushil",
"Ryan",
"Sai",
"Saksham",
"Samaksh",
"Samar",
"Samarth",
"Samesh",
"Samuel",
"Sarthak",
"Sathvik",
"Shaurya",
"Shivansh",
"Siddharth",
"Simon",
"Tanay",
"Tanish",
"Tanveer",
"Tarak",
"Teerth",
"Tejas",
"Theodore",
"Thomas",
"Timothy",
"Tristan",
"Udant",
"Udarsh",
"Umang",
"Upkaar",
"Utkarsh",
"Vedant",
"Veer",
"Victor",
"Vihaan",
"Vincent",
"Viraj",
"Vivaan",
"Wahab",
"Warinder",
"Warjas",
"Wazir",
"William",
"Wriddhish",
"Wridesh",
"Wyatt",
"Xavier",
"Yagnesh",
"Yash",
"Yatan",
"Yatin",
"Yug",
"Yuvraj",
"Zaid",
"Zashil",
"Zayan",
"Zayyan",
"Zehaan",
)
# https://www.pampers.com/en-us/pregnancy/baby-names/article/indian-girl-names
first_names_female = (
"Aachal",
"Aadhya",
"Aahana",
"Aarini",
"Aarna",
"Aashi",
"Abha",
"Advika",
"Adweta",
"Adya",
"Aishani",
"Alka",
"Amaira",
"Amara",
"Amrita",
"Amruta",
"Anamika",
"Anika",
"Anita",
"Anjali",
"Anusha",
"Anvi",
"Anya",
"Aradhana",
"Arunima",
"Arya",
"Ati",
"Avni",
"Baghyawati",
"Barkha",
"Bhanumati",
"Bhavani",
"Bhavika",
"Bhavini",
"Bhavna",
"Bhavya",
"Bimala",
"Bina",
"Bishakha",
"Brinda",
"Chaaya",
"Chaitaly",
"Chakrika",
"Chaman",
"Chameli",
"Chanchal",
"Chandani",
"Charita",
"Charvi",
"Chasmum",
"Chavvi",
"Daksha",
"Dalaja",
"Damini",
"Damyanti",
"Darika",
"Dayamai",
"Dayita",
"Deepa",
"Devika",
"Dhriti",
"Dipta",
"Divya",
"Diya",
"Edhitha",
"Eesha",
"Eiravati",
"Ekaja",
"Ekani",
"Ekanta",
"Ekantika",
"Ekiya",
"Ekta",
"Eshana",
"Eta",
"Falak",
"Falguni",
"Forum",
"Ganga",
"Garima",
"Gaurangi",
"Gauri",
"Gaurika",
"Gautami",
"Gayathri",
"Geetika",
"Hamsini",
"Harinakshi",
"Harini",
"Harita",
"Hema",
"Hemal",
"Hemangini",
"Hemani",
"Hiral",
"Idika",
"Ijaya",
"Ikshita",
"Inaya",
"Indali",
"Indira",
"Ira",
"Irya",
"Isha",
"Ishani",
"Ishanvi",
"Ishita",
"Jagrati",
"Jagvi",
"Jalsa",
"Janaki",
"Janani",
"Januja",
"Janya",
"Jasmit",
"Jeevika",
"Jhalak",
"Jyoti",
"Kala",
"Kamala",
"Kamya",
"Kashish",
"Kashvi",
"Kavya",
"Keya",
"Krisha",
"Krishna",
"Kritika",
"Ladli",
"Lajita",
"Lakshmi",
"Lavanya",
"Leela",
"Leena",
"Lekha",
"Libni",
"Lila",
"Lipika",
"Lopa",
"Madhavi",
"Mahika",
"Manya",
"Maya",
"Meera",
"Megha",
"Meghana",
"Mekhala",
"Mitali",
"Mohini",
"Mugdha",
"Nandini",
"Neelima",
"Neha",
"Netra",
"Nidhi",
"Nidra",
"Niharika",
"Nikita",
"Nilima",
"Nimrat",
"Nirja",
"Nisha",
"Nitara",
"Odika",
"Oeshi",
"Ojasvi",
"Omaja",
"Omisha",
"Omya",
"Oni",
"Osha",
"Oviya",
"Pahal",
"Pallavi",
"Panini",
"Pavani",
"Pooja",
"Prisha",
"Priya",
"Pushti",
"Qushi",
"Raagini",
"Rachana",
"Rachita",
"Radha",
"Radhika",
"Rajata",
"Rajeshri",
"Raksha",
"Reva",
"Ria",
"Ridhi",
"Riya",
"Saanvi",
"Sachi",
"Sai",
"Sanaya",
"Sanya",
"Sara",
"Saumya",
"Shivani",
"Shravya",
"Siya",
"Sneha",
"Sudiksha",
"Suhani",
"Tamanna",
"Tanmayi",
"Tanvi",
"Tara",
"Tripti",
"Triveni",
"Triya",
"Turvi",
"Ubika",
"Ucchal",
"Udyati",
"Unnati",
"Unni",
"Upadhriti",
"Upasna",
"Upma",
"Urishilla",
"Urmi",
"Urvashi",
"Urvi",
"Vaishnavi",
"Vamakshi",
"Vansha",
"Vanya",
"Varenya",
"Varsha",
"Vasana",
"Vasatika",
"Vasudha",
"Veda",
"Vedhika",
"Vedika",
"Vidhi",
"Vinaya",
"Vrinda",
"Vrishti",
"Vritti",
"Vyanjana",
"Waida",
"Wakeeta",
"Warda",
"Warhi",
"Watika",
"Widisha",
"Wishi",
"Xalak",
"Xiti",
"Yachana",
"Yadavi",
"Yahvi",
"Yamini",
"Yashasvi",
"Yashawini",
"Yashica",
"Yashoda",
"Yashodhara",
"Yashvi",
"Yasti",
"Yauvani",
"Yochana",
"Yoshita",
"Yutika",
"Zaitra",
"Zansi",
"Zarna",
"Zilmil",
"Zinal",
)
first_names = first_names_male + first_names_female
last_names = (
"Acharya",
"Agarwal",
"Agate",
"Aggarwal",
"Agrawal",
"Ahluwalia",
"Ahuja",
"Amble",
"Anand",
"Andra",
"Anne",
"Apte",
"Arora",
"Arya",
"Atwal",
"Aurora",
"Babu",
"Badal",
"Badami",
"Bahl",
"Bahri",
"Bail",
"Bains",
"Bajaj",
"Bajwa",
"Bakshi",
"Bal",
"Bala",
"Bala",
"Balakrishnan",
"Balan",
"Balasubramanian",
"Balay",
"Bali",
"Bandi",
"Banerjee",
"Banik",
"Bansal",
"Barad",
"Barad",
"Baral",
"Baria",
"Barman",
"Basak",
"Bassi",
"Basu",
"Bath",
"Batra",
"Batta",
"Bava",
"Bawa",
"Bedi",
"Behl",
"Ben",
"Bera",
"Bhagat",
"Bhakta",
"Bhalla",
"Bhandari",
"Bhardwaj",
"Bhargava",
"Bhasin",
"Bhat",
"Bhatia",
"Bhatnagar",
"Bhatt",
"Bhattacharyya",
"Bhatti",
"Bhavsar",
"Bir",
"Biswas",
"Boase",
"Bobal",
"Bora",
"Bora",
"Borah",
"Borde",
"Borra",
"Bose",
"Brahmbhatt",
"Brar",
"Buch",
"Buch",
"Bumb",
"Butala",
"Chacko",
"Chad",
"Chada",
"Chadha",
"Chahal",
"Chakrabarti",
"Chakraborty",
"Chana",
"Chand",
"Chanda",
"Chander",
"Chandra",
"Chandran",
"Char",
"Chatterjee",
"Chaudhari",
"Chaudhary",
"Chaudhry",
"Chaudhuri",
"Chaudry",
"Chauhan",
"Chawla",
"Cheema",
"Cherian",
"Chhabra",
"Chokshi",
"Chopra",
"Choudhary",
"Choudhry",
"Choudhury",
"Chowdhury",
"Comar",
"Contractor",
"D’Alia",
"Dada",
"Dalal",
"Dani",
"Dar",
"Dara",
"Dara",
"Das",
"Dasgupta",
"Dash",
"Dass",
"Date",
"Datta",
"Dave",
"Dayal",
"De",
"Deep",
"Deo",
"Deol",
"Desai",
"Deshmukh",
"Deshpande",
"Devan",
"Devi",
"Dewan",
"Dey",
"Dhaliwal",
"Dhar",
"Dhar",
"Dhawan",
"Dhillon",
"Dhingra",
"Din",
"Divan",
"Dixit",
"Doctor",
"Dora",
"Doshi",
"Dua",
"Dube",
"Dubey",
"Dugal",
"Dugar",
"Dugar",
"Dutt",
"Dutta",
"Dyal",
"Edwin",
"Gaba",
"Gade",
"Gala",
"Gandhi",
"Ganesan",
"Ganesh",
"Ganguly",
"Gara",
"Garde",
"Garg",
"Gera",
"Ghose",
"Ghosh",
"Gill",
"Goda",
"Goel",
"Gokhale",
"Gola",
"Gole",
"Golla",
"Gopal",
"Goswami",
"Gour",
"Goyal",
"Grewal",
"Grover",
"Guha",
"Gulati",
"Gupta",
"Halder",
"Handa",
"Hans",
"Hari",
"Hayer",
"Hayre",
"Hegde",
"Hora",
"Issac",
"Iyengar",
"Iyer",
"Jaggi",
"Jain",
"Jani",
"Jayaraman",
"Jha",
"Jhaveri",
"Johal",
"Joshi",
"Kadakia",
"Kade",
"Kakar",
"Kala",
"Kala",
"Kala",
"Kale",
"Kalita",
"Kalla",
"Kamdar",
"Kanda",
"Kannan",
"Kant",
"Kapadia",
"Kapoor",
"Kapur",
"Kar",
"Kara",
"Karan",
"Kari",
"Karnik",
"Karpe",
"Kashyap",
"Kata",
"Kaul",
"Kaur",
"Keer",
"Keer",
"Khalsa",
"Khanna",
"Khare",
"Khatri",
"Khosla",
"Khurana",
"Kibe",
"Kohli",
"Konda",
"Korpal",
"Koshy",
"Kota",
"Kothari",
"Krish",
"Krishna",
"Krishnamurthy",
"Krishnan",
"Kulkarni",
"Kumar",
"Kumer",
"Kunda",
"Kurian",
"Kuruvilla",
"Lad",
"Lad",
"Lal",
"Lala",
"Lall",
"Lalla",
"Lanka",
"Lata",
"Loke",
"Loyal",
"Luthra",
"Madan",
"Madan",
"Magar",
"Mahajan",
"Mahal",
"Maharaj",
"Majumdar",
"Malhotra",
"Mall",
"Mallick",
"Mammen",
"Mand",
"Manda",
"Mandal",
"Mander",
"Mane",
"Mangal",
"Mangat",
"Mani",
"Mani",
"Mann",
"Mannan",
"Manne",
"Master",
"Memon",
"Menon",
"Merchant",
"Minhas",
"Mishra",
"Misra",
"Mistry",
"Mital",
"Mitra",
"Mittal",
"Mitter",
"Modi",
"Mody",
"Mohan",
"Mohanty",
"Morar",
"More",
"Mukherjee",
"Mukhopadhyay",
"Muni",
"Munshi",
"Murthy",
"Murty",
"Mutti",
"Nadig",
"Nadkarni",
"Nagar",
"Nagarajan",
"Nagi",
"Nagy",
"Naidu",
"Naik",
"Nair",
"Nanda",
"Narain",
"Narang",
"Narasimhan",
"Narayan",
"Narayanan",
"Narula",
"Natarajan",
"Nath",
"Natt",
"Nayak",
"Nayar",
"Nazareth",
"Nigam",
"Nori",
"Oak",
"Om",
"Oommen",
"Oza",
"Padmanabhan",
"Pai",
"Pal",
"Palan",
"Pall",
"Palla",
"Palla",
"Panchal",
"Pandey",
"Pandit",
"Pandya",
"Pant",
"Parekh",
"Parikh",
"Parmar",
"Parmer",
"Parsa",
"Patel",
"Pathak",
"Patil",
"Patla",
"Patla",
"Pau",
"Peri",
"Pillai",
"Pillay",
"Pingle",
"Prabhakar",
"Prabhu",
"Pradhan",
"Prakash",
"Prasad",
"Prashad",
"Puri",
"Purohit",
"Radhakrishnan",
"Raghavan",
"Rai",
"Raj",
"Raja",
"Rajagopal",
"Rajagopalan",
"Rajan",
"Raju",
"Ram",
"Rama",
"Ramachandran",
"Ramakrishnan",
"Raman",
"Ramanathan",
"Ramaswamy",
"Ramesh",
"Rana",
"Randhawa",
"Ranganathan",
"Rao",
"Rastogi",
"Ratta",
"Rattan",
"Ratti",
"Rau",
"Raval",
"Ravel",
"Ravi",
"Ray",
"Reddy",
"Rege",
"Rout",
"Roy",
"Sabharwal",
"Sachar",
"Sachdev",
"Sachdeva",
"Sagar",
"Saha",
"Sahni",
"Sahota",
"Saini",
"Salvi",
"Sama",
"Sami",
"Sampath",
"Samra",
"Sandal",
"Sandhu",
"Sane",
"Sangha",
"Sanghvi",
"Sani",
"Sankar",
"Sankaran",
"Sant",
"Saraf",
"Saran",
"Sarin",
"Sarkar",
"Sarma",
"Sarna",
"Sarraf",
"Sastry",
"Sathe",
"Savant",
"Sawhney",
"Saxena",
"Sehgal",
"Sekhon",
"Sem",
"Sen",
"Sengupta",
"Seshadri",
"Seth",
"Sethi",
"Setty",
"Sha",
"Shah",
"Shan",
"Shankar",
"Shanker",
"Sharaf",
"Sharma",
"Shenoy",
"Shere",
"Sheth",
"Shetty",
"Shroff",
"Shukla",
"Sibal",
"Sidhu",
"Singh",
"Singhal",
"Sinha",
"Sodhi",
"Solanki",
"Som",
"Soman",
"Soni",
"Sood",
"Sridhar",
"Srinivas",
"Srinivasan",
"Srivastava",
"Subramaniam",
"Subramanian",
"Sule",
"Sundaram",
"Sunder",
"Sur",
"Sura",
"Suresh",
"Suri",
"Swaminathan",
"Swamy",
"Tailor",
"Tak",
"Talwar",
"Tandon",
"Taneja",
"Tank",
"Tara",
"Tata",
"Tella",
"Thaker",
"Thakkar",
"Thakur",
"Thaman",
"Tiwari",
"Toor",
"Tripathi",
"Trivedi",
"Upadhyay",
"Uppal",
"Vaidya",
"Vala",
"Varghese",
"Varkey",
"Varma",
"Varty",
"Varughese",
"Vasa",
"Venkataraman",
"Venkatesh",
"Verma",
"Vig",
"Virk",
"Viswanathan",
"Vohra",
"Vora",
"Vyas",
"Wable",
"Wadhwa",
"Wagle",
"Wali",
"Wali",
"Walia",
"Walla",
"Warrior",
"Wason",
"Yadav",
"Yogi",
"Yohannan",
"Zacharia",
"Zachariah",
)
| Provider |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 112705,
"end": 114570
} | class ____(Layer):
"""Zero-padding layer for 1D input (e.g. temporal sequence).
Examples:
>>> input_shape = (2, 2, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[ 0 1 2]
[ 3 4 5]]
[[ 6 7 8]
[ 9 10 11]]]
>>> y = tf.keras.layers.ZeroPadding1D(padding=2)(x)
>>> print(y)
tf.Tensor(
[[[ 0 0 0]
[ 0 0 0]
[ 0 1 2]
[ 3 4 5]
[ 0 0 0]
[ 0 0 0]]
[[ 0 0 0]
[ 0 0 0]
[ 6 7 8]
[ 9 10 11]
[ 0 0 0]
[ 0 0 0]]], shape=(2, 6, 3), dtype=int64)
Args:
padding: Int, or tuple of int (length 2), or dictionary.
- If int:
How many zeros to add at the beginning and end of
the padding dimension (axis 1).
- If tuple of int (length 2):
How many zeros to add at the beginning and the end of
the padding dimension (`(left_pad, right_pad)`).
Input shape:
3D tensor with shape `(batch_size, axis_to_pad, features)`
Output shape:
3D tensor with shape `(batch_size, padded_axis, features)`
"""
def __init__(self, padding=1, **kwargs):
super(ZeroPadding1D, self).__init__(**kwargs)
self.padding = conv_utils.normalize_tuple(padding, 2, 'padding')
self.input_spec = InputSpec(ndim=3)
def compute_output_shape(self, input_shape):
if input_shape[1] is not None:
length = input_shape[1] + self.padding[0] + self.padding[1]
else:
length = None
return tensor_shape.TensorShape([input_shape[0], length, input_shape[2]])
def call(self, inputs):
return backend.temporal_padding(inputs, padding=self.padding)
def get_config(self):
config = {'padding': self.padding}
base_config = super(ZeroPadding1D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| ZeroPadding1D |
python | scipy__scipy | scipy/stats/tests/test_fit.py | {
"start": 35066,
"end": 48127
} | class ____:
def test_gof_iv(self):
dist = stats.norm
x = [1, 2, 3]
message = r"`dist` must be a \(non-frozen\) instance of..."
with pytest.raises(TypeError, match=message):
goodness_of_fit(stats.norm(), x)
message = "`data` must be a one-dimensional array of numbers."
with pytest.raises(ValueError, match=message):
goodness_of_fit(dist, [[1, 2, 3]])
message = "`statistic` must be one of..."
with pytest.raises(ValueError, match=message):
goodness_of_fit(dist, x, statistic='mm')
message = "`n_mc_samples` must be an integer."
with pytest.raises(TypeError, match=message):
goodness_of_fit(dist, x, n_mc_samples=1000.5)
message = "SeedSequence expects int or sequence"
with pytest.raises(TypeError, match=message):
goodness_of_fit(dist, x, rng='herring')
def test_against_ks(self):
rng = np.random.default_rng(8517426291317196949)
x = examgrades
known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
res = goodness_of_fit(stats.norm, x, known_params=known_params,
statistic='ks', rng=rng)
ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
assert_allclose(res.statistic, ref.statistic) # ~0.0848
assert_allclose(res.pvalue, ref.pvalue, atol=5e-3) # ~0.335
def test_against_lilliefors(self):
rng = np.random.default_rng(2291803665717442724)
x = examgrades
# preserve use of old random_state during SPEC 7 transition
res = goodness_of_fit(stats.norm, x, statistic='ks', random_state=rng)
known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
ref = stats.kstest(x, stats.norm(**known_params).cdf, method='exact')
assert_allclose(res.statistic, ref.statistic) # ~0.0848
assert_allclose(res.pvalue, 0.0348, atol=5e-3)
def test_against_cvm(self):
rng = np.random.default_rng(8674330857509546614)
x = examgrades
known_params = {'loc': np.mean(x), 'scale': np.std(x, ddof=1)}
res = goodness_of_fit(stats.norm, x, known_params=known_params,
statistic='cvm', rng=rng)
ref = stats.cramervonmises(x, stats.norm(**known_params).cdf)
assert_allclose(res.statistic, ref.statistic) # ~0.090
assert_allclose(res.pvalue, ref.pvalue, atol=5e-3) # ~0.636
def test_against_anderson_case_0(self):
# "Case 0" is where loc and scale are known [1]
rng = np.random.default_rng(7384539336846690410)
x = np.arange(1, 101)
# loc that produced critical value of statistic found w/ root_scalar
known_params = {'loc': 45.01575354024957, 'scale': 30}
res = goodness_of_fit(stats.norm, x, known_params=known_params,
statistic='ad', rng=rng)
assert_allclose(res.statistic, 2.492) # See [1] Table 1A 1.0
assert_allclose(res.pvalue, 0.05, atol=5e-3)
def test_against_anderson_case_1(self):
# "Case 1" is where scale is known and loc is fit [1]
rng = np.random.default_rng(5040212485680146248)
x = np.arange(1, 101)
# scale that produced critical value of statistic found w/ root_scalar
known_params = {'scale': 29.957112639101933}
res = goodness_of_fit(stats.norm, x, known_params=known_params,
statistic='ad', rng=rng)
assert_allclose(res.statistic, 0.908) # See [1] Table 1B 1.1
assert_allclose(res.pvalue, 0.1, atol=5e-3)
def test_against_anderson_case_2(self):
# "Case 2" is where loc is known and scale is fit [1]
rng = np.random.default_rng(726693985720914083)
x = np.arange(1, 101)
# loc that produced critical value of statistic found w/ root_scalar
known_params = {'loc': 44.5680212261933}
res = goodness_of_fit(stats.norm, x, known_params=known_params,
statistic='ad', rng=rng)
assert_allclose(res.statistic, 2.904) # See [1] Table 1B 1.2
assert_allclose(res.pvalue, 0.025, atol=5e-3)
def test_against_anderson_case_3(self):
# "Case 3" is where both loc and scale are fit [1]
rng = np.random.default_rng(6763691329830218206)
# c that produced critical value of statistic found w/ root_scalar
x = stats.skewnorm.rvs(1.4477847789132101, loc=1, scale=2, size=100,
random_state=rng)
res = goodness_of_fit(stats.norm, x, statistic='ad', rng=rng)
assert_allclose(res.statistic, 0.559) # See [1] Table 1B 1.2
assert_allclose(res.pvalue, 0.15, atol=5e-3)
@pytest.mark.xslow
def test_against_anderson_gumbel_r(self):
rng = np.random.default_rng(7302761058217743)
# c that produced critical value of statistic found w/ root_scalar
x = stats.genextreme(0.051896837188595134, loc=0.5,
scale=1.5).rvs(size=1000, random_state=rng)
res = goodness_of_fit(stats.gumbel_r, x, statistic='ad',
rng=rng)
ref = stats.anderson(x, dist='gumbel_r')
assert_allclose(res.statistic, ref.critical_values[0])
assert_allclose(res.pvalue, ref.significance_level[0]/100, atol=5e-3)
def test_against_filliben_norm(self):
# Test against `stats.fit` ref. [7] Section 8 "Example"
rng = np.random.default_rng(8024266430745011915)
y = [6, 1, -4, 8, -2, 5, 0]
known_params = {'loc': 0, 'scale': 1}
res = stats.goodness_of_fit(stats.norm, y, known_params=known_params,
statistic="filliben", rng=rng)
# Slight discrepancy presumably due to roundoff in Filliben's
# calculation. Using exact order statistic medians instead of
# Filliben's approximation doesn't account for it.
assert_allclose(res.statistic, 0.98538, atol=1e-4)
assert 0.75 < res.pvalue < 0.9
# Using R's ppcc library:
# library(ppcc)
# options(digits=16)
# x <- c(6, 1, -4, 8, -2, 5, 0)
# set.seed(100)
# ppccTest(x, "qnorm", ppos="Filliben")
# Small discrepancy with R's values:
assert_allclose(res.statistic, 0.98540957187084, rtol=2e-5)
assert_allclose(res.pvalue, 0.8875, rtol=2e-3)
def test_filliben_property(self):
# Filliben's statistic should be independent of data location and scale
rng = np.random.default_rng(8535677809395478813)
x = rng.normal(loc=10, scale=0.5, size=100)
res = stats.goodness_of_fit(stats.norm, x,
statistic="filliben", rng=rng)
known_params = {'loc': 0, 'scale': 1}
ref = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
statistic="filliben", rng=rng)
assert_allclose(res.statistic, ref.statistic, rtol=1e-15)
@pytest.mark.parametrize('case', [(25, [.928, .937, .950, .958, .966]),
(50, [.959, .965, .972, .977, .981]),
(95, [.977, .979, .983, .986, .989])])
def test_against_filliben_norm_table(self, case):
# Test against `stats.fit` ref. [7] Table 1
rng = np.random.default_rng(504569995557928957)
n, ref = case
x = rng.random(n)
known_params = {'loc': 0, 'scale': 1}
res = stats.goodness_of_fit(stats.norm, x, known_params=known_params,
statistic="filliben", rng=rng)
percentiles = np.array([0.005, 0.01, 0.025, 0.05, 0.1])
res = stats.scoreatpercentile(res.null_distribution, percentiles*100)
assert_allclose(res, ref, atol=2e-3)
@pytest.mark.xslow
@pytest.mark.parametrize('case', [(5, 0.95772790260469, 0.4755),
(6, 0.95398832257958, 0.3848),
(7, 0.9432692889277, 0.2328)])
def test_against_ppcc(self, case):
# Test against R ppcc, e.g.
# library(ppcc)
# options(digits=16)
# x <- c(0.52325412, 1.06907699, -0.36084066, 0.15305959, 0.99093194)
# set.seed(100)
# ppccTest(x, "qrayleigh", ppos="Filliben")
n, ref_statistic, ref_pvalue = case
rng = np.random.default_rng(7777775561439803116)
x = rng.normal(size=n)
res = stats.goodness_of_fit(stats.rayleigh, x, statistic="filliben",
rng=rng)
assert_allclose(res.statistic, ref_statistic, rtol=1e-4)
assert_allclose(res.pvalue, ref_pvalue, atol=1.5e-2)
def test_params_effects(self):
# Ensure that `guessed_params`, `fit_params`, and `known_params` have
# the intended effects.
rng = np.random.default_rng(9121950977643805391)
x = stats.skewnorm.rvs(-5.044559778383153, loc=1, scale=2, size=50,
random_state=rng)
# Show that `guessed_params` don't fit to the guess,
# but `fit_params` and `known_params` respect the provided fit
guessed_params = {'c': 13.4}
fit_params = {'scale': 13.73}
known_params = {'loc': -13.85}
rng = np.random.default_rng(9121950977643805391)
res1 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
guessed_params=guessed_params,
fit_params=fit_params,
known_params=known_params, rng=rng)
assert not np.allclose(res1.fit_result.params.c, 13.4)
assert_equal(res1.fit_result.params.scale, 13.73)
assert_equal(res1.fit_result.params.loc, -13.85)
# Show that changing the guess changes the parameter that gets fit,
# and it changes the null distribution
guessed_params = {'c': 2}
rng = np.random.default_rng(9121950977643805391)
res2 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
guessed_params=guessed_params,
fit_params=fit_params,
known_params=known_params, rng=rng)
assert not np.allclose(res2.fit_result.params.c,
res1.fit_result.params.c, rtol=1e-8)
assert not np.allclose(res2.null_distribution,
res1.null_distribution, rtol=1e-8)
assert_equal(res2.fit_result.params.scale, 13.73)
assert_equal(res2.fit_result.params.loc, -13.85)
# If we set all parameters as fit_params and known_params,
# they're all fixed to those values, but the null distribution
# varies.
fit_params = {'c': 13.4, 'scale': 13.73}
rng = np.random.default_rng(9121950977643805391)
res3 = goodness_of_fit(stats.weibull_min, x, n_mc_samples=2,
guessed_params=guessed_params,
fit_params=fit_params,
known_params=known_params, rng=rng)
assert_equal(res3.fit_result.params.c, 13.4)
assert_equal(res3.fit_result.params.scale, 13.73)
assert_equal(res3.fit_result.params.loc, -13.85)
assert not np.allclose(res3.null_distribution, res1.null_distribution)
def test_custom_statistic(self):
# Test support for custom statistic function.
# References:
# [1] Pyke, R. (1965). "Spacings". Journal of the Royal Statistical
# Society: Series B (Methodological), 27(3): 395-436.
# [2] Burrows, P. M. (1979). "Selected Percentage Points of
# Greenwood's Statistics". Journal of the Royal Statistical
# Society. Series A (General), 142(2): 256-258.
# Use the Greenwood statistic for illustration; see [1, p.402].
def greenwood(dist, data, *, axis):
x = np.sort(data, axis=axis)
y = dist.cdf(x)
d = np.diff(y, axis=axis, prepend=0, append=1)
return np.sum(d ** 2, axis=axis)
# Run the Monte Carlo test with sample size = 5 on a fully specified
# null distribution, and compare the simulated quantiles to the exact
# ones given in [2, Table 1, column (n = 5)].
rng = np.random.default_rng(9121950977643805391)
data = stats.expon.rvs(size=5, random_state=rng)
result = goodness_of_fit(stats.expon, data,
known_params={'loc': 0, 'scale': 1},
statistic=greenwood, rng=rng)
p = [.01, .05, .1, .2, .3, .4, .5, .6, .7, .8, .9, .95, .99]
exact_quantiles = [
.183863, .199403, .210088, .226040, .239947, .253677, .268422,
.285293, .306002, .334447, .382972, .432049, .547468]
simulated_quantiles = np.quantile(result.null_distribution, p)
assert_allclose(simulated_quantiles, exact_quantiles, atol=0.005)
| TestGoodnessOfFit |
python | google__flatbuffers | tests/MyGame/Example/AnyUniqueAliases.py | {
"start": 92,
"end": 865
} | class ____(object):
NONE = 0
M = 1
TS = 2
M2 = 3
def AnyUniqueAliasesCreator(unionType, table):
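# Dispatch on the union discriminant: return the generated object-API instance
# backed by `table` for the selected member, or None for NONE/unknown values.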
from flatbuffers.table import Table
if not isinstance(table, Table):
return None
if unionType == AnyUniqueAliases.M:
import MyGame.Example.Monster
return MyGame.Example.Monster.MonsterT.InitFromBuf(table.Bytes, table.Pos)
if unionType == AnyUniqueAliases.TS:
import MyGame.Example.TestSimpleTableWithEnum
return MyGame.Example.TestSimpleTableWithEnum.TestSimpleTableWithEnumT.InitFromBuf(table.Bytes, table.Pos)
if unionType == AnyUniqueAliases.M2:
import MyGame.Example2.Monster
return MyGame.Example2.Monster.MonsterT.InitFromBuf(table.Bytes, table.Pos)
return None
| AnyUniqueAliases |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 31353,
"end": 32312
} | class ____(OperandListAssetSelection):
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
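# Union (logical OR) of the asset keys resolved by each operand selection.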
return reduce(
operator.or_,
(
selection.resolve_inner(asset_graph, allow_missing=allow_missing)
for selection in self.operands
),
)
def resolve_checks_inner( # pyright: ignore[reportIncompatibleMethodOverride]
self, asset_graph: AssetGraph, allow_missing: bool
) -> AbstractSet[AssetCheckKey]:
return reduce(
operator.or_,
(
selection.resolve_checks_inner(asset_graph, allow_missing=allow_missing)
for selection in self.operands
),
)
def to_selection_str(self) -> str:
return " or ".join(f"{operand.operand_to_selection_str()}" for operand in self.operands)
@whitelist_for_serdes
@record
| OrAssetSelection |
python | pennersr__django-allauth | allauth/idp/oidc/views.py | {
"start": 15533,
"end": 16004
} | class ____(View):
def get(self, request, *args, **kwargs):
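# Publish the JWKS document: load a JWK from each configured PEM key and
# expose the key set with a permissive CORS header for browser clients.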
keys = []
for pem in [app_settings.PRIVATE_KEY]:
jwk, _ = jwkkit.load_jwk_from_pem(pem)
keys.append(jwk)
response = JsonResponse({"keys": keys})
response["Access-Control-Allow-Origin"] = "*"
return response
jwks = JwksView.as_view()
@method_decorator(csrf_exempt, name="dispatch")
@method_decorator(login_not_required, name="dispatch")
| JwksView |
python | apache__airflow | providers/alibaba/tests/unit/alibaba/cloud/operators/test_oss.py | {
"start": 3764,
"end": 4383
} | class ____:
@mock.patch("airflow.providers.alibaba.cloud.operators.oss.OSSHook")
def test_execute(self, mock_hook):
operator = OSSDeleteBatchObjectOperator(
task_id=MOCK_TASK_ID,
region=MOCK_REGION,
bucket_name=MOCK_BUCKET,
oss_conn_id=MOCK_OSS_CONN_ID,
keys=MOCK_KEYS,
)
operator.execute(None)
mock_hook.assert_called_once_with(oss_conn_id=MOCK_OSS_CONN_ID, region=MOCK_REGION)
mock_hook.return_value.delete_objects.assert_called_once_with(bucket_name=MOCK_BUCKET, key=MOCK_KEYS)
| TestOSSDeleteBatchObjectOperator |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 19769,
"end": 20000
} | class ____(models.Model):
extra = models.IntegerField()
forwards = models.ForeignKey('ThroughTargetModel', on_delete=models.CASCADE)
backwards = models.ForeignKey('RelationalModel', on_delete=models.CASCADE)
| Supplementary |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_arraysetops.py | {
"start": 1031,
"end": 23731
} | class ____(TestCase):
def test_intersect1d(self):
# unique inputs
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
ec = np.array([1, 2, 5])
c = intersect1d(a, b, assume_unique=True)
assert_array_equal(c, ec)
# non-unique inputs
a = np.array([5, 5, 7, 1, 2])
b = np.array([2, 1, 4, 3, 3, 1, 5])
ed = np.array([1, 2, 5])
c = intersect1d(a, b)
assert_array_equal(c, ed)
assert_array_equal([], intersect1d([], []))
def test_intersect1d_array_like(self):
# See gh-11772
class Test:
def __array__(self):
return np.arange(3)
a = Test()
res = intersect1d(a, a)
assert_array_equal(res, a)
res = intersect1d([1, 2, 3], [1, 2, 3])
assert_array_equal(res, [1, 2, 3])
def test_intersect1d_indices(self):
# unique inputs
a = np.array([1, 2, 3, 4])
b = np.array([2, 1, 4, 6])
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
ee = np.array([1, 2, 4])
assert_array_equal(c, ee)
assert_array_equal(a[i1], ee)
assert_array_equal(b[i2], ee)
# non-unique inputs
a = np.array([1, 2, 2, 3, 4, 3, 2])
b = np.array([1, 8, 4, 2, 2, 3, 2, 3])
c, i1, i2 = intersect1d(a, b, return_indices=True)
ef = np.array([1, 2, 3, 4])
assert_array_equal(c, ef)
assert_array_equal(a[i1], ef)
assert_array_equal(b[i2], ef)
# non1d, unique inputs
a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])
b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])
c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)
ui1 = np.unravel_index(i1, a.shape)
ui2 = np.unravel_index(i2, b.shape)
ea = np.array([2, 6, 7, 8])
assert_array_equal(ea, a[ui1])
assert_array_equal(ea, b[ui2])
# non1d, not assumed to be unique inputs
a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])
b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])
c, i1, i2 = intersect1d(a, b, return_indices=True)
ui1 = np.unravel_index(i1, a.shape)
ui2 = np.unravel_index(i2, b.shape)
ea = np.array([2, 7, 8])
assert_array_equal(ea, a[ui1])
assert_array_equal(ea, b[ui2])
def test_setxor1d(self):
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
ec = np.array([3, 4, 7])
c = setxor1d(a, b)
assert_array_equal(c, ec)
a = np.array([1, 2, 3])
b = np.array([6, 5, 4])
ec = np.array([1, 2, 3, 4, 5, 6])
c = setxor1d(a, b)
assert_array_equal(c, ec)
a = np.array([1, 8, 2, 3])
b = np.array([6, 5, 4, 8])
ec = np.array([1, 2, 3, 4, 5, 6])
c = setxor1d(a, b)
assert_array_equal(c, ec)
assert_array_equal([], setxor1d([], []))
def test_ediff1d(self):
zero_elem = np.array([])
one_elem = np.array([1])
two_elem = np.array([1, 2])
assert_array_equal([], ediff1d(zero_elem))
assert_array_equal([0], ediff1d(zero_elem, to_begin=0))
assert_array_equal([0], ediff1d(zero_elem, to_end=0))
assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))
assert_array_equal([], ediff1d(one_elem))
assert_array_equal([1], ediff1d(two_elem))
assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))
assert_array_equal(
[5, 6, 1, 7, 8], ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8])
)
assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))
assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 8]))
assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
@parametrize(
"ary, prepend, append, expected",
[
# should fail because trying to cast
# np.nan standard floating point value
# into an integer array:
(np.array([1, 2, 3], dtype=np.int64), None, np.nan, "to_end"),
# should fail because attempting
# to downcast to int type:
subtest(
(
np.array([1, 2, 3], dtype=np.int64),
np.array([5, 7, 2], dtype=np.float32),
None,
"to_begin",
),
),
# should fail because attempting to cast
# two special floating point values
# to integers (on both sides of ary),
# `to_begin` is in the error message as the impl checks this first:
(np.array([1.0, 3.0, 9.0], dtype=np.int8), np.nan, np.nan, "to_begin"),
],
)
def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected):
# verify resolution of gh-11490
# specifically, raise an appropriate
# Exception when attempting to append or
# prepend with an incompatible type
msg = f"dtype of `{expected}` must be compatible"
with assert_raises_regex(TypeError, msg):
ediff1d(ary=ary, to_end=append, to_begin=prepend)
@parametrize(
"ary,prepend,append,expected",
[
(
np.array([1, 2, 3], dtype=np.int16),
2**16, # will be cast to int16 under same kind rule.
2**16 + 4,
np.array([0, 1, 1, 4], dtype=np.int16),
),
(
np.array([1, 2, 3], dtype=np.float32),
np.array([5], dtype=np.float64),
None,
np.array([5, 1, 1], dtype=np.float32),
),
(
np.array([1, 2, 3], dtype=np.int32),
0,
0,
np.array([0, 1, 1, 0], dtype=np.int32),
),
(
np.array([1, 2, 3], dtype=np.int64),
3,
-9,
np.array([3, 1, 1, -9], dtype=np.int64),
),
],
)
def test_ediff1d_scalar_handling(self, ary, prepend, append, expected):
# maintain backwards-compatibility
# of scalar prepend / append behavior
# in ediff1d following fix for gh-11490
actual = np.ediff1d(ary=ary, to_end=append, to_begin=prepend)
assert_equal(actual, expected)
assert actual.dtype == expected.dtype
@skipIf(True, reason="NP_VER: fails with NumPy 1.22.x")
@parametrize("kind", [None, "sort", "table"])
def test_isin(self, kind):
# the tests for in1d cover most of isin's behavior
# if in1d is removed, would need to change those tests to test
# isin instead.
def _isin_slow(a, b):
b = np.asarray(b).flatten().tolist()
return a in b
isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})
def assert_isin_equal(a, b):
x = np.isin(a, b, kind=kind)
y = isin_slow(a, b)
assert_array_equal(x, y)
# multidimensional arrays in both arguments
a = np.arange(24).reshape([2, 3, 4])
b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
assert_isin_equal(a, b)
# array-likes as both arguments
c = [(9, 8), (7, 6)]
d = (9, 7)
assert_isin_equal(c, d)
# zero-d array:
f = np.array(3)
assert_isin_equal(f, b)
assert_isin_equal(a, f)
assert_isin_equal(f, f)
# scalar:
assert_isin_equal(5, b)
assert_isin_equal(a, 6)
assert_isin_equal(5, 6)
# empty array-like:
if kind != "table":
# An empty list will become float64,
# which is invalid for kind="table"
x = []
assert_isin_equal(x, b)
assert_isin_equal(a, x)
assert_isin_equal(x, x)
# empty array with various types:
for dtype in [bool, np.int64, np.float64]:
if kind == "table" and dtype == np.float64:
continue
if dtype in {np.int64, np.float64}:
ar = np.array([10, 20, 30], dtype=dtype)
elif dtype in {bool}:
ar = np.array([True, False, False])
empty_array = np.array([], dtype=dtype)
assert_isin_equal(empty_array, ar)
assert_isin_equal(ar, empty_array)
assert_isin_equal(empty_array, empty_array)
@parametrize("kind", [None, "sort", "table"])
def test_in1d(self, kind):
# we use two different sizes for the b array here to test the
# two different paths in in1d().
for mult in (1, 10):
# One check without np.array to make sure lists are handled correct
a = [5, 7, 1, 2]
b = [2, 4, 3, 1, 5] * mult
ec = np.array([True, False, True, True])
c = in1d(a, b, assume_unique=True, kind=kind)
assert_array_equal(c, ec)
a[0] = 8
ec = np.array([False, False, True, True])
c = in1d(a, b, assume_unique=True, kind=kind)
assert_array_equal(c, ec)
a[0], a[3] = 4, 8
ec = np.array([True, False, True, False])
c = in1d(a, b, assume_unique=True, kind=kind)
assert_array_equal(c, ec)
a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
b = [2, 3, 4] * mult
ec = [
False,
True,
False,
True,
True,
True,
True,
True,
True,
False,
True,
False,
False,
False,
]
c = in1d(a, b, kind=kind)
assert_array_equal(c, ec)
b = b + [5, 5, 4] * mult
ec = [
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
True,
False,
True,
True,
]
c = in1d(a, b, kind=kind)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5] * mult)
ec = np.array([True, False, True, True])
c = in1d(a, b, kind=kind)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 1, 2])
b = np.array([2, 4, 3, 3, 1, 5] * mult)
ec = np.array([True, False, True, True, True])
c = in1d(a, b, kind=kind)
assert_array_equal(c, ec)
a = np.array([5, 5])
b = np.array([2, 2] * mult)
ec = np.array([False, False])
c = in1d(a, b, kind=kind)
assert_array_equal(c, ec)
a = np.array([5])
b = np.array([2])
ec = np.array([False])
c = in1d(a, b, kind=kind)
assert_array_equal(c, ec)
if kind in {None, "sort"}:
assert_array_equal(in1d([], [], kind=kind), [])
def test_in1d_char_array(self):
a = np.array(["a", "b", "c", "d", "e", "c", "e", "b"])
b = np.array(["a", "c"])
ec = np.array([True, False, True, False, False, True, False, False])
c = in1d(a, b)
assert_array_equal(c, ec)
@parametrize("kind", [None, "sort", "table"])
def test_in1d_invert(self, kind):
"Test in1d's invert parameter"
# We use two different sizes for the b array here to test the
# two different paths in in1d().
for mult in (1, 10):
a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
b = [2, 3, 4] * mult
assert_array_equal(
np.invert(in1d(a, b, kind=kind)), in1d(a, b, invert=True, kind=kind)
)
# float:
if kind in {None, "sort"}:
for mult in (1, 10):
a = np.array(
[5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5], dtype=np.float32
)
b = [2, 3, 4] * mult
b = np.array(b, dtype=np.float32)
assert_array_equal(
np.invert(in1d(a, b, kind=kind)), in1d(a, b, invert=True, kind=kind)
)
@parametrize("kind", [None, "sort", "table"])
def test_in1d_ravel(self, kind):
# Test that in1d ravels its input arrays. This is not documented
# behavior, however; the test is to ensure consistency.
a = np.arange(6).reshape(2, 3)
b = np.arange(3, 9).reshape(3, 2)
long_b = np.arange(3, 63).reshape(30, 2)
ec = np.array([False, False, False, True, True, True])
assert_array_equal(in1d(a, b, assume_unique=True, kind=kind), ec)
assert_array_equal(in1d(a, b, assume_unique=False, kind=kind), ec)
assert_array_equal(in1d(a, long_b, assume_unique=True, kind=kind), ec)
assert_array_equal(in1d(a, long_b, assume_unique=False, kind=kind), ec)
def test_in1d_hit_alternate_algorithm(self):
"""Hit the standard isin code with integers"""
# Need extreme range to hit standard code
# This hits it without the use of kind='table'
a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)
b = np.array([2, 3, 4, 1e9], dtype=np.int64)
expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool)
assert_array_equal(expected, in1d(a, b))
assert_array_equal(np.invert(expected), in1d(a, b, invert=True))
a = np.array([5, 7, 1, 2], dtype=np.int64)
b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64)
ec = np.array([True, False, True, True])
c = in1d(a, b, assume_unique=True)
assert_array_equal(c, ec)
@parametrize("kind", [None, "sort", "table"])
def test_in1d_boolean(self, kind):
"""Test that in1d works for boolean input"""
a = np.array([True, False])
b = np.array([False, False, False])
expected = np.array([False, True])
assert_array_equal(expected, in1d(a, b, kind=kind))
assert_array_equal(np.invert(expected), in1d(a, b, invert=True, kind=kind))
@parametrize("kind", [None, "sort"])
def test_in1d_timedelta(self, kind):
"""Test that in1d works for timedelta input"""
rstate = np.random.RandomState(0)
a = rstate.randint(0, 100, size=10)
b = rstate.randint(0, 100, size=10)
truth = in1d(a, b)
a_timedelta = a.astype("timedelta64[s]")
b_timedelta = b.astype("timedelta64[s]")
assert_array_equal(truth, in1d(a_timedelta, b_timedelta, kind=kind))
def test_in1d_table_timedelta_fails(self):
a = np.array([0, 1, 2], dtype="timedelta64[s]")
b = a
# Make sure it raises a value error:
with assert_raises(ValueError):
in1d(a, b, kind="table")
@parametrize(
"dtype1,dtype2",
[
(np.int8, np.int16),
(np.int16, np.int8),
],
)
@parametrize("kind", [None, "sort", "table"])
def test_in1d_mixed_dtype(self, dtype1, dtype2, kind):
"""Test that in1d works as expected for mixed dtype input."""
is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)
ar1 = np.array([0, 0, 1, 1], dtype=dtype1)
if is_dtype2_signed:
ar2 = np.array([-128, 0, 127], dtype=dtype2)
else:
ar2 = np.array([127, 0, 255], dtype=dtype2)
expected = np.array([True, True, False, False])
expect_failure = kind == "table" and any(
(
dtype1 == np.int8 and dtype2 == np.int16,
dtype1 == np.int16 and dtype2 == np.int8,
)
)
if expect_failure:
with assert_raises(RuntimeError, match="exceed the maximum"):
in1d(ar1, ar2, kind=kind)
else:
assert_array_equal(in1d(ar1, ar2, kind=kind), expected)
@parametrize("kind", [None, "sort", "table"])
def test_in1d_mixed_boolean(self, kind):
"""Test that in1d works as expected for bool/int input."""
for dtype in np.typecodes["AllInteger"]:
a = np.array([True, False, False], dtype=bool)
b = np.array([0, 0, 0, 0], dtype=dtype)
expected = np.array([False, True, True], dtype=bool)
assert_array_equal(in1d(a, b, kind=kind), expected)
a, b = b, a
expected = np.array([True, True, True, True], dtype=bool)
assert_array_equal(in1d(a, b, kind=kind), expected)
def test_in1d_first_array_is_object(self):
ar1 = [None]
ar2 = np.array([1] * 10)
expected = np.array([False])
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
def test_in1d_second_array_is_object(self):
ar1 = 1
ar2 = np.array([None] * 10)
expected = np.array([False])
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
def test_in1d_both_arrays_are_object(self):
ar1 = [None]
ar2 = np.array([None] * 10)
expected = np.array([True])
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
@xfail
def test_in1d_both_arrays_have_structured_dtype(self):
# Test arrays of a structured data type containing an integer field
# and a field of dtype `object` allowing for arbitrary Python objects
dt = np.dtype([("field1", int), ("field2", object)])
ar1 = np.array([(1, None)], dtype=dt)
ar2 = np.array([(1, None)] * 10, dtype=dt)
expected = np.array([True])
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
def test_in1d_with_arrays_containing_tuples(self):
ar1 = np.array([(1,), 2], dtype=object)
ar2 = np.array([(1,), 2], dtype=object)
expected = np.array([True, True])
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
result = np.in1d(ar1, ar2, invert=True)
assert_array_equal(result, np.invert(expected))
# An integer is added at the end of the array to make sure
# that the array builder will create the array with tuples
# and after it's created the integer is removed.
# There's a bug in the array constructor that doesn't handle
# tuples properly and adding the integer fixes that.
ar1 = np.array([(1,), (2, 1), 1], dtype=object)
ar1 = ar1[:-1]
ar2 = np.array([(1,), (2, 1), 1], dtype=object)
ar2 = ar2[:-1]
expected = np.array([True, True])
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
result = np.in1d(ar1, ar2, invert=True)
assert_array_equal(result, np.invert(expected))
ar1 = np.array([(1,), (2, 3), 1], dtype=object)
ar1 = ar1[:-1]
ar2 = np.array([(1,), 2], dtype=object)
expected = np.array([True, False])
result = np.in1d(ar1, ar2)
assert_array_equal(result, expected)
result = np.in1d(ar1, ar2, invert=True)
assert_array_equal(result, np.invert(expected))
def test_in1d_errors(self):
"""Test that in1d raises expected errors."""
# Error 1: `kind` is not one of 'sort' 'table' or None.
ar1 = np.array([1, 2, 3, 4, 5])
ar2 = np.array([2, 4, 6, 8, 10])
assert_raises(ValueError, in1d, ar1, ar2, kind="quicksort")
# Error 2: `kind="table"` does not work for non-integral arrays.
obj_ar1 = np.array([1, "a", 3, "b", 5], dtype=object)
obj_ar2 = np.array([1, "a", 3, "b", 5], dtype=object)
assert_raises(ValueError, in1d, obj_ar1, obj_ar2, kind="table")
for dtype in [np.int32, np.int64]:
ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)
# The range of this array will overflow:
overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)
# Error 3: `kind="table"` will trigger a runtime error
# if there is an integer overflow expected when computing the
# range of ar2
assert_raises(RuntimeError, in1d, ar1, overflow_ar2, kind="table")
# Non-error: `kind=None` will *not* trigger a runtime error
# if there is an integer overflow, it will switch to
# the `sort` algorithm.
result = np.in1d(ar1, overflow_ar2, kind=None)
assert_array_equal(result, [True] + [False] * 4)
result = np.in1d(ar1, overflow_ar2, kind="sort")
assert_array_equal(result, [True] + [False] * 4)
def test_union1d(self):
a = np.array([5, 4, 7, 1, 2])
b = np.array([2, 4, 3, 3, 2, 1, 5])
ec = np.array([1, 2, 3, 4, 5, 7])
c = union1d(a, b)
assert_array_equal(c, ec)
# Tests gh-10340, arguments to union1d should be
# flattened if they are not already 1D
x = np.array([[0, 1, 2], [3, 4, 5]])
y = np.array([0, 1, 2, 3, 4])
ez = np.array([0, 1, 2, 3, 4, 5])
z = union1d(x, y)
assert_array_equal(z, ez)
assert_array_equal([], union1d([], []))
def test_setdiff1d(self):
a = np.array([6, 5, 4, 7, 1, 2, 7, 4])
b = np.array([2, 4, 3, 3, 2, 1, 5])
ec = np.array([6, 7])
c = setdiff1d(a, b)
assert_array_equal(c, ec)
a = np.arange(21)
b = np.arange(19)
ec = np.array([19, 20])
c = setdiff1d(a, b)
assert_array_equal(c, ec)
assert_array_equal([], setdiff1d([], []))
a = np.array((), np.uint32)
assert_equal(setdiff1d(a, []).dtype, np.uint32)
def test_setdiff1d_unique(self):
a = np.array([3, 2, 1])
b = np.array([7, 5, 2])
expected = np.array([3, 1])
actual = setdiff1d(a, b, assume_unique=True)
assert_equal(actual, expected)
def test_setdiff1d_char_array(self):
a = np.array(["a", "b", "c"])
b = np.array(["a", "b", "s"])
assert_array_equal(setdiff1d(a, b), np.array(["c"]))
def test_manyways(self):
a = np.array([5, 7, 1, 2, 8])
b = np.array([9, 8, 2, 4, 3, 1, 5])
c1 = setxor1d(a, b)
aux1 = intersect1d(a, b)
aux2 = union1d(a, b)
c2 = setdiff1d(aux2, aux1)
assert_array_equal(c1, c2)
@instantiate_parametrized_tests
| TestSetOps |
python | numba__numba | numba/core/types/misc.py | {
"start": 5107,
"end": 5372
} | class ____(Type):
# XXX unused?
mutable = True
def __init__(self, clsobj):
self.cls = clsobj
name = "Object(%s)" % clsobj.__name__
super(Object, self).__init__(name)
@property
def key(self):
return self.cls
| Object |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_gtin_base_unit.py | {
"start": 1013,
"end": 2005
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.gtin_base_unit"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_gtin_base_unit(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeGtinBaseUnit |
python | walkccc__LeetCode | solutions/3073. Maximum Increasing Triplet Value/3073.py | {
"start": 41,
"end": 586
} | class ____:
def maximumTripletValue(self, nums: list[int]) -> int:
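# For each middle index j, pair the largest earlier value strictly below
# nums[j] with the maximum later value strictly above it, maximizing
# nums[i] - nums[j] + nums[k] over increasing triplets i < j < k.
# Assumes `bisect` and sortedcontainers' SortedSet are importable, as is
# conventional for these solutions.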
ans = 0
rightMax = [0] * len(nums) # rightMax[i] := max(nums[i + 1..n))
leftSortedSet = SortedSet([nums[0]])
for i in range(len(nums) - 2, -1, -1):
rightMax[i] = max(nums[i + 1], rightMax[i + 1])
for j in range(1, len(nums) - 1):
i = bisect.bisect_left(leftSortedSet, nums[j])
if i > 0 and rightMax[j] > nums[j]:
ans = max(ans, leftSortedSet[i - 1] - nums[j] + rightMax[j])
leftSortedSet.add(nums[j])
return ans
| Solution |
python | redis__redis-py | tests/test_asyncio/test_multidb/test_healthcheck.py | {
"start": 8231,
"end": 9790
} | class ____:
@pytest.mark.asyncio
async def test_database_is_healthy_on_echo_response(self, mock_client, mock_cb):
"""
Mix error and actual responses to ensure that health checks retry
according to the given configuration.
"""
mock_client.execute_command = AsyncMock(side_effect=["PONG"])
hc = PingHealthCheck()
db = Database(mock_client, mock_cb, 0.9)
assert await hc.check_health(db)
assert mock_client.execute_command.call_count == 1
@pytest.mark.asyncio
async def test_database_is_unhealthy_on_incorrect_echo_response(
self, mock_client, mock_cb
):
"""
Mix error and actual responses to ensure that health checks retry
according to the given configuration.
"""
mock_client.execute_command = AsyncMock(side_effect=[False])
hc = PingHealthCheck()
db = Database(mock_client, mock_cb, 0.9)
assert not await hc.check_health(db)
assert mock_client.execute_command.call_count == 1
@pytest.mark.asyncio
async def test_database_close_circuit_on_successful_healthcheck(
self, mock_client, mock_cb
):
mock_client.execute_command = AsyncMock(side_effect=["PONG"])
mock_cb.state = CBState.HALF_OPEN
hc = PingHealthCheck()
db = Database(mock_client, mock_cb, 0.9)
assert await hc.check_health(db)
assert mock_client.execute_command.call_count == 1
@pytest.mark.onlynoncluster
| TestPingHealthCheck |
python | walkccc__LeetCode | solutions/2539. Count the Number of Good Subsequences/2539.py | {
"start": 0,
"end": 761
} | class ____:
def countGoodSubsequences(self, s: str) -> int:
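# A subsequence is good when every character in it appears the same number
# of times. For each shared frequency `freq`, a character with
# charFreq >= freq contributes (1 + C(charFreq, freq)) choices (pick freq
# copies or skip it); subtracting 1 removes the empty selection. Modular
# inverses use Fermat's little theorem since MOD is prime.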
MOD = 1_000_000_007
ans = 0
count = collections.Counter(s)
@functools.lru_cache(None)
def fact(i: int) -> int:
return 1 if i <= 1 else i * fact(i - 1) % MOD
@functools.lru_cache(None)
def inv(i: int) -> int:
return pow(i, MOD - 2, MOD)
@functools.lru_cache(None)
def nCk(n: int, k: int) -> int:
return fact(n) * inv(fact(k)) * inv(fact(n - k)) % MOD
for freq in range(1, max(count.values()) + 1):
numSubseqs = 1 # ""
for charFreq in count.values():
if charFreq >= freq:
numSubseqs = numSubseqs * (1 + nCk(charFreq, freq)) % MOD
ans += numSubseqs - 1 # Minus "".
ans %= MOD
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/vivit/modeling_vivit.py | {
"start": 11978,
"end": 12502
} | class ____(nn.Module):
def __init__(self, config: VivitConfig):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
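# Project the intermediate activations back to the hidden size, apply
# dropout, and add the residual connection carried in `input_tensor`.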
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
| VivitOutput |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias13.py | {
"start": 891,
"end": 1139
} | class ____: ...
BT = TypeVar("BT", bound=B)
Something = CoMaybeMethod[A, [BT, C], Any]
reveal_type(
Something,
expected_text="type[(A, BT@Something, C) -> Coroutine[Any, Any, Any]] | type[(BT@Something, C) -> Coroutine[Any, Any, Any]]",
)
| C |
python | plotly__plotly.py | plotly/graph_objs/ohlc/_line.py | {
"start": 233,
"end": 4113
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "ohlc"
_path_str = "ohlc.line"
_valid_props = {"dash", "width"}
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px"). Note that this style setting can also be
set per direction via `increasing.line.dash` and
`decreasing.line.dash`.
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def width(self):
"""
Sets the line width (in px). Note that this style setting can also be set
per direction via `increasing.line.width` and
`decreasing.line.width`.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px"). Note that this style setting can
also be set per direction via `increasing.line.dash`
and `decreasing.line.dash`.
width
Sets the line width (in px). Note that this style setting can also
be set per direction via `increasing.line.width` and
`decreasing.line.width`.
"""
def __init__(self, arg=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.ohlc.Line`
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px"). Note that this style setting can
also be set per direction via `increasing.line.dash`
and `decreasing.line.dash`.
width
Sets the line width (in px). Note that this style setting can also
be set per direction via `increasing.line.width` and
`decreasing.line.width`.
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.ohlc.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.ohlc.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dash", arg, dash)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | Netflix__metaflow | metaflow/plugins/argo/argo_client.py | {
"start": 288,
"end": 373
} | class ____(MetaflowException):
headline = "Resource not found"
| ArgoResourceNotFound |
python | pypa__pipenv | pipenv/cli/options.py | {
"start": 550,
"end": 1574
} | class ____(DYMMixin, Group):
"""Custom Group class provides formatted main help"""
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
console.print(format_help(ctx.get_help()))
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help="Show this message and exit.",
)
def main(self, *args, **kwargs):
"""
Overridden to pass windows_expand_args=False and avoid exceptions on Windows;
see: https://github.com/pallets/click/issues/1901
"""
return super().main(*args, **kwargs, windows_expand_args=False)
| PipenvGroup |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 36677,
"end": 52725
} | class ____(
SimpleElement,
_IDProperty,
_NameProperty,
_XtypeProperty,
_UtypeProperty,
_UcdProperty,
):
"""
FIELD_ element: describes the datatype of a particular column of data.
The keyword arguments correspond to setting members of the same
name, documented below.
If *ID* is provided, it is used for the column name in the
resulting recarray of the table. If no *ID* is provided, *name*
is used instead. If neither is provided, an exception will be
raised.
"""
_attr_list_11 = [
"ID",
"name",
"datatype",
"arraysize",
"ucd",
"unit",
"width",
"precision",
"utype",
"ref",
]
_attr_list_12 = _attr_list_11 + ["xtype"]
_element_name = "FIELD"
def __init__(
self,
votable,
ID=None,
name=None,
datatype=None,
arraysize=None,
ucd=None,
unit=None,
width=None,
precision=None,
utype=None,
ref=None,
type=None,
id=None,
xtype=None,
config=None,
pos=None,
**extra,
):
self._config = _attach_default_config(votable, config)
self._pos = pos
SimpleElement.__init__(self)
if self._config.get("version_1_2_or_later"):
self._attr_list = self._attr_list_12
else:
self._attr_list = self._attr_list_11
if xtype is not None:
warn_unknown_attrs(self._element_name, ["xtype"], config, pos)
# TODO: REMOVE ME ----------------------------------------
# This is a terrible hack to support Simple Image Access
# Protocol results from https://astroarchive.noirlab.edu/ . It creates a field
# for the coordinate projection type of type "double", which
# actually contains character data. We have to hack the field
# to store character data, or we can't read it in. A warning
# will be raised when this happens.
if (
self._config.get("verify", "ignore") != "exception"
and name == "cprojection"
and ID == "cprojection"
and ucd == "VOX:WCS_CoordProjection"
and datatype == "double"
):
datatype = "char"
arraysize = "3"
vo_warn(W40, (), config, pos)
# ----------------------------------------
self.description = None
self._votable = votable
self.ID = resolve_id(ID, id, config, pos) or xmlutil.fix_id(name, config, pos)
self.name = name
if name is None:
if self._element_name == "PARAM" and not self._config.get(
"version_1_1_or_later"
):
pass
else:
warn_or_raise(W15, W15, self._element_name, config, pos)
self.name = self.ID
if self._ID is None and name is None:
vo_raise(W12, self._element_name, config, pos)
datatype_mapping = {
"string": "char",
"unicodeString": "unicodeChar",
"int16": "short",
"int32": "int",
"int64": "long",
"float32": "float",
"float64": "double",
# The following appear in some Vizier tables
"unsignedInt": "long",
"unsignedShort": "int",
}
datatype_mapping.update(self._config.get("datatype_mapping", {}))
if datatype in datatype_mapping:
warn_or_raise(W13, W13, (datatype, datatype_mapping[datatype]), config, pos)
datatype = datatype_mapping[datatype]
self.ref = ref
self.datatype = datatype
self.arraysize = arraysize
self.ucd = ucd
self.unit = unit
self.width = width
self.precision = precision
self.utype = utype
self.type = type
self._links = HomogeneousList(Link)
self.title = self.name
self.values = Values(self._votable, self)
self.xtype = xtype
self._setup(config, pos)
warn_unknown_attrs(self._element_name, extra.keys(), config, pos)
@classmethod
def uniqify_names(cls, fields):
"""
Make sure that all names and titles in a list of fields are
unique, by appending numbers if necessary.
"""
unique = {}
for field in fields:
i = 2
new_id = field.ID
while new_id in unique:
new_id = field.ID + f"_{i:d}"
i += 1
if new_id != field.ID:
vo_warn(W32, (field.ID, new_id), field._config, field._pos)
field.ID = new_id
unique[new_id] = field.ID
for field in fields:
i = 2
if field.name is None:
new_name = field.ID
implicit = True
else:
new_name = field.name
implicit = False
if new_name != field.ID:
while new_name in unique:
new_name = field.name + f" {i:d}"
i += 1
if not implicit and new_name != field.name:
vo_warn(W33, (field.name, new_name), field._config, field._pos)
field._unique_name = new_name
unique[new_name] = field.name
def _setup(self, config, pos):
if self.values._ref is not None:
self.values.ref = self.values._ref
self.converter = converters.get_converter(self, config, pos)
@property
def datatype(self):
"""The datatype of the column [*required*].
Valid values (as defined by the spec) are:
'boolean', 'bit', 'unsignedByte', 'short', 'int', 'long',
'char', 'unicodeChar', 'float', 'double', 'floatComplex', or
'doubleComplex'
Many VOTABLE files in the wild use 'string' instead of 'char',
so that is also a valid option, though 'string' will always be
converted to 'char' when writing the file back out.
"""
return self._datatype
@datatype.setter
def datatype(self, datatype):
if datatype is None:
if self._config.get("version_1_1_or_later"):
warn_or_raise(E10, E10, self._element_name, self._config, self._pos)
datatype = "char"
if datatype not in converters.converter_mapping:
vo_raise(E06, (datatype, self.ID), self._config, self._pos)
self._datatype = datatype
@property
def precision(self):
"""
Along with :attr:`width`, defines the `numerical accuracy`_
associated with the data. These values are used to limit the
precision when writing floating point values back to the XML
file. Otherwise, it is purely informational -- the Numpy
recarray containing the data itself does not use this
information.
"""
return self._precision
@precision.setter
def precision(self, precision):
if precision is not None and not re.match(r"^[FE]?[0-9]+$", precision):
vo_raise(E11, precision, self._config, self._pos)
self._precision = precision
@precision.deleter
def precision(self):
self._precision = None
@property
def width(self):
"""
Along with :attr:`precision`, defines the `numerical
accuracy`_ associated with the data. These values are used to
limit the precision when writing floating point values back to
the XML file. Otherwise, it is purely informational -- the
Numpy recarray containing the data itself does not use this
information.
"""
return self._width
@width.setter
def width(self, width):
if width is not None:
width = int(width)
if width <= 0:
vo_raise(E12, width, self._config, self._pos)
self._width = width
@width.deleter
def width(self):
self._width = None
# ref on FIELD and PARAM behave differently than elsewhere -- here
# they're just informational, such as to refer to a coordinate
# system.
@property
def ref(self):
"""
On FIELD_ elements, ref is used only for informational
purposes, for example to refer to a COOSYS_ or TIMESYS_ element.
"""
return self._ref
@ref.setter
def ref(self, ref):
xmlutil.check_id(ref, "ref", self._config, self._pos)
self._ref = ref
@ref.deleter
def ref(self):
self._ref = None
@property
def unit(self):
"""A string specifying the units_ for the FIELD_."""
return self._unit
@unit.setter
def unit(self, unit):
if unit is None:
self._unit = None
return
from astropy import units as u
# First, parse the unit in the default way, so that we can
# still emit a warning if the unit is not to spec.
default_format = _get_default_unit_format(self._config)
unit_obj = u.Unit(unit, format=default_format, parse_strict="silent")
if isinstance(unit_obj, u.UnrecognizedUnit):
warn_or_raise(W50, W50, (unit,), self._config, self._pos)
format = _get_unit_format(self._config)
if format != default_format:
unit_obj = u.Unit(unit, format=format, parse_strict="silent")
self._unit = unit_obj
@unit.deleter
def unit(self):
self._unit = None
@property
def arraysize(self):
"""
Specifies the size of the multidimensional array if this
FIELD_ contains more than a single value.
See `multidimensional arrays`_.
"""
return self._arraysize
@arraysize.setter
def arraysize(self, arraysize):
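# Valid shape strings look like "30", "2x3x*", or "64*": dimensions are
# separated by "x", and a trailing "*" marks a variable-length last
# dimension (see `multidimensional arrays`_ in the docstring above).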
if arraysize is not None and not re.match(
r"^([0-9]+x)*[0-9]*[*]?(s\W)?$", arraysize
):
vo_raise(E13, arraysize, self._config, self._pos)
self._arraysize = arraysize
@arraysize.deleter
def arraysize(self):
self._arraysize = None
@property
def type(self):
"""
The type attribute on FIELD_ elements is reserved for future
extensions.
"""
return self._type
@type.setter
def type(self, type):
self._type = type
@type.deleter
def type(self):
self._type = None
@property
def values(self):
"""
A :class:`Values` instance (or `None`) defining the domain
of the column.
"""
return self._values
@values.setter
def values(self, values):
assert values is None or isinstance(values, Values)
self._values = values
@values.deleter
def values(self):
self._values = None
@property
def links(self):
"""
A list of :class:`Link` instances used to reference more
details about the meaning of the FIELD_. This is purely
informational and is not used by the `astropy.io.votable`
package.
"""
return self._links
def parse(self, iterator, config):
for start, tag, data, pos in iterator:
if start:
if tag == "VALUES":
self.values.__init__(
self._votable, self, config=config, pos=pos, **data
)
self.values.parse(iterator, config)
elif tag == "LINK":
link = Link(config=config, pos=pos, **data)
self.links.append(link)
link.parse(iterator, config)
elif tag == "DESCRIPTION":
warn_unknown_attrs("DESCRIPTION", data.keys(), config, pos)
elif tag != self._element_name:
self._add_unknown_tag(iterator, tag, data, config, pos)
else:
if tag == "DESCRIPTION":
if self.description is not None:
warn_or_raise(W17, W17, self._element_name, config, pos)
self.description = data or None
elif tag == self._element_name:
break
if self.description is not None:
self.title = " ".join(x.strip() for x in self.description.splitlines())
else:
self.title = self.name
self._setup(config, pos)
return self
def to_xml(self, w, **kwargs):
attrib = w.object_attrs(self, self._attr_list)
if "unit" in attrib:
format = _get_unit_format(self._config)
try:
attrib["unit"] = self.unit.to_string(format)
except ValueError:
# Allow non-standard units with a warning, see
# https://github.com/astropy/astropy/issues/17497#issuecomment-2520472495
attrib["unit"] = self.unit.to_string()
warn_or_raise(W50, W50, (attrib["unit"],), self._config, self._pos)
with w.tag(self._element_name, attrib=attrib):
if self.description is not None:
w.element("DESCRIPTION", self.description, wrap=True)
if not self.values.is_defaults():
self.values.to_xml(w, **kwargs)
for link in self.links:
link.to_xml(w, **kwargs)
def to_table_column(self, column):
"""
Sets the attributes of a given `astropy.table.Column` instance
to match the information in this `Field`.
"""
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = getattr(self, key, None)
if val is not None:
column.meta[key] = val
if not self.values.is_defaults():
self.values.to_table_column(column)
for link in self.links:
link.to_table_column(column)
if self.description is not None:
column.description = self.description
if self.unit is not None:
# TODO: Use units framework when it's available
column.unit = self.unit
if (
isinstance(self.converter, converters.FloatingPoint)
and self.converter.output_format != "{!s:>}"
):
column.format = self.converter.output_format
elif isinstance(self.converter, converters.Char):
column.info.meta["_votable_string_dtype"] = "char"
if self.arraysize is not None and self.arraysize.endswith("*"):
column.info.meta["_votable_arraysize"] = self.arraysize
elif isinstance(self.converter, converters.UnicodeChar):
column.info.meta["_votable_string_dtype"] = "unicodeChar"
if self.arraysize is not None and self.arraysize.endswith("*"):
column.info.meta["_votable_arraysize"] = self.arraysize
@classmethod
def from_table_column(cls, votable, column):
"""
Restores a `Field` instance from a given
`astropy.table.Column` instance.
"""
kwargs = {}
meta = column.info.meta
if meta:
for key in ["ucd", "width", "precision", "utype", "xtype"]:
val = meta.get(key, None)
if val is not None:
kwargs[key] = val
# TODO: Use the unit framework when available
if column.info.unit is not None:
kwargs["unit"] = column.info.unit
kwargs["name"] = column.info.name
result = converters.table_column_to_votable_datatype(column)
kwargs.update(result)
field = cls(votable, **kwargs)
if column.info.description is not None:
field.description = column.info.description
field.values.from_table_column(column)
if meta and "links" in meta:
for link in meta["links"]:
field.links.append(Link.from_table_column(link))
# TODO: Parse format into precision and width
return field
| Field |
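A minimal usage sketch for the class above; the file name is hypothetical, while parse_single_table and the field attributes are astropy.io.votable API:
from astropy.io.votable import parse_single_table

table = parse_single_table("example.votable")  # hypothetical input file
for field in table.fields:
    print(field.ID, field.name, field.datatype, field.arraysize, field.unit)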
python | protocolbuffers__protobuf | python/google/protobuf/descriptor_pool.py | {
"start": 2811,
"end": 48793
} | class ____(object):
"""A collection of protobufs dynamically constructed by descriptor protos."""
if _USE_C_DESCRIPTORS:
def __new__(cls, descriptor_db=None):
# pylint: disable=protected-access
return descriptor._message.DescriptorPool(descriptor_db)
def __init__(
self, descriptor_db=None, use_deprecated_legacy_json_field_conflicts=False
):
"""Initializes a Pool of proto buffs.
The descriptor_db argument to the constructor is provided to allow
specialized file descriptor proto lookup code to be triggered on demand. An
example would be an implementation which will read and compile a file
specified in a call to FindFileByName() and not require the call to Add()
at all. Results from this database will be cached internally here as well.
Args:
descriptor_db: A secondary source of file descriptors.
use_deprecated_legacy_json_field_conflicts: Unused, for compatibility with
C++.
"""
self._internal_db = descriptor_database.DescriptorDatabase()
self._descriptor_db = descriptor_db
self._descriptors = {}
self._enum_descriptors = {}
self._service_descriptors = {}
self._file_descriptors = {}
self._toplevel_extensions = {}
self._top_enum_values = {}
# We store extensions in two two-level mappings: The first key is the
# descriptor of the message being extended, the second key is the extension
# full name or its tag number.
self._extensions_by_name = collections.defaultdict(dict)
self._extensions_by_number = collections.defaultdict(dict)
self._serialized_edition_defaults = (
python_edition_defaults._PROTOBUF_INTERNAL_PYTHON_EDITION_DEFAULTS
)
self._edition_defaults = None
self._feature_cache = dict()
def _CheckConflictRegister(self, desc, desc_name, file_name):
"""Check if the descriptor name conflicts with another of the same name.
Args:
desc: Descriptor of a message, enum, service, extension or enum value.
desc_name (str): the full name of desc.
file_name (str): The file name of descriptor.
"""
for register, descriptor_type in [
(self._descriptors, descriptor.Descriptor),
(self._enum_descriptors, descriptor.EnumDescriptor),
(self._service_descriptors, descriptor.ServiceDescriptor),
(self._toplevel_extensions, descriptor.FieldDescriptor),
(self._top_enum_values, descriptor.EnumValueDescriptor)]:
if desc_name in register:
old_desc = register[desc_name]
if isinstance(old_desc, descriptor.EnumValueDescriptor):
old_file = old_desc.type.file.name
else:
old_file = old_desc.file.name
if not isinstance(desc, descriptor_type) or (
old_file != file_name):
error_msg = ('Conflict register for file "' + file_name +
'": ' + desc_name +
' is already defined in file "' +
old_file + '". Please fix the conflict by adding '
'package name on the proto file, or use different '
'name for the duplication.')
if isinstance(desc, descriptor.EnumValueDescriptor):
error_msg += ('\nNote: enum values appear as '
'siblings of the enum type instead of '
'children of it.')
raise TypeError(error_msg)
return
def Add(self, file_desc_proto):
"""Adds the FileDescriptorProto and its types to this pool.
Args:
file_desc_proto (FileDescriptorProto): The file descriptor to add.
"""
self._internal_db.Add(file_desc_proto)
def AddSerializedFile(self, serialized_file_desc_proto):
"""Adds the FileDescriptorProto and its types to this pool.
Args:
serialized_file_desc_proto (bytes): A bytes string, serialization of the
:class:`FileDescriptorProto` to add.
Returns:
FileDescriptor: Descriptor for the added file.
"""
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pb2
file_desc_proto = descriptor_pb2.FileDescriptorProto.FromString(
serialized_file_desc_proto)
file_desc = self._ConvertFileProtoToFileDescriptor(file_desc_proto)
file_desc.serialized_pb = serialized_file_desc_proto
return file_desc
# Never call this method. It is for internal usage only.
def _AddDescriptor(self, desc):
"""Adds a Descriptor to the pool, non-recursively.
If the Descriptor contains nested messages or enums, the caller must
explicitly register them. This method also registers the FileDescriptor
associated with the message.
Args:
desc: A Descriptor.
"""
if not isinstance(desc, descriptor.Descriptor):
raise TypeError('Expected instance of descriptor.Descriptor.')
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._descriptors[desc.full_name] = desc
self._AddFileDescriptor(desc.file)
# Never call this method. It is for internal usage only.
def _AddEnumDescriptor(self, enum_desc):
"""Adds an EnumDescriptor to the pool.
This method also registers the FileDescriptor associated with the enum.
Args:
enum_desc: An EnumDescriptor.
"""
if not isinstance(enum_desc, descriptor.EnumDescriptor):
raise TypeError('Expected instance of descriptor.EnumDescriptor.')
file_name = enum_desc.file.name
self._CheckConflictRegister(enum_desc, enum_desc.full_name, file_name)
self._enum_descriptors[enum_desc.full_name] = enum_desc
# Top enum values need to be indexed.
# Count the number of dots to see whether the enum is toplevel or nested
# in a message. We cannot use enum_desc.containing_type at this stage.
if enum_desc.file.package:
top_level = (enum_desc.full_name.count('.')
- enum_desc.file.package.count('.') == 1)
else:
top_level = enum_desc.full_name.count('.') == 0
if top_level:
file_name = enum_desc.file.name
package = enum_desc.file.package
for enum_value in enum_desc.values:
full_name = _NormalizeFullyQualifiedName(
'.'.join((package, enum_value.name)))
self._CheckConflictRegister(enum_value, full_name, file_name)
self._top_enum_values[full_name] = enum_value
self._AddFileDescriptor(enum_desc.file)
# Never call this method. It is for internal usage only.
def _AddServiceDescriptor(self, service_desc):
"""Adds a ServiceDescriptor to the pool.
Args:
service_desc: A ServiceDescriptor.
"""
if not isinstance(service_desc, descriptor.ServiceDescriptor):
raise TypeError('Expected instance of descriptor.ServiceDescriptor.')
self._CheckConflictRegister(service_desc, service_desc.full_name,
service_desc.file.name)
self._service_descriptors[service_desc.full_name] = service_desc
# Never call this method. It is for internal usage only.
def _AddExtensionDescriptor(self, extension):
"""Adds a FieldDescriptor describing an extension to the pool.
Args:
extension: A FieldDescriptor.
Raises:
AssertionError: when another extension with the same number extends the
same message.
TypeError: when the specified extension is not a
descriptor.FieldDescriptor.
"""
if not (isinstance(extension, descriptor.FieldDescriptor) and
extension.is_extension):
raise TypeError('Expected an extension descriptor.')
if extension.extension_scope is None:
self._CheckConflictRegister(
extension, extension.full_name, extension.file.name)
self._toplevel_extensions[extension.full_name] = extension
try:
existing_desc = self._extensions_by_number[
extension.containing_type][extension.number]
except KeyError:
pass
else:
if extension is not existing_desc:
raise AssertionError(
'Extensions "%s" and "%s" both try to extend message type "%s" '
'with field number %d.' %
(extension.full_name, existing_desc.full_name,
extension.containing_type.full_name, extension.number))
self._extensions_by_number[extension.containing_type][
extension.number] = extension
self._extensions_by_name[extension.containing_type][
extension.full_name] = extension
# Also register MessageSet extensions with the type name.
if _IsMessageSetExtension(extension):
self._extensions_by_name[extension.containing_type][
extension.message_type.full_name] = extension
if hasattr(extension.containing_type, '_concrete_class'):
python_message._AttachFieldHelpers(
extension.containing_type._concrete_class, extension)
# Never call this method. It is for internal usage only.
def _InternalAddFileDescriptor(self, file_desc):
"""Adds a FileDescriptor to the pool, non-recursively.
If the FileDescriptor contains messages or enums, the caller must explicitly
register them.
Args:
file_desc: A FileDescriptor.
"""
self._AddFileDescriptor(file_desc)
def _AddFileDescriptor(self, file_desc):
"""Adds a FileDescriptor to the pool, non-recursively.
If the FileDescriptor contains messages or enums, the caller must explicitly
register them.
Args:
file_desc: A FileDescriptor.
"""
if not isinstance(file_desc, descriptor.FileDescriptor):
raise TypeError('Expected instance of descriptor.FileDescriptor.')
self._file_descriptors[file_desc.name] = file_desc
def FindFileByName(self, file_name):
"""Gets a FileDescriptor by file name.
Args:
file_name (str): The path to the file to get a descriptor for.
Returns:
FileDescriptor: The descriptor for the named file.
Raises:
KeyError: if the file cannot be found in the pool.
"""
try:
return self._file_descriptors[file_name]
except KeyError:
pass
try:
file_proto = self._internal_db.FindFileByName(file_name)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileByName(file_name)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file named %s' % file_name)
return self._ConvertFileProtoToFileDescriptor(file_proto)
def FindFileContainingSymbol(self, symbol):
"""Gets the FileDescriptor for the file containing the specified symbol.
Args:
symbol (str): The name of the symbol to search for.
Returns:
FileDescriptor: Descriptor for the file that contains the specified
symbol.
Raises:
KeyError: if the file cannot be found in the pool.
"""
symbol = _NormalizeFullyQualifiedName(symbol)
try:
return self._InternalFindFileContainingSymbol(symbol)
except KeyError:
pass
try:
# Try fallback database. Build and find again if possible.
self._FindFileContainingSymbolInDb(symbol)
return self._InternalFindFileContainingSymbol(symbol)
except KeyError:
raise KeyError('Cannot find a file containing %s' % symbol)
def _InternalFindFileContainingSymbol(self, symbol):
"""Gets the already built FileDescriptor containing the specified symbol.
Args:
symbol (str): The name of the symbol to search for.
Returns:
FileDescriptor: Descriptor for the file that contains the specified
symbol.
Raises:
KeyError: if the file cannot be found in the pool.
"""
try:
return self._descriptors[symbol].file
except KeyError:
pass
try:
return self._enum_descriptors[symbol].file
except KeyError:
pass
try:
return self._service_descriptors[symbol].file
except KeyError:
pass
try:
return self._top_enum_values[symbol].type.file
except KeyError:
pass
try:
return self._toplevel_extensions[symbol].file
except KeyError:
pass
# Try fields, enum values and nested extensions inside a message.
top_name, _, sub_name = symbol.rpartition('.')
try:
message = self.FindMessageTypeByName(top_name)
assert (sub_name in message.extensions_by_name or
sub_name in message.fields_by_name or
sub_name in message.enum_values_by_name)
return message.file
except (KeyError, AssertionError):
raise KeyError('Cannot find a file containing %s' % symbol)
def FindMessageTypeByName(self, full_name):
"""Loads the named descriptor from the pool.
Args:
full_name (str): The full name of the descriptor to load.
Returns:
Descriptor: The descriptor for the named type.
Raises:
KeyError: if the message cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._descriptors[full_name]
def FindEnumTypeByName(self, full_name):
"""Loads the named enum descriptor from the pool.
Args:
full_name (str): The full name of the enum descriptor to load.
Returns:
EnumDescriptor: The enum descriptor for the named type.
Raises:
KeyError: if the enum cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._enum_descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._enum_descriptors[full_name]
def FindFieldByName(self, full_name):
"""Loads the named field descriptor from the pool.
Args:
full_name (str): The full name of the field descriptor to load.
Returns:
FieldDescriptor: The field descriptor for the named field.
Raises:
KeyError: if the field cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, field_name = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.fields_by_name[field_name]
def FindOneofByName(self, full_name):
"""Loads the named oneof descriptor from the pool.
Args:
full_name (str): The full name of the oneof descriptor to load.
Returns:
OneofDescriptor: The oneof descriptor for the named oneof.
Raises:
KeyError: if the oneof cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
message_name, _, oneof_name = full_name.rpartition('.')
message_descriptor = self.FindMessageTypeByName(message_name)
return message_descriptor.oneofs_by_name[oneof_name]
def FindExtensionByName(self, full_name):
"""Loads the named extension descriptor from the pool.
Args:
full_name (str): The full name of the extension descriptor to load.
Returns:
FieldDescriptor: The field descriptor for the named extension.
Raises:
KeyError: if the extension cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
try:
# The proto compiler does not give any link between the FileDescriptor
# and top-level extensions unless the FileDescriptorProto is added to
# the DescriptorDatabase, but this can impact memory usage.
# So we register these extensions by name explicitly.
return self._toplevel_extensions[full_name]
except KeyError:
pass
message_name, _, extension_name = full_name.rpartition('.')
try:
# Most extensions are nested inside a message.
scope = self.FindMessageTypeByName(message_name)
except KeyError:
# Some extensions are defined at file scope.
scope = self._FindFileContainingSymbolInDb(full_name)
return scope.extensions_by_name[extension_name]
def FindExtensionByNumber(self, message_descriptor, number):
"""Gets the extension of the specified message with the specified number.
Extensions have to be registered to this pool by calling :func:`Add` or
:func:`AddExtensionDescriptor`.
Args:
message_descriptor (Descriptor): descriptor of the extended message.
number (int): Number of the extension field.
Returns:
FieldDescriptor: The descriptor for the extension.
Raises:
KeyError: when no extension with the given number is known for the
specified message.
"""
try:
return self._extensions_by_number[message_descriptor][number]
except KeyError:
self._TryLoadExtensionFromDB(message_descriptor, number)
return self._extensions_by_number[message_descriptor][number]
def FindAllExtensions(self, message_descriptor):
"""Gets all the known extensions of a given message.
Extensions have to be registered to this pool via the build-related
:func:`Add` or :func:`AddExtensionDescriptor`.
Args:
message_descriptor (Descriptor): Descriptor of the extended message.
Returns:
list[FieldDescriptor]: Field descriptors describing the extensions.
"""
# Fallback to descriptor db if FindAllExtensionNumbers is provided.
if self._descriptor_db and hasattr(
self._descriptor_db, 'FindAllExtensionNumbers'):
full_name = message_descriptor.full_name
try:
all_numbers = self._descriptor_db.FindAllExtensionNumbers(full_name)
except Exception:
pass
else:
if isinstance(all_numbers, list):
for number in all_numbers:
if number in self._extensions_by_number[message_descriptor]:
continue
self._TryLoadExtensionFromDB(message_descriptor, number)
else:
warnings.warn(
'FindAllExtensionNumbers() on fallback DB must return a list,'
' not {0}'.format(type(all_numbers))
)
return list(self._extensions_by_number[message_descriptor].values())
def _TryLoadExtensionFromDB(self, message_descriptor, number):
"""Try to Load extensions from descriptor db.
Args:
message_descriptor: descriptor of the extended message.
number: the extension number that needs to be loaded.
"""
if not self._descriptor_db:
return
# Only supported when FindFileContainingExtension is provided.
if not hasattr(
self._descriptor_db, 'FindFileContainingExtension'):
return
full_name = message_descriptor.full_name
file_proto = None
try:
file_proto = self._descriptor_db.FindFileContainingExtension(
full_name, number
)
except Exception:
return
if file_proto is None:
return
try:
self._ConvertFileProtoToFileDescriptor(file_proto)
except Exception:
warn_msg = ('Unable to load proto file %s for extension number %d.' %
(file_proto.name, number))
warnings.warn(warn_msg, RuntimeWarning)
def FindServiceByName(self, full_name):
"""Loads the named service descriptor from the pool.
Args:
full_name (str): The full name of the service descriptor to load.
Returns:
ServiceDescriptor: The service descriptor for the named service.
Raises:
KeyError: if the service cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
if full_name not in self._service_descriptors:
self._FindFileContainingSymbolInDb(full_name)
return self._service_descriptors[full_name]
def FindMethodByName(self, full_name):
"""Loads the named service method descriptor from the pool.
Args:
full_name (str): The full name of the method descriptor to load.
Returns:
MethodDescriptor: The method descriptor for the service method.
Raises:
KeyError: if the method cannot be found in the pool.
"""
full_name = _NormalizeFullyQualifiedName(full_name)
service_name, _, method_name = full_name.rpartition('.')
service_descriptor = self.FindServiceByName(service_name)
return service_descriptor.methods_by_name[method_name]
def SetFeatureSetDefaults(self, defaults):
"""Sets the default feature mappings used during the build.
Args:
defaults: a FeatureSetDefaults message containing the new mappings.
"""
if self._edition_defaults is not None:
raise ValueError(
"Feature set defaults can't be changed once the pool has started"
' building!'
)
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pb2
if not isinstance(defaults, descriptor_pb2.FeatureSetDefaults):
raise TypeError('SetFeatureSetDefaults called with invalid type')
if defaults.minimum_edition > defaults.maximum_edition:
raise ValueError(
'Invalid edition range %s to %s'
% (
descriptor_pb2.Edition.Name(defaults.minimum_edition),
descriptor_pb2.Edition.Name(defaults.maximum_edition),
)
)
prev_edition = descriptor_pb2.Edition.EDITION_UNKNOWN
for d in defaults.defaults:
if d.edition == descriptor_pb2.Edition.EDITION_UNKNOWN:
raise ValueError('Invalid edition EDITION_UNKNOWN specified')
if prev_edition >= d.edition:
raise ValueError(
'Feature set defaults are not strictly increasing. %s is greater'
' than or equal to %s'
% (
descriptor_pb2.Edition.Name(prev_edition),
descriptor_pb2.Edition.Name(d.edition),
)
)
prev_edition = d.edition
self._edition_defaults = defaults
def _CreateDefaultFeatures(self, edition):
"""Creates a FeatureSet message with defaults for a specific edition.
Args:
edition: the edition to generate defaults for.
Returns:
A FeatureSet message with defaults for a specific edition.
"""
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pb2
with _edition_defaults_lock:
if not self._edition_defaults:
self._edition_defaults = descriptor_pb2.FeatureSetDefaults()
self._edition_defaults.ParseFromString(
self._serialized_edition_defaults
)
if edition < self._edition_defaults.minimum_edition:
raise TypeError(
'Edition %s is earlier than the minimum supported edition %s!'
% (
descriptor_pb2.Edition.Name(edition),
descriptor_pb2.Edition.Name(
self._edition_defaults.minimum_edition
),
)
)
if edition > self._edition_defaults.maximum_edition:
raise TypeError(
'Edition %s is later than the maximum supported edition %s!'
% (
descriptor_pb2.Edition.Name(edition),
descriptor_pb2.Edition.Name(
self._edition_defaults.maximum_edition
),
)
)
found = None
for d in self._edition_defaults.defaults:
if d.edition > edition:
break
found = d
if found is None:
raise TypeError(
'No valid default found for edition %s!'
% descriptor_pb2.Edition.Name(edition)
)
defaults = descriptor_pb2.FeatureSet()
defaults.CopyFrom(found.fixed_features)
defaults.MergeFrom(found.overridable_features)
return defaults
def _InternFeatures(self, features):
serialized = features.SerializeToString()
with _edition_defaults_lock:
cached = self._feature_cache.get(serialized)
if cached is None:
self._feature_cache[serialized] = features
cached = features
return cached
def _FindFileContainingSymbolInDb(self, symbol):
"""Finds the file in descriptor DB containing the specified symbol.
Args:
symbol (str): The name of the symbol to search for.
Returns:
FileDescriptor: The file that contains the specified symbol.
Raises:
KeyError: if the file cannot be found in the descriptor database.
"""
try:
file_proto = self._internal_db.FindFileContainingSymbol(symbol)
except KeyError as error:
if self._descriptor_db:
file_proto = self._descriptor_db.FindFileContainingSymbol(symbol)
else:
raise error
if not file_proto:
raise KeyError('Cannot find a file containing %s' % symbol)
return self._ConvertFileProtoToFileDescriptor(file_proto)
def _ConvertFileProtoToFileDescriptor(self, file_proto):
"""Creates a FileDescriptor from a proto or returns a cached copy.
This method also has the side effect of loading all the symbols found in
the file into the appropriate dictionaries in the pool.
Args:
file_proto: The proto to convert.
Returns:
A FileDescriptor matching the passed in proto.
"""
if file_proto.name not in self._file_descriptors:
built_deps = list(self._GetDeps(file_proto.dependency))
direct_deps = [self.FindFileByName(n) for n in file_proto.dependency]
public_deps = [direct_deps[i] for i in file_proto.public_dependency]
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pb2
file_descriptor = descriptor.FileDescriptor(
pool=self,
name=file_proto.name,
package=file_proto.package,
syntax=file_proto.syntax,
edition=descriptor_pb2.Edition.Name(file_proto.edition),
options=_OptionsOrNone(file_proto),
serialized_pb=file_proto.SerializeToString(),
dependencies=direct_deps,
public_dependencies=public_deps,
# pylint: disable=protected-access
create_key=descriptor._internal_create_key,
)
scope = {}
# This loop extracts all the message and enum types from all the
# dependencies of the file_proto. This is necessary to create the
# scope of available message types when defining the passed in
# file proto.
for dependency in built_deps:
scope.update(self._ExtractSymbols(
dependency.message_types_by_name.values()))
scope.update((_PrefixWithDot(enum.full_name), enum)
for enum in dependency.enum_types_by_name.values())
for message_type in file_proto.message_type:
message_desc = self._ConvertMessageDescriptor(
message_type, file_proto.package, file_descriptor, scope,
file_proto.syntax)
file_descriptor.message_types_by_name[message_desc.name] = (
message_desc)
for enum_type in file_proto.enum_type:
file_descriptor.enum_types_by_name[enum_type.name] = (
self._ConvertEnumDescriptor(enum_type, file_proto.package,
file_descriptor, None, scope, True))
for index, extension_proto in enumerate(file_proto.extension):
extension_desc = self._MakeFieldDescriptor(
extension_proto, file_proto.package, index, file_descriptor,
is_extension=True)
extension_desc.containing_type = self._GetTypeFromScope(
file_descriptor.package, extension_proto.extendee, scope)
self._SetFieldType(extension_proto, extension_desc,
file_descriptor.package, scope)
file_descriptor.extensions_by_name[extension_desc.name] = (
extension_desc)
for desc_proto in file_proto.message_type:
self._SetAllFieldTypes(file_proto.package, desc_proto, scope)
if file_proto.package:
desc_proto_prefix = _PrefixWithDot(file_proto.package)
else:
desc_proto_prefix = ''
for desc_proto in file_proto.message_type:
desc = self._GetTypeFromScope(
desc_proto_prefix, desc_proto.name, scope)
file_descriptor.message_types_by_name[desc_proto.name] = desc
for index, service_proto in enumerate(file_proto.service):
file_descriptor.services_by_name[service_proto.name] = (
self._MakeServiceDescriptor(service_proto, index, scope,
file_proto.package, file_descriptor))
self._file_descriptors[file_proto.name] = file_descriptor
# Add extensions to the pool
def AddExtensionForNested(message_type):
for nested in message_type.nested_types:
AddExtensionForNested(nested)
for extension in message_type.extensions:
self._AddExtensionDescriptor(extension)
file_desc = self._file_descriptors[file_proto.name]
for extension in file_desc.extensions_by_name.values():
self._AddExtensionDescriptor(extension)
for message_type in file_desc.message_types_by_name.values():
AddExtensionForNested(message_type)
return file_desc
def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None,
scope=None, syntax=None):
"""Adds the proto to the pool in the specified package.
Args:
desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
package: The package the proto should be located in.
file_desc: The file containing this message.
scope: Dict mapping short and full symbols to message and enum types.
syntax: string indicating syntax of the file ("proto2" or "proto3")
Returns:
The added descriptor.
"""
if package:
desc_name = '.'.join((package, desc_proto.name))
else:
desc_name = desc_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
if scope is None:
scope = {}
nested = [
self._ConvertMessageDescriptor(
nested, desc_name, file_desc, scope, syntax)
for nested in desc_proto.nested_type]
enums = [
self._ConvertEnumDescriptor(enum, desc_name, file_desc, None,
scope, False)
for enum in desc_proto.enum_type]
fields = [self._MakeFieldDescriptor(field, desc_name, index, file_desc)
for index, field in enumerate(desc_proto.field)]
extensions = [
self._MakeFieldDescriptor(extension, desc_name, index, file_desc,
is_extension=True)
for index, extension in enumerate(desc_proto.extension)]
oneofs = [
# pylint: disable=g-complex-comprehension
descriptor.OneofDescriptor(
desc.name,
'.'.join((desc_name, desc.name)),
index,
None,
[],
_OptionsOrNone(desc),
# pylint: disable=protected-access
create_key=descriptor._internal_create_key)
for index, desc in enumerate(desc_proto.oneof_decl)
]
extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
if extension_ranges:
is_extendable = True
else:
is_extendable = False
desc = descriptor.Descriptor(
name=desc_proto.name,
full_name=desc_name,
filename=file_name,
containing_type=None,
fields=fields,
oneofs=oneofs,
nested_types=nested,
enum_types=enums,
extensions=extensions,
options=_OptionsOrNone(desc_proto),
is_extendable=is_extendable,
extension_ranges=extension_ranges,
file=file_desc,
serialized_start=None,
serialized_end=None,
is_map_entry=desc_proto.options.map_entry,
# pylint: disable=protected-access
create_key=descriptor._internal_create_key,
)
for nested in desc.nested_types:
nested.containing_type = desc
for enum in desc.enum_types:
enum.containing_type = desc
for field_index, field_desc in enumerate(desc_proto.field):
if field_desc.HasField('oneof_index'):
oneof_index = field_desc.oneof_index
oneofs[oneof_index].fields.append(fields[field_index])
fields[field_index].containing_oneof = oneofs[oneof_index]
scope[_PrefixWithDot(desc_name)] = desc
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._descriptors[desc_name] = desc
return desc
def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
containing_type=None, scope=None, top_level=False):
"""Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf.
Args:
enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message.
package: Optional package name for the new EnumDescriptor.
file_desc: The file containing the enum descriptor.
containing_type: The type containing this enum.
scope: Scope containing available types.
top_level: If True, the enum is a top level symbol. If False, the enum
is defined inside a message.
Returns:
The added descriptor
"""
if package:
enum_name = '.'.join((package, enum_proto.name))
else:
enum_name = enum_proto.name
if file_desc is None:
file_name = None
else:
file_name = file_desc.name
values = [self._MakeEnumValueDescriptor(value, index)
for index, value in enumerate(enum_proto.value)]
desc = descriptor.EnumDescriptor(name=enum_proto.name,
full_name=enum_name,
filename=file_name,
file=file_desc,
values=values,
containing_type=containing_type,
options=_OptionsOrNone(enum_proto),
# pylint: disable=protected-access
create_key=descriptor._internal_create_key)
scope['.%s' % enum_name] = desc
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._enum_descriptors[enum_name] = desc
# Add top level enum values.
if top_level:
for value in values:
full_name = _NormalizeFullyQualifiedName(
'.'.join((package, value.name)))
self._CheckConflictRegister(value, full_name, file_name)
self._top_enum_values[full_name] = value
return desc
def _MakeFieldDescriptor(self, field_proto, message_name, index,
file_desc, is_extension=False):
"""Creates a field descriptor from a FieldDescriptorProto.
For message and enum type fields, this method will do a look up
in the pool for the appropriate descriptor for that type. If it
is unavailable, it will fall back to the _source function to
create it. If this type is still unavailable, construction will
fail.
Args:
field_proto: The proto describing the field.
message_name: The name of the containing message.
index: Index of the field
file_desc: The file containing the field descriptor.
is_extension: Indication that this field is for an extension.
Returns:
An initialized FieldDescriptor object
"""
if message_name:
full_name = '.'.join((message_name, field_proto.name))
else:
full_name = field_proto.name
if field_proto.json_name:
json_name = field_proto.json_name
else:
json_name = None
return descriptor.FieldDescriptor(
name=field_proto.name,
full_name=full_name,
index=index,
number=field_proto.number,
type=field_proto.type,
cpp_type=None,
message_type=None,
enum_type=None,
containing_type=None,
label=field_proto.label,
has_default_value=False,
default_value=None,
is_extension=is_extension,
extension_scope=None,
options=_OptionsOrNone(field_proto),
json_name=json_name,
file=file_desc,
# pylint: disable=protected-access
create_key=descriptor._internal_create_key)
def _SetAllFieldTypes(self, package, desc_proto, scope):
"""Sets all the descriptor's fields's types.
This method also sets the containing types on any extensions.
Args:
package: The current package of desc_proto.
desc_proto: The message descriptor to update.
scope: Enclosing scope of available types.
"""
package = _PrefixWithDot(package)
main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)
if package == '.':
nested_package = _PrefixWithDot(desc_proto.name)
else:
nested_package = '.'.join([package, desc_proto.name])
for field_proto, field_desc in zip(desc_proto.field, main_desc.fields):
self._SetFieldType(field_proto, field_desc, nested_package, scope)
for extension_proto, extension_desc in (
zip(desc_proto.extension, main_desc.extensions)):
extension_desc.containing_type = self._GetTypeFromScope(
nested_package, extension_proto.extendee, scope)
self._SetFieldType(extension_proto, extension_desc, nested_package, scope)
for nested_type in desc_proto.nested_type:
self._SetAllFieldTypes(nested_package, nested_type, scope)
def _SetFieldType(self, field_proto, field_desc, package, scope):
"""Sets the field's type, cpp_type, message_type and enum_type.
Args:
field_proto: Data about the field in proto format.
field_desc: The descriptor to modify.
package: The package the field's container is in.
scope: Enclosing scope of available types.
"""
if field_proto.type_name:
desc = self._GetTypeFromScope(package, field_proto.type_name, scope)
else:
desc = None
if not field_proto.HasField('type'):
if isinstance(desc, descriptor.Descriptor):
field_proto.type = descriptor.FieldDescriptor.TYPE_MESSAGE
else:
field_proto.type = descriptor.FieldDescriptor.TYPE_ENUM
field_desc.cpp_type = descriptor.FieldDescriptor.ProtoTypeToCppProtoType(
field_proto.type)
if (field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE
or field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP):
field_desc.message_type = desc
if field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.enum_type = desc
if field_proto.label == descriptor.FieldDescriptor.LABEL_REPEATED:
field_desc.has_default_value = False
field_desc.default_value = []
elif field_proto.HasField('default_value'):
field_desc.has_default_value = True
if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
field_desc.default_value = float(field_proto.default_value)
elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
field_desc.default_value = field_proto.default_value
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
field_desc.default_value = field_proto.default_value.lower() == 'true'
elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.default_value = field_desc.enum_type.values_by_name[
field_proto.default_value].number
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
field_desc.default_value = text_encoding.CUnescape(
field_proto.default_value)
elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
field_desc.default_value = None
else:
# All other types are of the "int" type.
field_desc.default_value = int(field_proto.default_value)
else:
field_desc.has_default_value = False
if (field_proto.type == descriptor.FieldDescriptor.TYPE_DOUBLE or
field_proto.type == descriptor.FieldDescriptor.TYPE_FLOAT):
field_desc.default_value = 0.0
elif field_proto.type == descriptor.FieldDescriptor.TYPE_STRING:
field_desc.default_value = u''
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BOOL:
field_desc.default_value = False
elif field_proto.type == descriptor.FieldDescriptor.TYPE_ENUM:
field_desc.default_value = field_desc.enum_type.values[0].number
elif field_proto.type == descriptor.FieldDescriptor.TYPE_BYTES:
field_desc.default_value = b''
elif field_proto.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
field_desc.default_value = None
elif field_proto.type == descriptor.FieldDescriptor.TYPE_GROUP:
field_desc.default_value = None
else:
# All other types are of the "int" type.
field_desc.default_value = 0
field_desc.type = field_proto.type
def _MakeEnumValueDescriptor(self, value_proto, index):
"""Creates a enum value descriptor object from a enum value proto.
Args:
value_proto: The proto describing the enum value.
index: The index of the enum value.
Returns:
An initialized EnumValueDescriptor object.
"""
return descriptor.EnumValueDescriptor(
name=value_proto.name,
index=index,
number=value_proto.number,
options=_OptionsOrNone(value_proto),
type=None,
# pylint: disable=protected-access
create_key=descriptor._internal_create_key)
def _MakeServiceDescriptor(self, service_proto, service_index, scope,
package, file_desc):
"""Make a protobuf ServiceDescriptor given a ServiceDescriptorProto.
Args:
service_proto: The descriptor_pb2.ServiceDescriptorProto protobuf message.
service_index: The index of the service in the File.
scope: Dict mapping short and full symbols to message and enum types.
package: Optional package name for the new ServiceDescriptor.
file_desc: The file containing the service descriptor.
Returns:
The added descriptor.
"""
if package:
service_name = '.'.join((package, service_proto.name))
else:
service_name = service_proto.name
methods = [self._MakeMethodDescriptor(method_proto, service_name, package,
scope, index)
for index, method_proto in enumerate(service_proto.method)]
desc = descriptor.ServiceDescriptor(
name=service_proto.name,
full_name=service_name,
index=service_index,
methods=methods,
options=_OptionsOrNone(service_proto),
file=file_desc,
# pylint: disable=protected-access
create_key=descriptor._internal_create_key)
self._CheckConflictRegister(desc, desc.full_name, desc.file.name)
self._service_descriptors[service_name] = desc
return desc
def _MakeMethodDescriptor(self, method_proto, service_name, package, scope,
index):
"""Creates a method descriptor from a MethodDescriptorProto.
Args:
method_proto: The proto describing the method.
service_name: The name of the containing service.
package: Optional package name to look up for types.
scope: Scope containing available types.
index: Index of the method in the service.
Returns:
An initialized MethodDescriptor object.
"""
full_name = '.'.join((service_name, method_proto.name))
input_type = self._GetTypeFromScope(
package, method_proto.input_type, scope)
output_type = self._GetTypeFromScope(
package, method_proto.output_type, scope)
return descriptor.MethodDescriptor(
name=method_proto.name,
full_name=full_name,
index=index,
containing_service=None,
input_type=input_type,
output_type=output_type,
client_streaming=method_proto.client_streaming,
server_streaming=method_proto.server_streaming,
options=_OptionsOrNone(method_proto),
# pylint: disable=protected-access
create_key=descriptor._internal_create_key)
def _ExtractSymbols(self, descriptors):
"""Pulls out all the symbols from descriptor protos.
Args:
descriptors: The messages to extract descriptors from.
Yields:
A two element tuple of the type name and descriptor object.
"""
for desc in descriptors:
yield (_PrefixWithDot(desc.full_name), desc)
for symbol in self._ExtractSymbols(desc.nested_types):
yield symbol
for enum in desc.enum_types:
yield (_PrefixWithDot(enum.full_name), enum)
def _GetDeps(self, dependencies, visited=None):
"""Recursively finds dependencies for file protos.
Args:
dependencies: The names of the files being depended on.
visited: The names of files already found.
Yields:
Each direct and indirect dependency.
"""
visited = visited or set()
for dependency in dependencies:
if dependency not in visited:
visited.add(dependency)
dep_desc = self.FindFileByName(dependency)
yield dep_desc
public_files = [d.name for d in dep_desc.public_dependencies]
yield from self._GetDeps(public_files, visited)
def _GetTypeFromScope(self, package, type_name, scope):
"""Finds a given type name in the current scope.
Args:
package: The package the proto should be located in.
type_name: The name of the type to be found in the scope.
scope: Dict mapping short and full symbols to message and enum types.
Returns:
The descriptor for the requested type.
"""
if type_name not in scope:
components = _PrefixWithDot(package).split('.')
while components:
possible_match = '.'.join(components + [type_name])
if possible_match in scope:
type_name = possible_match
break
else:
components.pop(-1)
return scope[type_name]
def _PrefixWithDot(name):
return name if name.startswith('.') else '.%s' % name
if _USE_C_DESCRIPTORS:
# TODO: This pool could be constructed from Python code, when we
# support a flag like 'use_cpp_generated_pool=True'.
# pylint: disable=protected-access
_DEFAULT = descriptor._message.default_pool
else:
_DEFAULT = DescriptorPool()
def Default():
return _DEFAULT
| DescriptorPool |
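A hedged end-to-end sketch of the pool above; the proto contents are hypothetical, while DescriptorPool, Add and FindMessageTypeByName are the real module API:
from google.protobuf import descriptor_pb2, descriptor_pool

pool = descriptor_pool.DescriptorPool()
file_proto = descriptor_pb2.FileDescriptorProto()
file_proto.name = "example.proto"  # hypothetical file
file_proto.package = "example"
file_proto.syntax = "proto3"
message_proto = file_proto.message_type.add()
message_proto.name = "Ping"
pool.Add(file_proto)

# Lookup triggers _ConvertFileProtoToFileDescriptor on first use.
desc = pool.FindMessageTypeByName("example.Ping")
print(desc.full_name, desc.file.name)  # example.Ping example.proto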
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_pubsub.py | {
"start": 11643,
"end": 14040
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_publish(self, mock_hook):
operator = PubSubPublishMessageOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
messages=TEST_MESSAGES,
)
operator.execute(None)
mock_hook.return_value.publish.assert_called_once_with(
project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES
)
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_publish_with_ordering_key(self, mock_hook):
operator = PubSubPublishMessageOperator(
task_id=TASK_ID,
project_id=TEST_PROJECT,
topic=TEST_TOPIC,
messages=TEST_MESSAGES_ORDERING_KEY,
enable_message_ordering=True,
)
operator.execute(None)
mock_hook.return_value.publish.assert_called_once_with(
project_id=TEST_PROJECT, topic=TEST_TOPIC, messages=TEST_MESSAGES_ORDERING_KEY
)
@pytest.mark.parametrize(
("project_id", "expected_dataset"),
[
# 1. project_id provided
(TEST_PROJECT, f"topic:{TEST_PROJECT}:{TEST_TOPIC}"),
# 2. project_id not provided (use project_id from connection)
(None, f"topic:connection-project:{TEST_TOPIC}"),
],
)
@mock.patch("airflow.providers.google.cloud.operators.pubsub.PubSubHook")
def test_get_openlineage_facets(self, mock_hook, project_id, expected_dataset):
operator = PubSubPublishMessageOperator(
task_id=TASK_ID,
project_id=project_id,
topic=TEST_TOPIC,
messages=TEST_MESSAGES,
)
operator.execute(None)
mock_hook.return_value.publish.assert_called_once_with(
project_id=project_id, topic=TEST_TOPIC, messages=TEST_MESSAGES
)
mock_hook.return_value.project_id = project_id or "connection-project"
result = operator.get_openlineage_facets_on_complete(operator)
assert not result.run_facets
assert not result.job_facets
assert len(result.inputs) == 0
assert len(result.outputs) == 1
assert result.outputs[0].namespace == "pubsub"
assert result.outputs[0].name == expected_dataset
| TestPubSubPublishOperator |
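As the parametrization above shows, the operator's OpenLineage output dataset uses namespace "pubsub" and name "topic:{project_id}:{topic}", with project_id falling back to the hook connection's project when the operator receives None.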
python | huggingface__transformers | src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | {
"start": 6801,
"end": 9564
} | class ____(nn.Module):
"""
This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper:
https://huggingface.co/papers/2206.02680
Args:
config (`MobileVitv2Config`):
Model configuration object
embed_dim (`int`):
`input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)`
"""
def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None:
super().__init__()
self.qkv_proj = MobileViTV2ConvLayer(
config=config,
in_channels=embed_dim,
out_channels=1 + (2 * embed_dim),
bias=True,
kernel_size=1,
use_normalization=False,
use_activation=False,
)
self.attn_dropout = nn.Dropout(p=config.attn_dropout)
self.out_proj = MobileViTV2ConvLayer(
config=config,
in_channels=embed_dim,
out_channels=embed_dim,
bias=True,
kernel_size=1,
use_normalization=False,
use_activation=False,
)
self.embed_dim = embed_dim
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# (batch_size, embed_dim, num_pixels_in_patch, num_patches) --> (batch_size, 1+2*embed_dim, num_pixels_in_patch, num_patches)
qkv = self.qkv_proj(hidden_states)
# Project hidden_states into query, key and value
# Query --> [batch_size, 1, num_pixels_in_patch, num_patches]
# value, key --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1)
# apply softmax along num_patches dimension
context_scores = torch.nn.functional.softmax(query, dim=-1)
context_scores = self.attn_dropout(context_scores)
# Compute context vector
# [batch_size, embed_dim, num_pixels_in_patch, num_patches] x [batch_size, 1, num_pixels_in_patch, num_patches] -> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
context_vector = key * context_scores
# [batch_size, embed_dim, num_pixels_in_patch, num_patches] --> [batch_size, embed_dim, num_pixels_in_patch, 1]
context_vector = torch.sum(context_vector, dim=-1, keepdim=True)
# combine context vector with values
# [batch_size, embed_dim, num_pixels_in_patch, num_patches] * [batch_size, embed_dim, num_pixels_in_patch, 1] --> [batch_size, embed_dim, num_pixels_in_patch, num_patches]
out = torch.nn.functional.relu(value) * context_vector.expand_as(value)
out = self.out_proj(out)
return out
| MobileViTV2LinearSelfAttention |
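A self-contained sketch of the linear-attention math in forward() above, using random tensors in place of the learned qkv/out projections:
import torch

batch, dim, pixels, patches = 2, 8, 16, 4
query = torch.randn(batch, 1, pixels, patches)
key = torch.randn(batch, dim, pixels, patches)
value = torch.randn(batch, dim, pixels, patches)

scores = torch.softmax(query, dim=-1)               # (B, 1, P, N)
context = (key * scores).sum(dim=-1, keepdim=True)  # (B, D, P, 1)
out = torch.relu(value) * context.expand_as(value)  # (B, D, P, N)
print(out.shape)  # torch.Size([2, 8, 16, 4]); cost is linear in patches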
python | python-openxml__python-docx | src/docx/oxml/shape.py | {
"start": 680,
"end": 1066
} | class ____(BaseOxmlElement):
"""``<a:blip>`` element, specifies image source and adjustments such as alpha and
tint."""
embed: str | None = OptionalAttribute( # pyright: ignore[reportAssignmentType]
"r:embed", ST_RelationshipId
)
link: str | None = OptionalAttribute( # pyright: ignore[reportAssignmentType]
"r:link", ST_RelationshipId
)
| CT_Blip |
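A hedged sketch of reaching this element from python-docx, assuming a document "with_image.docx" with at least one inline picture; the attribute chain goes through private oxml internals and may differ between versions:
from docx import Document

doc = Document("with_image.docx")  # hypothetical input document
shape = doc.inline_shapes[0]
blip = shape._inline.graphic.graphicData.pic.blipFill.blip  # private API
print(blip.embed)  # relationship id of the embedded image, e.g. "rId7"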
python | airbytehq__airbyte | airbyte-integrations/connectors/source-klaviyo/components.py | {
"start": 12661,
"end": 13555
} | class ____(DefaultErrorHandler):
def interpret_response(self, response_or_exception: Optional[Union[requests.Response, Exception]]) -> ErrorResolution:
"""
We have seen `[Errno -3] Temporary failure in name resolution` a couple of times on two different connections
(1fed2ede-2d33-4543-85e3-7d6e5736075d and 1b276f7d-358a-4fe3-a437-6747fd780eed). Retrying the requests on later syncs is working
which makes it sound like a transient issue.
"""
if isinstance(response_or_exception, InvalidURL):
return ErrorResolution(
response_action=ResponseAction.RETRY,
failure_type=FailureType.transient_error,
error_message="source-klaviyo has faced a temporary DNS resolution issue. Retrying...",
)
return super().interpret_response(response_or_exception)
| KlaviyoErrorHandler |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/filter_parallelization_test.py | {
"start": 2145,
"end": 9418
} | class ____(test_base.DatasetTestBase,
parameterized.TestCase):
def enableFilterParallelization(self, dataset):
options = options_lib.Options()
options.experimental_optimization.filter_parallelization = True
return dataset.with_options(options)
@combinations.generate(_test_combinations())
def testFilterDataset(self, apply_filter):
components = (np.arange(7, dtype=np.int64),
np.array([[1, 2, 3]], dtype=np.int64) *
np.arange(7, dtype=np.int64)[:, np.newaxis],
np.array(37.0, dtype=np.float64) * np.arange(7))
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
def do_test(count, modulus): # pylint: disable=missing-docstring
dataset = dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn).repeat(count)
dataset = self.enableFilterParallelization(dataset)
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
# pylint: disable=g-long-lambda
dataset = apply_filter(
dataset,
lambda x, _y, _z: math_ops.equal(math_ops.mod(x, modulus), 0))
# pylint: enable=g-long-lambda
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
get_next = self.getNext(dataset)
for _ in range(count):
for i in [x for x in range(7) if x**2 % modulus == 0]:
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
do_test(14, 2)
do_test(4, 18)
# Test an empty dataset.
do_test(0, 1)
@combinations.generate(_test_combinations())
def testFilterRange(self, apply_filter):
dataset = dataset_ops.Dataset.range(4)
dataset = self.enableFilterParallelization(dataset)
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
dataset = apply_filter(dataset,
lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
self.assertDatasetProduces(dataset, expected_output=[0, 1, 3])
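# A stand-alone sketch (separate from the test class) of the user-facing
# opt-in exercised here: enabling filter parallelization via tf.data.Options.
import tensorflow as tf

ds = tf.data.Dataset.range(8)
opts = tf.data.Options()
opts.experimental_optimization.filter_parallelization = True
ds = ds.with_options(opts).filter(lambda x: x % 2 == 0)
print(list(ds.as_numpy_iterator()))  # [0, 2, 4, 6]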
@combinations.generate(_test_combinations())
def testFilterDict(self, apply_filter):
dataset = dataset_ops.Dataset.range(10).map(
lambda x: {"foo": x * 2, "bar": x**2})
dataset = self.enableFilterParallelization(dataset)
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
dataset = apply_filter(dataset, lambda d: math_ops.equal(d["bar"] % 2, 0))
dataset = dataset.map(lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset,
expected_output=[(i * 2 + i**2) for i in range(10) if not (i**2) % 2])
@combinations.generate(_test_combinations())
def testUseStepContainerInFilter(self, apply_filter):
input_data = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
# Define a predicate that returns true for the first element of
# the sequence and not the second, and uses `tf.map_fn()`.
def _predicate(xs):
squared_xs = map_fn.map_fn(lambda x: x * x, xs)
summed = math_ops.reduce_sum(squared_xs)
return math_ops.equal(summed, 1 + 4 + 9)
dataset = dataset_ops.Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6]])
dataset = self.enableFilterParallelization(dataset)
# Calling `legacy_filter_fn` with `_predicate` makes the predicate passed to
# `Filter` stateful and therefore not parallelizable.
if repr(apply_filter) != "legacy_filter_fn":
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
else:
dataset = dataset.apply(testing.assert_next(["Filter"]))
dataset = apply_filter(dataset, _predicate)
self.assertDatasetProduces(dataset, expected_output=[input_data[0]])
@combinations.generate(_test_combinations())
def testSparse(self, apply_filter):
def _map_fn(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1])), i
def _filter_fn(_, i):
return math_ops.equal(i % 2, 0)
dataset = dataset_ops.Dataset.range(10).map(_map_fn)
dataset = self.enableFilterParallelization(dataset)
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
dataset = apply_filter(dataset, _filter_fn)
dataset = dataset.map(lambda x, i: x)
self.assertDatasetProduces(
dataset, expected_output=[_map_fn(i * 2)[0] for i in range(5)])
@combinations.generate(_test_combinations())
def testShortCircuit(self, apply_filter):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(10),
dataset_ops.Dataset.from_tensors(True).repeat(None)))
dataset = self.enableFilterParallelization(dataset)
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
dataset = apply_filter(dataset, lambda x, y: y)
self.assertDatasetProduces(
dataset, expected_output=[(i, True) for i in range(10)])
@combinations.generate(_test_combinations())
def testParallelFilters(self, apply_filter):
dataset = dataset_ops.Dataset.range(10)
dataset = self.enableFilterParallelization(dataset)
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
dataset = apply_filter(dataset, lambda x: math_ops.equal(x % 2, 0))
next_elements = [self.getNext(dataset) for _ in range(10)]
self.assertEqual([0 for _ in range(10)],
self.evaluate(
[next_element() for next_element in next_elements]))
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors(42).filter(
lambda x: True, name="filter")
self.assertDatasetProduces(dataset, [42])
@combinations.generate(test_base.default_test_combinations())
def testInputOutOfRange(self):
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.Dataset.range(5)
dataset = self.enableFilterParallelization(dataset)
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
dataset = dataset.filter(
lambda x: script_ops.py_func(py_fn, [x], dtypes.bool, stateful=False))
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(autotune=[False, True])))
def testAutotuneSetting(self, autotune):
dataset = dataset_ops.Dataset.range(4)
options = options_lib.Options()
options.experimental_optimization.filter_parallelization = True
options.autotune.enabled = autotune
dataset = dataset.with_options(options)
if autotune:
dataset = dataset.apply(testing.assert_next(["ParallelFilter"]))
else:
dataset = dataset.apply(testing.assert_next(["Filter"]))
dataset = dataset.filter(
lambda x: math_ops.not_equal(math_ops.mod(x, 3), 2))
self.assertDatasetProduces(dataset, expected_output=[0, 1, 3])
| FilterParallelizationTest |
python | coleifer__peewee | peewee.py | {
"start": 21523,
"end": 21753
} | class ____(object):
__slots__ = ()
def __get__(self, instance, instance_type=None):
if instance is not None:
return ColumnFactory(instance) # Implements __getattr__().
return self
| _DynamicColumn |
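A self-contained sketch of the descriptor idiom `_DynamicColumn` relies on: instance access returns a per-instance factory object, while class access returns the descriptor itself. `Factory`, `DynamicAttr`, and `Model` are illustrative names, not peewee's:

class Factory:
    def __init__(self, instance):
        self.instance = instance
    def __getattr__(self, name):
        # Attribute access on the factory is where the dynamic behavior lives.
        return f"{self.instance!r}.{name}"

class DynamicAttr:
    __slots__ = ()
    def __get__(self, instance, owner=None):
        return Factory(instance) if instance is not None else self

class Model:
    c = DynamicAttr()

m = Model()
print(m.c.username)  # routed through Factory.__getattr__
print(Model.c)       # class access yields the descriptor itself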
python | ray-project__ray | python/ray/tests/test_runtime_env_packaging.py | {
"start": 28394,
"end": 38605
} | class ____:
async def test_download_and_unpack_package_with_gcs_uri_without_gcs_client(
self, ray_start_regular
):
# Test the guard clause for giving GCS URIs without a GCS client.
with tempfile.TemporaryDirectory() as temp_dir:
zipfile_path = Path(temp_dir) / "test-zip-file.zip"
with zipfile.ZipFile(zipfile_path, "x") as zip:
# Add a file to the zip file so we can verify the file was extracted.
zip.writestr("file.txt", "Hello, world!")
# upload the zip file to GCS pkg_uri
pkg_uri = "gcs://my-zipfile.zip"
upload_package_to_gcs(pkg_uri, zipfile_path.read_bytes())
with pytest.raises(ValueError):
# Download the zip file from GCS pkg_uri
await download_and_unpack_package(
pkg_uri=pkg_uri,
base_directory=temp_dir,
gcs_client=None,
)
async def test_download_and_unpack_package_with_gcs_uri(self, ray_start_regular):
# Test downloading and unpacking a GCS package with a GCS client.
gcs_client = ray._private.worker.global_worker.gcs_client
with tempfile.TemporaryDirectory() as temp_dir:
zipfile_path = Path(temp_dir) / "test-zip-file.zip"
with zipfile.ZipFile(zipfile_path, "x") as zip:
# Add a file to the zip file so we can verify the file was extracted.
zip.writestr("file.txt", "Hello, world!")
# upload the zip file to GCS pkg_uri
pkg_uri = "gcs://my-zipfile.zip"
upload_package_to_gcs(pkg_uri, zipfile_path.read_bytes())
# Download the zip file from GCS pkg_uri
local_dir = await download_and_unpack_package(
pkg_uri=pkg_uri,
base_directory=temp_dir,
gcs_client=gcs_client,
)
# Check that the file was extracted to the destination directory
assert (Path(local_dir) / "file.txt").exists()
async def test_download_and_unpack_package_with_https_uri(self):
with tempfile.TemporaryDirectory() as temp_dest_dir:
local_dir = await download_and_unpack_package(
pkg_uri=HTTPS_PACKAGE_URI, base_directory=temp_dest_dir
)
assert (Path(local_dir) / "test_module").exists()
async def test_download_and_unpack_package_with_s3_uri(self):
# Note: running this test requires AWS credentials to be set up
        # any credential will do, as long as it's valid
with tempfile.TemporaryDirectory() as temp_dest_dir:
local_dir = await download_and_unpack_package(
pkg_uri=S3_PACKAGE_URI, base_directory=temp_dest_dir
)
assert (Path(local_dir) / "test_module").exists()
# test download whl from remote S3
with tempfile.TemporaryDirectory() as temp_dest_dir:
wheel_uri = await download_and_unpack_package(
pkg_uri=S3_WHL_PACKAGE_URI, base_directory=temp_dest_dir
)
assert (Path(local_dir) / wheel_uri).exists()
async def test_download_and_unpack_package_with_file_uri(self):
with tempfile.TemporaryDirectory() as temp_dir:
zipfile_path = Path(temp_dir) / "test-zip-file.zip"
with zipfile.ZipFile(zipfile_path, "x") as zip:
# Add a file to the zip file so we can verify the file was extracted.
zip.writestr("file.txt", "Hello, world!")
from urllib.parse import urljoin
from urllib.request import pathname2url
# in windows, file_path = ///C:/Users/...
# in linux, file_path = /tmp/...
file_path = pathname2url(str(zipfile_path))
# remove the first slash in file_path to avoid invalid path in windows
pkg_uri = urljoin("file:", file_path[1:])
local_dir = await download_and_unpack_package(
pkg_uri=pkg_uri, base_directory=temp_dir
)
# Check that the file was extracted to the destination directory
assert (Path(local_dir) / "file.txt").exists()
@pytest.mark.parametrize(
"protocol",
[
Protocol.CONDA,
Protocol.PIP,
],
)
async def test_download_and_unpack_package_with_unsupported_protocol(
self, protocol: Protocol
):
# Test giving an unsupported protocol.
pkg_uri = f"{protocol.value}://some-package.zip"
with pytest.raises(NotImplementedError) as excinfo:
await download_and_unpack_package(pkg_uri=pkg_uri, base_directory="/tmp")
assert f"{protocol.name} is not supported" in str(excinfo.value)
@pytest.mark.parametrize(
"invalid_pkg_uri",
[
"gcs://gcs-cannot-have-a-folder/my-zipfile.zip",
"s3://file-wihout-file-extension",
],
)
async def test_download_and_unpack_package_with_invalid_uri(
self, invalid_pkg_uri: str
):
with pytest.raises(ValueError) as excinfo:
await download_and_unpack_package(
pkg_uri=invalid_pkg_uri, base_directory="/tmp"
)
assert "Invalid package URI" in str(excinfo.value)
def test_get_gitignore(tmp_path):
gitignore_path = tmp_path / ".gitignore"
gitignore_path.write_text("*.pyc")
gitignore_func = _get_ignore_file(tmp_path, ".gitignore")
assert gitignore_func(Path(tmp_path / "foo.pyc")) is True
assert gitignore_func(Path(tmp_path / "foo.py")) is False
@pytest.mark.parametrize(
"include_gitignore,expected_excludes",
[
# Default: both .gitignore and .rayignore are used
(True, ["gitignore", "rayignore"]),
# Only .rayignore is used, no inheritance
(False, ["rayignore"]),
],
)
def test_ray_ignore_and_git_ignore_together(
tmp_path, include_gitignore, expected_excludes, monkeypatch
):
"""Test get_excludes_from_ignore_files with different environment variable combinations."""
# Create test ignore files
gitignore_path = tmp_path / ".gitignore"
gitignore_path.write_text("*.pyc")
git_ignore_file = tmp_path / "test.pyc"
rayignore_path = tmp_path / ".rayignore"
rayignore_path.write_text("*.cache")
ray_ignore_file = tmp_path / "test.cache"
# Get exclusion functions
exclude_funcs = get_excludes_from_ignore_files(
tmp_path, include_gitignore=include_gitignore
)
# Check the number of exclusion functions returned
    assert len(exclude_funcs) == len(
        expected_excludes
    ), f"Expected {len(expected_excludes)} exclusion functions: {expected_excludes}"
# .gitignore patterns
assert any(f(git_ignore_file) for f in exclude_funcs) == include_gitignore
# .rayignore patterns is always used
assert any(f(ray_ignore_file) for f in exclude_funcs)
@pytest.mark.parametrize("ignore_gitignore", [True, False])
@pytest.mark.skipif(sys.platform == "win32", reason="Fails on windows")
def test_travel(tmp_path, ignore_gitignore, monkeypatch):
dir_paths = set()
file_paths = set()
item_num = 0
excludes = []
root = tmp_path / "test"
def construct(path, excluded=False, depth=0):
nonlocal item_num
path.mkdir(parents=True)
if not excluded:
dir_paths.add(str(path))
if depth > 8:
return
if item_num > 500:
return
dir_num = random.randint(0, 10)
file_num = random.randint(0, 10)
for _ in range(dir_num):
uid = str(uuid.uuid4()).split("-")[0]
dir_path = path / uid
exclud_sub = random.randint(0, 5) == 0
if not excluded and exclud_sub:
excludes.append(str(dir_path.relative_to(root)))
if not excluded:
construct(dir_path, exclud_sub or excluded, depth + 1)
item_num += 1
if item_num > 1000:
return
for _ in range(file_num):
uid = str(uuid.uuid4()).split("-")[0]
v = random.randint(0, 1000)
with (path / uid).open("w") as f:
f.write(str(v))
if not excluded:
if random.randint(0, 5) == 0:
excludes.append(str((path / uid).relative_to(root)))
else:
file_paths.add((str(path / uid), str(v)))
item_num += 1
# Add gitignore file
gitignore = root / ".gitignore"
gitignore.write_text("*.pyc")
file_paths.add((str(gitignore), "*.pyc"))
# Add file that should be ignored by gitignore
with (root / "foo.pyc").open("w") as f:
f.write("foo")
if ignore_gitignore:
# If ignore_gitignore is True, then the file should be visited
file_paths.add((str(root / "foo.pyc"), "foo"))
construct(root)
exclude_spec = _get_excludes(root, excludes)
visited_dir_paths = set()
visited_file_paths = set()
def handler(path):
if path.is_dir():
visited_dir_paths.add(str(path))
else:
with open(path) as f:
visited_file_paths.add((str(path), f.read()))
_dir_travel(root, [exclude_spec], handler, include_gitignore=not ignore_gitignore)
assert file_paths == visited_file_paths
assert dir_paths == visited_dir_paths
def test_is_whl_uri():
assert is_whl_uri("gcs://my-package.whl")
assert not is_whl_uri("gcs://asdf.zip")
assert not is_whl_uri("invalid_format")
def test_is_zip_uri():
assert is_zip_uri("s3://my-package.zip")
assert is_zip_uri("gcs://asdf.zip")
assert not is_zip_uri("invalid_format")
assert not is_zip_uri("gcs://a.whl")
def test_get_uri_for_package():
assert get_uri_for_package(Path("/tmp/my-pkg.whl")) == "gcs://my-pkg.whl"
def test_get_local_dir_from_uri():
uri = "gcs://<working_dir_content_hash>.zip"
assert get_local_dir_from_uri(uri, "base_dir") == Path(
"base_dir/<working_dir_content_hash>"
)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestDownloadAndUnpackPackage |
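The file-URI construction in `test_download_and_unpack_package_with_file_uri` is worth isolating. A minimal sketch (POSIX output shown; on Windows, `pathname2url` produces a leading-slash drive path, which is why the test strips the first slash):

from pathlib import Path
from urllib.parse import urljoin
from urllib.request import pathname2url

zip_path = Path("/tmp/test-zip-file.zip")
pkg_uri = urljoin("file:", pathname2url(str(zip_path)))
print(pkg_uri)  # file:///tmp/test-zip-file.zip on POSIX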
python | huggingface__transformers | src/transformers/models/bart/modeling_bart.py | {
"start": 38512,
"end": 46225
} | class ____(BartPreTrainedModel):
_tied_weights_keys = {
"decoder.embed_tokens.weight": "shared.weight",
"encoder.embed_tokens.weight": "shared.weight",
}
def __init__(self, config: BartConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.shared = BartScaledWordEmbedding(vocab_size, config.d_model, padding_idx, embed_scale=embed_scale)
self.encoder = BartEncoder(config)
self.decoder = BartDecoder(config)
# Initialize weights and apply final processing
self.post_init()
def tie_weights(self, missing_keys: Optional[set[str]] = None, recompute_mapping: bool = True):
"""We need to overload here to handle the wrong key saved in some main checkpoints."""
if self.config.tie_word_embeddings:
# Some model checkpoints like "facebook/bart-large-cnn"'s embedding weight is in decoder.embed_tokens,
# need check here, see issue #36247
if missing_keys is not None:
if "shared.weight" in missing_keys and "decoder.embed_tokens.weight" not in missing_keys:
self.encoder.embed_tokens.weight = self.decoder.embed_tokens.weight
self.shared.weight = self.decoder.embed_tokens.weight
missing_keys.discard("encoder.embed_token.weight")
missing_keys.discard("shared.weight")
# needs to be done after, otherwise it raises an Error because the correct weights are not present
super().tie_weights(missing_keys=missing_keys, recompute_mapping=recompute_mapping)
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[list[torch.FloatTensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, Seq2SeqModelOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`]
and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more
information on the default strategy.
"""
# different to other models, Bart automatically creates decoder_input_ids from
# input_ids if no decoder_input_ids are provided
if decoder_input_ids is None and decoder_inputs_embeds is None:
if input_ids is None:
raise ValueError(
"If no `decoder_input_ids` or `decoder_inputs_embeds` are "
"passed, `input_ids` cannot be `None`. Please pass either "
"`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
)
decoder_input_ids = shift_tokens_right(
input_ids, self.config.pad_token_id, self.config.decoder_start_token_id
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The BART Model with a language modeling head. Can be used for summarization.
"""
)
| BartModel |
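A sketch of the right shift `BartModel` performs when deriving `decoder_input_ids` from `input_ids`: prepend the decoder start token, drop the last token, and replace any -100 label padding with the pad token. This mirrors the common seq2seq convention rather than quoting the exact upstream helper:

import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) -> torch.Tensor:
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()           # shift everything one step right
    shifted[:, 0] = decoder_start_token_id               # sequence starts with the decoder start token
    shifted.masked_fill_(shifted == -100, pad_token_id)  # -100 is the usual ignore index in labels
    return shifted

ids = torch.tensor([[5, 6, 7, 2]])
print(shift_tokens_right(ids, pad_token_id=1, decoder_start_token_id=2))
# tensor([[2, 5, 6, 7]])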
python | tensorflow__tensorflow | tensorflow/python/debug/cli/profile_analyzer_cli.py | {
"start": 1481,
"end": 7557
} | class ____(object):
"""Table View of profiling data."""
def __init__(self, profile_datum_list, time_unit=cli_shared.TIME_UNIT_US):
"""Constructor.
Args:
profile_datum_list: List of `ProfileDatum` objects.
time_unit: must be in cli_shared.TIME_UNITS.
"""
self._profile_datum_list = profile_datum_list
self.formatted_start_time = [
datum.start_time for datum in profile_datum_list]
self.formatted_op_time = [
cli_shared.time_to_readable_str(datum.op_time,
force_time_unit=time_unit)
for datum in profile_datum_list]
self.formatted_exec_time = [
cli_shared.time_to_readable_str(
datum.node_exec_stats.all_end_rel_micros,
force_time_unit=time_unit)
for datum in profile_datum_list]
self._column_names = ["Node",
"Op Type",
"Start Time (us)",
"Op Time (%s)" % time_unit,
"Exec Time (%s)" % time_unit,
"Filename:Lineno(function)"]
self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE,
SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME,
SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]
def value(self,
row,
col,
device_name_filter=None,
node_name_filter=None,
op_type_filter=None):
"""Get the content of a cell of the table.
Args:
row: (int) row index.
col: (int) column index.
device_name_filter: Regular expression to filter by device name.
node_name_filter: Regular expression to filter by node name.
op_type_filter: Regular expression to filter by op type.
Returns:
      A debugger_cli_common.RichLine object representing the content of the
cell, potentially with a clickable MenuItem.
Raises:
IndexError: if row index is out of range.
"""
menu_item = None
if col == 0:
text = self._profile_datum_list[row].node_exec_stats.node_name
elif col == 1:
text = self._profile_datum_list[row].op_type
elif col == 2:
text = str(self.formatted_start_time[row])
elif col == 3:
text = str(self.formatted_op_time[row])
elif col == 4:
text = str(self.formatted_exec_time[row])
elif col == 5:
command = "ps"
if device_name_filter:
command += " --%s %s" % (_DEVICE_NAME_FILTER_FLAG,
device_name_filter)
if node_name_filter:
command += " --%s %s" % (_NODE_NAME_FILTER_FLAG, node_name_filter)
if op_type_filter:
command += " --%s %s" % (_OP_TYPE_FILTER_FLAG, op_type_filter)
command += " %s --init_line %d" % (
self._profile_datum_list[row].file_path,
self._profile_datum_list[row].line_number)
menu_item = debugger_cli_common.MenuItem(None, command)
text = self._profile_datum_list[row].file_line_func
else:
raise IndexError("Invalid column index %d." % col)
return RL(text, font_attr=menu_item)
def row_count(self):
return len(self._profile_datum_list)
def column_count(self):
return len(self._column_names)
def column_names(self):
return self._column_names
def column_sort_id(self, col):
return self._column_sort_ids[col]
def _list_profile_filter(
profile_datum,
node_name_regex,
file_path_regex,
op_type_regex,
op_time_interval,
exec_time_interval,
min_lineno=-1,
max_lineno=-1):
"""Filter function for list_profile command.
Args:
profile_datum: A `ProfileDatum` object.
node_name_regex: Regular expression pattern object to filter by name.
file_path_regex: Regular expression pattern object to filter by file path.
op_type_regex: Regular expression pattern object to filter by op type.
op_time_interval: `Interval` for filtering op time.
exec_time_interval: `Interval` for filtering exec time.
min_lineno: Lower bound for 1-based line number, inclusive.
If <= 0, has no effect.
max_lineno: Upper bound for 1-based line number, exclusive.
If <= 0, has no effect.
# TODO(cais): Maybe filter by function name.
Returns:
True iff profile_datum should be included.
"""
if node_name_regex and not node_name_regex.match(
profile_datum.node_exec_stats.node_name):
return False
if file_path_regex:
if (not profile_datum.file_path or
not file_path_regex.match(profile_datum.file_path)):
return False
if (min_lineno > 0 and profile_datum.line_number and
profile_datum.line_number < min_lineno):
return False
if (max_lineno > 0 and profile_datum.line_number and
profile_datum.line_number >= max_lineno):
return False
if (profile_datum.op_type is not None and op_type_regex and
not op_type_regex.match(profile_datum.op_type)):
return False
if op_time_interval is not None and not op_time_interval.contains(
profile_datum.op_time):
return False
if exec_time_interval and not exec_time_interval.contains(
profile_datum.node_exec_stats.all_end_rel_micros):
return False
return True
def _list_profile_sort_key(profile_datum, sort_by):
"""Get a profile_datum property to sort by in list_profile command.
Args:
profile_datum: A `ProfileDatum` object.
sort_by: (string) indicates a value to sort by.
Must be one of SORT_BY* constants.
Returns:
profile_datum property to sort by.
"""
if sort_by == SORT_OPS_BY_OP_NAME:
return profile_datum.node_exec_stats.node_name
elif sort_by == SORT_OPS_BY_OP_TYPE:
return profile_datum.op_type
elif sort_by == SORT_OPS_BY_LINE:
return profile_datum.file_line_func
elif sort_by == SORT_OPS_BY_OP_TIME:
return profile_datum.op_time
elif sort_by == SORT_OPS_BY_EXEC_TIME:
return profile_datum.node_exec_stats.all_end_rel_micros
else: # sort by start time
return profile_datum.node_exec_stats.all_start_micros
| ProfileDataTableView |
python | agronholm__apscheduler | src/apscheduler/_structures.py | {
"start": 9516,
"end": 9996
} | class ____:
"""
Represents a result of a schedule processing operation.
:ivar schedule_id: ID of the schedule
:ivar task_id: ID of the schedule's task
:ivar trigger: the schedule's trigger
    :ivar last_fire_time: the last time the schedule's trigger fired
    :ivar next_fire_time: the next time the schedule's trigger is due to fire
"""
schedule_id: str
task_id: str
trigger: Trigger
last_fire_time: datetime
next_fire_time: datetime | None
@attrs.define(kw_only=True, order=False)
| ScheduleResult |
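A minimal sketch of the `attrs.define(kw_only=True, order=False)` style this record type uses; the field set here is illustrative:

from datetime import datetime, timezone
from typing import Optional
import attrs

@attrs.define(kw_only=True, order=False)
class Result:
    schedule_id: str
    next_fire_time: Optional[datetime] = None

r = Result(schedule_id="nightly", next_fire_time=datetime.now(timezone.utc))
print(r.schedule_id, r.next_fire_time)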
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 826988,
"end": 827465
} | class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("permission", "permission_was")
permission = sgqlc.types.Field(
OrgUpdateMemberAuditEntryPermission, graphql_name="permission"
)
permission_was = sgqlc.types.Field(
OrgUpdateMemberAuditEntryPermission, graphql_name="permissionWas"
)
| OrgUpdateMemberAuditEntry |
python | getsentry__sentry | src/sentry/utils/snuba.py | {
"start": 14548,
"end": 14684
} | class ____(QueryExecutionError):
"""
Exception raised when a query would exceed the memory limit.
"""
| QueryMemoryLimitExceeded |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/tests/test_integration.py | {
"start": 721,
"end": 9953
} | class ____:
"""Integration tests for all new features working together."""
@patch("llama_index.readers.confluence.html_parser.HtmlTextParser")
def test_full_feature_integration(self, mock_html_parser_class):
"""Test all new features working together in a realistic scenario."""
mock_text_maker = MagicMock()
mock_text_maker.convert.return_value = "processed text content"
mock_html_parser_class.return_value = mock_text_maker
# Setup custom parser
mock_parser = MagicMock()
mock_parser.load_data.return_value = [
Document(text="custom parsed content", doc_id="custom")
]
# Setup callbacks
def attachment_filter(
media_type: str, file_size: int, title: str
) -> tuple[bool, str]:
if "skip" in title.lower():
return False, "Filename contains 'skip'"
if file_size > 5000000: # 5MB
return False, "File too large"
return True, ""
def document_filter(page_id: str) -> bool:
return not page_id.startswith("draft_")
# Setup event tracking using new event system
events_log = []
class TestEventHandler(BaseEventHandler):
def handle(self, event):
events_log.append(
{
"class_name": event.class_name(),
"page_id": getattr(event, "page_id", None),
"attachment_name": getattr(event, "attachment_name", None),
}
)
# Create reader with all new features
with tempfile.TemporaryDirectory() as temp_dir:
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="test_token",
custom_parsers={FileType.PDF: mock_parser},
custom_folder=temp_dir,
process_attachment_callback=attachment_filter,
process_document_callback=document_filter,
fail_on_error=False,
)
# Subscribe to events using new event system
dispatcher = get_dispatcher("llama_index.readers.confluence.base")
event_handler = TestEventHandler()
dispatcher.add_event_handler(event_handler)
# Mock confluence client
reader.confluence = MagicMock()
# Test document processing
normal_page = {
"id": "normal_page",
"title": "Normal Page",
"status": "current",
"body": {"export_view": {"value": "<p>Content</p>"}},
"_links": {"webui": "/pages/123"},
}
draft_page = {
"id": "draft_page_001",
"title": "Draft Page",
"status": "draft",
"body": {"export_view": {"value": "<p>Draft content</p>"}},
"_links": {"webui": "/pages/456"},
}
# Process normal page (should succeed)
result1 = reader.process_page(normal_page, False, mock_text_maker)
assert result1 is not None
assert result1.doc_id == "normal_page"
# Process draft page (should be skipped by callback)
result2 = reader.process_page(draft_page, False, mock_text_maker)
assert result2 is None # Skipped by document callback
# Verify events were logged
assert len(events_log) >= 2 # At least page started and skipped events
# Check that we have the expected event types
event_class_names = [event["class_name"] for event in events_log]
assert "PageDataFetchStartedEvent" in event_class_names
assert "PageSkippedEvent" in event_class_names
# Verify custom folder is set correctly
assert reader.custom_folder == temp_dir
assert reader.custom_parser_manager is not None
# Verify callbacks are working
should_process, reason = reader.process_attachment_callback(
"application/pdf", 1000, "normal.pdf"
)
assert should_process is True
should_process, reason = reader.process_attachment_callback(
"application/pdf", 1000, "skip_this.pdf"
)
assert should_process is False
assert "skip" in reason.lower()
assert reader.process_document_callback("normal_page") is True
assert (
reader.process_document_callback("draft_something") is False
        )
        # Clean up
if event_handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(event_handler)
def test_event_system_with_realistic_simulation(self):
"""Test event system with a realistic event flow simulation."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki", api_token="test_token"
)
# Track different types of events separately
page_events = []
attachment_events = []
error_events = []
class PageEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(
event,
(
PageDataFetchStartedEvent,
PageDataFetchCompletedEvent,
PageSkippedEvent,
),
):
page_events.append(event)
class AttachmentEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(
event,
(
AttachmentProcessingStartedEvent,
AttachmentProcessedEvent,
AttachmentSkippedEvent,
),
):
attachment_events.append(event)
class ErrorEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(event, (PageFailedEvent, AttachmentFailedEvent)):
error_events.append(event)
# Subscribe to different event types using new event system
dispatcher = get_dispatcher("llama_index.readers.confluence.base")
page_handler = PageEventHandler()
attachment_handler = AttachmentEventHandler()
error_handler = ErrorEventHandler()
dispatcher.add_event_handler(page_handler)
dispatcher.add_event_handler(attachment_handler)
dispatcher.add_event_handler(error_handler)
# Simulate a realistic processing flow by manually emitting events
# 1. Start processing pages
dispatcher.event(TotalPagesToProcessEvent(total_pages=3))
# 2. Process first page successfully
dispatcher.event(PageDataFetchStartedEvent(page_id="page1"))
dispatcher.event(
AttachmentProcessingStartedEvent(
page_id="page1",
attachment_id="att1",
attachment_name="doc1.pdf",
attachment_type=FileType.PDF,
attachment_size=1024,
attachment_link="http://example.com/att1",
)
)
dispatcher.event(
AttachmentProcessedEvent(
page_id="page1",
attachment_id="att1",
attachment_name="doc1.pdf",
attachment_type=FileType.PDF,
attachment_size=1024,
attachment_link="http://example.com/att1",
)
)
dispatcher.event(
PageDataFetchCompletedEvent(
page_id="page1", document=Document(text="content1", doc_id="page1")
)
)
# 3. Skip second page
dispatcher.event(PageSkippedEvent(page_id="page2"))
# 4. Fail to process third page
dispatcher.event(PageDataFetchStartedEvent(page_id="page3"))
dispatcher.event(PageFailedEvent(page_id="page3", error="Network timeout"))
# Verify event counts
assert len(page_events) == 4 # 2 started, 1 completed, 1 skipped
assert len(attachment_events) == 2 # 1 started, 1 processed
assert len(error_events) == 1 # 1 page failed
# Verify event content
page_event_types = [type(event).__name__ for event in page_events]
assert "PageDataFetchStartedEvent" in page_event_types
assert "PageDataFetchCompletedEvent" in page_event_types
assert "PageSkippedEvent" in page_event_types
attachment_event_types = [type(event).__name__ for event in attachment_events]
assert "AttachmentProcessingStartedEvent" in attachment_event_types
assert "AttachmentProcessedEvent" in attachment_event_types
error_event_types = [type(event).__name__ for event in error_events]
assert "PageFailedEvent" in error_event_types
# Clean up
for handler in [page_handler, attachment_handler, error_handler]:
if handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(handler)
| TestIntegration |
python | apache__airflow | providers/edge3/tests/unit/edge3/worker_api/routes/test_jobs.py | {
"start": 1325,
"end": 3109
} | class ____:
@pytest.fixture(autouse=True)
def setup_test_cases(self, dag_maker, session: Session):
session.query(EdgeJobModel).delete()
session.commit()
@patch("airflow.stats.Stats.incr")
def test_state(self, mock_stats_incr, session: Session):
with create_session() as session:
job = EdgeJobModel(
dag_id=DAG_ID,
task_id=TASK_ID,
run_id=RUN_ID,
try_number=1,
map_index=-1,
state=TaskInstanceState.RUNNING,
queue=QUEUE,
concurrency_slots=1,
command="execute",
)
session.add(job)
session.commit()
state(
dag_id=DAG_ID,
task_id=TASK_ID,
run_id=RUN_ID,
try_number=1,
map_index=-1,
state=TaskInstanceState.RUNNING,
session=session,
)
mock_stats_incr.assert_not_called()
state(
dag_id=DAG_ID,
task_id=TASK_ID,
run_id=RUN_ID,
try_number=1,
map_index=-1,
state=TaskInstanceState.SUCCESS,
session=session,
)
mock_stats_incr.assert_called_with(
"edge_worker.ti.finish",
tags={
"dag_id": DAG_ID,
"queue": QUEUE,
"state": TaskInstanceState.SUCCESS,
"task_id": TASK_ID,
},
)
        assert mock_stats_incr.call_count == 2
assert session.query(EdgeJobModel).scalar().state == TaskInstanceState.SUCCESS
| TestJobsApiRoutes |
python | Netflix__metaflow | test/unit/spin/flows/simple_parameter_flow.py | {
"start": 106,
"end": 1006
} | class ____(FlowSpec):
alpha = Parameter("alpha", help="Learning rate", default=0.01)
@step
def start(self):
print("SimpleParameterFlow is starting.")
print(f"Parameter alpha is set to: {self.alpha}")
self.a = 10
self.b = 20
self.next(self.end)
@step
def end(self):
self.a = 50
self.x = 100
self.y = 200
print("Parameter alpha in end step is: ", self.alpha)
print(
f"Pathspec: {current.pathspec}, flow_name: {current.flow_name}, run_id: {current.run_id}"
)
print(f"step_name: {current.step_name}, task_id: {current.task_id}")
print(f"Project name: {current.project_name}, Namespace: {current.namespace}")
del self.a
del self.x
print("SimpleParameterFlow is all done.")
if __name__ == "__main__":
SimpleParameterFlow()
| SimpleParameterFlow |
python | Pylons__pyramid | tests/test_predicates.py | {
"start": 2098,
"end": 3102
} | class ____(unittest.TestCase):
def _makeOne(self, val):
from pyramid.predicates import PathInfoPredicate
return PathInfoPredicate(val, None)
def test_ctor_compilefail(self):
from pyramid.exceptions import ConfigurationError
self.assertRaises(ConfigurationError, self._makeOne, '\\')
def test___call___true(self):
inst = self._makeOne(r'/\d{2}')
request = Dummy()
request.upath_info = text_('/12')
result = inst(None, request)
self.assertTrue(result)
def test___call___false(self):
inst = self._makeOne(r'/\d{2}')
request = Dummy()
request.upath_info = text_('/n12')
result = inst(None, request)
self.assertFalse(result)
def test_text(self):
inst = self._makeOne('/')
self.assertEqual(inst.text(), 'path_info = /')
def test_phash(self):
inst = self._makeOne('/')
self.assertEqual(inst.phash(), 'path_info = /')
| TestPathInfoPredicate |
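The predicate under test reduces to a compiled regex matched against the request path. A standalone sketch of that check, with a hypothetical helper name:

import re

def make_path_predicate(pattern: str):
    try:
        compiled = re.compile(pattern)
    except re.error as exc:
        # Mirrors the ConfigurationError raised on an uncompilable pattern.
        raise ValueError(f"invalid path_info pattern: {exc}") from exc
    def predicate(path_info: str) -> bool:
        return compiled.match(path_info) is not None
    return predicate

pred = make_path_predicate(r"/\d{2}")
print(pred("/12"))   # True
print(pred("/n12"))  # False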
python | python__mypy | mypy/test/testdaemon.py | {
"start": 680,
"end": 2939
} | class ____(DataSuite):
files = daemon_files
def run_case(self, testcase: DataDrivenTestCase) -> None:
try:
test_daemon(testcase)
finally:
# Kill the daemon if it's still running.
run_cmd("dmypy kill")
def test_daemon(testcase: DataDrivenTestCase) -> None:
assert testcase.old_cwd is not None, "test was not properly set up"
for i, step in enumerate(parse_script(testcase.input)):
cmd = step[0]
expected_lines = step[1:]
assert cmd.startswith("$")
cmd = cmd[1:].strip()
cmd = cmd.replace("{python}", sys.executable)
sts, output = run_cmd(cmd)
output_lines = output.splitlines()
output_lines = normalize_error_messages(output_lines)
if sts:
output_lines.append("== Return code: %d" % sts)
assert_string_arrays_equal(
expected_lines,
output_lines,
"Command %d (%s) did not give expected output" % (i + 1, cmd),
)
def parse_script(input: list[str]) -> list[list[str]]:
"""Parse testcase.input into steps.
Each command starts with a line starting with '$'.
The first line (less '$') is sent to the shell.
The remaining lines are expected output.
"""
steps = []
step: list[str] = []
for line in input:
if line.startswith("$"):
if step:
assert step[0].startswith("$")
steps.append(step)
step = []
step.append(line)
if step:
steps.append(step)
return steps
def run_cmd(input: str) -> tuple[int, str]:
if input[1:].startswith("mypy run --") and "--show-error-codes" not in input:
input += " --hide-error-codes"
if input.startswith("dmypy "):
input = sys.executable + " -m mypy." + input
if input.startswith("mypy "):
input = sys.executable + " -m" + input
env = os.environ.copy()
env["PYTHONPATH"] = PREFIX
try:
output = subprocess.check_output(
input, shell=True, stderr=subprocess.STDOUT, text=True, cwd=test_temp_dir, env=env
)
return 0, output
except subprocess.CalledProcessError as err:
return err.returncode, err.output
| DaemonSuite |
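`parse_script` is self-contained, so a short usage example (relying on the definition above) shows the grouping it performs:

script = [
    "$ dmypy start",
    "Daemon started",
    "$ dmypy status",
]
print(parse_script(script))
# [['$ dmypy start', 'Daemon started'], ['$ dmypy status']]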
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 103386,
"end": 110768
} | class ____(TypedDict, total=False):
"""
:class:`altair.HeaderConfig` ``TypedDict`` wrapper.
Parameters
----------
format
The text format specifier for formatting number and date/time in labels of guides
(axes, legends, headers) and text marks.
If the format type is ``"number"`` (e.g., for quantitative fields), this is a D3's
`number format pattern string <https://github.com/d3/d3-format#locale_format>`__.
If the format type is ``"time"`` (e.g., for temporal fields), this is either: a)
D3's `time format pattern <https://d3js.org/d3-time-format#locale_format>`__ if you
desire to set a static time format.
b) `dynamic time format specifier object
<https://vega.github.io/vega-lite/docs/format.html#dynamic-time-format>`__ if you
desire to set a dynamic time format that uses different formats depending on the
granularity of the input date (e.g., if the date lies on a year, month, date, hour,
etc. boundary).
When used with a `custom formatType
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__, this
value will be passed as ``format`` alongside ``datum.value`` to the registered
function.
**Default value:** Derived from `numberFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for number
format and from `timeFormat
<https://vega.github.io/vega-lite/docs/config.html#format>`__ config for time
format.
formatType
The format type for labels. One of ``"number"``, ``"time"``, or a `registered custom
format type
<https://vega.github.io/vega-lite/docs/config.html#custom-format-type>`__.
**Default value:**
* ``"time"`` for temporal fields and ordinal and nominal fields with ``timeUnit``.
* ``"number"`` for quantitative fields as well as ordinal and nominal fields without
``timeUnit``.
labelAlign
Horizontal text alignment of header labels. One of ``"left"``, ``"center"``, or
``"right"``.
labelAnchor
The anchor position for placing the labels. One of ``"start"``, ``"middle"``, or
``"end"``. For example, with a label orientation of top these anchor positions map
to a left-, center-, or right-aligned label.
labelAngle
The rotation angle of the header labels.
**Default value:** ``0`` for column header, ``-90`` for row header.
labelBaseline
The vertical text baseline for the header labels. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or ``"line-bottom"``. The
``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and
``"bottom"``, but are calculated relative to the ``titleLineHeight`` rather than
``titleFontSize`` alone.
labelColor
The color of the header label, can be in hex color code or regular color name.
labelExpr
`Vega expression <https://vega.github.io/vega/docs/expressions/>`__ for customizing
labels.
**Note:** The label text and value can be assessed via the ``label`` and ``value``
properties of the header's backing ``datum`` object.
labelFont
The font of the header label.
labelFontSize
The font size of the header label, in pixels.
labelFontStyle
The font style of the header label.
labelFontWeight
The font weight of the header label.
labelLimit
The maximum length of the header label in pixels. The text value will be
automatically truncated if the rendered size exceeds the limit.
**Default value:** ``0``, indicating no limit
labelLineHeight
Line height in pixels for multi-line header labels or title text with ``"line-top"``
or ``"line-bottom"`` baseline.
labelOrient
The orientation of the header label. One of ``"top"``, ``"bottom"``, ``"left"`` or
``"right"``.
labelPadding
The padding, in pixel, between facet header's label and the plot.
**Default value:** ``10``
labels
A boolean flag indicating if labels should be included as part of the header.
**Default value:** ``true``.
orient
Shortcut for setting both labelOrient and titleOrient.
title
Set to null to disable title for the axis, legend, or header.
titleAlign
Horizontal text alignment (to the anchor) of header titles.
titleAnchor
The anchor position for placing the title. One of ``"start"``, ``"middle"``, or
``"end"``. For example, with an orientation of top these anchor positions map to a
left-, center-, or right-aligned title.
titleAngle
The rotation angle of the header title.
**Default value:** ``0``.
titleBaseline
The vertical text baseline for the header title. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or ``"line-bottom"``. The
``"line-top"`` and ``"line-bottom"`` values operate similarly to ``"top"`` and
``"bottom"``, but are calculated relative to the ``titleLineHeight`` rather than
``titleFontSize`` alone.
**Default value:** ``"middle"``
titleColor
Color of the header title, can be in hex color code or regular color name.
titleFont
Font of the header title. (e.g., ``"Helvetica Neue"``).
titleFontSize
Font size of the header title.
titleFontStyle
The font style of the header title.
titleFontWeight
Font weight of the header title. This can be either a string (e.g ``"bold"``,
``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where
``"normal"`` = ``400`` and ``"bold"`` = ``700``).
titleLimit
The maximum length of the header title in pixels. The text value will be
automatically truncated if the rendered size exceeds the limit.
**Default value:** ``0``, indicating no limit
titleLineHeight
Line height in pixels for multi-line header title text or title text with
``"line-top"`` or ``"line-bottom"`` baseline.
titleOrient
The orientation of the header title. One of ``"top"``, ``"bottom"``, ``"left"`` or
``"right"``.
titlePadding
The padding, in pixel, between facet header's title and the label.
**Default value:** ``10``
"""
format: str | TimeFormatSpecifierKwds
formatType: str
labelAlign: Align_T
labelAnchor: TitleAnchor_T
labelAngle: float
labelBaseline: TextBaseline_T
labelColor: ColorHex | ColorName_T
labelExpr: str
labelFont: str
labelFontSize: float
labelFontStyle: str
labelFontWeight: FontWeight_T
labelLimit: float
labelLineHeight: float
labelOrient: Orient_T
labelPadding: float
labels: bool
orient: Orient_T
title: None
titleAlign: Align_T
titleAnchor: TitleAnchor_T
titleAngle: float
titleBaseline: TextBaseline_T
titleColor: ColorHex | ColorName_T
titleFont: str
titleFontSize: float
titleFontStyle: str
titleFontWeight: FontWeight_T
titleLimit: float
titleLineHeight: float
titleOrient: Orient_T
titlePadding: float
| HeaderConfigKwds |
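Because the TypedDict is declared with total=False, any subset of keys type-checks. A small usage sketch; the import path is taken from this record's metadata:

from altair.vegalite.v6.schema._config import HeaderConfigKwds

header: HeaderConfigKwds = {
    "labelFontSize": 12,
    "titleColor": "#333333",
    "labelOrient": "top",
}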
python | scrapy__scrapy | tests/test_downloader_handler_twisted_http10.py | {
"start": 1432,
"end": 1763
} | class ____(HTTP10DownloadHandlerMixin, TestHttpProxyBase):
@deferred_f_from_coro_f
async def test_download_with_proxy_https_timeout(self):
pytest.skip("Not implemented")
@deferred_f_from_coro_f
async def test_download_with_proxy_without_http_scheme(self):
pytest.skip("Not implemented")
| TestHttp10Proxy |
python | vyperlang__vyper | tests/evm_backends/base_env.py | {
"start": 938,
"end": 1023
} | class ____(EvmError):
"""Exception raised when a call reverts."""
| ExecutionReverted |
python | ray-project__ray | python/ray/actor.py | {
"start": 2970,
"end": 3311
} | class ____(Generic[_Ret, _T0, _T1, _T2]):
def remote(
self,
__arg0: "Union[_T0, ObjectRef[_T0]]",
__arg1: "Union[_T1, ObjectRef[_T1]]",
__arg2: "Union[_T2, ObjectRef[_T2]]",
) -> "ObjectRef[_Ret]":
...
def bind(self, __arg0: _T0, __arg1: _T1, __arg2: _T2) -> Any:
...
| _RemoteMethod2 |
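A minimal sketch of the generic-arity pattern behind this stub: one class per argument count, parameterized by TypeVars so a type checker can relate argument and return types. Names are illustrative, and the sketch calls through directly instead of returning an ObjectRef:

from typing import Callable, Generic, TypeVar

R = TypeVar("R")
A0 = TypeVar("A0")
A1 = TypeVar("A1")

class Method2(Generic[R, A0, A1]):
    def __init__(self, fn: Callable[[A0, A1], R]):
        self._fn = fn
    def remote(self, arg0: A0, arg1: A1) -> R:
        return self._fn(arg0, arg1)  # a real system would hand back a future

m: Method2[int, int, int] = Method2(lambda a, b: a + b)
print(m.remote(2, 3))  # 5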
python | scrapy__scrapy | scrapy/utils/datatypes.py | {
"start": 666,
"end": 3233
} | class ____(dict):
__slots__ = ()
def __new__(cls, *args: Any, **kwargs: Any) -> Self:
# circular import
from scrapy.http.headers import Headers # noqa: PLC0415
if issubclass(cls, CaselessDict) and not issubclass(cls, Headers):
warnings.warn(
"scrapy.utils.datatypes.CaselessDict is deprecated,"
" please use scrapy.utils.datatypes.CaseInsensitiveDict instead",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
return super().__new__(cls, *args, **kwargs)
def __init__(
self,
seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
):
super().__init__()
if seq:
self.update(seq)
def __getitem__(self, key: AnyStr) -> Any:
return dict.__getitem__(self, self.normkey(key))
def __setitem__(self, key: AnyStr, value: Any) -> None:
dict.__setitem__(self, self.normkey(key), self.normvalue(value))
def __delitem__(self, key: AnyStr) -> None:
dict.__delitem__(self, self.normkey(key))
def __contains__(self, key: AnyStr) -> bool: # type: ignore[override]
return dict.__contains__(self, self.normkey(key))
has_key = __contains__
def __copy__(self) -> Self:
return self.__class__(self)
copy = __copy__
def normkey(self, key: AnyStr) -> AnyStr:
"""Method to normalize dictionary key access"""
return key.lower()
def normvalue(self, value: Any) -> Any:
"""Method to normalize values prior to be set"""
return value
def get(self, key: AnyStr, def_val: Any = None) -> Any:
return dict.get(self, self.normkey(key), self.normvalue(def_val))
def setdefault(self, key: AnyStr, def_val: Any = None) -> Any:
return dict.setdefault(self, self.normkey(key), self.normvalue(def_val)) # type: ignore[arg-type]
# doesn't fully implement MutableMapping.update()
def update(self, seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]]) -> None: # type: ignore[override]
seq = seq.items() if isinstance(seq, Mapping) else seq
iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)
super().update(iseq)
@classmethod
def fromkeys(cls, keys: Iterable[AnyStr], value: Any = None) -> Self: # type: ignore[override]
return cls((k, value) for k in keys) # type: ignore[misc]
def pop(self, key: AnyStr, *args: Any) -> Any:
return dict.pop(self, self.normkey(key), *args)
| CaselessDict |
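A self-contained sketch of the normkey idea, stripped of Scrapy's deprecation machinery; LowerKeyDict is a hypothetical name:

class LowerKeyDict(dict):
    def normkey(self, key):
        return key.lower()
    def __setitem__(self, key, value):
        super().__setitem__(self.normkey(key), value)
    def __getitem__(self, key):
        return super().__getitem__(self.normkey(key))
    def __contains__(self, key):
        return super().__contains__(self.normkey(key))

d = LowerKeyDict()
d["Content-Type"] = "text/html"
print(d["content-type"])    # text/html
print("CONTENT-TYPE" in d)  # True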
python | Lightning-AI__lightning | src/lightning/fabric/strategies/launchers/launcher.py | {
"start": 652,
"end": 1421
} | class ____(ABC):
r"""Abstract base class for all Launchers.
Launchers are responsible for the creation and instrumentation of new processes so that the
:class:`~lightning.fabric.strategies.strategy.Strategy` can set up communication between all them.
Subclass this class and override any of the relevant methods to provide a custom implementation depending on
cluster environment, hardware, strategy, etc.
"""
@property
@abstractmethod
def is_interactive_compatible(self) -> bool:
"""Returns whether this launcher can work in interactive environments such as Jupyter notebooks."""
@abstractmethod
def launch(self, function: Callable, *args: Any, **kwargs: Any) -> Any:
"""Launches the processes."""
| _Launcher |
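A minimal concrete subclass sketch of the contract, implementing the abstract property and launch against the _Launcher ABC defined above; InProcessLauncher is hypothetical:

class InProcessLauncher(_Launcher):
    @property
    def is_interactive_compatible(self) -> bool:
        return True  # runs in the current process, so notebooks are fine

    def launch(self, function, *args, **kwargs):
        return function(*args, **kwargs)  # no new processes; just call through

print(InProcessLauncher().launch(lambda x: x * 2, 21))  # 42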
python | huggingface__transformers | src/transformers/models/speecht5/modeling_speecht5.py | {
"start": 51950,
"end": 56808
} | class ____(SpeechT5PreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* layers. Each layer is a [`SpeechT5EncoderLayer`].
"""
def __init__(self, config: SpeechT5Config):
super().__init__(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layerdrop = config.encoder_layerdrop
self.layers = nn.ModuleList([SpeechT5EncoderLayer(config) for _ in range(config.encoder_layers)])
self.embed_positions = SpeechT5RelativePositionalEncoding(
config.hidden_size // config.encoder_attention_heads, config.encoder_max_relative_position
)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
hidden_states: torch.FloatTensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, feature_size)`):
Features extracted from the speech or text input by the encoder prenet.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in
`[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
position_bias = self.embed_positions(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
skip_the_layer = False
if self.training:
dropout_probability = torch.rand([])
skip_the_layer = dropout_probability < self.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
layer_outputs = encoder_layer(
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
| SpeechT5Encoder |
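The per-layer skip in the loop above is LayerDrop; a distilled sketch of that training-time control flow:

import torch

def forward_with_layerdrop(hidden, layers, layerdrop: float, training: bool):
    for layer in layers:
        if training and torch.rand([]) < layerdrop:
            continue  # stochastically drop the whole layer during training
        hidden = layer(hidden)
    return hidden

layers = [torch.nn.Linear(4, 4) for _ in range(3)]
out = forward_with_layerdrop(torch.randn(2, 4), layers, layerdrop=0.5, training=True)
print(out.shape)  # torch.Size([2, 4])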
python | automl__auto-sklearn | autosklearn/pipeline/components/data_preprocessing/text_feature_reduction/truncated_svd.py | {
"start": 519,
"end": 3468
} | class ____(AutoSklearnPreprocessingAlgorithm):
"""
Reduces the features created by a bag of words encoding
"""
def __init__(
self,
n_components: Optional[int] = None,
random_state: Optional[Union[int, np.random.RandomState]] = None,
) -> None:
self.n_components = n_components
self.random_state = random_state
def fit(
self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE] = None
) -> "TextFeatureReduction":
if X.shape[1] > self.n_components:
self.preprocessor = TruncatedSVD(
n_components=self.n_components, random_state=self.random_state
)
self.preprocessor.fit(X)
elif X.shape[1] <= self.n_components and X.shape[1] != 1:
self.preprocessor = TruncatedSVD(
n_components=X.shape[1] - 1, random_state=self.random_state
)
self.preprocessor.fit(X)
elif X.shape[1] == 1:
self.preprocessor = "passthrough"
else:
raise ValueError(
"The text embedding consists only of a single dimension.\n"
"Are you sure that your text data is necessary?"
)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
if self.preprocessor is None:
raise NotImplementedError()
elif self.preprocessor == "passthrough":
return X
else:
return self.preprocessor.transform(X)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
"shortname": "TextFeatureReduction",
"name": "TextFeatureReduction",
"handles_missing_values": True,
"handles_nominal_values": True,
"handles_numerical_features": True,
"prefers_data_scaled": False,
"prefers_data_normalized": False,
"handles_regression": True,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": True,
"is_deterministic": True,
"handles_sparse": True,
"handles_dense": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (INPUT,),
"preferred_dtype": None,
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None,
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None,
) -> ConfigurationSpace:
cs = ConfigurationSpace()
cs.add_hyperparameter(
CSH.UniformIntegerHyperparameter(
"n_components", lower=1, upper=10000, default_value=100, log=True
)
)
return cs
| TextFeatureReduction |
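The fit logic amounts to clamping n_components to the input width (TruncatedSVD needs strictly fewer components than features) and passing single-column inputs through. A compact sketch using scikit-learn directly:

import numpy as np
from sklearn.decomposition import TruncatedSVD

def fit_reducer(X: np.ndarray, n_components: int):
    if X.shape[1] == 1:
        return "passthrough"                   # nothing to reduce
    k = min(n_components, X.shape[1] - 1)      # clamp to n_features - 1
    return TruncatedSVD(n_components=k, random_state=0).fit(X)

X = np.random.rand(20, 5)
reducer = fit_reducer(X, n_components=100)
print(reducer.transform(X).shape)  # (20, 4)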
python | falconry__falcon | examples/quote.py | {
"start": 16,
"end": 368
} | class ____:
def on_get(self, req: falcon.Request, resp: falcon.Response) -> None:
"""Handle GET requests."""
resp.media = {
'quote': "I've always been more interested in the future than in the past.",
'author': 'Grace Hopper',
}
app = falcon.App()
app.add_route('/quote', QuoteResource())
| QuoteResource |
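The app above can be exercised without a server through Falcon's test client; a sketch assuming a recent Falcon release where falcon.testing.TestClient is available, with app referring to the instance created above:

from falcon import testing

client = testing.TestClient(app)
result = client.simulate_get('/quote')
print(result.status, result.json['author'])  # e.g. "200 OK Grace Hopper"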
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/pool/impl.py | {
"start": 1071,
"end": 8590
} | class ____(Pool):
"""A :class:`_pool.Pool`
that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`_engine.Engine` objects other than SQLite with a ``:memory:``
database.
The :class:`.QueuePool` class **is not compatible** with asyncio and
:func:`_asyncio.create_async_engine`. The
:class:`.AsyncAdaptedQueuePool` class is used automatically when
using :func:`_asyncio.create_async_engine`, if no other kind of pool
is specified.
.. seealso::
:class:`.AsyncAdaptedQueuePool`
"""
_is_asyncio = False
_queue_class: Type[sqla_queue.QueueCommon[ConnectionPoolEntry]] = (
sqla_queue.Queue
)
_pool: sqla_queue.QueueCommon[ConnectionPoolEntry]
def __init__(
self,
creator: Union[_CreatorFnType, _CreatorWRecFnType],
pool_size: int = 5,
max_overflow: int = 10,
timeout: float = 30.0,
use_lifo: bool = False,
**kw: Any,
):
r"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object, same as that of :paramref:`_pool.Pool.creator`.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
`max_overflow`, and the total number of "sleeping"
connections the pool will allow is pool_size. `max_overflow`
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.0. This can be a float
but is subject to the limitations of Python time functions which
may not be reliable in the tens of milliseconds.
:param use_lifo: use LIFO (last-in-first-out) when retrieving
connections instead of FIFO (first-in-first-out). Using LIFO, a
server-side timeout scheme can reduce the number of connections used
during non-peak periods of use. When planning for server-side
timeouts, ensure that a recycle or pre-ping strategy is in use to
gracefully handle stale connections.
.. seealso::
:ref:`pool_use_lifo`
:ref:`pool_disconnects`
:param \**kw: Other keyword arguments including
:paramref:`_pool.Pool.recycle`, :paramref:`_pool.Pool.echo`,
:paramref:`_pool.Pool.reset_on_return` and others are passed to the
:class:`_pool.Pool` constructor.
"""
Pool.__init__(self, creator, **kw)
self._pool = self._queue_class(pool_size, use_lifo=use_lifo)
self._overflow = 0 - pool_size
self._max_overflow = -1 if pool_size == 0 else max_overflow
self._timeout = timeout
self._overflow_lock = threading.Lock()
def _do_return_conn(self, record: ConnectionPoolEntry) -> None:
try:
self._pool.put(record, False)
except sqla_queue.Full:
try:
record.close()
finally:
self._dec_overflow()
def _do_get(self) -> ConnectionPoolEntry:
use_overflow = self._max_overflow > -1
wait = use_overflow and self._overflow >= self._max_overflow
try:
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
# don't do things inside of "except Empty", because when we say
# we timed out or can't connect and raise, Python 3 tells
# people the real error is queue.Empty which it isn't.
pass
if use_overflow and self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %0.2f"
% (self.size(), self.overflow(), self._timeout),
code="3o7r",
)
if self._inc_overflow():
try:
return self._create_connection()
except:
with util.safe_reraise():
self._dec_overflow()
raise
else:
return self._do_get()
def _inc_overflow(self) -> bool:
if self._max_overflow == -1:
self._overflow += 1
return True
with self._overflow_lock:
if self._overflow < self._max_overflow:
self._overflow += 1
return True
else:
return False
def _dec_overflow(self) -> Literal[True]:
if self._max_overflow == -1:
self._overflow -= 1
return True
with self._overflow_lock:
self._overflow -= 1
return True
def recreate(self) -> QueuePool:
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
pre_ping=self._pre_ping,
use_lifo=self._pool.use_lifo,
timeout=self._timeout,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def dispose(self) -> None:
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self) -> str:
return (
"Pool size: %d Connections in pool: %d "
"Current Overflow: %d Current Checked out "
"connections: %d"
% (
self.size(),
self.checkedin(),
self.overflow(),
self.checkedout(),
)
)
def size(self) -> int:
return self._pool.maxsize
def timeout(self) -> float:
return self._timeout
def checkedin(self) -> int:
return self._pool.qsize()
def overflow(self) -> int:
return self._overflow if self._pool.maxsize else 0
def checkedout(self) -> int:
return self._pool.maxsize - self._pool.qsize() + self._overflow
| QueuePool |
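A minimal sketch of how the parameters documented above are usually supplied: through create_engine, which constructs the QueuePool internally. The DSN is a placeholder, not from the source.

from sqlalchemy import create_engine

engine = create_engine(
    "postgresql://user:pass@localhost/db",  # hypothetical DSN
    pool_size=5,         # connections kept persistently in the pool
    max_overflow=10,     # so at most 5 + 10 concurrent connections
    pool_timeout=30,     # seconds to wait for a checkout
    pool_use_lifo=True,  # LIFO retrieval, per the use_lifo note above
)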
python | huggingface__transformers | src/transformers/models/roc_bert/tokenization_roc_bert.py | {
"start": 50475,
"end": 57219
} | class ____:
"""
Constructs a RoCBertBasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
Args:
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
never_split (`Iterable`, *optional*):
Collection of tokens which will never be split during tokenization. Only has an effect when
`do_basic_tokenize=True`
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters.
This should likely be deactivated for Japanese (see this
[issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original BERT).
do_split_on_punc (`bool`, *optional*, defaults to `True`):
In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
the full context of the words, such as contractions.
"""
def __init__(
self,
do_lower_case=True,
never_split=None,
tokenize_chinese_chars=True,
strip_accents=None,
do_split_on_punc=True,
):
if never_split is None:
never_split = []
self.do_lower_case = do_lower_case
self.never_split = set(never_split)
self.tokenize_chinese_chars = tokenize_chinese_chars
self.strip_accents = strip_accents
self.do_split_on_punc = do_split_on_punc
def tokenize(self, text, never_split=None):
"""
Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
Args:
never_split (`List[str]`, *optional*)
Kept for backward compatibility purposes. Now implemented directly at the base class level (see
[`PreTrainedTokenizer.tokenize`]) List of token not to split.
"""
# union() returns a new set by concatenating the two sets.
never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
        # words in the English Wikipedia).
if self.tokenize_chinese_chars:
text = self._tokenize_chinese_chars(text)
# prevents treating the same character with different unicode codepoints as different characters
unicode_normalized_text = unicodedata.normalize("NFC", text)
orig_tokens = whitespace_tokenize(unicode_normalized_text)
split_tokens = []
for token in orig_tokens:
if token not in never_split:
if self.do_lower_case:
token = token.lower()
if self.strip_accents is not False:
token = self._run_strip_accents(token)
elif self.strip_accents:
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token, never_split))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text, never_split=None):
"""Splits punctuation on a piece of text."""
if not self.do_split_on_punc or (never_split is not None and text in never_split):
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF)
or (cp >= 0x20000 and cp <= 0x2A6DF)
or (cp >= 0x2A700 and cp <= 0x2B73F)
or (cp >= 0x2B740 and cp <= 0x2B81F)
or (cp >= 0x2B820 and cp <= 0x2CEAF)
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F)
):
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xFFFD or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
| RoCBertBasicTokenizer |
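A minimal sketch exercising the tokenizer defined above with its defaults; it assumes the module-level helpers the class calls (whitespace_tokenize, _is_punctuation, and friends), and the input string is illustrative.

tok = RoCBertBasicTokenizer(do_lower_case=True)
print(tok.tokenize("Hello, World! 你好"))
# expected: ['hello', ',', 'world', '!', '你', '好'] -- lowercased,
# punctuation split off, and each CJK character padded into its own token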
python | scipy__scipy | scipy/stats/_page_trend_test.py | {
"start": 238,
"end": 16782
} | class ____:
statistic: float
pvalue: float
method: str
@xp_capabilities(np_only=True)
def page_trend_test(data, ranked=False, predicted_ranks=None, method='auto'):
r"""
Perform Page's Test, a measure of trend in observations between treatments.
Page's Test (also known as Page's :math:`L` test) is useful when:
* there are :math:`n \geq 3` treatments,
* :math:`m \geq 2` subjects are observed for each treatment, and
* the observations are hypothesized to have a particular order.
Specifically, the test considers the null hypothesis that
.. math::
m_1 = m_2 = m_3 \cdots = m_n,
where :math:`m_j` is the mean of the observed quantity under treatment
:math:`j`, against the alternative hypothesis that
.. math::
m_1 \leq m_2 \leq m_3 \leq \cdots \leq m_n,
where at least one inequality is strict.
As noted by [4]_, Page's :math:`L` test has greater statistical power than
the Friedman test against the alternative that there is a difference in
trend, as Friedman's test only considers a difference in the means of the
observations without considering their order. Whereas Spearman :math:`\rho`
considers the correlation between the ranked observations of two variables
(e.g. the airspeed velocity of a swallow vs. the weight of the coconut it
carries), Page's :math:`L` is concerned with a trend in an observation
(e.g. the airspeed velocity of a swallow) across several distinct
treatments (e.g. carrying each of five coconuts of different weight) even
as the observation is repeated with multiple subjects (e.g. one European
swallow and one African swallow).
Parameters
----------
data : array-like
A :math:`m \times n` array; the element in row :math:`i` and
column :math:`j` is the observation corresponding with subject
:math:`i` and treatment :math:`j`. By default, the columns are
assumed to be arranged in order of increasing predicted mean.
ranked : boolean, optional
By default, `data` is assumed to be observations rather than ranks;
it will be ranked with `scipy.stats.rankdata` along ``axis=1``. If
`data` is provided in the form of ranks, pass argument ``True``.
predicted_ranks : array-like, optional
The predicted ranks of the column means. If not specified,
the columns are assumed to be arranged in order of increasing
predicted mean, so the default `predicted_ranks` are
:math:`[1, 2, \dots, n-1, n]`.
method : {'auto', 'asymptotic', 'exact'}, optional
Selects the method used to calculate the *p*-value. The following
options are available.
* 'auto': selects between 'exact' and 'asymptotic' to
achieve reasonably accurate results in reasonable time (default)
* 'asymptotic': compares the standardized test statistic against
the normal distribution
* 'exact': computes the exact *p*-value by comparing the observed
:math:`L` statistic against those realized by all possible
permutations of ranks (under the null hypothesis that each
permutation is equally likely)
Returns
-------
res : PageTrendTestResult
An object containing attributes:
statistic : float
Page's :math:`L` test statistic.
pvalue : float
The associated *p*-value
method : {'asymptotic', 'exact'}
The method used to compute the *p*-value
See Also
--------
rankdata, friedmanchisquare, spearmanr
Notes
-----
As noted in [1]_, "the :math:`n` 'treatments' could just as well represent
:math:`n` objects or events or performances or persons or trials ranked."
Similarly, the :math:`m` 'subjects' could equally stand for :math:`m`
"groupings by ability or some other control variable, or judges doing
the ranking, or random replications of some other sort."
The procedure for calculating the :math:`L` statistic, adapted from
[1]_, is:
1. "Predetermine with careful logic the appropriate hypotheses
concerning the predicted ordering of the experimental results.
If no reasonable basis for ordering any treatments is known, the
:math:`L` test is not appropriate."
2. "As in other experiments, determine at what level of confidence
you will reject the null hypothesis that there is no agreement of
experimental results with the monotonic hypothesis."
3. "Cast the experimental material into a two-way table of :math:`n`
columns (treatments, objects ranked, conditions) and :math:`m`
rows (subjects, replication groups, levels of control variables)."
4. "When experimental observations are recorded, rank them across each
row", e.g. ``ranks = scipy.stats.rankdata(data, axis=1)``.
5. "Add the ranks in each column", e.g.
``colsums = np.sum(ranks, axis=0)``.
6. "Multiply each sum of ranks by the predicted rank for that same
column", e.g. ``products = predicted_ranks * colsums``.
7. "Sum all such products", e.g. ``L = products.sum()``.
[1]_ continues by suggesting use of the standardized statistic
.. math::
\chi_L^2 = \frac{\left[12L-3mn(n+1)^2\right]^2}{mn^2(n^2-1)(n+1)}
"which is distributed approximately as chi-square with 1 degree of
freedom. The ordinary use of :math:`\chi^2` tables would be
equivalent to a two-sided test of agreement. If a one-sided test
is desired, *as will almost always be the case*, the probability
discovered in the chi-square table should be *halved*."
However, this standardized statistic does not distinguish between the
observed values being well correlated with the predicted ranks and being
_anti_-correlated with the predicted ranks. Instead, we follow [2]_
and calculate the standardized statistic
.. math::
\Lambda = \frac{L - E_0}{\sqrt{V_0}},
where :math:`E_0 = \frac{1}{4} mn(n+1)^2` and
:math:`V_0 = \frac{1}{144} mn^2(n+1)(n^2-1)`, "which is asymptotically
normal under the null hypothesis".
The *p*-value for ``method='exact'`` is generated by comparing the observed
value of :math:`L` against the :math:`L` values generated for all
:math:`(n!)^m` possible permutations of ranks. The calculation is performed
    using the recursive method of [5]_.
The *p*-values are not adjusted for the possibility of ties. When
ties are present, the reported ``'exact'`` *p*-values may be somewhat
larger (i.e. more conservative) than the true *p*-value [2]_. The
``'asymptotic'``` *p*-values, however, tend to be smaller (i.e. less
conservative) than the ``'exact'`` *p*-values.
References
----------
.. [1] Ellis Batten Page, "Ordered hypotheses for multiple treatments:
a significant test for linear ranks", *Journal of the American
Statistical Association* 58(301), p. 216--230, 1963.
.. [2] Markus Neuhauser, *Nonparametric Statistical Test: A computational
approach*, CRC Press, p. 150--152, 2012.
.. [3] Statext LLC, "Page's L Trend Test - Easy Statistics", *Statext -
Statistics Study*, https://www.statext.com/practice/PageTrendTest03.php,
Accessed July 12, 2020.
.. [4] "Page's Trend Test", *Wikipedia*, WikimediaFoundation,
https://en.wikipedia.org/wiki/Page%27s_trend_test,
Accessed July 12, 2020.
.. [5] Robert E. Odeh, "The exact distribution of Page's L-statistic in
the two-way layout", *Communications in Statistics - Simulation and
Computation*, 6(1), p. 49--61, 1977.
Examples
--------
We use the example from [3]_: 10 students are asked to rate three
teaching methods - tutorial, lecture, and seminar - on a scale of 1-5,
with 1 being the lowest and 5 being the highest. We have decided that
a confidence level of 99% is required to reject the null hypothesis in
favor of our alternative: that the seminar will have the highest ratings
and the tutorial will have the lowest. Initially, the data have been
tabulated with each row representing an individual student's ratings of
the three methods in the following order: tutorial, lecture, seminar.
>>> table = [[3, 4, 3],
... [2, 2, 4],
... [3, 3, 5],
... [1, 3, 2],
... [2, 3, 2],
... [2, 4, 5],
... [1, 2, 4],
... [3, 4, 4],
... [2, 4, 5],
... [1, 3, 4]]
Because the tutorial is hypothesized to have the lowest ratings, the
column corresponding with tutorial rankings should be first; the seminar
is hypothesized to have the highest ratings, so its column should be last.
Since the columns are already arranged in this order of increasing
predicted mean, we can pass the table directly into `page_trend_test`.
>>> from scipy.stats import page_trend_test
>>> res = page_trend_test(table)
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
method='exact')
This *p*-value indicates that there is a 0.1819% chance that
the :math:`L` statistic would reach such an extreme value under the null
hypothesis. Because 0.1819% is less than 1%, we have evidence to reject
the null hypothesis in favor of our alternative at a 99% confidence level.
The value of the :math:`L` statistic is 133.5. To check this manually,
we rank the data such that high scores correspond with high ranks, settling
ties with an average rank:
>>> from scipy.stats import rankdata
>>> ranks = rankdata(table, axis=1)
>>> ranks
array([[1.5, 3. , 1.5],
[1.5, 1.5, 3. ],
[1.5, 1.5, 3. ],
[1. , 3. , 2. ],
[1.5, 3. , 1.5],
[1. , 2. , 3. ],
[1. , 2. , 3. ],
[1. , 2.5, 2.5],
[1. , 2. , 3. ],
[1. , 2. , 3. ]])
We add the ranks within each column, multiply the sums by the
predicted ranks, and sum the products.
>>> import numpy as np
>>> m, n = ranks.shape
>>> predicted_ranks = np.arange(1, n+1)
>>> L = (predicted_ranks * np.sum(ranks, axis=0)).sum()
>>> res.statistic == L
True
As presented in [3]_, the asymptotic approximation of the *p*-value is the
survival function of the normal distribution evaluated at the standardized
test statistic:
>>> from scipy.stats import norm
>>> E0 = (m*n*(n+1)**2)/4
>>> V0 = (m*n**2*(n+1)*(n**2-1))/144
>>> Lambda = (L-E0)/np.sqrt(V0)
>>> p = norm.sf(Lambda)
>>> p
0.0012693433690751756
This does not precisely match the *p*-value reported by `page_trend_test`
above. The asymptotic distribution is not very accurate, nor conservative,
for :math:`m \leq 12` and :math:`n \leq 8`, so `page_trend_test` chose to
use ``method='exact'`` based on the dimensions of the table and the
recommendations in Page's original paper [1]_. To override
`page_trend_test`'s choice, provide the `method` argument.
>>> res = page_trend_test(table, method="asymptotic")
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0012693433690751756,
method='asymptotic')
If the data are already ranked, we can pass in the ``ranks`` instead of
the ``table`` to save computation time.
>>> res = page_trend_test(ranks, # ranks of data
... ranked=True, # data is already ranked
... )
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
method='exact')
Suppose the raw data had been tabulated in an order different from the
order of predicted means, say lecture, seminar, tutorial.
>>> table = np.asarray(table)[:, [1, 2, 0]]
Since the arrangement of this table is not consistent with the assumed
ordering, we can either rearrange the table or provide the
`predicted_ranks`. Remembering that the lecture is predicted
to have the middle rank, the seminar the highest, and tutorial the lowest,
we pass:
>>> res = page_trend_test(table, # data as originally tabulated
... predicted_ranks=[2, 3, 1], # our predicted order
... )
>>> res
PageTrendTestResult(statistic=133.5, pvalue=0.0018191161948127822,
method='exact')
"""
if not hasattr(_pagel_state, 'state'):
_pagel_state.state = _PageL()
# Possible values of the method parameter and the corresponding function
# used to evaluate the p value
methods = {"asymptotic": _l_p_asymptotic,
"exact": _l_p_exact,
"auto": None}
if method not in methods:
raise ValueError(f"`method` must be in {set(methods)}")
ranks = np.asarray(data)
if ranks.ndim != 2: # TODO: relax this to accept 3d arrays?
raise ValueError("`data` must be a 2d array.")
m, n = ranks.shape
if m < 2 or n < 3:
raise ValueError("Page's L is only appropriate for data with two "
"or more rows and three or more columns.")
if np.any(np.isnan(data)):
raise ValueError("`data` contains NaNs, which cannot be ranked "
"meaningfully")
# ensure NumPy array and rank the data if it's not already ranked
if ranked:
# Only a basic check on whether data is ranked. Checking that the data
# is properly ranked could take as much time as ranking it.
if not (ranks.min() >= 1 and ranks.max() <= ranks.shape[1]):
raise ValueError("`data` is not properly ranked. Rank the data or "
"pass `ranked=False`.")
else:
ranks = scipy.stats.rankdata(data, axis=-1)
# generate predicted ranks if not provided, ensure valid NumPy array
if predicted_ranks is None:
predicted_ranks = np.arange(1, n+1)
else:
predicted_ranks = np.asarray(predicted_ranks)
if (predicted_ranks.ndim < 1 or
(set(predicted_ranks) != set(range(1, n+1)) or
len(predicted_ranks) != n)):
raise ValueError(f"`predicted_ranks` must include each integer "
f"from 1 to {n} (the number of columns in "
f"`data`) exactly once.")
if not isinstance(ranked, bool):
raise TypeError("`ranked` must be boolean.")
# Calculate the L statistic
L = _l_vectorized(ranks, predicted_ranks)
# Calculate the p-value
if method == "auto":
method = _choose_method(ranks)
p_fun = methods[method] # get the function corresponding with the method
p = p_fun(L, m, n)
page_result = PageTrendTestResult(statistic=L, pvalue=p, method=method)
return page_result
def _choose_method(ranks):
'''Choose method for computing p-value automatically'''
m, n = ranks.shape
if n > 8 or (m > 12 and n > 3) or m > 20: # as in [1], [4]
method = "asymptotic"
else:
method = "exact"
return method
def _l_vectorized(ranks, predicted_ranks):
'''Calculate's Page's L statistic for each page of a 3d array'''
colsums = ranks.sum(axis=-2, keepdims=True)
products = predicted_ranks * colsums
Ls = products.sum(axis=-1)
Ls = Ls[0] if Ls.size == 1 else Ls.ravel()
return Ls
def _l_p_asymptotic(L, m, n):
'''Calculate the p-value of Page's L from the asymptotic distribution'''
# Using [1] as a reference, the asymptotic p-value would be calculated as:
# chi_L = (12*L - 3*m*n*(n+1)**2)**2/(m*n**2*(n**2-1)*(n+1))
# p = chi2.sf(chi_L, df=1, loc=0, scale=1)/2
# but this is insensitive to the direction of the hypothesized ranking
# See [2] page 151
E0 = (m*n*(n+1)**2)/4
V0 = (m*n**2*(n+1)*(n**2-1))/144
Lambda = (L-E0)/np.sqrt(V0)
# This is a one-sided "greater" test - calculate the probability that the
# L statistic under H0 would be greater than the observed L statistic
p = norm.sf(Lambda)
return p
def _l_p_exact(L, m, n):
'''Calculate the p-value of Page's L exactly'''
# [1] uses m, n; [5] uses n, k.
# Switch convention here because exact calculation code references [5].
L, n, k = int(L), int(m), int(n)
_pagel_state.state.set_k(k)
return _pagel_state.state.sf(L, n)
| PageTrendTestResult |
python | getsentry__sentry | src/sentry/relay/types/rule_condition.py | {
"start": 495,
"end": 609
} | class ____(TypedDict):
"""Options specific to the equality condition"""
ignoreCase: bool
| EqConditionOptions |
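A minimal sketch: at runtime a TypedDict is an ordinary dict, so the options object is just a literal that type checkers validate against the declared shape.

opts: EqConditionOptions = {"ignoreCase": True}
assert opts["ignoreCase"] is True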
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingLiteralMember1.py | {
"start": 2896,
"end": 2928
} | class ____:
type: Literal[1]
| H |
python | mwaskom__seaborn | seaborn/_core/moves.py | {
"start": 4367,
"end": 5650
} | class ____(Move):
"""
Displacement of overlapping bar or area marks along the value axis.
Examples
--------
.. include:: ../docstrings/objects.Stack.rst
"""
# TODO center? (or should this be a different move, eg. Stream())
def _stack(self, df, orient):
# TODO should stack do something with ymin/ymax style marks?
# Should there be an upstream conversion to baseline/height parameterization?
if df["baseline"].nunique() > 1:
err = "Stack move cannot be used when baselines are already heterogeneous"
raise RuntimeError(err)
other = {"x": "y", "y": "x"}[orient]
stacked_lengths = (df[other] - df["baseline"]).dropna().cumsum()
offsets = stacked_lengths.shift(1).fillna(0)
df[other] = stacked_lengths
df["baseline"] = df["baseline"] + offsets
return df
def __call__(
self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],
) -> DataFrame:
# TODO where to ensure that other semantic variables are sorted properly?
# TODO why are we not using the passed in groupby here?
groupers = ["col", "row", orient]
return GroupBy(groupers).apply(data, self._stack, orient)
@dataclass
| Stack |
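A minimal sketch of the Stack move in seaborn's objects interface; the dataset and variable names are illustrative.

import seaborn as sns
import seaborn.objects as so

tips = sns.load_dataset("tips")
(
    so.Plot(tips, x="day", y="total_bill", color="sex")
    .add(so.Bar(), so.Agg("sum"), so.Stack())  # bars stacked along the value axis
    .show()
)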
python | sympy__sympy | sympy/physics/quantum/spin.py | {
"start": 57182,
"end": 72957
} | class ____(CoupledSpinState, Bra):
"""Coupled eigenbra of Jz.
See the JzKetCoupled for the usage of coupled spin eigenstates.
See Also
========
JzKetCoupled: Usage of coupled spin states
"""
@classmethod
def dual_class(self):
return JzKetCoupled
@classmethod
def uncoupled_class(self):
return JzBra
#-----------------------------------------------------------------------------
# Coupling/uncoupling
#-----------------------------------------------------------------------------
def couple(expr, jcoupling_list=None):
""" Couple a tensor product of spin states
This function can be used to couple an uncoupled tensor product of spin
states. All of the eigenstates to be coupled must be of the same class. It
will return a linear combination of eigenstates that are subclasses of
CoupledSpinState determined by Clebsch-Gordan angular momentum coupling
coefficients.
Parameters
==========
expr : Expr
An expression involving TensorProducts of spin states to be coupled.
Each state must be a subclass of SpinState and they all must be the
same class.
jcoupling_list : list or tuple
Elements of this list are sub-lists of length 2 specifying the order of
the coupling of the spin spaces. The length of this must be N-1, where N
is the number of states in the tensor product to be coupled. The
elements of this sublist are the same as the first two elements of each
sublist in the ``jcoupling`` parameter defined for JzKetCoupled. If this
parameter is not specified, the default value is taken, which couples
the first and second product basis spaces, then couples this new coupled
space to the third product space, etc
Examples
========
Couple a tensor product of numerical states for two spaces:
>>> from sympy.physics.quantum.spin import JzKet, couple
>>> from sympy.physics.quantum.tensorproduct import TensorProduct
>>> couple(TensorProduct(JzKet(1,0), JzKet(1,1)))
-sqrt(2)*|1,1,j1=1,j2=1>/2 + sqrt(2)*|2,1,j1=1,j2=1>/2
Numerical coupling of three spaces using the default coupling method, i.e.
first and second spaces couple, then this couples to the third space:
>>> couple(TensorProduct(JzKet(1,1), JzKet(1,1), JzKet(1,0)))
sqrt(6)*|2,2,j1=1,j2=1,j3=1,j(1,2)=2>/3 + sqrt(3)*|3,2,j1=1,j2=1,j3=1,j(1,2)=2>/3
Perform this same coupling, but we define the coupling to first couple
the first and third spaces:
>>> couple(TensorProduct(JzKet(1,1), JzKet(1,1), JzKet(1,0)), ((1,3),(1,2)) )
sqrt(2)*|2,2,j1=1,j2=1,j3=1,j(1,3)=1>/2 - sqrt(6)*|2,2,j1=1,j2=1,j3=1,j(1,3)=2>/6 + sqrt(3)*|3,2,j1=1,j2=1,j3=1,j(1,3)=2>/3
Couple a tensor product of symbolic states:
>>> from sympy import symbols
>>> j1,m1,j2,m2 = symbols('j1 m1 j2 m2')
>>> couple(TensorProduct(JzKet(j1,m1), JzKet(j2,m2)))
Sum(CG(j1, m1, j2, m2, j, m1 + m2)*|j,m1 + m2,j1=j1,j2=j2>, (j, m1 + m2, j1 + j2))
"""
a = expr.atoms(TensorProduct)
for tp in a:
# Allow other tensor products to be in expression
if not all(isinstance(state, SpinState) for state in tp.args):
continue
# If tensor product has all spin states, raise error for invalid tensor product state
if not all(state.__class__ is tp.args[0].__class__ for state in tp.args):
raise TypeError('All states must be the same basis')
expr = expr.subs(tp, _couple(tp, jcoupling_list))
return expr
def _couple(tp, jcoupling_list):
states = tp.args
coupled_evect = states[0].coupled_class()
# Define default coupling if none is specified
if jcoupling_list is None:
jcoupling_list = []
for n in range(1, len(states)):
jcoupling_list.append( (1, n + 1) )
# Check jcoupling_list valid
if not len(jcoupling_list) == len(states) - 1:
raise TypeError('jcoupling_list must be length %d, got %d' %
(len(states) - 1, len(jcoupling_list)))
if not all( len(coupling) == 2 for coupling in jcoupling_list):
raise ValueError('Each coupling must define 2 spaces')
if any(n1 == n2 for n1, n2 in jcoupling_list):
raise ValueError('Spin spaces cannot couple to themselves')
if all(sympify(n1).is_number and sympify(n2).is_number for n1, n2 in jcoupling_list):
j_test = [0]*len(states)
for n1, n2 in jcoupling_list:
if j_test[n1 - 1] == -1 or j_test[n2 - 1] == -1:
raise ValueError('Spaces coupling j_n\'s are referenced by smallest n value')
j_test[max(n1, n2) - 1] = -1
# j values of states to be coupled together
jn = [state.j for state in states]
mn = [state.m for state in states]
# Create coupling_list, which defines all the couplings between all
# the spaces from jcoupling_list
coupling_list = []
n_list = [ [i + 1] for i in range(len(states)) ]
for j_coupling in jcoupling_list:
# Least n for all j_n which is coupled as first and second spaces
n1, n2 = j_coupling
# List of all n's coupled in first and second spaces
j1_n = list(n_list[n1 - 1])
j2_n = list(n_list[n2 - 1])
coupling_list.append( (j1_n, j2_n) )
# Set new j_n to be coupling of all j_n in both first and second spaces
n_list[ min(n1, n2) - 1 ] = sorted(j1_n + j2_n)
if all(state.j.is_number and state.m.is_number for state in states):
# Numerical coupling
# Iterate over difference between maximum possible j value of each coupling and the actual value
diff_max = [ Add( *[ jn[n - 1] - mn[n - 1] for n in coupling[0] +
coupling[1] ] ) for coupling in coupling_list ]
result = []
for diff in range(diff_max[-1] + 1):
# Determine available configurations
n = len(coupling_list)
tot = binomial(diff + n - 1, diff)
for config_num in range(tot):
diff_list = _confignum_to_difflist(config_num, diff, n)
# Skip the configuration if non-physical
# This is a lazy check for physical states given the loose restrictions of diff_max
if any(d > m for d, m in zip(diff_list, diff_max)):
continue
# Determine term
cg_terms = []
coupled_j = list(jn)
jcoupling = []
for (j1_n, j2_n), coupling_diff in zip(coupling_list, diff_list):
j1 = coupled_j[ min(j1_n) - 1 ]
j2 = coupled_j[ min(j2_n) - 1 ]
j3 = j1 + j2 - coupling_diff
coupled_j[ min(j1_n + j2_n) - 1 ] = j3
m1 = Add( *[ mn[x - 1] for x in j1_n] )
m2 = Add( *[ mn[x - 1] for x in j2_n] )
m3 = m1 + m2
cg_terms.append( (j1, m1, j2, m2, j3, m3) )
jcoupling.append( (min(j1_n), min(j2_n), j3) )
# Better checks that state is physical
if any(abs(term[5]) > term[4] for term in cg_terms):
continue
if any(term[0] + term[2] < term[4] for term in cg_terms):
continue
if any(abs(term[0] - term[2]) > term[4] for term in cg_terms):
continue
coeff = Mul( *[ CG(*term).doit() for term in cg_terms] )
state = coupled_evect(j3, m3, jn, jcoupling)
result.append(coeff*state)
return Add(*result)
else:
# Symbolic coupling
cg_terms = []
jcoupling = []
sum_terms = []
coupled_j = list(jn)
for j1_n, j2_n in coupling_list:
j1 = coupled_j[ min(j1_n) - 1 ]
j2 = coupled_j[ min(j2_n) - 1 ]
if len(j1_n + j2_n) == len(states):
j3 = symbols('j')
else:
j3_name = 'j' + ''.join(["%s" % n for n in j1_n + j2_n])
j3 = symbols(j3_name)
coupled_j[ min(j1_n + j2_n) - 1 ] = j3
m1 = Add( *[ mn[x - 1] for x in j1_n] )
m2 = Add( *[ mn[x - 1] for x in j2_n] )
m3 = m1 + m2
cg_terms.append( (j1, m1, j2, m2, j3, m3) )
jcoupling.append( (min(j1_n), min(j2_n), j3) )
sum_terms.append((j3, m3, j1 + j2))
coeff = Mul( *[ CG(*term) for term in cg_terms] )
state = coupled_evect(j3, m3, jn, jcoupling)
return Sum(coeff*state, *sum_terms)
def uncouple(expr, jn=None, jcoupling_list=None):
""" Uncouple a coupled spin state
Gives the uncoupled representation of a coupled spin state. Arguments must
be either a spin state that is a subclass of CoupledSpinState or a spin
state that is a subclass of SpinState and an array giving the j values
of the spaces that are to be coupled
Parameters
==========
expr : Expr
The expression containing states that are to be coupled. If the states
are a subclass of SpinState, the ``jn`` and ``jcoupling`` parameters
must be defined. If the states are a subclass of CoupledSpinState,
``jn`` and ``jcoupling`` will be taken from the state.
jn : list or tuple
The list of the j-values that are coupled. If state is a
CoupledSpinState, this parameter is ignored. This must be defined if
state is not a subclass of CoupledSpinState. The syntax of this
parameter is the same as the ``jn`` parameter of JzKetCoupled.
jcoupling_list : list or tuple
The list defining how the j-values are coupled together. If state is a
CoupledSpinState, this parameter is ignored. This must be defined if
state is not a subclass of CoupledSpinState. The syntax of this
parameter is the same as the ``jcoupling`` parameter of JzKetCoupled.
Examples
========
Uncouple a numerical state using a CoupledSpinState state:
>>> from sympy.physics.quantum.spin import JzKetCoupled, uncouple
>>> from sympy import S
>>> uncouple(JzKetCoupled(1, 0, (S(1)/2, S(1)/2)))
sqrt(2)*|1/2,-1/2>|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>|1/2,-1/2>/2
Perform the same calculation using a SpinState state:
>>> from sympy.physics.quantum.spin import JzKet
>>> uncouple(JzKet(1, 0), (S(1)/2, S(1)/2))
sqrt(2)*|1/2,-1/2>|1/2,1/2>/2 + sqrt(2)*|1/2,1/2>|1/2,-1/2>/2
Uncouple a numerical state of three coupled spaces using a CoupledSpinState state:
>>> uncouple(JzKetCoupled(1, 1, (1, 1, 1), ((1,3,1),(1,2,1)) ))
|1,-1>|1,1>|1,1>/2 - |1,0>|1,0>|1,1>/2 + |1,1>|1,0>|1,0>/2 - |1,1>|1,1>|1,-1>/2
Perform the same calculation using a SpinState state:
>>> uncouple(JzKet(1, 1), (1, 1, 1), ((1,3,1),(1,2,1)) )
|1,-1>|1,1>|1,1>/2 - |1,0>|1,0>|1,1>/2 + |1,1>|1,0>|1,0>/2 - |1,1>|1,1>|1,-1>/2
Uncouple a symbolic state using a CoupledSpinState state:
>>> from sympy import symbols
>>> j,m,j1,j2 = symbols('j m j1 j2')
>>> uncouple(JzKetCoupled(j, m, (j1, j2)))
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
Perform the same calculation using a SpinState state
>>> uncouple(JzKet(j, m), (j1, j2))
Sum(CG(j1, m1, j2, m2, j, m)*|j1,m1>|j2,m2>, (m1, -j1, j1), (m2, -j2, j2))
"""
a = expr.atoms(SpinState)
for state in a:
expr = expr.subs(state, _uncouple(state, jn, jcoupling_list))
return expr
def _uncouple(state, jn, jcoupling_list):
if isinstance(state, CoupledSpinState):
jn = state.jn
coupled_n = state.coupled_n
coupled_jn = state.coupled_jn
evect = state.uncoupled_class()
elif isinstance(state, SpinState):
if jn is None:
raise ValueError("Must specify j-values for coupled state")
if not isinstance(jn, (list, tuple)):
raise TypeError("jn must be list or tuple")
if jcoupling_list is None:
# Use default
jcoupling_list = []
for i in range(1, len(jn)):
jcoupling_list.append(
(1, 1 + i, Add(*[jn[j] for j in range(i + 1)])) )
if not isinstance(jcoupling_list, (list, tuple)):
raise TypeError("jcoupling must be a list or tuple")
if not len(jcoupling_list) == len(jn) - 1:
raise ValueError("Must specify 2 fewer coupling terms than the number of j values")
coupled_n, coupled_jn = _build_coupled(jcoupling_list, len(jn))
evect = state.__class__
else:
raise TypeError("state must be a spin state")
j = state.j
m = state.m
coupling_list = []
j_list = list(jn)
# Create coupling, which defines all the couplings between all the spaces
for j3, (n1, n2) in zip(coupled_jn, coupled_n):
# j's which are coupled as first and second spaces
j1 = j_list[n1[0] - 1]
j2 = j_list[n2[0] - 1]
# Build coupling list
coupling_list.append( (n1, n2, j1, j2, j3) )
# Set new value in j_list
j_list[min(n1 + n2) - 1] = j3
if j.is_number and m.is_number:
diff_max = [ 2*x for x in jn ]
diff = Add(*jn) - m
n = len(jn)
tot = binomial(diff + n - 1, diff)
result = []
for config_num in range(tot):
diff_list = _confignum_to_difflist(config_num, diff, n)
if any(d > p for d, p in zip(diff_list, diff_max)):
continue
cg_terms = []
for coupling in coupling_list:
j1_n, j2_n, j1, j2, j3 = coupling
m1 = Add( *[ jn[x - 1] - diff_list[x - 1] for x in j1_n ] )
m2 = Add( *[ jn[x - 1] - diff_list[x - 1] for x in j2_n ] )
m3 = m1 + m2
cg_terms.append( (j1, m1, j2, m2, j3, m3) )
coeff = Mul( *[ CG(*term).doit() for term in cg_terms ] )
state = TensorProduct(
*[ evect(j, j - d) for j, d in zip(jn, diff_list) ] )
result.append(coeff*state)
return Add(*result)
else:
# Symbolic coupling
m_str = "m1:%d" % (len(jn) + 1)
mvals = symbols(m_str)
cg_terms = [(j1, Add(*[mvals[n - 1] for n in j1_n]),
j2, Add(*[mvals[n - 1] for n in j2_n]),
j3, Add(*[mvals[n - 1] for n in j1_n + j2_n])) for j1_n, j2_n, j1, j2, j3 in coupling_list[:-1] ]
cg_terms.append(*[(j1, Add(*[mvals[n - 1] for n in j1_n]),
j2, Add(*[mvals[n - 1] for n in j2_n]),
j, m) for j1_n, j2_n, j1, j2, j3 in [coupling_list[-1]] ])
cg_coeff = Mul(*[CG(*cg_term) for cg_term in cg_terms])
sum_terms = [ (m, -j, j) for j, m in zip(jn, mvals) ]
state = TensorProduct( *[ evect(j, m) for j, m in zip(jn, mvals) ] )
return Sum(cg_coeff*state, *sum_terms)
def _confignum_to_difflist(config_num, diff, list_len):
# Determines configuration of diffs into list_len number of slots
diff_list = []
for n in range(list_len):
prev_diff = diff
# Number of spots after current one
rem_spots = list_len - n - 1
# Number of configurations of distributing diff among the remaining spots
rem_configs = binomial(diff + rem_spots - 1, diff)
while config_num >= rem_configs:
config_num -= rem_configs
diff -= 1
rem_configs = binomial(diff + rem_spots - 1, diff)
diff_list.append(prev_diff - diff)
return diff_list
| JzBraCoupled |
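A minimal sketch checking that couple and uncouple invert each other on a simple product state; the kets are illustrative and the final simplification is assumed (not verified here) to reduce to zero.

from sympy import S, simplify
from sympy.physics.quantum.spin import JzKet, couple, uncouple
from sympy.physics.quantum.tensorproduct import TensorProduct

state = TensorProduct(JzKet(S(1)/2, S(1)/2), JzKet(S(1)/2, -S(1)/2))
roundtrip = uncouple(couple(state))
print(simplify(roundtrip - state))  # expected to print 0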
python | pola-rs__polars | py-polars/src/polars/dataframe/group_by.py | {
"start": 33201,
"end": 40173
} | class ____:
"""
A dynamic grouper.
This has an `.agg` method which allows you to run all polars expressions in a
group by context.
"""
def __init__(
self,
df: DataFrame,
index_column: IntoExpr,
*,
every: str | timedelta,
period: str | timedelta | None,
offset: str | timedelta | None,
include_boundaries: bool,
closed: ClosedInterval,
label: Label,
group_by: IntoExpr | Iterable[IntoExpr] | None,
start_by: StartBy,
predicates: Iterable[Any] | None,
) -> None:
every = parse_as_duration_string(every)
period = parse_as_duration_string(period)
offset = parse_as_duration_string(offset)
self.df = df
self.time_column = index_column
self.every = every
self.period = period
self.offset = offset
self.label = label
self.include_boundaries = include_boundaries
self.closed = closed
self.group_by = group_by
self.start_by = start_by
self.predicates = predicates
def __iter__(self) -> Self:
from polars.lazyframe.opt_flags import QueryOptFlags
temp_col = "__POLARS_GB_GROUP_INDICES"
groups_df = (
self.df.lazy()
.with_row_index("__POLARS_GB_ROW_INDEX")
.group_by_dynamic(
index_column=self.time_column,
every=self.every,
period=self.period,
offset=self.offset,
label=self.label,
include_boundaries=self.include_boundaries,
closed=self.closed,
group_by=self.group_by,
start_by=self.start_by,
)
.agg(F.first().alias(temp_col))
.collect(optimizations=QueryOptFlags.none())
)
self._group_names = groups_df.select(F.all().exclude(temp_col)).iter_rows()
self._group_indices = groups_df.select(temp_col).to_series()
self._current_index = 0
return self
def __next__(self) -> tuple[tuple[object, ...], DataFrame]:
if self._current_index >= len(self._group_indices):
raise StopIteration
group_name = next(self._group_names)
group_data = self.df[self._group_indices[self._current_index], :]
self._current_index += 1
return group_name, group_data
def having(self, *predicates: IntoExpr | Iterable[IntoExpr]) -> DynamicGroupBy:
"""
Filter groups with a list of predicates after aggregation.
Using this method is equivalent to adding the predicates to the aggregation and
filtering afterwards.
This method can be chained and all conditions will be combined using `&`.
Parameters
----------
*predicates
Expressions that evaluate to a boolean value for each group. Typically, this
requires the use of an aggregation function. Multiple predicates are
combined using `&`.
"""
return DynamicGroupBy(
self.df,
self.time_column,
every=self.every,
period=self.period,
offset=self.offset,
include_boundaries=self.include_boundaries,
closed=self.closed,
label=self.label,
group_by=self.group_by,
start_by=self.start_by,
predicates=_chain_predicates(self.predicates, predicates),
)
def agg(
self,
*aggs: IntoExpr | Iterable[IntoExpr],
**named_aggs: IntoExpr,
) -> DataFrame:
"""
Compute aggregations for each group of a group by operation.
Parameters
----------
*aggs
Aggregations to compute for each group of the group by operation,
specified as positional arguments.
Accepts expression input. Strings are parsed as column names.
**named_aggs
Additional aggregations, specified as keyword arguments.
The resulting columns will be renamed to the keyword used.
"""
from polars.lazyframe.opt_flags import QueryOptFlags
group_by = self.df.lazy().group_by_dynamic(
index_column=self.time_column,
every=self.every,
period=self.period,
offset=self.offset,
label=self.label,
include_boundaries=self.include_boundaries,
closed=self.closed,
group_by=self.group_by,
start_by=self.start_by,
)
if self.predicates:
group_by = group_by.having(self.predicates)
return group_by.agg(*aggs, **named_aggs).collect(
optimizations=QueryOptFlags.none()
)
def map_groups(
self,
function: Callable[[DataFrame], DataFrame],
schema: SchemaDict | None,
) -> DataFrame:
"""
Apply a custom/user-defined function (UDF) over the groups as a new DataFrame.
Using this is considered an anti-pattern as it will be very slow because:
- it forces the engine to materialize the whole `DataFrames` for the groups.
- it is not parallelized.
- it blocks optimizations as the passed python function is opaque to the
optimizer.
The idiomatic way to apply custom functions over multiple columns is using:
`pl.struct([my_columns]).map_elements(lambda struct_series: ..)`
Parameters
----------
function
Function to apply over each group of the `LazyFrame`; it receives
a DataFrame and should return a DataFrame.
schema
Schema of the output function. This has to be known statically. If the
given schema is incorrect, this is a bug in the caller's query and may
lead to errors. If set to None, polars assumes the schema is unchanged.
"""
from polars.lazyframe.opt_flags import QueryOptFlags
if self.predicates:
msg = "cannot call `map_groups` when filtering groups with `having`"
raise TypeError(msg)
return (
self.df.lazy()
.group_by_dynamic(
index_column=self.time_column,
every=self.every,
period=self.period,
offset=self.offset,
include_boundaries=self.include_boundaries,
closed=self.closed,
group_by=self.group_by,
start_by=self.start_by,
)
.map_groups(function, schema)
.collect(optimizations=QueryOptFlags.none())
)
def _chain_predicates(
lhs: Iterable[IntoExpr] | None, rhs: tuple[IntoExpr | Iterable[IntoExpr], ...]
) -> Iterable[Any]:
return (
chain(lhs, _parse_inputs_as_iterable(rhs))
if lhs is not None
else _parse_inputs_as_iterable(rhs)
)
| DynamicGroupBy |
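A minimal sketch reaching the class above through DataFrame.group_by_dynamic, its usual entry point; the frame contents are illustrative and a recent polars with pl.datetime_range is assumed.

from datetime import datetime
import polars as pl

df = pl.DataFrame(
    {
        "time": pl.datetime_range(
            datetime(2024, 1, 1), datetime(2024, 1, 2), "1h", eager=True
        ),
        "value": range(25),
    }
).set_sorted("time")
# six-hour windows; aggregation happens inside .agg as documented above
out = df.group_by_dynamic("time", every="6h").agg(pl.col("value").sum())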
python | PyCQA__pylint | tests/functional/a/alternative/alternative_union_syntax.py | {
"start": 1046,
"end": 1280
} | class ____(typing.NamedTuple):
my_var: int | str
# Check typing.TypedDict
CustomTypedDict = TypedDict("CustomTypedDict", my_var=(int | str))
CustomTypedDict2 = TypedDict("CustomTypedDict2", {"my_var": int | str})
| CustomNamedTuple3 |
python | ray-project__ray | python/ray/dashboard/modules/reporter/tests/test_gpu_providers.py | {
"start": 311,
"end": 763
} | class ____(unittest.TestCase):
"""Test ProcessGPUInfo TypedDict."""
def test_creation(self):
"""Test ProcessGPUInfo creation."""
process_info = ProcessGPUInfo(
pid=1234, gpu_memory_usage=256, gpu_utilization=None
)
self.assertEqual(process_info["pid"], 1234)
self.assertEqual(process_info["gpu_memory_usage"], 256)
self.assertIsNone(process_info["gpu_utilization"])
| TestProcessGPUInfo |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor16.py | {
"start": 122,
"end": 178
} | class ____:
def __init__(self) -> None:
pass
| A |
python | Netflix__metaflow | metaflow/decorators.py | {
"start": 1826,
"end": 2407
} | class ____(MetaflowException):
headline = "Unknown step decorator"
def __init__(self, deconame):
decos = ", ".join(
[
x
for x in UserStepDecoratorMeta.all_decorators().keys()
if not x.endswith("_internal")
]
)
msg = (
"Unknown step decorator *{deconame}*. The following decorators are "
"supported: *{decos}*".format(deconame=deconame, decos=decos)
)
super(UnknownStepDecoratorException, self).__init__(msg)
| UnknownStepDecoratorException |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_compiler.py | {
"start": 156584,
"end": 159593
} | class ____(fixtures.CacheKeyFixture, fixtures.TestBase):
def test_aggregate_order_by(self):
"""test #8574"""
self._run_cache_key_fixture(
lambda: (
aggregate_order_by(column("a"), column("a")),
aggregate_order_by(column("a"), column("b")),
aggregate_order_by(column("a"), column("a").desc()),
aggregate_order_by(column("a"), column("a").nulls_first()),
aggregate_order_by(
column("a"), column("a").desc().nulls_first()
),
aggregate_order_by(column("a", Integer), column("b")),
aggregate_order_by(column("a"), column("b"), column("c")),
aggregate_order_by(column("a"), column("c"), column("b")),
aggregate_order_by(
column("a"), column("b").desc(), column("c")
),
aggregate_order_by(
column("a"), column("b").nulls_first(), column("c")
),
aggregate_order_by(
column("a"), column("b").desc().nulls_first(), column("c")
),
aggregate_order_by(
column("a", Integer), column("a"), column("b")
),
),
compare_values=False,
)
def test_array_equivalent_keys_one_element(self):
self._run_cache_key_equal_fixture(
lambda: (
array([random.randint(0, 10)]),
array([random.randint(0, 10)], type_=Integer),
array([random.randint(0, 10)], type_=Integer),
),
compare_values=False,
)
def test_array_equivalent_keys_two_elements(self):
self._run_cache_key_equal_fixture(
lambda: (
array([random.randint(0, 10), random.randint(0, 10)]),
array(
[random.randint(0, 10), random.randint(0, 10)],
type_=Integer,
),
array(
[random.randint(0, 10), random.randint(0, 10)],
type_=Integer,
),
),
compare_values=False,
)
def test_array_heterogeneous(self):
self._run_cache_key_fixture(
lambda: (
array([], type_=Integer),
array([], type_=Text),
array([]),
array([random.choice(["t1", "t2", "t3"])]),
array(
[
random.choice(["t1", "t2", "t3"]),
random.choice(["t1", "t2", "t3"]),
]
),
array([random.choice(["t1", "t2", "t3"])], type_=Text),
array([random.choice(["t1", "t2", "t3"])], type_=VARCHAR(30)),
array([random.randint(0, 10), random.randint(0, 10)]),
),
compare_values=False,
)
| CacheKeyTest |
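A minimal sketch of the construct the cache-key tests above exercise; the table and columns are illustrative.

from sqlalchemy import column, func, select, table
from sqlalchemy.dialects.postgresql import aggregate_order_by

t = table("t", column("a"), column("b"))
stmt = select(func.array_agg(aggregate_order_by(t.c.a, t.c.b.desc())))
# compiles (PostgreSQL dialect) to roughly:
#   SELECT array_agg(t.a ORDER BY t.b DESC) FROM t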
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_api.py | {
"start": 5100,
"end": 7523
} | class ____(DeclarativeAttributeIntercept):
metadata: MetaData
registry: RegistryType
def __init__(
cls, classname: Any, bases: Any, dict_: Any, **kw: Any
) -> None:
# use cls.__dict__, which can be modified by an
# __init_subclass__() method (#7900)
dict_ = cls.__dict__
# early-consume registry from the initial declarative base,
# assign privately to not conflict with subclass attributes named
# "registry"
reg = getattr(cls, "_sa_registry", None)
if reg is None:
reg = dict_.get("registry", None)
if not isinstance(reg, registry):
raise exc.InvalidRequestError(
"Declarative base class has no 'registry' attribute, "
"or registry is not a sqlalchemy.orm.registry() object"
)
else:
cls._sa_registry = reg
if not cls.__dict__.get("__abstract__", False):
_ORMClassConfigurator._as_declarative(reg, cls, dict_)
type.__init__(cls, classname, bases, dict_)
def synonym_for(
name: str, map_column: bool = False
) -> Callable[[Callable[..., Any]], Synonym[Any]]:
"""Decorator that produces an :func:`_orm.synonym`
attribute in conjunction with a Python descriptor.
The function being decorated is passed to :func:`_orm.synonym` as the
:paramref:`.orm.synonym.descriptor` parameter::
class MyClass(Base):
__tablename__ = "my_table"
id = Column(Integer, primary_key=True)
_job_status = Column("job_status", String(50))
@synonym_for("job_status")
@property
def job_status(self):
return "Status: %s" % self._job_status
The :ref:`hybrid properties <mapper_hybrids>` feature of SQLAlchemy
is typically preferred instead of synonyms, which is a more legacy
feature.
.. seealso::
:ref:`synonyms` - Overview of synonyms
:func:`_orm.synonym` - the mapper-level function
:ref:`mapper_hybrids` - The Hybrid Attribute extension provides an
updated approach to augmenting attribute behavior more flexibly than
can be achieved with synonyms.
"""
def decorate(fn: Callable[..., Any]) -> Synonym[Any]:
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
| DeclarativeMeta |
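A minimal sketch: declarative_base() returns a class constructed with the metaclass above, so subclassing it triggers the mapping performed in __init__.

from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class MyTable(Base):
    __tablename__ = "my_table"
    id = Column(Integer, primary_key=True)

assert type(MyTable) is type(Base)  # both carry the declarative metaclass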
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 2238,
"end": 2530
} | class ____(serializers.ModelSerializer):
"""
Used when triggering (create action) a ``Build`` for a specific ``Version``.
This serializer validates that no field is sent at all in the request.
"""
class Meta:
model = Build
fields = []
| BuildCreateSerializer |
python | pytorch__pytorch | torch/distributed/flight_recorder/components/types.py | {
"start": 3277,
"end": 3872
} | class ____(NamedTuple):
id: int
group_id: str
pass_check: bool
collective_seq_id: int
p2p_seq_id: int
record_id: int
pg_desc: str
collective_name: str
input_sizes: list[list[int]]
output_sizes: list[list[int]]
expected_ranks: set[int]
collective_state: str
collective_frames: list[dict[str, str]]
input_numel: Optional[int] = None
output_numel: Optional[int] = None
missing_ranks: Optional[set[int]] = None
mismatch_collectives: Optional[dict[int, "Collective"]] = None
type_of_mismatch: Optional[MatchInfo] = None
| Collective |
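A minimal sketch constructing the record above; every field value is illustrative.

c = Collective(
    id=0, group_id="0", pass_check=True, collective_seq_id=1,
    p2p_seq_id=0, record_id=0, pg_desc="default_pg",
    collective_name="allreduce", input_sizes=[[1024]],
    output_sizes=[[1024]], expected_ranks={0, 1},
    collective_state="completed", collective_frames=[],
)
assert c.missing_ranks is None  # the optional tail fields default to None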
python | run-llama__llama_index | llama-index-core/tests/playground/test_base.py | {
"start": 469,
"end": 4738
} | class ____(BaseEmbedding):
@classmethod
def class_name(cls) -> str:
return "MockEmbedding"
async def _aget_query_embedding(self, query: str) -> List[float]:
del query
return [0, 0, 1, 0, 0]
async def _aget_text_embedding(self, text: str) -> List[float]:
text = text.strip()
# assume dimensions are 5
if text == "They're taking the Hobbits to Isengard!":
return [1, 0, 0, 0, 0]
elif (
text == "They're taking the Hobbits to Isengard! I can't carry it for you."
):
return [1, 1, 0, 0, 0]
elif (
text
== "They're taking the Hobbits to Isengard! I can't carry it for you. But I can carry you!"
):
return [1, 1, 1, 0, 0]
elif text == "I can't carry it for you.":
return [0, 1, 0, 0, 0]
elif text == "I can't carry it for you. But I can carry you!":
return [0, 1, 1, 0, 0]
elif text == "But I can carry you!":
return [0, 0, 1, 0, 0]
else:
print(text)
raise ValueError(f"Invalid text for `mock_get_text_embedding`.")
def _get_text_embedding(self, text: str) -> List[float]:
"""Mock get text embedding."""
text = text.strip()
# assume dimensions are 5
if text == "They're taking the Hobbits to Isengard!":
return [1, 0, 0, 0, 0]
elif (
text == "They're taking the Hobbits to Isengard! I can't carry it for you."
):
return [1, 1, 0, 0, 0]
elif (
text
== "They're taking the Hobbits to Isengard! I can't carry it for you. But I can carry you!"
):
return [1, 1, 1, 0, 0]
elif text == "I can't carry it for you.":
return [0, 1, 0, 0, 0]
elif text == "I can't carry it for you. But I can carry you!":
return [0, 1, 1, 0, 0]
elif text == "But I can carry you!":
return [0, 0, 1, 0, 0]
else:
print(text)
raise ValueError("Invalid text for `mock_get_text_embedding`.")
def _get_query_embedding(self, query: str) -> List[float]:
"""Mock get query embedding."""
del query
return [0, 0, 1, 0, 0]
def test_get_set_compare(patch_llm_predictor, patch_token_text_splitter) -> None:
"""Test basic comparison of indices."""
documents = [Document(text="They're taking the Hobbits to Isengard!")]
indices = [
VectorStoreIndex.from_documents(
documents=documents, embed_model=MockEmbedding()
),
SummaryIndex.from_documents(documents),
TreeIndex.from_documents(documents=documents),
]
playground = Playground(indices=indices) # type: ignore
assert len(playground.indices) == 3
results = playground.compare("Who is?", to_pandas=False)
assert len(results) > 0
assert len(results) <= 3 * len(DEFAULT_MODES)
playground.indices = [
VectorStoreIndex.from_documents(
documents=documents, embed_model=MockEmbedding()
)
]
assert len(playground.indices) == 1
def test_from_docs(patch_llm_predictor, patch_token_text_splitter) -> None:
"""Test initialization via a list of documents."""
documents = [
Document(text="I can't carry it for you."),
Document(text="But I can carry you!"),
]
playground = Playground.from_docs(documents=documents)
assert len(playground.indices) == len(DEFAULT_INDEX_CLASSES)
assert len(playground.retriever_modes) == len(DEFAULT_MODES)
with pytest.raises(ValueError):
playground = Playground.from_docs(documents=documents, retriever_modes={})
def test_validation() -> None:
"""Test validation of indices and modes."""
with pytest.raises(ValueError):
_ = Playground(indices=["VectorStoreIndex"]) # type: ignore
with pytest.raises(ValueError):
_ = Playground(
indices=[VectorStoreIndex, SummaryIndex, TreeIndex] # type: ignore
)
with pytest.raises(ValueError):
_ = Playground(indices=[]) # type: ignore
with pytest.raises(TypeError):
_ = Playground(retriever_modes={}) # type: ignore
| MockEmbedding |
python | python-attrs__attrs | tests/test_make.py | {
"start": 34989,
"end": 40247
} | class ____:
"""
Tests for `make_class`.
"""
@pytest.mark.parametrize("ls", [list, tuple])
def test_simple(self, ls):
"""
Passing a list of strings creates attributes with default args.
"""
C1 = make_class("C1", ls(["a", "b"]))
@attr.s
class C2:
a = attr.ib()
b = attr.ib()
assert C1.__attrs_attrs__ == C2.__attrs_attrs__
def test_dict(self):
"""
Passing a dict of name: _CountingAttr creates an equivalent class.
"""
C1 = make_class(
"C1", {"a": attr.ib(default=42), "b": attr.ib(default=None)}
)
@attr.s
class C2:
a = attr.ib(default=42)
b = attr.ib(default=None)
assert C1.__attrs_attrs__ == C2.__attrs_attrs__
def test_attr_args(self):
"""
attributes_arguments are passed to attributes
"""
C = make_class("C", ["x"], repr=False)
assert repr(C(1)).startswith("<tests.test_make.C object at 0x")
def test_normalized_unicode_attr_args(self):
"""
Unicode identifiers are valid in Python.
"""
clsname = "ü"
assert clsname == unicodedata.normalize("NFKC", clsname)
attrname = "ß"
assert attrname == unicodedata.normalize("NFKC", attrname)
C = make_class(clsname, [attrname], repr=False)
assert repr(C(1)).startswith("<tests.test_make.ü object at 0x")
kwargs = {"ß": 1}
c = C(**kwargs)
assert 1 == c.ß
def test_unnormalized_unicode_attr_args(self):
"""
Unicode identifiers are normalized to NFKC form in Python.
"""
clsname = "Ŀ"
assert clsname != unicodedata.normalize("NFKC", clsname)
attrname = "ㅁ"
assert attrname != unicodedata.normalize("NFKC", attrname)
C = make_class(clsname, [attrname], repr=False)
assert repr(C(1)).startswith("<tests.test_make.L· object at 0x")
kwargs = {unicodedata.normalize("NFKC", attrname): 1}
c = C(**kwargs)
assert 1 == c.ㅁ
def test_catches_wrong_attrs_type(self):
"""
Raise `TypeError` if an invalid type for attrs is passed.
"""
with pytest.raises(TypeError) as e:
make_class("C", object())
assert ("attrs argument must be a dict or a list.",) == e.value.args
def test_bases(self):
"""
Parameter bases default to (object,) and subclasses correctly
"""
class D:
pass
cls = make_class("C", {})
assert cls.__mro__[-1] is object
cls = make_class("C", {}, bases=(D,))
assert D in cls.__mro__
assert isinstance(cls(), D)
def test_additional_class_body(self):
"""
Additional class_body is added to newly created class.
"""
def echo_func(cls, *args):
return args
cls = make_class("C", {}, class_body={"echo": classmethod(echo_func)})
assert ("a", "b") == cls.echo("a", "b")
def test_clean_class(self, slots):
"""
Attribute definitions do not appear on the class body.
"""
C = make_class("C", ["x"], slots=slots)
x = getattr(C, "x", None)
assert not isinstance(x, _CountingAttr)
def test_missing_sys_getframe(self, monkeypatch):
"""
`make_class()` does not fail when `sys._getframe()` is not available.
"""
monkeypatch.delattr(sys, "_getframe")
C = make_class("C", ["x"])
assert 1 == len(C.__attrs_attrs__)
def test_make_class_ordered(self):
"""
If `make_class()` is passed ordered attrs, their order is respected
instead of the counter.
"""
b = attr.ib(default=2)
a = attr.ib(default=1)
C = attr.make_class("C", {"a": a, "b": b})
assert "C(a=1, b=2)" == repr(C())
def test_generic_dynamic_class(self):
"""
make_class can create generic dynamic classes.
https://github.com/python-attrs/attrs/issues/756
https://bugs.python.org/issue33188
"""
from types import new_class
from typing import Generic, TypeVar
MyTypeVar = TypeVar("MyTypeVar")
MyParent = new_class("MyParent", (Generic[MyTypeVar],), {})
attr.make_class("test", {"id": attr.ib(type=str)}, (MyParent[int],))
def test_annotations(self):
"""
make_class fills the __annotations__ dict for attributes with a known
type.
"""
a = attr.ib(type=bool)
b = attr.ib(
type=None
) # Won't be added to ann. b/c of unfavorable default
c = attr.ib()
C = attr.make_class("C", {"a": a, "b": b, "c": c})
C = attr.resolve_types(C)
assert {"a": bool} == C.__annotations__
def test_annotations_resolve(self):
"""
resolve_types() resolves the annotations added by make_class().
"""
a = attr.ib(type="bool")
C = attr.make_class("C", {"a": a})
C = attr.resolve_types(C)
assert attr.fields(C).a.type is bool
assert {"a": "bool"} == C.__annotations__
| TestMakeClass |
python | mlflow__mlflow | mlflow/telemetry/schemas.py | {
"start": 305,
"end": 872
} | class ____:
event_name: str
timestamp_ns: int
params: dict[str, Any] | None = None
status: Status = Status.UNKNOWN
duration_ms: int | None = None
def to_dict(self) -> dict[str, Any]:
return {
"timestamp_ns": self.timestamp_ns,
"event_name": self.event_name,
# dump params to string so we can parse them easily in ETL pipeline
"params": json.dumps(self.params) if self.params else None,
"status": self.status.value,
"duration_ms": self.duration_ms,
}
| Record |
python | PyCQA__pylint | tests/functional/n/new_style_class_py_30.py | {
"start": 151,
"end": 1203
} | class ____(file): # pylint: disable=undefined-variable
""" Testing new-style class inheritance from file"""
def __init__(self, name, mode="r", buffering=-1, verbose=False):
"""Constructor"""
self.was_modified = False
self.verbose = verbose
super(File, self).__init__(name, mode, buffering) # [super-with-arguments]
if self.verbose:
print(f"File {self.name} is opened. The mode is: {self.mode}")
def write(self, a_string):
""" Write a string to the file."""
super(File, self).write(a_string) # [super-with-arguments]
self.was_modified = True
def writelines(self, sequence):
""" Write a sequence of strings to the file. """
super(File, self).writelines(sequence) # [super-with-arguments]
self.was_modified = True
def close(self):
"""Close the file."""
if self.verbose:
print(f"Closing file {self.name}")
super(File, self).close() # [super-with-arguments]
self.was_modified = False
| File |
python | dask__dask | dask/tests/test_expr.py | {
"start": 250,
"end": 618
} | class ____(MyExpr):
# A subclass that inherits parameters
pass
def test_setattr():
e = MyExpr(foo=1, bar=2)
e.bar = 3
assert e.bar == 3
with pytest.raises(AttributeError):
e.baz = 4
def test_setattr2():
e = MyExpr2(foo=1, bar=2)
e.bar = 3
assert e.bar == 3
with pytest.raises(AttributeError):
e.baz = 4
| MyExpr2 |
python | scrapy__scrapy | tests/AsyncCrawlerRunner/simple_default_reactor.py | {
"start": 216,
"end": 493
} | class ____(Spider):
name = "no_request"
async def start(self):
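        # A bare `return` before `yield` makes this an async generator
        # that finishes without yielding any requests.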
return
yield
@deferred_f_from_coro_f
async def main(reactor):
configure_logging()
runner = AsyncCrawlerRunner()
await runner.crawl(NoRequestsSpider)
react(main)
| NoRequestsSpider |
python | numba__numba | numba/core/byteflow.py | {
"start": 1908,
"end": 12158
} | class ____(object):
"""Data+Control Flow analysis.
Simulate execution to recover dataflow and controlflow information.
"""
def __init__(self, bytecode):
_logger.debug("bytecode dump:\n%s",
_lazy_pformat(bytecode, lazy_func=lambda x: x.dump()))
self._bytecode = bytecode
self.block_infos = UniqueDict()
def run(self):
"""Run a trace over the bytecode over all reachable path.
The trace starts at bytecode offset 0 and gathers stack and control-
flow information by partially interpreting each bytecode.
Each ``State`` instance in the trace corresponds to a basic-block.
The State instances forks when a jump instruction is encountered.
A newly forked state is then added to the list of pending states.
The trace ends when there are no more pending states.
"""
firststate = State(bytecode=self._bytecode, pc=0, nstack=0,
blockstack=())
runner = TraceRunner(debug_filename=self._bytecode.func_id.filename)
runner.pending.append(firststate)
# Enforce unique-ness on initial PC to avoid re-entering the PC with
# a different stack-depth. We don't know if such a case is ever
# possible, but no such case has been encountered in our tests.
first_encounter = UniqueDict()
        # Loop over each pending state at an initial PC.
# Each state is tracing a basic block
while runner.pending:
_logger.debug("pending: %s", runner.pending)
state = runner.pending.popleft()
if state not in runner.finished:
_logger.debug("stack: %s", state._stack)
_logger.debug("state.pc_initial: %s", state)
first_encounter[state.pc_initial] = state
# Loop over the state until it is terminated.
while True:
runner.dispatch(state)
# Terminated?
if state.has_terminated():
break
else:
if self._run_handle_exception(runner, state):
break
if self._is_implicit_new_block(state):
# check if this is a with...as, abort if so
self._guard_with_as(state)
# else split
state.split_new_block()
break
_logger.debug("end state. edges=%s", state.outgoing_edges)
runner.finished.add(state)
out_states = state.get_outgoing_states()
runner.pending.extend(out_states)
# Complete controlflow
self._build_cfg(runner.finished)
# Prune redundant PHI-nodes
self._prune_phis(runner)
# Post process
for state in sorted(runner.finished, key=lambda x: x.pc_initial):
self.block_infos[state.pc_initial] = si = adapt_state_infos(state)
_logger.debug("block_infos %s:\n%s", state, si)
if PYVERSION in ((3, 11), (3, 12), (3, 13), (3, 14)):
def _run_handle_exception(self, runner, state):
if not state.in_with() and (
state.has_active_try() and
state.get_inst().opname not in _NO_RAISE_OPS):
# Is in a *try* block
state.fork(pc=state.get_inst().next)
runner._adjust_except_stack(state)
return True
else:
state.advance_pc()
# Must the new PC be a new block?
if not state.in_with() and state.is_in_exception():
_logger.debug("3.11 exception %s PC=%s",
state.get_exception(), state._pc)
eh = state.get_exception()
eh_top = state.get_top_block('TRY')
if eh_top and eh_top['end'] == eh.target:
# Same exception
eh_block = None
else:
eh_block = state.make_block("TRY", end=eh.target)
eh_block['end_offset'] = eh.end
eh_block['stack_depth'] = eh.depth
eh_block['push_lasti'] = eh.lasti
state.fork(pc=state._pc, extra_block=eh_block)
return True
elif PYVERSION in ((3, 10),):
def _run_handle_exception(self, runner, state):
if (state.has_active_try() and
state.get_inst().opname not in _NO_RAISE_OPS):
# Is in a *try* block
state.fork(pc=state.get_inst().next)
tryblk = state.get_top_block('TRY')
state.pop_block_and_above(tryblk)
nstack = state.stack_depth
kwargs = {}
if nstack > tryblk['entry_stack']:
kwargs['npop'] = nstack - tryblk['entry_stack']
handler = tryblk['handler']
kwargs['npush'] = {
BlockKind('EXCEPT'): _EXCEPT_STACK_OFFSET,
BlockKind('FINALLY'): _FINALLY_POP
}[handler['kind']]
kwargs['extra_block'] = handler
state.fork(pc=tryblk['end'], **kwargs)
return True
else:
state.advance_pc()
else:
raise NotImplementedError(PYVERSION)
def _build_cfg(self, all_states):
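        # CFG nodes are basic-block start offsets; edges follow each traced
        # state's outgoing jumps, with the entry point at offset 0.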
graph = CFGraph()
for state in all_states:
b = state.pc_initial
graph.add_node(b)
for state in all_states:
for edge in state.outgoing_edges:
graph.add_edge(state.pc_initial, edge.pc, 0)
graph.set_entry_point(0)
graph.process()
self.cfgraph = graph
def _prune_phis(self, runner):
# Find phis that are unused in the local block
_logger.debug("Prune PHIs".center(60, '-'))
# Compute dataflow for used phis and propagate
# 1. Get used-phis for each block
# Map block to used_phis
def get_used_phis_per_state():
used_phis = defaultdict(set)
phi_set = set()
for state in runner.finished:
used = set(state._used_regs)
phis = set(state._phis)
used_phis[state] |= phis & used
phi_set |= phis
return used_phis, phi_set
# Find use-defs
def find_use_defs():
defmap = {}
phismap = defaultdict(set)
for state in runner.finished:
for phi, rhs in state._outgoing_phis.items():
if rhs not in phi_set:
# Is a definition
defmap[phi] = state
phismap[phi].add((rhs, state))
_logger.debug("defmap: %s", _lazy_pformat(defmap))
_logger.debug("phismap: %s", _lazy_pformat(phismap))
return defmap, phismap
def propagate_phi_map(phismap):
"""An iterative dataflow algorithm to find the definition
(the source) of each PHI node.
"""
blacklist = defaultdict(set)
while True:
changing = False
for phi, defsites in sorted(list(phismap.items())):
for rhs, state in sorted(list(defsites)):
if rhs in phi_set:
defsites |= phismap[rhs]
blacklist[phi].add((rhs, state))
to_remove = blacklist[phi]
if to_remove & defsites:
defsites -= to_remove
changing = True
_logger.debug("changing phismap: %s", _lazy_pformat(phismap))
if not changing:
break
def apply_changes(used_phis, phismap):
keep = {}
for state, used_set in used_phis.items():
for phi in used_set:
keep[phi] = phismap[phi]
_logger.debug("keep phismap: %s", _lazy_pformat(keep))
new_out = defaultdict(dict)
for phi in keep:
for rhs, state in keep[phi]:
new_out[state][phi] = rhs
_logger.debug("new_out: %s", _lazy_pformat(new_out))
for state in runner.finished:
state._outgoing_phis.clear()
state._outgoing_phis.update(new_out[state])
used_phis, phi_set = get_used_phis_per_state()
_logger.debug("Used_phis: %s", _lazy_pformat(used_phis))
defmap, phismap = find_use_defs()
propagate_phi_map(phismap)
apply_changes(used_phis, phismap)
_logger.debug("DONE Prune PHIs".center(60, '-'))
def _is_implicit_new_block(self, state):
inst = state.get_inst()
if inst.offset in self._bytecode.labels:
return True
elif inst.opname in NEW_BLOCKERS:
return True
else:
return False
if PYVERSION in ((3, 14),):
def _guard_with_as(self, state):
# Handled as part of `LOAD_SPECIAL` as of 3.14.
pass
elif PYVERSION in ((3, 10), (3, 11), (3, 12), (3, 13)):
def _guard_with_as(self, state):
"""Checks if the next instruction after a SETUP_WITH is something
other than a POP_TOP, if it is something else it'll be some sort of
store which is not supported (this corresponds to `with CTXMGR as
VAR(S)`)."""
current_inst = state.get_inst()
if current_inst.opname in {"SETUP_WITH", "BEFORE_WITH"}:
next_op = self._bytecode[current_inst.next].opname
if next_op != "POP_TOP":
msg = ("The 'with (context manager) as (variable):' "
"construct is not supported.")
raise UnsupportedBytecodeError(msg)
else:
raise NotImplementedError(PYVERSION)
def _is_null_temp_reg(reg):
return reg.startswith("$null$")
| Flow |
python | coleifer__peewee | playhouse/psycopg3_ext.py | {
"start": 4067,
"end": 5382
} | class ____(PostgresqlDatabase):
def _connect(self):
if psycopg is None:
raise ImproperlyConfigured('psycopg3 is not installed!')
if self.database.startswith('postgresql://'):
conn = psycopg.connect(self.database, **self.connect_params)
else:
conn = psycopg.connect(dbname=self.database, **self.connect_params)
if self._isolation_level is not None:
conn.isolation_level = self._isolation_level
conn.autocommit = True
return conn
def get_binary_type(self):
return psycopg.Binary
def _set_server_version(self, conn):
self.server_version = conn.pgconn.server_version
if self.server_version >= 90600:
self.safe_create_index = True
def is_connection_usable(self):
if self._state.closed:
return False
# Returns True if we are idle, running a command, or in an active
# connection. If the connection is in an error state or the connection
# is otherwise unusable, return False.
conn = self._state.conn
return conn.pgconn.transaction_status < TransactionStatus.INERROR
def extract_date(self, date_part, date_field):
return fn.EXTRACT(NodeList((SQL(date_part), SQL('FROM'), date_field)))
| Psycopg3Database |
python | doocs__leetcode | lcof2/剑指 Offer II 026. 重排链表/Solution.py | {
"start": 151,
"end": 1160
} | class ____:
def reorderList(self, head: ListNode) -> None:
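        # Three steps: find the middle, reverse the second half, then
        # merge the two halves by alternating nodes.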
mid = self.middleNode(head)
tmp = mid.next
mid.next = None
tmp = self.reverseList(tmp)
head = self.mergeTwoLists(head, tmp)
def middleNode(self, head: ListNode) -> ListNode:
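        # Fast pointer advances two nodes per step, so slow stops at
        # the middle when fast reaches the end.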
slow, fast = head, head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
return slow
def reverseList(self, head: ListNode) -> ListNode:
pre, cur = None, head
while cur:
tmp = cur.next
cur.next = pre
pre = cur
cur = tmp
return pre
def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:
dummy = ListNode()
cur = dummy
while l1 and l2:
cur.next = l1
l1 = l1.next
cur = cur.next
cur.next = l2
l2 = l2.next
cur = cur.next
cur.next = l1 or l2
return dummy.next
| Solution |
python | huggingface__transformers | src/transformers/models/patchtst/modeling_patchtst.py | {
"start": 59578,
"end": 62941
} | class ____(PatchTSTPreTrainedModel):
def __init__(self, config: PatchTSTConfig):
super().__init__(config)
# Turn off masking
if config.do_mask_input:
logger.warning("Setting `do_mask_input` parameter to False.")
config.do_mask_input = False
self.model = PatchTSTModel(config)
self.head = PatchTSTClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
past_values: torch.Tensor,
target_values: Optional[torch.Tensor] = None,
past_observed_mask: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSTForClassificationOutput]:
r"""
past_values (`torch.Tensor` of shape `(bs, sequence_length, num_input_channels)`, *required*):
Input sequence to the model
target_values (`torch.Tensor`, *optional*):
            Labels associated with the `past_values`
past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`, *optional*):
Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
in `[0, 1]`:
- 1 for values that are **observed**,
- 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).
Examples:
```python
>>> from transformers import PatchTSTConfig, PatchTSTForClassification
        >>> # classification task with two input channels and 3 classes
>>> config = PatchTSTConfig(
... num_input_channels=2,
... num_targets=3,
... context_length=512,
... patch_length=12,
... stride=12,
... use_cls_token=True,
... )
>>> model = PatchTSTForClassification(config=config)
>>> # during inference, one only provides past values
>>> past_values = torch.randn(20, 512, 2)
>>> outputs = model(past_values=past_values)
>>> labels = outputs.prediction_logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
model_output = self.model(
past_values=past_values,
past_observed_mask=past_observed_mask,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=True,
)
y_hat = self.head(model_output.last_hidden_state)
loss_val = None
if target_values is not None:
loss = nn.CrossEntropyLoss()
loss_val = loss(y_hat, target_values)
if not return_dict:
outputs = (y_hat,) + model_output[1:-3]
outputs = (loss_val,) + outputs if loss_val is not None else outputs
return outputs
return PatchTSTForClassificationOutput(
loss=loss_val,
prediction_logits=y_hat,
hidden_states=model_output.hidden_states,
attentions=model_output.attentions,
)
@auto_docstring(
custom_intro="""
The PatchTST for regression Model.
"""
)
| PatchTSTForClassification |
python | ray-project__ray | python/ray/data/_internal/stats.py | {
"start": 53498,
"end": 68280
} | class ____:
operator_name: str
# Whether the operator associated with this OperatorStatsSummary object
# is a suboperator
is_sub_operator: bool
# This is the total walltime of the entire operator, typically obtained from
# `DatasetStats.time_total_s`. An important distinction is that this is the
# overall runtime of the operator, pulled from the stats actor, whereas the
    # computed walltimes in `self.wall_time` are calculated on an operator level.
time_total_s: float
earliest_start_time: float
latest_end_time: float
# String summarizing high-level statistics from executing the operator
block_execution_summary_str: str
# The fields below are dicts with stats aggregated across blocks
# processed in this operator. For example:
# {"min": ..., "max": ..., "mean": ..., "sum": ...}
wall_time: Optional[Dict[str, float]] = None
cpu_time: Optional[Dict[str, float]] = None
udf_time: Optional[Dict[str, float]] = None
# memory: no "sum" stat
memory: Optional[Dict[str, float]] = None
# Use the output_num_rows of the parent Operator as output_num_rows
total_input_num_rows: Optional[int] = None
output_num_rows: Optional[Dict[str, float]] = None
output_size_bytes: Optional[Dict[str, float]] = None
# node_count: "count" stat instead of "sum"
node_count: Optional[Dict[str, float]] = None
task_rows: Optional[Dict[str, float]] = None
@property
def num_rows_per_s(self) -> float:
# The observed Ray Data operator throughput is computed by dividing the
# total number of rows produced by the wall time of the operator,
# time_total_s.
if not self.output_num_rows or not self.time_total_s:
return 0.0
return self.output_num_rows["sum"] / self.time_total_s
@property
def num_rows_per_task_s(self) -> float:
"""Calculates the estimated single-task throughput in rows per second."""
# The estimated single task operator throughput is computed by dividing the
# total number of rows produced by the sum of the wall times across all
# blocks of the operator. This assumes that on a single task the work done
# would be equivalent, with no concurrency.
if not self.output_num_rows or not self.wall_time or not self.wall_time["sum"]:
return 0.0
return self.output_num_rows["sum"] / self.wall_time["sum"]
@classmethod
def from_block_metadata(
cls,
operator_name: str,
block_stats: List[BlockStats],
is_sub_operator: bool,
) -> "OperatorStatsSummary":
"""Calculate the stats for a operator from a given list of blocks,
and generates a `OperatorStatsSummary` object with the results.
Args:
block_stats: List of `BlockStats` to calculate stats of
operator_name: Name of operator associated with `blocks`
is_sub_operator: Whether this set of blocks belongs to a sub operator.
Returns:
A `OperatorStatsSummary` object initialized with the calculated statistics
"""
exec_stats = [m.exec_stats for m in block_stats if m.exec_stats is not None]
rounded_total = 0
time_total_s = 0
earliest_start_time, latest_end_time = 0, 0
if exec_stats:
# Calculate the total execution time of operator as
# the difference between the latest end time and
# the earliest start time of all blocks in the operator.
earliest_start_time = min(s.start_time_s for s in exec_stats)
latest_end_time = max(s.end_time_s for s in exec_stats)
time_total_s = latest_end_time - earliest_start_time
if is_sub_operator:
exec_summary_str = "{} blocks produced\n".format(len(exec_stats))
else:
if exec_stats:
rounded_total = round(time_total_s, 2)
if rounded_total <= 0:
# Handle -0.0 case.
rounded_total = 0
exec_summary_str = "{} blocks produced in {}s".format(
len(exec_stats), rounded_total
)
else:
exec_summary_str = ""
exec_summary_str += "\n"
task_rows = collections.defaultdict(int)
for meta in block_stats:
if meta.num_rows is not None and meta.exec_stats is not None:
task_rows[meta.exec_stats.task_idx] += meta.num_rows
task_rows_stats = None
if len(task_rows) > 0:
task_rows_stats = {
"min": min(task_rows.values()),
"max": max(task_rows.values()),
"mean": int(np.mean(list(task_rows.values()))),
"count": len(task_rows),
}
exec_summary_str = "{} tasks executed, {}".format(
len(task_rows), exec_summary_str
)
wall_time_stats, cpu_stats, memory_stats, udf_stats = None, None, None, None
if exec_stats:
wall_time_stats = {
"min": min([e.wall_time_s for e in exec_stats]),
"max": max([e.wall_time_s for e in exec_stats]),
"mean": np.mean([e.wall_time_s for e in exec_stats]),
"sum": sum([e.wall_time_s for e in exec_stats]),
}
cpu_stats = {
"min": min([e.cpu_time_s for e in exec_stats]),
"max": max([e.cpu_time_s for e in exec_stats]),
"mean": np.mean([e.cpu_time_s for e in exec_stats]),
"sum": sum([e.cpu_time_s for e in exec_stats]),
}
memory_stats_mb = [
round((e.max_uss_bytes or 0) / (1024 * 1024), 2) for e in exec_stats
]
memory_stats = {
"min": min(memory_stats_mb),
"max": max(memory_stats_mb),
"mean": int(np.mean(memory_stats_mb)),
}
udf_stats = {
"min": min([e.udf_time_s for e in exec_stats]),
"max": max([e.udf_time_s for e in exec_stats]),
"mean": np.mean([e.udf_time_s for e in exec_stats]),
"sum": sum([e.udf_time_s for e in exec_stats]),
}
output_num_rows_stats = None
output_num_rows = [m.num_rows for m in block_stats if m.num_rows is not None]
if output_num_rows:
output_num_rows_stats = {
"min": min(output_num_rows),
"max": max(output_num_rows),
"mean": int(np.mean(output_num_rows)),
"sum": sum(output_num_rows),
}
output_size_bytes_stats = None
output_size_bytes = [
m.size_bytes for m in block_stats if m.size_bytes is not None
]
if output_size_bytes:
output_size_bytes_stats = {
"min": min(output_size_bytes),
"max": max(output_size_bytes),
"mean": int(np.mean(output_size_bytes)),
"sum": sum(output_size_bytes),
}
node_counts_stats = None
if exec_stats:
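            # Count distinct task indices per node to summarize how work
            # was spread across the cluster.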
node_tasks = collections.defaultdict(set)
for s in exec_stats:
node_tasks[s.node_id].add(s.task_idx)
node_counts = {node: len(tasks) for node, tasks in node_tasks.items()}
node_counts_stats = {
"min": min(node_counts.values()),
"max": max(node_counts.values()),
"mean": int(np.mean(list(node_counts.values()))),
"count": len(node_counts),
}
# Assign a value in to_summary and initialize it as None.
total_input_num_rows = None
return OperatorStatsSummary(
operator_name=operator_name,
is_sub_operator=is_sub_operator,
time_total_s=time_total_s,
earliest_start_time=earliest_start_time,
latest_end_time=latest_end_time,
block_execution_summary_str=exec_summary_str,
wall_time=wall_time_stats,
cpu_time=cpu_stats,
udf_time=udf_stats,
memory=memory_stats,
total_input_num_rows=total_input_num_rows,
output_num_rows=output_num_rows_stats,
output_size_bytes=output_size_bytes_stats,
node_count=node_counts_stats,
task_rows=task_rows_stats,
)
def __str__(self) -> str:
"""For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from
`OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string
that summarizes operator execution statistics.
Returns:
String with summary statistics for executing the given operator.
"""
indent = "\t" if self.is_sub_operator else ""
out = self.block_execution_summary_str
wall_time_stats = self.wall_time
if wall_time_stats:
out += indent
out += "* Remote wall time: {} min, {} max, {} mean, {} total\n".format(
fmt(wall_time_stats["min"]),
fmt(wall_time_stats["max"]),
fmt(wall_time_stats["mean"]),
fmt(wall_time_stats["sum"]),
)
cpu_stats = self.cpu_time
if cpu_stats:
out += indent
out += "* Remote cpu time: {} min, {} max, {} mean, {} total\n".format(
fmt(cpu_stats["min"]),
fmt(cpu_stats["max"]),
fmt(cpu_stats["mean"]),
fmt(cpu_stats["sum"]),
)
udf_stats = self.udf_time
if udf_stats:
out += indent
out += "* UDF time: {} min, {} max, {} mean, {} total\n".format(
fmt(udf_stats["min"]),
fmt(udf_stats["max"]),
fmt(udf_stats["mean"]),
fmt(udf_stats["sum"]),
)
memory_stats = self.memory
if memory_stats:
out += indent
out += "* Peak heap memory usage (MiB): {} min, {} max, {} mean\n".format(
memory_stats["min"],
memory_stats["max"],
memory_stats["mean"],
)
output_num_rows_stats = self.output_num_rows
if output_num_rows_stats:
out += indent
out += (
"* Output num rows per block: {} min, {} max, {} mean, {} total\n"
).format(
output_num_rows_stats["min"],
output_num_rows_stats["max"],
output_num_rows_stats["mean"],
output_num_rows_stats["sum"],
)
output_size_bytes_stats = self.output_size_bytes
if output_size_bytes_stats:
out += indent
out += (
"* Output size bytes per block: {} min, {} max, {} mean, {} total\n"
).format(
output_size_bytes_stats["min"],
output_size_bytes_stats["max"],
output_size_bytes_stats["mean"],
output_size_bytes_stats["sum"],
)
task_rows = self.task_rows
if task_rows:
out += indent
out += (
"* Output rows per task: {} min, {} max, {} mean, {} tasks used\n"
).format(
task_rows["min"],
task_rows["max"],
task_rows["mean"],
task_rows["count"],
)
node_count_stats = self.node_count
if node_count_stats:
out += indent
out += "* Tasks per node: {} min, {} max, {} mean; {} nodes used\n".format(
node_count_stats["min"],
node_count_stats["max"],
node_count_stats["mean"],
node_count_stats["count"],
)
if self.num_rows_per_s and self.num_rows_per_task_s:
total_num_in_rows = (
self.total_input_num_rows if self.total_input_num_rows else 0
)
total_num_out_rows = output_num_rows_stats["sum"]
out += indent
out += "* Operator throughput:\n"
out += (
indent + "\t* Total input num rows:" f" {total_num_in_rows} " "rows\n"
)
out += (
indent + "\t* Total output num rows:" f" {total_num_out_rows} " "rows\n"
)
out += (
indent + "\t* Ray Data throughput:"
f" {self.num_rows_per_s} "
"rows/s\n"
)
out += (
indent + "\t* Estimated single task throughput:"
f" {self.num_rows_per_task_s} "
"rows/s\n"
)
return out
def __repr__(self, level=0) -> str:
"""For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from
`OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string
that summarizes operator execution statistics.
Returns:
String with summary statistics for executing the given operator.
"""
indent = leveled_indent(level)
indent += leveled_indent(1) if self.is_sub_operator else ""
wall_time_stats = {k: fmt(v) for k, v in (self.wall_time or {}).items()}
cpu_stats = {k: fmt(v) for k, v in (self.cpu_time or {}).items()}
memory_stats = {k: fmt(v) for k, v in (self.memory or {}).items()}
output_num_rows_stats = {
k: fmt(v) for k, v in (self.output_num_rows or {}).items()
}
output_size_bytes_stats = {
k: fmt(v) for k, v in (self.output_size_bytes or {}).items()
}
        node_count_stats = {k: fmt(v) for k, v in (self.node_count or {}).items()}
out = (
f"{indent}OperatorStatsSummary(\n"
f"{indent} operator_name='{self.operator_name}',\n"
f"{indent} is_suboperator={self.is_sub_operator},\n"
f"{indent} time_total_s={fmt(self.time_total_s)},\n"
# block_execution_summary_str already ends with \n
f"{indent} block_execution_summary_str={self.block_execution_summary_str}"
f"{indent} wall_time={wall_time_stats or None},\n"
f"{indent} cpu_time={cpu_stats or None},\n"
f"{indent} memory={memory_stats or None},\n"
f"{indent} output_num_rows={output_num_rows_stats or None},\n"
f"{indent} output_size_bytes={output_size_bytes_stats or None},\n"
f"{indent} node_count={node_conut_stats or None},\n"
f"{indent})"
)
return out
@dataclass
| OperatorStatsSummary |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/query_metrics/query_column_pair.py | {
"start": 565,
"end": 3205
} | class ____(QueryMetricProvider):
metric_name = "query.column_pair"
value_keys = (
"column_A",
"column_B",
"query",
)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
) -> list[dict]:
batch_selectable, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)
column_A: Optional[str] = metric_value_kwargs.get("column_A")
column_B: Optional[str] = metric_value_kwargs.get("column_B")
if column_A and column_B:
query_parameters = QueryParameters(
column_A=column_A,
column_B=column_B,
)
else:
raise ValueError("Both `column_A` and `column_B` must be provided.") # noqa: TRY003 # FIXME CoP
substituted_batch_subquery = (
cls._get_substituted_batch_subquery_from_query_and_batch_selectable(
query=query,
batch_selectable=batch_selectable,
execution_engine=execution_engine,
query_parameters=query_parameters,
)
)
return cls._get_sqlalchemy_records_from_substituted_batch_subquery(
substituted_batch_subquery=substituted_batch_subquery,
execution_engine=execution_engine,
)
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: Dict[str, Any],
runtime_configuration: dict,
) -> List[dict]:
query = cls._get_query_from_metric_value_kwargs(metric_value_kwargs)
df: pyspark.DataFrame
df, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
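        # Register the batch as a temp view so the user-supplied query's
        # {batch} placeholder can reference it.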
df.createOrReplaceTempView("tmp_view")
column_A: Optional[str] = metric_value_kwargs.get("column_A")
column_B: Optional[str] = metric_value_kwargs.get("column_B")
query = query.format(column_A=column_A, column_B=column_B, batch="tmp_view")
engine: pyspark.SparkSession = execution_engine.spark
result: List[pyspark.Row] = engine.sql(query).collect()
return [element.asDict() for element in result]
| QueryColumnPair |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF008_attrs.py | {
"start": 159,
"end": 462
} | class ____:
mutable_default: list[int] = []
immutable_annotation: typing.Sequence[int] = []
without_annotation = []
correct_code: list[int] = KNOWINGLY_MUTABLE_DEFAULT
perfectly_fine: list[int] = field(default_factory=list)
class_variable: typing.ClassVar[list[int]] = []
@frozen
| A |
python | tiangolo__fastapi | fastapi/datastructures.py | {
"start": 788,
"end": 5008
} | class ____(StarletteUploadFile):
"""
A file uploaded in a request.
Define it as a *path operation function* (or dependency) parameter.
If you are using a regular `def` function, you can use the `upload_file.file`
attribute to access the raw standard Python file (blocking, not async), useful and
needed for non-async code.
Read more about it in the
[FastAPI docs for Request Files](https://fastapi.tiangolo.com/tutorial/request-files/).
## Example
```python
from typing import Annotated
from fastapi import FastAPI, File, UploadFile
app = FastAPI()
@app.post("/files/")
async def create_file(file: Annotated[bytes, File()]):
return {"file_size": len(file)}
@app.post("/uploadfile/")
async def create_upload_file(file: UploadFile):
return {"filename": file.filename}
```
"""
file: Annotated[
BinaryIO,
Doc("The standard Python file object (non-async)."),
]
filename: Annotated[Optional[str], Doc("The original file name.")]
size: Annotated[Optional[int], Doc("The size of the file in bytes.")]
headers: Annotated[Headers, Doc("The headers of the request.")]
content_type: Annotated[
Optional[str], Doc("The content type of the request, from the headers.")
]
async def write(
self,
data: Annotated[
bytes,
Doc(
"""
The bytes to write to the file.
"""
),
],
) -> None:
"""
Write some bytes to the file.
You normally wouldn't use this from a file you read in a request.
To be awaitable, compatible with async, this is run in threadpool.
"""
return await super().write(data)
async def read(
self,
size: Annotated[
int,
Doc(
"""
The number of bytes to read from the file.
"""
),
] = -1,
) -> bytes:
"""
Read some bytes from the file.
To be awaitable, compatible with async, this is run in threadpool.
"""
return await super().read(size)
async def seek(
self,
offset: Annotated[
int,
Doc(
"""
The position in bytes to seek to in the file.
"""
),
],
) -> None:
"""
Move to a position in the file.
Any next read or write will be done from that position.
To be awaitable, compatible with async, this is run in threadpool.
"""
return await super().seek(offset)
async def close(self) -> None:
"""
Close the file.
To be awaitable, compatible with async, this is run in threadpool.
"""
return await super().close()
@classmethod
def __get_validators__(cls: Type["UploadFile"]) -> Iterable[Callable[..., Any]]:
yield cls.validate
@classmethod
def validate(cls: Type["UploadFile"], v: Any) -> Any:
if not isinstance(v, StarletteUploadFile):
raise ValueError(f"Expected UploadFile, received: {type(v)}")
return v
@classmethod
def _validate(cls, __input_value: Any, _: Any) -> "UploadFile":
if not isinstance(__input_value, StarletteUploadFile):
raise ValueError(f"Expected UploadFile, received: {type(__input_value)}")
return cast(UploadFile, __input_value)
# TODO: remove when deprecating Pydantic v1
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update({"type": "string", "format": "binary"})
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: CoreSchema, handler: GetJsonSchemaHandler
) -> JsonSchemaValue:
return {"type": "string", "format": "binary"}
@classmethod
def __get_pydantic_core_schema__(
cls, source: Type[Any], handler: Callable[[Any], CoreSchema]
) -> CoreSchema:
from ._compat.v2 import with_info_plain_validator_function
return with_info_plain_validator_function(cls._validate)
| UploadFile |
python | getsentry__sentry | src/sentry/preprod/authentication.py | {
"start": 733,
"end": 991
} | class ____(BasePermission):
def has_permission(self, request: Request, view: object) -> bool:
return bool(request.auth) and isinstance(
request.successful_authenticator, LaunchpadRpcSignatureAuthentication
)
| LaunchpadRpcPermission |
python | chroma-core__chroma | chromadb/utils/embedding_functions/chroma_bm25_embedding_function.py | {
"start": 1381,
"end": 5398
} | class ____(SparseEmbeddingFunction[Documents]):
def __init__(
self,
k: float = DEFAULT_K,
b: float = DEFAULT_B,
avg_doc_length: float = DEFAULT_AVG_DOC_LENGTH,
token_max_length: int = DEFAULT_TOKEN_MAX_LENGTH,
stopwords: Optional[Iterable[str]] = None,
store_tokens: bool = False,
) -> None:
"""Initialize the BM25 sparse embedding function."""
self.k = float(k)
self.b = float(b)
self.avg_doc_length = float(avg_doc_length)
self.token_max_length = int(token_max_length)
self.store_tokens = bool(store_tokens)
if stopwords is not None:
self.stopwords: Optional[List[str]] = [str(word) for word in stopwords]
stopword_list: Iterable[str] = self.stopwords
else:
self.stopwords = None
stopword_list = DEFAULT_CHROMA_BM25_STOPWORDS
stemmer = get_english_stemmer()
self._tokenizer = Bm25Tokenizer(stemmer, stopword_list, self.token_max_length)
self._hasher = Murmur3AbsHasher()
def _encode(self, text: str) -> SparseVector:
tokens = self._tokenizer.tokenize(text)
if not tokens:
return SparseVector(indices=[], values=[])
doc_len = float(len(tokens))
counts = Counter(
_HashedToken(self._hasher.hash(token), token if self.store_tokens else None)
for token in tokens
)
sorted_keys = sorted(counts.keys())
indices: List[int] = []
values: List[float] = []
tokens: Optional[List[str]] = [] if self.store_tokens else None
for key in sorted_keys:
tf = float(counts[key])
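            # BM25 term weight: tf * (k + 1) / (tf + k * (1 - b + b * doc_len / avg_doc_length))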
denominator = tf + self.k * (
1 - self.b + (self.b * doc_len) / self.avg_doc_length
)
score = tf * (self.k + 1) / denominator
indices.append(key.hash)
values.append(score)
if tokens is not None:
tokens.append(key.label)
return SparseVector(indices=indices, values=values, labels=tokens)
def __call__(self, input: Documents) -> SparseVectors:
sparse_vectors: SparseVectors = []
if not input:
return sparse_vectors
for document in input:
sparse_vectors.append(self._encode(document))
return sparse_vectors
def embed_query(self, input: Documents) -> SparseVectors:
return self.__call__(input)
@staticmethod
def name() -> str:
return NAME
@staticmethod
def build_from_config(
config: Dict[str, Any]
) -> "SparseEmbeddingFunction[Documents]":
return ChromaBm25EmbeddingFunction(
k=config.get("k", DEFAULT_K),
b=config.get("b", DEFAULT_B),
avg_doc_length=config.get("avg_doc_length", DEFAULT_AVG_DOC_LENGTH),
token_max_length=config.get("token_max_length", DEFAULT_TOKEN_MAX_LENGTH),
stopwords=config.get("stopwords"),
store_tokens=config.get("store_tokens", False),
)
def get_config(self) -> Dict[str, Any]:
config: Dict[str, Any] = {
"k": self.k,
"b": self.b,
"avg_doc_length": self.avg_doc_length,
"token_max_length": self.token_max_length,
"store_tokens": self.store_tokens,
}
if self.stopwords is not None:
config["stopwords"] = list(self.stopwords)
return config
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
mutable_keys = {"k", "b", "avg_doc_length", "token_max_length", "stopwords", "store_tokens"}
for key in new_config:
if key not in mutable_keys:
raise ValueError(f"Updating '{key}' is not supported for {NAME}")
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
        validate_config_schema(config, NAME)
| ChromaBm25EmbeddingFunction |
python | pydantic__pydantic | pydantic/warnings.py | {
"start": 2905,
"end": 3226
} | class ____(PydanticDeprecationWarning):
"""A specific `PydanticDeprecationWarning` subclass defining functionality deprecated since Pydantic 2.10."""
def __init__(self, message: str, *args: object) -> None:
super().__init__(message, *args, since=(2, 10), expected_removal=(3, 0))
| PydanticDeprecatedSince210 |
python | pypa__pipenv | pipenv/patched/pip/_internal/index/package_finder.py | {
"start": 13167,
"end": 14100
} | class ____:
"""A collection of candidates, returned by `PackageFinder.find_best_candidate`.
This class is only intended to be instantiated by CandidateEvaluator's
`compute_best_candidate()` method.
:param all_candidates: A sequence of all available candidates found.
:param applicable_candidates: The applicable candidates.
:param best_candidate: The most preferred candidate found, or None
if no applicable candidates were found.
"""
all_candidates: List[InstallationCandidate]
applicable_candidates: List[InstallationCandidate]
best_candidate: Optional[InstallationCandidate]
def __post_init__(self) -> None:
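        # Invariants: applicable candidates are a subset of all candidates,
        # and a best candidate exists iff any candidate is applicable.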
assert set(self.applicable_candidates) <= set(self.all_candidates)
if self.best_candidate is None:
assert not self.applicable_candidates
else:
assert self.best_candidate in self.applicable_candidates
| BestCandidateResult |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 33668,
"end": 33904
} | class ____(object):
"""
Index and value
"""
def __init__(self, ind, val):
self.index = ind
self.value = val
def __repr__(self):
return 'IndexValue(%f, %f)' % (self.index, self.value)
| IndexValue |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 21087,
"end": 22389
} | class ____(test_util.TensorFlowTestCase):
def _compareDiff(self, x, y, use_gpu):
for index in ("ij", "xy"):
numpy_out = np.meshgrid(x, y, indexing=index)
tf_out = array_ops.meshgrid(x, y, indexing=index)
with self.cached_session(use_gpu=use_gpu):
for xx, yy in zip(numpy_out, tf_out):
self.assertAllEqual(xx, yy)
def _compareDiffType(self, n, np_dtype, use_gpu):
inputs = []
for index in ("ij", "xy"):
for _ in range(n):
x = np.linspace(-10, 10, 5).astype(np_dtype)
if np_dtype in (np.complex64, np.complex128):
x += 1j
inputs.append(x)
numpy_out = np.meshgrid(*inputs, indexing=index)
with test_util.device(use_gpu=use_gpu):
tf_out = array_ops.meshgrid(*inputs, indexing=index)
for x_np, x_tf in zip(numpy_out, tf_out):
self.assertAllEqual(x_np, x_tf)
def testCompare(self):
for t in (np.float16, np.float32, np.float64, np.int32, np.int64,
np.complex64, np.complex128):
with self.subTest(t=t):
self._compareDiffType(2, t, False)
self._compareDiffType(3, t, False)
x = [1, 2, 3]
y = [4, 5]
a = [[1, 1], [1, 1]]
self._compareDiff(x, y, False)
self._compareDiff(x, a, False)
| MeshgridTest |
python | davidhalter__jedi | jedi/inference/gradual/stub_value.py | {
"start": 2393,
"end": 2624
} | class ____(ModuleContext):
def get_filters(self, *args, **kwargs):
filters = super().get_filters(*args, **kwargs)
yield TypingModuleFilterWrapper(next(filters, None))
yield from filters
| TypingModuleContext |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_object_position12.py | {
"start": 315,
"end": 896
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("object_position12.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column(1, 1, 5, None)
worksheet.insert_image("E9", self.image_dir + "red.png")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/base_env.py | {
"start": 18069,
"end": 18381
} | class ____(Enum):
"""
An Enum which defines the type of information carried in the observation
of the agent.
"""
DEFAULT = 0
"""
Observation information is generic.
"""
GOAL_SIGNAL = 1
"""
Observation contains goal information for current task.
"""
| ObservationType |
python | mlflow__mlflow | tests/openai/test_openai_model_export.py | {
"start": 13765,
"end": 21531
} | class ____(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input, params=None):
completion = chat_completions.create(
model="gpt-4o-mini",
messages=[{"role": "user", "content": "What is MLflow?"}],
)
return completion.choices[0].message.content
def test_embeddings(tmp_path):
mlflow.openai.save_model(
model="text-embedding-ada-002",
task=embeddings(),
path=tmp_path,
)
model = mlflow.models.Model.load(tmp_path)
assert model.signature.inputs.to_dict() == [{"type": "string", "required": True}]
assert model.signature.outputs.to_dict() == [
{"type": "tensor", "tensor-spec": {"dtype": "float64", "shape": (-1,)}}
]
model = mlflow.pyfunc.load_model(tmp_path)
data = pd.DataFrame({"text": ["a", "b"]})
preds = model.predict(data)
assert list(map(len, preds)) == [1536, 1536]
data = pd.DataFrame({"text": ["a"] * 100})
preds = model.predict(data)
assert list(map(len, preds)) == [1536] * 100
def test_embeddings_batch_size_azure(tmp_path, monkeypatch):
monkeypatch.setenv("OPENAI_API_TYPE", "azure")
monkeypatch.setenv("OPENAI_ENGINE", "test_engine")
mlflow.openai.save_model(
model="text-embedding-ada-002",
task=embeddings(),
path=tmp_path,
)
model = mlflow.pyfunc.load_model(tmp_path)
assert model._model_impl.api_config.batch_size == 16
def test_embeddings_pyfunc_server_and_score():
df = pd.DataFrame({"text": ["a", "b"]})
with mlflow.start_run():
model_info = mlflow.openai.log_model(
"text-embedding-ada-002",
embeddings(),
name="model",
input_example=df,
)
inference_payload = load_serving_example(model_info.model_uri)
resp = pyfunc_serve_and_score_model(
model_info.model_uri,
data=inference_payload,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
extra_args=["--env-manager", "local"],
)
expected = mlflow.pyfunc.load_model(model_info.model_uri).predict(df)
actual = pd.DataFrame(data=json.loads(resp.content.decode("utf-8")))
pd.testing.assert_frame_equal(actual, pd.DataFrame({"predictions": expected}))
def test_spark_udf_embeddings(tmp_path, spark):
mlflow.openai.save_model(
model="text-embedding-ada-002",
task=embeddings(),
path=tmp_path,
)
udf = mlflow.pyfunc.spark_udf(spark, tmp_path, result_type="array<double>")
df = spark.createDataFrame(
[
("a",),
("b",),
],
["x"],
)
df = df.withColumn("z", udf("x")).toPandas()
assert list(map(len, df["z"])) == [1536, 1536]
def test_inference_params(tmp_path):
mlflow.openai.save_model(
model="text-embedding-ada-002",
task=embeddings(),
path=tmp_path,
signature=ModelSignature(
inputs=Schema([ColSpec(type="string", name=None)]),
outputs=Schema([TensorSpec(type=np.dtype("float64"), shape=(-1,))]),
params=ParamSchema([ParamSpec(name="batch_size", dtype="long", default=16)]),
),
)
model_info = mlflow.models.Model.load(tmp_path)
assert (
len([p for p in model_info.signature.params if p.name == "batch_size" and p.default == 16])
== 1
)
model = mlflow.pyfunc.load_model(tmp_path)
data = pd.DataFrame({"text": ["a", "b"]})
preds = model.predict(data, params={"batch_size": 5})
assert list(map(len, preds)) == [1536, 1536]
def test_inference_params_overlap(tmp_path):
with pytest.raises(mlflow.MlflowException, match=r"any of \['prefix'\] as parameters"):
mlflow.openai.save_model(
model="text-davinci-003",
task=completions(),
path=tmp_path,
prefix="Classify the following text's sentiment:",
signature=ModelSignature(
inputs=Schema([ColSpec(type="string", name=None)]),
outputs=Schema([ColSpec(type="string", name=None)]),
params=ParamSchema([ParamSpec(name="prefix", default=None, dtype="string")]),
),
)
def test_multimodal_messages(tmp_path):
# Test multimodal content with variable placeholders
mlflow.openai.save_model(
model="gpt-4o-mini",
task=chat_completions(),
path=tmp_path,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "{system_prompt}"},
{
"type": "image_url",
"image_url": {
"url": "data:image/jpeg;base64,{image_base64}",
"detail": "low",
},
},
],
}
],
)
model = mlflow.models.Model.load(tmp_path)
assert model.signature.inputs.to_dict() == [
{"name": "image_base64", "type": "string", "required": True},
{"name": "system_prompt", "type": "string", "required": True},
]
assert model.signature.outputs.to_dict() == [
{"type": "string", "required": True},
]
model = mlflow.pyfunc.load_model(tmp_path)
data = pd.DataFrame(
{
"system_prompt": ["Analyze this image"],
"image_base64": [
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg=="
],
}
)
expected_output = [
[
{
"content": [
{"type": "text", "text": "Analyze this image"},
{
"type": "image_url",
"image_url": {
"url": (
"data:image/jpeg;base64,"
"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg=="
),
"detail": "low",
},
},
],
"role": "user",
}
]
]
assert list(map(json.loads, model.predict(data))) == expected_output
def test_multimodal_messages_no_variables(tmp_path):
mlflow.openai.save_model(
model="gpt-4o-mini",
task=chat_completions(),
path=tmp_path,
messages=[
{
"role": "user",
"content": [
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {"url": "data:image/jpeg;base64,abc123", "detail": "low"},
},
],
}
],
)
model = mlflow.models.Model.load(tmp_path)
# Should add default content variable since no variables found
assert model.signature.inputs.to_dict() == [
{"type": "string", "required": True},
]
model = mlflow.pyfunc.load_model(tmp_path)
data = pd.DataFrame({"content": ["Additional context"]})
expected_output = [
[
{
"content": [
{"type": "text", "text": "What's in this image?"},
{
"type": "image_url",
"image_url": {"url": "data:image/jpeg;base64,abc123", "detail": "low"},
},
],
"role": "user",
},
{"content": "Additional context", "role": "user"},
]
]
assert list(map(json.loads, model.predict(data))) == expected_output
| ChatCompletionModel |
python | apache__airflow | airflow-core/src/airflow/models/base.py | {
"start": 3376,
"end": 3806
} | class ____(Base):
"""Base class for depending models linked to TaskInstance."""
__abstract__ = True
task_id: Mapped[str] = mapped_column(StringID(), nullable=False)
dag_id: Mapped[str] = mapped_column(StringID(), nullable=False)
run_id: Mapped[str] = mapped_column(StringID(), nullable=False)
map_index: Mapped[int] = mapped_column(Integer, nullable=False, server_default=text("-1"))
| TaskInstanceDependencies |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/test_cards.py | {
"start": 2835,
"end": 3096
} | class ____(MetaflowCardComponent):
REALTIME_UPDATABLE = True
def __init__(self, data):
self._data = data
@render_safely
def render(self):
return self._data
def update(self, data):
self._data = data
| TestJSONComponent |
python | huggingface__transformers | src/transformers/models/minimax/modular_minimax.py | {
"start": 21612,
"end": 21665
} | class ____(MixtralAttention):
pass
| MiniMaxAttention |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 4655,
"end": 7244
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
        Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`ConditionalDetrForSegmentation`].
"""
)
# Copied from transformers.models.detr.modeling_detr.DetrSegmentationOutput with Detr->ConditionalDetr
| ConditionalDetrObjectDetectionOutput |
python | spack__spack | var/spack/test_repos/spack_repo/duplicates_test/packages/py_numpy/package.py | {
"start": 216,
"end": 612
} | class ____(Package):
"""An extension that depends on pinned build dependencies"""
homepage = "http://www.example.com"
url = "http://www.example.com/tdep-1.0.tar.gz"
tags = ["build-tools"]
version("1.25.0", md5="0123456789abcdef0123456789abcdef")
extends("python")
depends_on("py-setuptools@=59", type=("build", "run"))
depends_on("gmake@4.1", type="build")
| PyNumpy |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 850505,
"end": 852343
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"files",
"package",
"platform",
"pre_release",
"readme",
"release",
"statistics",
"summary",
"version",
)
files = sgqlc.types.Field(
sgqlc.types.non_null(PackageFileConnection),
graphql_name="files",
args=sgqlc.types.ArgDict(
(
(
"order_by",
sgqlc.types.Arg(
PackageFileOrder,
graphql_name="orderBy",
default={"field": "CREATED_AT", "direction": "ASC"},
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
package = sgqlc.types.Field(Package, graphql_name="package")
platform = sgqlc.types.Field(String, graphql_name="platform")
pre_release = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="preRelease"
)
readme = sgqlc.types.Field(String, graphql_name="readme")
release = sgqlc.types.Field("Release", graphql_name="release")
statistics = sgqlc.types.Field(PackageVersionStatistics, graphql_name="statistics")
summary = sgqlc.types.Field(String, graphql_name="summary")
version = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="version")
| PackageVersion |
python | ApeWorX__ape | src/ape_node/query.py | {
"start": 339,
"end": 1781
} | class ____(QueryAPI):
@singledispatchmethod
def estimate_query(self, query: QueryType) -> Optional[int]: # type: ignore[override]
return None
@singledispatchmethod
def perform_query(self, query: QueryType) -> Iterator: # type: ignore[override]
raise QueryEngineError(
f"{self.__class__.__name__} cannot handle {query.__class__.__name__} queries."
)
@estimate_query.register
def estimate_contract_creation_query(self, query: ContractCreationQuery) -> Optional[int]:
if getattr(self.provider, "_ots_api_level", None) is not None:
return 250
return None
@perform_query.register
def get_contract_creation_receipt(
self, query: ContractCreationQuery
) -> Iterator[ContractCreation]:
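        # ots_getContractCreator returns the creation tx hash and creator
        # address; a creator differing from the tx sender indicates a factory.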
if self.network_manager.active_provider and isinstance(self.provider, EthereumNodeProvider):
ots = self.provider.make_request("ots_getContractCreator", [query.contract])
if ots is None:
return None
creator = self.conversion_manager.convert(ots["creator"], AddressType)
receipt = self.provider.get_receipt(ots["hash"])
yield ContractCreation(
txn_hash=ots["hash"],
block=receipt.block_number,
deployer=receipt.sender,
factory=creator if creator != receipt.sender else None,
)
| OtterscanQueryEngine |
python | python-poetry__poetry | src/poetry/utils/env/python/exceptions.py | {
"start": 1007,
"end": 1374
} | class ____(PythonVersionError):
def __init__(self, expected: str, given: str) -> None:
message = (
f"Current Python version ({given}) "
f"is not allowed by the project ({expected}).\n"
'Please change python executable via the "env use" command.'
)
super().__init__(message)
| InvalidCurrentPythonVersionError |
python | pypa__pip | src/pip/_vendor/rich/layout.py | {
"start": 947,
"end": 1025
} | class ____(LayoutError):
"""Requested splitter does not exist."""
| NoSplitter |
python | encode__django-rest-framework | rest_framework/generics.py | {
"start": 7415,
"end": 7669
} | class ____(mixins.DestroyModelMixin,
GenericAPIView):
"""
Concrete view for deleting a model instance.
"""
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
| DestroyAPIView |
python | pytorch__pytorch | torch/ao/quantization/fake_quantize.py | {
"start": 12593,
"end": 14231
} | class ____(FakeQuantize):
"""Simulate quantize and dequantize in training time.
Simulate quantize and dequantize with fixed quantization
parameters in training time. Only per tensor quantization
is supported.
"""
# TODO: rename observer to observer_ctr
def __init__(self, observer):
super().__init__(observer=observer)
if type(self.activation_post_process) is not FixedQParamsObserver:
raise AssertionError(
f"{self.__class__.__name__}'s observer must be a {FixedQParamsObserver.__name__}"
)
self._observer_ctr = observer
self.scale = self.activation_post_process.scale
self.zero_point = self.activation_post_process.zero_point
if not _is_per_tensor(self.qscheme):
raise AssertionError(
"Only per tensor quantization is supported"
+ " FixedQParamsFakeQuantize module, got qscheme:"
+ str(self.qscheme)
)
@torch.jit.export
def calculate_qparams(self): # type: ignore[override]
return self.scale, self.zero_point
@torch.jit.export
def extra_repr(self):
"""Define a string representation of the object's attributes."""
return (
f"fake_quant_enabled={self.fake_quant_enabled}, observer_enabled={self.observer_enabled}, "
f"scale={self.scale}, zero_point={self.zero_point}, "
f"dtype={self.dtype}, quant_min={self.activation_post_process.quant_min}, "
f"quant_max={self.activation_post_process.quant_max}, qscheme={self.qscheme}"
)
| FixedQParamsFakeQuantize |
python | allegroai__clearml | clearml/backend_api/session/jsonmodels/fields.py | {
"start": 4770,
"end": 4850
} | class ____(BaseField):
"""Float field."""
types = (float, int)
| FloatField |