| body (string, 26–98.2k chars) | body_hash (int64) | docstring (string, 1–16.8k chars) | path (string, 5–230 chars) | name (string, 1–96 chars) | repository_name (string, 7–89 chars) | lang (1 class: python) | body_without_docstring (string, 20–98.2k chars) |
|---|---|---|---|---|---|---|---|
def __getitem__(self, i):
'\n Indexing method.\n Returns a Molecule object for given index (frame).\n Returns a Trajectory object if used as slicing.\n\n '
if isinstance(i, slice):
indices = range(len(self))[i.start:i.stop:i.step]
if (len(indices) == 0):
return []
else:
new_traj = Trajectory(molecule=self[indices[0]])
for j in indices[1:]:
new_traj.append(self[j])
return new_traj
else:
return Molecule(atoms=self.atoms[i], coordinates=self.coordinates[i])
| -6,427,802,718,642,753,000
|
Indexing method.
Returns a Molecule object for given index (frame).
Returns a Trajectory object if used as slicing.
|
angstrom/trajectory/trajectory.py
|
__getitem__
|
kbsezginel/angstrom
|
python
|
def __getitem__(self, i):
'\n Indexing method.\n Returns a Molecule object for given index (frame).\n Returns a Trajectory object if used as slicing.\n\n '
if isinstance(i, slice):
indices = range(len(self))[i.start:i.stop:i.step]
if (len(indices) == 0):
return []
else:
new_traj = Trajectory(molecule=self[indices[0]])
for j in indices[1:]:
new_traj.append(self[j])
return new_traj
else:
return Molecule(atoms=self.atoms[i], coordinates=self.coordinates[i])
|
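A minimal usage sketch of this indexing, assuming the package's top-level import path and a no-argument constructor (both assumptions; 'traj.xyz' is a hypothetical file):

from angstrom import Trajectory  # assumed import path

traj = Trajectory()              # assumption: empty construction is allowed
traj.read('traj.xyz')            # hypothetical trajectory file
frame = traj[0]                  # integer index -> Molecule
part = traj[10:20:2]             # slice -> new Trajectory ([] if the slice is empty)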
def __iter__(self):
'\n Initialize iterator, reset frame index.\n\n '
self.current_frame = 0
return self
| 5,526,493,697,258,412,000
|
Initialize iterator, reset frame index.
|
angstrom/trajectory/trajectory.py
|
__iter__
|
kbsezginel/angstrom
|
python
|
def __iter__(self):
'\n \n\n '
self.current_frame = 0
return self
|
def __next__(self):
'\n Returns the next frame in Trajectory as a Molecule object.\n\n '
if (self.current_frame >= len(self)):
raise StopIteration
next_mol = self[self.current_frame]
self.current_frame += 1
return next_mol
| -5,573,255,124,753,591,000
|
Returns the next frame in Trajectory as a Molecule object.
|
angstrom/trajectory/trajectory.py
|
__next__
|
kbsezginel/angstrom
|
python
|
def __next__(self):
'\n \n\n '
if (self.current_frame >= len(self)):
raise StopIteration
next_mol = self[self.current_frame]
self.current_frame += 1
return next_mol
|
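`__iter__` and `__next__` together make `Trajectory` its own iterator, so a plain `for` loop walks the frames; note that because the frame counter lives on `self`, two simultaneous loops over the same trajectory would share and corrupt each other's position. Continuing the hypothetical `traj` above:

for mol in traj:                         # __iter__ resets current_frame to 0
    print(mol.coordinates.mean(axis=0))  # each mol is a Molecule for one frame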
def append(self, mol):
'\n Append molecule to trajectory.\n The number of atoms in the molecule must match that of the trajectory.\n\n Parameters\n ----------\n mol : Molecule\n Molecule object to be added\n\n Returns\n -------\n None\n Added to Trajectory object.\n\n '
if (len(mol.atoms) != self.atoms.shape[1]):
raise Exception('Trajectory cannot have different number of atoms per frame')
self.atoms = np.append(self.atoms, [mol.atoms], axis=0)
self.coordinates = np.append(self.coordinates, [mol.coordinates], axis=0)
| -5,508,576,439,384,275,000
|
Append molecule to trajectory.
The number of atoms in the molecule must match that of the trajectory.
Parameters
----------
mol : Molecule
Molecule object to be added
Returns
-------
None
Added to Trajectory object.
|
angstrom/trajectory/trajectory.py
|
append
|
kbsezginel/angstrom
|
python
|
def append(self, mol):
'\n Append molecule to trajectory.\n The number of atoms in the molecule must match that of the trajectory.\n\n Parameters\n ----------\n mol : Molecule\n Molecule object to be added\n\n Returns\n -------\n None\n Added to Trajectory object.\n\n '
if (len(mol.atoms) != self.atoms.shape[1]):
raise Exception('Trajectory cannot have different number of atoms per frame')
self.atoms = np.append(self.atoms, [mol.atoms], axis=0)
self.coordinates = np.append(self.coordinates, [mol.coordinates], axis=0)
|
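Since `np.append` reallocates and copies the full arrays on every call, building a long trajectory frame-by-frame costs quadratic time. A hedged sketch of a batch alternative, using the attribute names from the class above (the `new_frames` list is hypothetical; numpy is assumed imported as `np`, as in the module):

# Collect frames first, then concatenate once instead of one np.append per frame.
new_atoms = np.stack([mol.atoms for mol in new_frames])
new_coors = np.stack([mol.coordinates for mol in new_frames])
traj.atoms = np.concatenate([traj.atoms, new_atoms], axis=0)
traj.coordinates = np.concatenate([traj.coordinates, new_coors], axis=0)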
def read(self, filename):
"\n Read xyz formatted trajectory file.\n\n Parameters\n ----------\n filename : str\n Trajectory file name.\n\n Returns\n -------\n None\n Assigns 'coordinates', 'atoms', and 'headers' attributes.\n\n "
self.name = os.path.splitext(os.path.basename(filename))[0]
traj = read_xyz_traj(filename)
(self.atoms, self.coordinates, self.headers) = (traj['atoms'], traj['coordinates'], traj['headers'])
| -9,214,895,155,869,046,000
|
Read xyz formatted trajectory file.
Parameters
----------
filename : str
Trajectory file name.
Returns
-------
None
Assigns 'coordinates', 'atoms', and 'headers' attributes.
|
angstrom/trajectory/trajectory.py
|
read
|
kbsezginel/angstrom
|
python
|
def read(self, filename):
"\n Read xyz formatted trajectory file.\n\n Parameters\n ----------\n filename : str\n Trajectory file name.\n\n Returns\n -------\n None\n Assigns 'coordinates', 'atoms', and 'headers' attributes.\n\n "
self.name = os.path.splitext(os.path.basename(filename))[0]
traj = read_xyz_traj(filename)
(self.atoms, self.coordinates, self.headers) = (traj['atoms'], traj['coordinates'], traj['headers'])
|
def write(self, filename):
'\n Write xyz formatted trajectory file.\n\n Parameters\n ----------\n filename : str\n Trajectory file name (formats: xyz).\n\n Returns\n -------\n None\n Writes molecule information to given file name.\n\n '
with open(filename, 'w') as traj_file:
if hasattr(self, 'headers'):
write_xyz_traj(traj_file, self.atoms, self.coordinates, headers=self.headers)
else:
write_xyz_traj(traj_file, self.atoms, self.coordinates)
| -412,128,814,471,408,600
|
Write xyz formatted trajectory file.
Parameters
----------
filename : str
Trajectory file name (formats: xyz).
Returns
-------
None
Writes molecule information to given file name.
|
angstrom/trajectory/trajectory.py
|
write
|
kbsezginel/angstrom
|
python
|
def write(self, filename):
'\n Write xyz formatted trajectory file.\n\n Parameters\n ----------\n filename : str\n Trajectory file name (formats: xyz).\n\n Returns\n -------\n None\n Writes molecule information to given file name.\n\n '
with open(filename, 'w') as traj_file:
if hasattr(self, 'headers'):
write_xyz_traj(traj_file, self.atoms, self.coordinates, headers=self.headers)
else:
write_xyz_traj(traj_file, self.atoms, self.coordinates)
|
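A round-trip sketch with the pair above, continuing the hypothetical `traj` (file names are placeholders; headers are only re-emitted when the source file had them):

traj.read('input.xyz')    # assigns name, atoms, coordinates, headers
traj.write('output.xyz')  # writes headers when the attribute is present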
def get_center(self, mass=True):
'\n Get coordinates of molecule center at each frame.\n\n Parameters\n ----------\n mass : bool\n Calculate center of mass (True) or geometric center (False).\n\n Returns\n -------\n ndarray\n Molecule center coordinates for each frame.\n\n '
centers = np.empty((len(self.atoms), 3))
for (f, (frame_atoms, frame_coors)) in enumerate(zip(self.atoms, self.coordinates)):
centers[f] = get_molecule_center(frame_atoms, frame_coors, mass=mass)
return centers
| -114,730,489,028,035,970
|
Get coordinates of molecule center at each frame.
Parameters
----------
mass : bool
Calculate center of mass (True) or geometric center (False).
Returns
-------
ndarray
Molecule center coordinates for each frame.
|
angstrom/trajectory/trajectory.py
|
get_center
|
kbsezginel/angstrom
|
python
|
def get_center(self, mass=True):
'\n Get coordinates of molecule center at each frame.\n\n Parameters\n ----------\n mass : bool\n Calculate center of mass (True) or geometric center (False).\n\n Returns\n -------\n ndarray\n Molecule center coordinates for each frame.\n\n '
centers = np.empty((len(self.atoms), 3))
for (f, (frame_atoms, frame_coors)) in enumerate(zip(self.atoms, self.coordinates)):
centers[f] = get_molecule_center(frame_atoms, frame_coors, mass=mass)
return centers
|
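Usage sketch for `get_center`, continuing `traj`:

com = traj.get_center()             # centers of mass, shape (n_frames, 3)
geo = traj.get_center(mass=False)   # geometric centers instead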
def main():
'\n Main entry point for the script.\n '
movie_list = get_movie_list('src/data/movies.json')
fresh_tomatoes.open_movies_page(movie_list)
| -4,412,794,696,642,767,000
|
Main entry point for the script.
|
src/entertainment_center.py
|
main
|
golgistudio/udacity-movie-trailer
|
python
|
def main():
'\n \n '
movie_list = get_movie_list('src/data/movies.json')
fresh_tomatoes.open_movies_page(movie_list)
|
def testInit(self):
'Testing initialization from valid units'
d = Distance(m=100)
self.assertEqual(d.m, 100)
(d1, d2, d3) = (D(m=100), D(meter=100), D(metre=100))
for d in (d1, d2, d3):
self.assertEqual(d.m, 100)
d = D(nm=100)
self.assertEqual(d.m, 185200)
(y1, y2, y3) = (D(yd=100), D(yard=100), D(Yard=100))
for d in (y1, y2, y3):
self.assertEqual(d.yd, 100)
(mm1, mm2) = (D(millimeter=1000), D(MiLLiMeTeR=1000))
for d in (mm1, mm2):
self.assertEqual(d.m, 1.0)
self.assertEqual(d.mm, 1000.0)
| -939,912,387,337,035,100
|
Testing initialization from valid units
|
tests/gis_tests/test_measure.py
|
testInit
|
iMerica/dj-models
|
python
|
def testInit(self):
d = Distance(m=100)
self.assertEqual(d.m, 100)
(d1, d2, d3) = (D(m=100), D(meter=100), D(metre=100))
for d in (d1, d2, d3):
self.assertEqual(d.m, 100)
d = D(nm=100)
self.assertEqual(d.m, 185200)
(y1, y2, y3) = (D(yd=100), D(yard=100), D(Yard=100))
for d in (y1, y2, y3):
self.assertEqual(d.yd, 100)
(mm1, mm2) = (D(millimeter=1000), D(MiLLiMeTeR=1000))
for d in (mm1, mm2):
self.assertEqual(d.m, 1.0)
self.assertEqual(d.mm, 1000.0)
|
def testInitInvalid(self):
'Testing initialization from invalid units'
with self.assertRaises(AttributeError):
D(banana=100)
| 6,802,256,834,421,843,000
|
Testing initialization from invalid units
|
tests/gis_tests/test_measure.py
|
testInitInvalid
|
iMerica/dj-models
|
python
|
def testInitInvalid(self):
with self.assertRaises(AttributeError):
D(banana=100)
|
def testAccess(self):
'Testing access in different units'
d = D(m=100)
self.assertEqual(d.km, 0.1)
self.assertAlmostEqual(d.ft, 328.084, 3)
| 8,387,798,650,130,048,000
|
Testing access in different units
|
tests/gis_tests/test_measure.py
|
testAccess
|
iMerica/dj-models
|
python
|
def testAccess(self):
d = D(m=100)
self.assertEqual(d.km, 0.1)
self.assertAlmostEqual(d.ft, 328.084, 3)
|
def testAccessInvalid(self):
'Testing access in invalid units'
d = D(m=100)
self.assertFalse(hasattr(d, 'banana'))
| 8,163,053,566,261,297,000
|
Testing access in invalid units
|
tests/gis_tests/test_measure.py
|
testAccessInvalid
|
iMerica/dj-models
|
python
|
def testAccessInvalid(self):
d = D(m=100)
self.assertFalse(hasattr(d, 'banana'))
|
def testAddition(self):
'Test addition & subtraction'
d1 = D(m=100)
d2 = D(m=200)
d3 = (d1 + d2)
self.assertEqual(d3.m, 300)
d3 += d1
self.assertEqual(d3.m, 400)
d4 = (d1 - d2)
self.assertEqual(d4.m, (- 100))
d4 -= d1
self.assertEqual(d4.m, (- 200))
with self.assertRaises(TypeError):
(d1 + 1)
with self.assertRaises(TypeError):
(d1 - 1)
with self.assertRaises(TypeError):
d1 += 1
with self.assertRaises(TypeError):
d1 -= 1
| -696,819,538,585,398,100
|
Test addition & subtraction
|
tests/gis_tests/test_measure.py
|
testAddition
|
iMerica/dj-models
|
python
|
def testAddition(self):
d1 = D(m=100)
d2 = D(m=200)
d3 = (d1 + d2)
self.assertEqual(d3.m, 300)
d3 += d1
self.assertEqual(d3.m, 400)
d4 = (d1 - d2)
self.assertEqual(d4.m, (- 100))
d4 -= d1
self.assertEqual(d4.m, (- 200))
with self.assertRaises(TypeError):
(d1 + 1)
with self.assertRaises(TypeError):
(d1 - 1)
with self.assertRaises(TypeError):
d1 += 1
with self.assertRaises(TypeError):
d1 -= 1
|
def testMultiplication(self):
'Test multiplication & division'
d1 = D(m=100)
d3 = (d1 * 2)
self.assertEqual(d3.m, 200)
d3 = (2 * d1)
self.assertEqual(d3.m, 200)
d3 *= 5
self.assertEqual(d3.m, 1000)
d4 = (d1 / 2)
self.assertEqual(d4.m, 50)
d4 /= 5
self.assertEqual(d4.m, 10)
d5 = (d1 / D(m=2))
self.assertEqual(d5, 50)
a5 = (d1 * D(m=10))
self.assertIsInstance(a5, Area)
self.assertEqual(a5.sq_m, (100 * 10))
with self.assertRaises(TypeError):
d1 *= D(m=1)
with self.assertRaises(TypeError):
d1 /= D(m=1)
| 4,051,457,820,995,021,000
|
Test multiplication & division
|
tests/gis_tests/test_measure.py
|
testMultiplication
|
iMerica/dj-models
|
python
|
def testMultiplication(self):
d1 = D(m=100)
d3 = (d1 * 2)
self.assertEqual(d3.m, 200)
d3 = (2 * d1)
self.assertEqual(d3.m, 200)
d3 *= 5
self.assertEqual(d3.m, 1000)
d4 = (d1 / 2)
self.assertEqual(d4.m, 50)
d4 /= 5
self.assertEqual(d4.m, 10)
d5 = (d1 / D(m=2))
self.assertEqual(d5, 50)
a5 = (d1 * D(m=10))
self.assertIsInstance(a5, Area)
self.assertEqual(a5.sq_m, (100 * 10))
with self.assertRaises(TypeError):
d1 *= D(m=1)
with self.assertRaises(TypeError):
d1 /= D(m=1)
|
def testUnitConversions(self):
'Testing default units during maths'
d1 = D(m=100)
d2 = D(km=1)
d3 = (d1 + d2)
self.assertEqual(d3._default_unit, 'm')
d4 = (d2 + d1)
self.assertEqual(d4._default_unit, 'km')
d5 = (d1 * 2)
self.assertEqual(d5._default_unit, 'm')
d6 = (d1 / 2)
self.assertEqual(d6._default_unit, 'm')
| 1,915,829,929,822,303,200
|
Testing default units during maths
|
tests/gis_tests/test_measure.py
|
testUnitConversions
|
iMerica/dj-models
|
python
|
def testUnitConversions(self):
d1 = D(m=100)
d2 = D(km=1)
d3 = (d1 + d2)
self.assertEqual(d3._default_unit, 'm')
d4 = (d2 + d1)
self.assertEqual(d4._default_unit, 'km')
d5 = (d1 * 2)
self.assertEqual(d5._default_unit, 'm')
d6 = (d1 / 2)
self.assertEqual(d6._default_unit, 'm')
|
def testComparisons(self):
'Testing comparisons'
d1 = D(m=100)
d2 = D(km=1)
d3 = D(km=0)
self.assertGreater(d2, d1)
self.assertEqual(d1, d1)
self.assertLess(d1, d2)
self.assertFalse(d3)
| 6,504,463,429,873,153,000
|
Testing comparisons
|
tests/gis_tests/test_measure.py
|
testComparisons
|
iMerica/dj-models
|
python
|
def testComparisons(self):
d1 = D(m=100)
d2 = D(km=1)
d3 = D(km=0)
self.assertGreater(d2, d1)
self.assertEqual(d1, d1)
self.assertLess(d1, d2)
self.assertFalse(d3)
|
def testUnitsStr(self):
'Testing conversion to strings'
d1 = D(m=100)
d2 = D(km=3.5)
self.assertEqual(str(d1), '100.0 m')
self.assertEqual(str(d2), '3.5 km')
self.assertEqual(repr(d1), 'Distance(m=100.0)')
self.assertEqual(repr(d2), 'Distance(km=3.5)')
| 5,147,494,180,421,207,000
|
Testing conversion to strings
|
tests/gis_tests/test_measure.py
|
testUnitsStr
|
iMerica/dj-models
|
python
|
def testUnitsStr(self):
d1 = D(m=100)
d2 = D(km=3.5)
self.assertEqual(str(d1), '100.0 m')
self.assertEqual(str(d2), '3.5 km')
self.assertEqual(repr(d1), 'Distance(m=100.0)')
self.assertEqual(repr(d2), 'Distance(km=3.5)')
|
def testUnitAttName(self):
'Testing the `unit_attname` class method'
unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'), ('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
for (nm, att) in unit_tuple:
with self.subTest(nm=nm):
self.assertEqual(att, D.unit_attname(nm))
| 6,803,828,265,511,653,000
|
Testing the `unit_attname` class method
|
tests/gis_tests/test_measure.py
|
testUnitAttName
|
iMerica/dj-models
|
python
|
def testUnitAttName(self):
unit_tuple = [('Yard', 'yd'), ('Nautical Mile', 'nm'), ('German legal metre', 'german_m'), ('Indian yard', 'indian_yd'), ('Chain (Sears)', 'chain_sears'), ('Chain', 'chain')]
for (nm, att) in unit_tuple:
with self.subTest(nm=nm):
self.assertEqual(att, D.unit_attname(nm))
|
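For orientation, the behaviors these `Distance` tests pin down, condensed into one sketch (`django.contrib.gis.measure` is the real module these tests exercise):

from django.contrib.gis.measure import D, A  # aliases of Distance and Area

d = (D(km=1) + D(m=100))    # left operand fixes the default unit -> '1.1 km'
print(d.m)                  # 1100.0
area = (D(m=2) * D(m=10))   # Distance * Distance promotes to Area
print(area.sq_m)            # 20.0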
def testInit(self):
'Testing initialization from valid units'
a = Area(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_mi=100)
self.assertEqual(a.sq_m, 258998811.0336)
| -5,344,861,859,352,767,000
|
Testing initialization from valid units
|
tests/gis_tests/test_measure.py
|
testInit
|
iMerica/dj-models
|
python
|
def testInit(self):
a = Area(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_m=100)
self.assertEqual(a.sq_m, 100)
a = A(sq_mi=100)
self.assertEqual(a.sq_m, 258998811.0336)
|
def testInitInvaliA(self):
'Testing initialization from invalid units'
with self.assertRaises(AttributeError):
A(banana=100)
| -8,369,222,064,090,536,000
|
Testing initialization from invalid units
|
tests/gis_tests/test_measure.py
|
testInitInvaliA
|
iMerica/dj-models
|
python
|
def testInitInvaliA(self):
with self.assertRaises(AttributeError):
A(banana=100)
|
def testAccess(self):
'Testing access in different units'
a = A(sq_m=100)
self.assertEqual(a.sq_km, 0.0001)
self.assertAlmostEqual(a.sq_ft, 1076.391, 3)
| 1,555,348,347,969,236,700
|
Testing access in different units
|
tests/gis_tests/test_measure.py
|
testAccess
|
iMerica/dj-models
|
python
|
def testAccess(self):
a = A(sq_m=100)
self.assertEqual(a.sq_km, 0.0001)
self.assertAlmostEqual(a.sq_ft, 1076.391, 3)
|
def testAccessInvaliA(self):
'Testing access in invalid units'
a = A(sq_m=100)
self.assertFalse(hasattr(a, 'banana'))
| -4,501,221,602,945,441,000
|
Testing access in invalid units
|
tests/gis_tests/test_measure.py
|
testAccessInvaliA
|
iMerica/dj-models
|
python
|
def testAccessInvaliA(self):
a = A(sq_m=100)
self.assertFalse(hasattr(a, 'banana'))
|
def testAddition(self):
'Test addition & subtraction'
a1 = A(sq_m=100)
a2 = A(sq_m=200)
a3 = (a1 + a2)
self.assertEqual(a3.sq_m, 300)
a3 += a1
self.assertEqual(a3.sq_m, 400)
a4 = (a1 - a2)
self.assertEqual(a4.sq_m, (- 100))
a4 -= a1
self.assertEqual(a4.sq_m, (- 200))
with self.assertRaises(TypeError):
(a1 + 1)
with self.assertRaises(TypeError):
(a1 - 1)
with self.assertRaises(TypeError):
a1 += 1
with self.assertRaises(TypeError):
a1 -= 1
| 2,796,483,916,572,844,000
|
Test addition & subtraction
|
tests/gis_tests/test_measure.py
|
testAddition
|
iMerica/dj-models
|
python
|
def testAddition(self):
a1 = A(sq_m=100)
a2 = A(sq_m=200)
a3 = (a1 + a2)
self.assertEqual(a3.sq_m, 300)
a3 += a1
self.assertEqual(a3.sq_m, 400)
a4 = (a1 - a2)
self.assertEqual(a4.sq_m, (- 100))
a4 -= a1
self.assertEqual(a4.sq_m, (- 200))
with self.assertRaises(TypeError):
(a1 + 1)
with self.assertRaises(TypeError):
(a1 - 1)
with self.assertRaises(TypeError):
a1 += 1
with self.assertRaises(TypeError):
a1 -= 1
|
def testMultiplication(self):
'Test multiplication & division'
a1 = A(sq_m=100)
a3 = (a1 * 2)
self.assertEqual(a3.sq_m, 200)
a3 = (2 * a1)
self.assertEqual(a3.sq_m, 200)
a3 *= 5
self.assertEqual(a3.sq_m, 1000)
a4 = (a1 / 2)
self.assertEqual(a4.sq_m, 50)
a4 /= 5
self.assertEqual(a4.sq_m, 10)
with self.assertRaises(TypeError):
(a1 * A(sq_m=1))
with self.assertRaises(TypeError):
a1 *= A(sq_m=1)
with self.assertRaises(TypeError):
(a1 / A(sq_m=1))
with self.assertRaises(TypeError):
a1 /= A(sq_m=1)
| 1,916,095,202,108,542,500
|
Test multiplication & division
|
tests/gis_tests/test_measure.py
|
testMultiplication
|
iMerica/dj-models
|
python
|
def testMultiplication(self):
a1 = A(sq_m=100)
a3 = (a1 * 2)
self.assertEqual(a3.sq_m, 200)
a3 = (2 * a1)
self.assertEqual(a3.sq_m, 200)
a3 *= 5
self.assertEqual(a3.sq_m, 1000)
a4 = (a1 / 2)
self.assertEqual(a4.sq_m, 50)
a4 /= 5
self.assertEqual(a4.sq_m, 10)
with self.assertRaises(TypeError):
(a1 * A(sq_m=1))
with self.assertRaises(TypeError):
a1 *= A(sq_m=1)
with self.assertRaises(TypeError):
(a1 / A(sq_m=1))
with self.assertRaises(TypeError):
a1 /= A(sq_m=1)
|
def testUnitConversions(self):
'Testing default units during maths'
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = (a1 + a2)
self.assertEqual(a3._default_unit, 'sq_m')
a4 = (a2 + a1)
self.assertEqual(a4._default_unit, 'sq_km')
a5 = (a1 * 2)
self.assertEqual(a5._default_unit, 'sq_m')
a6 = (a1 / 2)
self.assertEqual(a6._default_unit, 'sq_m')
| 5,208,338,653,270,393,000
|
Testing default units during maths
|
tests/gis_tests/test_measure.py
|
testUnitConversions
|
iMerica/dj-models
|
python
|
def testUnitConversions(self):
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = (a1 + a2)
self.assertEqual(a3._default_unit, 'sq_m')
a4 = (a2 + a1)
self.assertEqual(a4._default_unit, 'sq_km')
a5 = (a1 * 2)
self.assertEqual(a5._default_unit, 'sq_m')
a6 = (a1 / 2)
self.assertEqual(a6._default_unit, 'sq_m')
|
def testComparisons(self):
'Testing comparisons'
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = A(sq_km=0)
self.assertGreater(a2, a1)
self.assertEqual(a1, a1)
self.assertLess(a1, a2)
self.assertFalse(a3)
| -1,874,166,189,157,217,500
|
Testing comparisons
|
tests/gis_tests/test_measure.py
|
testComparisons
|
iMerica/dj-models
|
python
|
def testComparisons(self):
a1 = A(sq_m=100)
a2 = A(sq_km=1)
a3 = A(sq_km=0)
self.assertGreater(a2, a1)
self.assertEqual(a1, a1)
self.assertLess(a1, a2)
self.assertFalse(a3)
|
def testUnitsStr(self):
'Testing conversion to strings'
a1 = A(sq_m=100)
a2 = A(sq_km=3.5)
self.assertEqual(str(a1), '100.0 sq_m')
self.assertEqual(str(a2), '3.5 sq_km')
self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
| 7,429,780,586,714,596,000
|
Testing conversion to strings
|
tests/gis_tests/test_measure.py
|
testUnitsStr
|
iMerica/dj-models
|
python
|
def testUnitsStr(self):
a1 = A(sq_m=100)
a2 = A(sq_km=3.5)
self.assertEqual(str(a1), '100.0 sq_m')
self.assertEqual(str(a2), '3.5 sq_km')
self.assertEqual(repr(a1), 'Area(sq_m=100.0)')
self.assertEqual(repr(a2), 'Area(sq_km=3.5)')
|
def get_pref(prefs, name, request_fn):
'Get a preference from existing preference dictionary or invoke a function that can collect it from the user'
val = prefs.get(name)
if (not val):
val = request_fn()
prefs[name] = val
return val
| 3,936,301,530,224,665,000
|
Get a preference from existing preference dictionary or invoke a function that can collect it from the user
|
release.py
|
get_pref
|
SharaWeil/kafka-0.11.0
|
python
|
def get_pref(prefs, name, request_fn):
val = prefs.get(name)
if (not val):
val = request_fn()
prefs[name] = val
return val
|
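A small usage sketch; the preference name and prompt are hypothetical:

prefs = {}
jira = get_pref(prefs, 'jira_id', lambda: input('JIRA id: '))  # prompts, caches
jira = get_pref(prefs, 'jira_id', lambda: input('JIRA id: '))  # cached, no prompt

Because the guard is `if not val`, a falsy cached value (empty string, 0) would re-trigger the collector.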
def get_icon_name(category, artifact):
' Returns the icon name from the feathericons collection. To add an icon type for \n an artifact, select one of the types from ones listed @ feathericons.com\n If no icon is available, the alert triangle is returned as default icon.\n '
category = category.upper()
artifact = artifact.upper()
icon = 'alert-triangle'
if (category.find('ACCOUNT') >= 0):
if (artifact.find('AUTH') >= 0):
icon = 'key'
else:
icon = 'user'
elif (category == 'ADDRESS BOOK'):
icon = 'book-open'
elif (category == 'ALARMS'):
icon = 'clock'
elif (category == 'AIRTAGS'):
icon = 'map-pin'
elif (category == 'APPLE PODCASTS'):
icon = 'play-circle'
elif (category == 'APPLE WALLET'):
if (artifact == 'TRANSACTIONS'):
icon = 'dollar-sign'
if (artifact == 'CARDS'):
icon = 'credit-card'
if (artifact == 'PASSES'):
icon = 'send'
elif (category == 'APP CONDUIT'):
icon = 'activity'
elif (category == 'APP PERMISSIONS'):
icon = 'key'
elif (category == 'CARPLAY'):
icon = 'package'
elif (category == 'CASH APP'):
icon = 'credit-card'
elif (category == 'APP UPDATES'):
icon = 'codepen'
elif (category == 'APPLICATIONS'):
icon = 'grid'
elif (category == 'AGGREGATE DICTIONARY'):
icon = 'book'
elif (category == 'BLUETOOTH'):
icon = 'bluetooth'
elif (category == 'CALENDAR'):
icon = 'calendar'
elif (category == 'CALL HISTORY'):
icon = 'phone-call'
elif (category == 'CELLULAR WIRELESS'):
icon = 'bar-chart'
elif (category == 'CLOUDKIT'):
if (artifact == 'PARTICIPANTS'):
icon = 'user'
elif (artifact == 'NOTE SHARING'):
icon = 'share-2'
elif (category == 'CONNECTED TO'):
icon = 'zap'
elif (category == 'COREDUET'):
if (artifact == 'AIRPLANE MODE'):
icon = 'pause'
if (artifact == 'LOCK STATE'):
icon = 'lock'
if (artifact == 'PLUGGED IN'):
icon = 'battery-charging'
elif (category == 'DATA USAGE'):
icon = 'wifi'
elif (category == 'DEVICE INFO'):
if (artifact == 'BUILD INFO'):
icon = 'terminal'
elif (artifact == 'IOS SYSTEM VERSION'):
icon = 'git-commit'
elif (artifact == 'PARTNER SETTINGS'):
icon = 'settings'
elif (artifact.find('SETTINGS_SECURE_') >= 0):
icon = 'settings'
else:
icon = 'info'
elif (category == 'DHCP'):
icon = 'settings'
elif (category == 'DISCORD'):
if (artifact == 'DISCORD MESSAGES'):
icon = 'message-square'
if (artifact == 'DISCORD ACCOUNT'):
icon = 'user'
if (artifact == 'DISCORD MANIFEST'):
icon = 'file-text'
elif (category == 'FACEBOOK MESSENGER'):
icon = 'facebook'
elif (category == 'FILES APP'):
icon = 'file-text'
elif (category == 'GEOLOCATION'):
if (artifact == 'APPLICATIONS'):
icon = 'grid'
elif (artifact == 'MAP TILE CACHE'):
icon = 'map'
elif (artifact == 'PD PLACE CACHE'):
icon = 'map-pin'
elif (category == 'GOOGLE DUO'):
if (artifact == 'GOOGLE DUO - CALL HISTORY'):
icon = 'phone-call'
if (artifact == 'GOOGLE DUO - CONTACTS'):
icon = 'user'
if (artifact == 'GOOGLE DUO - CLIPS'):
icon = 'video'
elif (category == 'HEALTH DATA'):
icon = 'heart'
elif (category == 'ICLOUD QUICK LOOK'):
icon = 'file'
elif (category == 'ICLOUD RETURNS'):
icon = 'cloud'
elif (category == 'ICLOUD SHARED ALBUMS'):
icon = 'cloud'
elif (category == 'IMO HD CHAT'):
if (artifact == 'IMO HD CHAT - MESSAGES'):
icon = 'message-circle'
if (artifact == 'IMO HD CHAT - CONTACTS'):
icon = 'user'
elif (category == 'INSTAGRAM'):
if (artifact == 'INSTAGRAM THREADS'):
icon = 'message-square'
if (artifact == 'INSTAGRAM THREADS CALLS'):
icon = 'phone'
elif (category == 'INSTALLED APPS'):
icon = 'package'
elif (category == 'INTERACTIONC'):
if (artifact == 'CONTACTS'):
icon = 'user'
elif (artifact == 'ATTACHMENTS'):
icon = 'paperclip'
elif (category == 'IOS BUILD'):
icon = 'git-commit'
elif (category == 'IOS MAIL'):
icon = 'mail'
elif (category == 'IOS SCREENS'):
icon = 'maximize'
elif (category == 'KEYBOARD'):
if (artifact == 'KEYBOARD DYNAMIC LEXICON'):
icon = 'type'
elif (artifact == 'KEYBOARD APPLICATION USAGE'):
icon = 'type'
elif (category == 'KIK'):
if (artifact == 'KIK MESSAGES'):
icon = 'message-square'
if (artifact == 'KIK USERS'):
icon = 'user'
if (artifact == 'KIK MEDIA METADATA'):
icon = 'file-plus'
if (artifact == 'KIK PENDING UPLOADS'):
icon = 'upload'
elif (category == 'KNOWLEDGEC'):
if (artifact == 'KNOWLEDGEC DEVICE LOCKED'):
icon = 'lock'
elif (artifact == 'KNOWLEDGEC PLUGGED IN'):
icon = 'battery-charging'
elif (artifact == 'KNOWLEDGEC BATTERY LEVEL'):
icon = 'battery'
else:
icon = 'activity'
elif (category == 'LOCATIONS'):
if (artifact == 'APPLE MAPS SEARCH HISTORY'):
icon = 'search'
else:
icon = 'map-pin'
elif (category == 'LOCATION SERVICES CONFIGURATIONS'):
icon = 'settings'
elif (category == 'MEDIA LIBRARY'):
icon = 'play-circle'
elif (category == 'MEDIA METADATA'):
icon = 'file-plus'
elif (category == 'MEDICAL ID'):
icon = 'thermometer'
elif (category == 'MICROSOFT TEAMS - LOGS'):
if (artifact == 'TEAMS LOCATIONS'):
icon = 'map-pin'
if (artifact == 'TEAMS MOTION'):
icon = 'move'
if (artifact == 'TEAMS STATE CHANGE'):
icon = 'truck'
if (artifact == 'TEAMS POWER LOG'):
icon = 'battery-charging'
if (artifact == 'TEAMS TIMEZONE'):
icon = 'clock'
elif (category == 'MICROSOFT TEAMS'):
if (artifact == 'TEAMS MESSAGES'):
icon = 'message-square'
if (artifact == 'TEAMS CONTACT'):
icon = 'users'
if (artifact == 'TEAMS USER'):
icon = 'user'
if (artifact == 'TEAMS CALL LOGS'):
icon = 'phone'
if (artifact == 'TEAMS SHARED LOCATIONS'):
icon = 'map-pin'
elif (category == 'MOBILE ACTIVATION LOGS'):
icon = 'clipboard'
elif (category == 'MOBILE BACKUP'):
icon = 'save'
elif (category == 'MOBILE CONTAINER MANAGER'):
icon = 'save'
elif (category == 'MOBILE INSTALLATION LOGS'):
icon = 'clipboard'
elif (category == 'MOBILE SOFTWARE UPDATE'):
icon = 'refresh-cw'
elif (category == 'NOTES'):
icon = 'file-text'
elif (category == 'NOTIFICATIONS'):
icon = 'bell'
elif (category == 'PHOTOS'):
icon = 'image'
elif (category == 'POWERLOG'):
icon = 'power'
elif (category == 'POWERLOG BACKUPS'):
icon = 'power'
elif (category == 'PROTON MAIL'):
icon = 'mail'
elif (category == 'RECENT ACTIVITY'):
icon = 'activity'
elif (category == 'REMINDERS'):
icon = 'list'
elif (category == 'ROUTINED'):
icon = 'map'
elif (category == 'SAFARI BROWSER'):
icon = 'compass'
elif (category == 'SCREENTIME'):
icon = 'monitor'
elif (category == 'SCRIPT LOGS'):
icon = 'archive'
elif (category == 'SLACK'):
if (artifact == 'SLACK MESSAGES'):
icon = 'message-square'
if (artifact == 'SLACK USER DATA'):
icon = 'user'
if (artifact == 'SLACK ATTACHMENTS'):
icon = 'paperclip'
if (artifact == 'SLACK WORKSPACE DATA'):
icon = 'slack'
if (artifact == 'SLACK TEAM DATA'):
icon = 'slack'
if (artifact == 'SLACK CHANNEL DATA'):
icon = 'slack'
elif (category == 'SMS & IMESSAGE'):
icon = 'message-square'
elif (category == 'SQLITE JOURNALING'):
icon = 'book-open'
elif (category == 'TEXT INPUT MESSAGES'):
icon = 'message-square'
elif (category == 'TIKTOK'):
if (artifact == 'TIKTOK MESSAGES'):
icon = 'message-square'
if (artifact == 'TIKTOK CONTACTS'):
icon = 'user'
elif (category == 'USER DICTIONARY'):
icon = 'book'
elif (category == 'VENMO'):
icon = 'dollar-sign'
elif (category == 'VIBER'):
if (artifact == 'VIBER - SETTINGS'):
icon = 'settings'
if (artifact == 'VIBER - CONTACTS'):
icon = 'users'
if (artifact == 'VIBER - CHATS'):
icon = 'message-square'
if (artifact == 'VIBER - CALL REMNANTS'):
icon = 'phone-call'
elif (category == 'VOICE-RECORDINGS'):
icon = 'mic'
elif (category == 'VOICE-TRIGGERS'):
icon = 'mic'
elif (category == 'WHATSAPP'):
if (artifact == 'WHATSAPP - MESSAGES'):
icon = 'message-square'
if (artifact == 'WHATSAPP - CONTACTS'):
icon = 'users'
elif (category == 'WIFI CONNECTIONS'):
icon = 'wifi'
elif (category == 'WIFI KNOWN NETWORKS'):
icon = 'wifi'
return icon
| 889,248,293,334,245,900
|
Returns the icon name from the feathericons collection. To add an icon type for
an artifact, select one of the types from ones listed @ feathericons.com
If no icon is available, the alert triangle is returned as default icon.
|
scripts/report.py
|
get_icon_name
|
theAtropos4n6/iLEAPP
|
python
|
def get_icon_name(category, artifact):
' Returns the icon name from the feathericons collection. To add an icon type for \n an artifact, select one of the types from ones listed @ feathericons.com\n If no icon is available, the alert triangle is returned as default icon.\n '
category = category.upper()
artifact = artifact.upper()
icon = 'alert-triangle'
if (category.find('ACCOUNT') >= 0):
if (artifact.find('AUTH') >= 0):
icon = 'key'
else:
icon = 'user'
elif (category == 'ADDRESS BOOK'):
icon = 'book-open'
elif (category == 'ALARMS'):
icon = 'clock'
elif (category == 'AIRTAGS'):
icon = 'map-pin'
elif (category == 'APPLE PODCASTS'):
icon = 'play-circle'
elif (category == 'APPLE WALLET'):
if (artifact == 'TRANSACTIONS'):
icon = 'dollar-sign'
if (artifact == 'CARDS'):
icon = 'credit-card'
if (artifact == 'PASSES'):
icon = 'send'
elif (category == 'APP CONDUIT'):
icon = 'activity'
elif (category == 'APP PERMISSIONS'):
icon = 'key'
elif (category == 'CARPLAY'):
icon = 'package'
elif (category == 'CASH APP'):
icon = 'credit-card'
elif (category == 'APP UPDATES'):
icon = 'codepen'
elif (category == 'APPLICATIONS'):
icon = 'grid'
elif (category == 'AGGREGATE DICTIONARY'):
icon = 'book'
elif (category == 'BLUETOOTH'):
icon = 'bluetooth'
elif (category == 'CALENDAR'):
icon = 'calendar'
elif (category == 'CALL HISTORY'):
icon = 'phone-call'
elif (category == 'CELLULAR WIRELESS'):
icon = 'bar-chart'
elif (category == 'CLOUDKIT'):
if (artifact == 'PARTICIPANTS'):
icon = 'user'
elif (artifact == 'NOTE SHARING'):
icon = 'share-2'
elif (category == 'CONNECTED TO'):
icon = 'zap'
elif (category == 'COREDUET'):
if (artifact == 'AIRPLANE MODE'):
icon = 'pause'
if (artifact == 'LOCK STATE'):
icon = 'lock'
if (artifact == 'PLUGGED IN'):
icon = 'battery-charging'
elif (category == 'DATA USAGE'):
icon = 'wifi'
elif (category == 'DEVICE INFO'):
if (artifact == 'BUILD INFO'):
icon = 'terminal'
elif (artifact == 'IOS SYSTEM VERSION'):
icon = 'git-commit'
elif (artifact == 'PARTNER SETTINGS'):
icon = 'settings'
elif (artifact.find('SETTINGS_SECURE_') >= 0):
icon = 'settings'
else:
icon = 'info'
elif (category == 'DHCP'):
icon = 'settings'
elif (category == 'DISCORD'):
if (artifact == 'DISCORD MESSAGES'):
icon = 'message-square'
if (artifact == 'DISCORD ACCOUNT'):
icon = 'user'
if (artifact == 'DISCORD MANIFEST'):
icon = 'file-text'
elif (category == 'FACEBOOK MESSENGER'):
icon = 'facebook'
elif (category == 'FILES APP'):
icon = 'file-text'
elif (category == 'GEOLOCATION'):
if (artifact == 'APPLICATIONS'):
icon = 'grid'
elif (artifact == 'MAP TILE CACHE'):
icon = 'map'
elif (artifact == 'PD PLACE CACHE'):
icon = 'map-pin'
elif (category == 'GOOGLE DUO'):
if (artifact == 'GOOGLE DUO - CALL HISTORY'):
icon = 'phone-call'
if (artifact == 'GOOGLE DUO - CONTACTS'):
icon = 'user'
if (artifact == 'GOOGLE DUO - CLIPS'):
icon = 'video'
elif (category == 'HEALTH DATA'):
icon = 'heart'
elif (category == 'ICLOUD QUICK LOOK'):
icon = 'file'
elif (category == 'ICLOUD RETURNS'):
icon = 'cloud'
elif (category == 'ICLOUD SHARED ALBUMS'):
icon = 'cloud'
elif (category == 'IMO HD CHAT'):
if (artifact == 'IMO HD CHAT - MESSAGES'):
icon = 'message-circle'
if (artifact == 'IMO HD CHAT - CONTACTS'):
icon = 'user'
elif (category == 'INSTAGRAM'):
if (artifact == 'INSTAGRAM THREADS'):
icon = 'message-square'
if (artifact == 'INSTAGRAM THREADS CALLS'):
icon = 'phone'
elif (category == 'INSTALLED APPS'):
icon = 'package'
elif (category == 'INTERACTIONC'):
if (artifact == 'CONTACTS'):
icon = 'user'
elif (artifact == 'ATTACHMENTS'):
icon = 'paperclip'
elif (category == 'IOS BUILD'):
icon = 'git-commit'
elif (category == 'IOS MAIL'):
icon = 'mail'
elif (category == 'IOS SCREENS'):
icon = 'maximize'
elif (category == 'KEYBOARD'):
if (artifact == 'KEYBOARD DYNAMIC LEXICON'):
icon = 'type'
elif (artifact == 'KEYBOARD APPLICATION USAGE'):
icon = 'type'
elif (category == 'KIK'):
if (artifact == 'KIK MESSAGES'):
icon = 'message-square'
if (artifact == 'KIK USERS'):
icon = 'user'
if (artifact == 'KIK MEDIA METADATA'):
icon = 'file-plus'
if (artifact == 'KIK PENDING UPLOADS'):
icon = 'upload'
elif (category == 'KNOWLEDGEC'):
if (artifact == 'KNOWLEDGEC DEVICE LOCKED'):
icon = 'lock'
elif (artifact == 'KNOWLEDGEC PLUGGED IN'):
icon = 'battery-charging'
elif (artifact == 'KNOWLEDGEC BATTERY LEVEL'):
icon = 'battery'
else:
icon = 'activity'
elif (category == 'LOCATIONS'):
if (artifact == 'APPLE MAPS SEARCH HISTORY'):
icon = 'search'
else:
icon = 'map-pin'
elif (category == 'LOCATION SERVICES CONFIGURATIONS'):
icon = 'settings'
elif (category == 'MEDIA LIBRARY'):
icon = 'play-circle'
elif (category == 'MEDIA METADATA'):
icon = 'file-plus'
elif (category == 'MEDICAL ID'):
icon = 'thermometer'
elif (category == 'MICROSOFT TEAMS - LOGS'):
if (artifact == 'TEAMS LOCATIONS'):
icon = 'map-pin'
if (artifact == 'TEAMS MOTION'):
icon = 'move'
if (artifact == 'TEAMS STATE CHANGE'):
icon = 'truck'
if (artifact == 'TEAMS POWER LOG'):
icon = 'battery-charging'
if (artifact == 'TEAMS TIMEZONE'):
icon = 'clock'
elif (category == 'MICROSOFT TEAMS'):
if (artifact == 'TEAMS MESSAGES'):
icon = 'message-square'
if (artifact == 'TEAMS CONTACT'):
icon = 'users'
if (artifact == 'TEAMS USER'):
icon = 'user'
if (artifact == 'TEAMS CALL LOGS'):
icon = 'phone'
if (artifact == 'TEAMS SHARED LOCATIONS'):
icon = 'map-pin'
elif (category == 'MOBILE ACTIVATION LOGS'):
icon = 'clipboard'
elif (category == 'MOBILE BACKUP'):
icon = 'save'
elif (category == 'MOBILE CONTAINER MANAGER'):
icon = 'save'
elif (category == 'MOBILE INSTALLATION LOGS'):
icon = 'clipboard'
elif (category == 'MOBILE SOFTWARE UPDATE'):
icon = 'refresh-cw'
elif (category == 'NOTES'):
icon = 'file-text'
elif (category == 'NOTIFICATIONS'):
icon = 'bell'
elif (category == 'PHOTOS'):
icon = 'image'
elif (category == 'POWERLOG'):
icon = 'power'
elif (category == 'POWERLOG BACKUPS'):
icon = 'power'
elif (category == 'PROTON MAIL'):
icon = 'mail'
elif (category == 'RECENT ACTIVITY'):
icon = 'activity'
elif (category == 'REMINDERS'):
icon = 'list'
elif (category == 'ROUTINED'):
icon = 'map'
elif (category == 'SAFARI BROWSER'):
icon = 'compass'
elif (category == 'SCREENTIME'):
icon = 'monitor'
elif (category == 'SCRIPT LOGS'):
icon = 'archive'
elif (category == 'SLACK'):
if (artifact == 'SLACK MESSAGES'):
icon = 'message-square'
if (artifact == 'SLACK USER DATA'):
icon = 'user'
if (artifact == 'SLACK ATTACHMENTS'):
icon = 'paperclip'
if (artifact == 'SLACK WORKSPACE DATA'):
icon = 'slack'
if (artifact == 'SLACK TEAM DATA'):
icon = 'slack'
if (artifact == 'SLACK CHANNEL DATA'):
icon = 'slack'
elif (category == 'SMS & IMESSAGE'):
icon = 'message-square'
elif (category == 'SQLITE JOURNALING'):
icon = 'book-open'
elif (category == 'TEXT INPUT MESSAGES'):
icon = 'message-square'
elif (category == 'TIKTOK'):
if (artifact == 'TIKTOK MESSAGES'):
icon = 'message-square'
if (artifact == 'TIKTOK CONTACTS'):
icon = 'user'
elif (category == 'USER DICTIONARY'):
icon = 'book'
elif (category == 'VENMO'):
icon = 'dollar-sign'
elif (category == 'VIBER'):
if (artifact == 'VIBER - SETTINGS'):
icon = 'settings'
if (artifact == 'VIBER - CONTACTS'):
icon = 'users'
if (artifact == 'VIBER - CHATS'):
icon = 'message-square'
if (artifact == 'VIBER - CALL REMNANTS'):
icon = 'phone-call'
elif (category == 'VOICE-RECORDINGS'):
icon = 'mic'
elif (category == 'VOICE-TRIGGERS'):
icon = 'mic'
elif (category == 'WHATSAPP'):
if (artifact == 'WHATSAPP - MESSAGES'):
icon = 'message-square'
if (artifact == 'WHATSAPP - CONTACTS'):
icon = 'users'
elif (category == 'WIFI CONNECTIONS'):
icon = 'wifi'
elif (category == 'WIFI KNOWN NETWORKS'):
icon = 'wifi'
return icon
|
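The long `if`/`elif` chain above is a fixed mapping, so the same dispatch can be expressed as a table lookup with per-artifact overrides. A hedged, abbreviated sketch (a few entries only; not the project's actual implementation):

# category -> icon, with (category, artifact) overrides checked first.
CATEGORY_ICONS = {
    'ADDRESS BOOK': 'book-open',
    'ALARMS': 'clock',
    'BLUETOOTH': 'bluetooth',
}
ARTIFACT_ICONS = {
    ('APPLE WALLET', 'TRANSACTIONS'): 'dollar-sign',
    ('APPLE WALLET', 'CARDS'): 'credit-card',
    ('APPLE WALLET', 'PASSES'): 'send',
}

def get_icon_name_sketch(category, artifact):
    category, artifact = category.upper(), artifact.upper()
    return ARTIFACT_ICONS.get((category, artifact),
                              CATEGORY_ICONS.get(category, 'alert-triangle'))

The substring checks (`category.find('ACCOUNT') >= 0`) would still need explicit handling, which is likely why the original keeps the chain.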
def create_index_html(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path, nav_list_data):
'Write out the index.html page to the report folder'
content = '<br />'
content += '\n <div class="card bg-white" style="padding: 20px;">\n <h2 class="card-title">Case Information</h2>\n '
case_list = [['Extraction location', image_input_path], ['Extraction type', extraction_type], ['Report directory', reportfolderbase], ['Processing time', f'{time_HMS} (Total {time_in_secs} seconds)']]
tab1_content = (generate_key_val_table_without_headings('', case_list) + ' <p class="note note-primary mb-4">\n All dates and times are in UTC unless noted otherwise!\n </p>\n ')
devinfo_files_path = os.path.join(reportfolderbase, 'Script Logs', 'DeviceInfo.html')
tab2_content = get_file_content(devinfo_files_path)
script_log_path = os.path.join(reportfolderbase, 'Script Logs', 'Screen Output.html')
tab3_content = get_file_content(script_log_path)
processed_files_path = os.path.join(reportfolderbase, 'Script Logs', 'ProcessedFilesLog.html')
tab4_content = get_file_content(processed_files_path)
content += tabs_code.format(tab1_content, tab2_content, tab3_content, tab4_content)
content += '</div>'
authors_data = generate_authors_table_code(aleapp_contributors)
credits_code = credits_block.format(authors_data)
filename = 'index.html'
page_title = 'iLEAPP Report'
body_heading = 'iOS Logs Events And Protobuf Parser'
body_description = 'iLEAPP is an open source project that aims to parse every known iOS artifact for the purpose of forensic analysis.'
active_nav_list_data = (mark_item_active(nav_list_data, filename) + nav_bar_script)
f = open(os.path.join(reportfolderbase, filename), 'w', encoding='utf8')
f.write(page_header.format(page_title))
f.write(body_start.format(f'iLEAPP {aleapp_version}'))
f.write(((body_sidebar_setup + active_nav_list_data) + body_sidebar_trailer))
f.write((body_main_header + body_main_data_title.format(body_heading, body_description)))
f.write(content)
f.write(thank_you_note)
f.write(credits_code)
f.write((((body_main_trailer + body_end) + nav_bar_script_footer) + page_footer))
f.close()
| 526,606,604,849,340,740
|
Write out the index.html page to the report folder
|
scripts/report.py
|
create_index_html
|
theAtropos4n6/iLEAPP
|
python
|
def create_index_html(reportfolderbase, time_in_secs, time_HMS, extraction_type, image_input_path, nav_list_data):
content = '<br />'
content += '\n <div class="card bg-white" style="padding: 20px;">\n <h2 class="card-title">Case Information</h2>\n '
case_list = [['Extraction location', image_input_path], ['Extraction type', extraction_type], ['Report directory', reportfolderbase], ['Processing time', f'{time_HMS} (Total {time_in_secs} seconds)']]
tab1_content = (generate_key_val_table_without_headings('', case_list) + ' <p class="note note-primary mb-4">\n All dates and times are in UTC unless noted otherwise!\n </p>\n ')
devinfo_files_path = os.path.join(reportfolderbase, 'Script Logs', 'DeviceInfo.html')
tab2_content = get_file_content(devinfo_files_path)
script_log_path = os.path.join(reportfolderbase, 'Script Logs', 'Screen Output.html')
tab3_content = get_file_content(script_log_path)
processed_files_path = os.path.join(reportfolderbase, 'Script Logs', 'ProcessedFilesLog.html')
tab4_content = get_file_content(processed_files_path)
content += tabs_code.format(tab1_content, tab2_content, tab3_content, tab4_content)
content += '</div>'
authors_data = generate_authors_table_code(aleapp_contributors)
credits_code = credits_block.format(authors_data)
filename = 'index.html'
page_title = 'iLEAPP Report'
body_heading = 'iOS Logs Events And Protobuf Parser'
body_description = 'iLEAPP is an open source project that aims to parse every known iOS artifact for the purpose of forensic analysis.'
active_nav_list_data = (mark_item_active(nav_list_data, filename) + nav_bar_script)
f = open(os.path.join(reportfolderbase, filename), 'w', encoding='utf8')
f.write(page_header.format(page_title))
f.write(body_start.format(f'iLEAPP {aleapp_version}'))
f.write(((body_sidebar_setup + active_nav_list_data) + body_sidebar_trailer))
f.write((body_main_header + body_main_data_title.format(body_heading, body_description)))
f.write(content)
f.write(thank_you_note)
f.write(credits_code)
f.write((((body_main_trailer + body_end) + nav_bar_script_footer) + page_footer))
f.close()
|
def generate_key_val_table_without_headings(title, data_list, html_escape=True, width='70%'):
'Returns the html code for a key-value table (2 cols) without col names'
code = ''
if title:
code += f'<h2>{title}</h2>'
table_header_code = '\n <div class="table-responsive">\n <table class="table table-bordered table-hover table-sm" width={}>\n <tbody>\n '
table_footer_code = '\n </tbody>\n </table>\n </div>\n '
code += table_header_code.format(width)
if html_escape:
for row in data_list:
code += (('<tr>' + ''.join(('<td>{}</td>'.format(html.escape(str(x))) for x in row))) + '</tr>')
else:
for row in data_list:
code += (('<tr>' + ''.join(('<td>{}</td>'.format(str(x)) for x in row))) + '</tr>')
code += table_footer_code
return code
| -2,558,255,663,354,864,600
|
Returns the html code for a key-value table (2 cols) without col names
|
scripts/report.py
|
generate_key_val_table_without_headings
|
theAtropos4n6/iLEAPP
|
python
|
def generate_key_val_table_without_headings(title, data_list, html_escape=True, width='70%'):
code = ''
if title:
code += f'<h2>{title}</h2>'
table_header_code = '\n <div class="table-responsive">\n <table class="table table-bordered table-hover table-sm" width={}>\n <tbody>\n '
table_footer_code = '\n </tbody>\n </table>\n </div>\n '
code += table_header_code.format(width)
if html_escape:
for row in data_list:
code += (('<tr>' + ''.join(('<td>{}</td>'.format(html.escape(str(x))) for x in row))) + '</tr>')
else:
for row in data_list:
code += (('<tr>' + ''.join(('<td>{}</td>'.format(str(x)) for x in row))) + '</tr>')
code += table_footer_code
return code
|
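Usage sketch for the table helper (values hypothetical):

rows = [['Extraction type', 'Full file system'],
        ['Processing time', '42 seconds']]
print(generate_key_val_table_without_headings('Case Information', rows))
# html_escape=True (the default) passes every cell through html.escape().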
def mark_item_active(data, itemname):
'Finds itemname in data, then marks that node as active. Return value is changed data'
pos = data.find(f'" href="{itemname}"')
if (pos < 0):
logfunc(f'Error, could not find {itemname} in {data}')
return data
else:
ret = ((data[0:pos] + ' active') + data[pos:])
return ret
| 8,354,774,601,941,152,000
|
Finds itemname in data, then marks that node as active. Return value is changed data
|
scripts/report.py
|
mark_item_active
|
theAtropos4n6/iLEAPP
|
python
|
def mark_item_active(data, itemname):
pos = data.find(f'" href="{itemname}"')
if (pos < 0):
logfunc(f'Error, could not find {itemname} in {data}')
return data
else:
ret = ((data[0:pos] + ' active') + data[pos:])
return ret
|
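A sketch of the string surgery `mark_item_active` performs (markup shortened):

nav = '<a class="nav-link" href="index.html">Home</a>'
print(mark_item_active(nav, 'index.html'))
# -> <a class="nav-link active" href="index.html">Home</a>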
@register.filter
def markdown_to_html(text):
'マークダウンをhtmlに変換する。'
return mark_safe(markdownify(text))
| 7,837,217,836,842,435,000
|
Converts Markdown to HTML.
|
blog/templatetags/markdown_html.py
|
markdown_to_html
|
whitecat-22/blog_site
|
python
|
@register.filter
def markdown_to_html(text):
return mark_safe(markdownify(text))
|
@register.filter
def markdown_to_html_with_escape(text):
'マークダウンをhtmlに変換する。\n\n 生のHTMLやCSS、JavaScript等のコードをエスケープした上で、マークダウンをHTMLに変換します。\n 公開しているコメント欄等には、こちらを使ってください。\n\n '
extensions = (MARKDOWNX_MARKDOWN_EXTENSIONS + [EscapeHtml()])
html = markdown.markdown(text, extensions=extensions, extension_configs=MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS)
return mark_safe(html)
| -4,383,165,759,974,250,500
|
Converts Markdown to HTML.
Escapes raw HTML, CSS, JavaScript, and similar code before converting the Markdown to HTML.
Use this for publicly visible comment fields and the like.
|
blog/templatetags/markdown_html.py
|
markdown_to_html_with_escape
|
whitecat-22/blog_site
|
python
|
@register.filter
def markdown_to_html_with_escape(text):
'マークダウンをhtmlに変換する。\n\n 生のHTMLやCSS、JavaScript等のコードをエスケープした上で、マークダウンをHTMLに変換します。\n 公開しているコメント欄等には、こちらを使ってください。\n\n '
extensions = (MARKDOWNX_MARKDOWN_EXTENSIONS + [EscapeHtml()])
html = markdown.markdown(text, extensions=extensions, extension_configs=MARKDOWNX_MARKDOWN_EXTENSION_CONFIGS)
return mark_safe(html)
|
def cursor_iter(cursor, sentinel, col_count):
'\n Yields blocks of rows from a cursor and ensures the cursor is closed when\n done.\n '
try:
for rows in iter((lambda : cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), sentinel):
(yield [r[0:col_count] for r in rows])
finally:
cursor.close()
| 442,839,228,491,569,100
|
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
|
django/db/models/sql/compiler.py
|
cursor_iter
|
hottwaj/django
|
python
|
def cursor_iter(cursor, sentinel, col_count):
'\n Yields blocks of rows from a cursor and ensures the cursor is closed when\n done.\n '
try:
for rows in iter((lambda : cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), sentinel):
(yield [r[0:col_count] for r in rows])
finally:
cursor.close()
|
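A self-contained sketch of the same chunked-fetch pattern using `sqlite3` (`GET_ITERATOR_CHUNK_SIZE` is a Django module-level constant, redefined here so the snippet runs standalone):

import sqlite3

GET_ITERATOR_CHUNK_SIZE = 100  # stand-in for Django's module constant

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE t (a, b)')
conn.executemany('INSERT INTO t VALUES (?, ?)', [(i, i * i) for i in range(250)])
cursor = conn.execute('SELECT a, b FROM t')

# sentinel=[]: fetchmany returns an empty list once the cursor is exhausted.
for block in cursor_iter(cursor, sentinel=[], col_count=2):
    print(len(block))  # 100, 100, 50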
def pre_sql_setup(self):
"\n Does any necessary class setup immediately prior to producing SQL. This\n is for things that can't necessarily be done in __init__ because we\n might not have all the pieces in place at that time.\n "
self.setup_query()
order_by = self.get_order_by()
(self.where, self.having) = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by((self.select + extra_select), order_by)
return (extra_select, order_by, group_by)
| -5,332,599,130,163,166,000
|
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
|
django/db/models/sql/compiler.py
|
pre_sql_setup
|
hottwaj/django
|
python
|
def pre_sql_setup(self):
"\n Does any necessary class setup immediately prior to producing SQL. This\n is for things that can't necessarily be done in __init__ because we\n might not have all the pieces in place at that time.\n "
self.setup_query()
order_by = self.get_order_by()
(self.where, self.having) = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by((self.select + extra_select), order_by)
return (extra_select, order_by, group_by)
|
def get_group_by(self, select, order_by):
'\n Returns a list of 2-tuples of form (sql, params).\n\n The logic of what exactly the GROUP BY clause contains is hard\n to describe in other words than "if it passes the test suite,\n then it is correct".\n '
if (self.query.group_by is None):
return []
expressions = []
if (self.query.group_by is not True):
for expr in self.query.group_by:
if (not hasattr(expr, 'as_sql')):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
for (expr, _, _) in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for (expr, (sql, params, is_ref)) in order_by:
if expr.contains_aggregate:
continue
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having_group_by = (self.having.get_group_by_cols() if self.having else ())
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
(sql, params) = self.compile(expr)
if ((sql, tuple(params)) not in seen):
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
| -1,263,401,982,640,389,000
|
Returns a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
|
django/db/models/sql/compiler.py
|
get_group_by
|
hottwaj/django
|
python
|
def get_group_by(self, select, order_by):
'\n Returns a list of 2-tuples of form (sql, params).\n\n The logic of what exactly the GROUP BY clause contains is hard\n to describe in other words than "if it passes the test suite,\n then it is correct".\n '
if (self.query.group_by is None):
return []
expressions = []
if (self.query.group_by is not True):
for expr in self.query.group_by:
if (not hasattr(expr, 'as_sql')):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
for (expr, _, _) in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for (expr, (sql, params, is_ref)) in order_by:
if expr.contains_aggregate:
continue
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having_group_by = (self.having.get_group_by_cols() if self.having else ())
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
(sql, params) = self.compile(expr)
if ((sql, tuple(params)) not in seen):
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
|
def get_select(self):
'\n Returns three values:\n - a list of 3-tuples of (expression, (sql, params), alias)\n - a klass_info structure,\n - a dictionary of annotations\n\n The (sql, params) is what the expression will produce, and alias is the\n "AS alias" for the column (possibly None).\n\n The klass_info structure contains the following information:\n - Which model to instantiate\n - Which columns for that model are present in the query (by\n position of the select clause).\n - related_klass_infos: [f, klass_info] to descent into\n\n The annotations is a dictionary of {\'attname\': column position} values.\n '
select = []
klass_info = None
annotations = {}
select_idx = 0
for (alias, (sql, params)) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert (not (self.query.select and self.query.default_cols))
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {'model': self.query.model, 'select_fields': select_list}
for col in self.query.select:
select.append((col, None))
select_idx += 1
for (alias, annotation) in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for (col, alias) in select:
ret.append((col, self.compile(col, select_format=True), alias))
return (ret, klass_info, annotations)
| -421,693,087,299,432,060
|
Returns three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
- related_klass_infos: [f, klass_info] to descent into
The annotations is a dictionary of {'attname': column position} values.
|
django/db/models/sql/compiler.py
|
get_select
|
hottwaj/django
|
python
|
def get_select(self):
'\n Returns three values:\n - a list of 3-tuples of (expression, (sql, params), alias)\n - a klass_info structure,\n - a dictionary of annotations\n\n The (sql, params) is what the expression will produce, and alias is the\n "AS alias" for the column (possibly None).\n\n The klass_info structure contains the following information:\n - Which model to instantiate\n - Which columns for that model are present in the query (by\n position of the select clause).\n - related_klass_infos: [f, klass_info] to descent into\n\n The annotations is a dictionary of {\'attname\': column position} values.\n '
select = []
klass_info = None
annotations = {}
select_idx = 0
for (alias, (sql, params)) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert (not (self.query.select and self.query.default_cols))
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {'model': self.query.model, 'select_fields': select_list}
for col in self.query.select:
select.append((col, None))
select_idx += 1
for (alias, annotation) in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for (col, alias) in select:
ret.append((col, self.compile(col, select_format=True), alias))
return (ret, klass_info, annotations)
|
def get_order_by(self):
'\n Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for the\n ORDER BY clause.\n\n The order_by clause can alter the select clause (for example it\n can add aliases to clauses that do not yet have one, or it can\n add totally new select clauses).\n '
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif (not self.query.default_ordering):
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
(asc, desc) = ORDER_DIR['ASC']
else:
(asc, desc) = ORDER_DIR['DESC']
order_by = []
for (pos, field) in enumerate(ordering):
if hasattr(field, 'resolve_expression'):
if (not isinstance(field, OrderBy)):
field = field.asc()
if (not self.query.standard_ordering):
field.reverse_ordering()
order_by.append((field, False))
continue
if (field == '?'):
order_by.append((OrderBy(Random()), False))
continue
(col, order) = get_order_dir(field, asc)
descending = (True if (order == 'DESC') else False)
if (col in self.query.annotation_select):
order_by.append((OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True))
continue
if (col in self.query.annotations):
order_by.append((OrderBy(self.query.annotations[col], descending=descending), False))
continue
if ('.' in field):
(table, col) = col.split('.', 1)
order_by.append((OrderBy(RawSQL(('%s.%s' % (self.quote_name_unless_alias(table), col)), []), descending=descending), False))
continue
if ((not self.query._extra) or (col not in self.query._extra)):
order_by.extend(self.find_ordering_name(field, self.query.get_meta(), default_order=asc))
elif (col not in self.query.extra_select):
order_by.append((OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False))
else:
order_by.append((OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True))
result = []
seen = set()
for (expr, is_ref) in order_by:
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
(sql, params) = self.compile(resolved)
without_ordering = self.ordering_parts.search(sql).group(1)
if ((without_ordering, tuple(params)) in seen):
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
| -882,710,292,227,771,400
|
Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
|
django/db/models/sql/compiler.py
|
get_order_by
|
hottwaj/django
|
python
|
def get_order_by(self):
'\n Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for the\n ORDER BY clause.\n\n The order_by clause can alter the select clause (for example it\n can add aliases to clauses that do not yet have one, or it can\n add totally new select clauses).\n '
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif (not self.query.default_ordering):
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
(asc, desc) = ORDER_DIR['ASC']
else:
(asc, desc) = ORDER_DIR['DESC']
order_by = []
for (pos, field) in enumerate(ordering):
if hasattr(field, 'resolve_expression'):
if (not isinstance(field, OrderBy)):
field = field.asc()
if (not self.query.standard_ordering):
field.reverse_ordering()
order_by.append((field, False))
continue
if (field == '?'):
order_by.append((OrderBy(Random()), False))
continue
(col, order) = get_order_dir(field, asc)
descending = (True if (order == 'DESC') else False)
if (col in self.query.annotation_select):
order_by.append((OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending), True))
continue
if (col in self.query.annotations):
order_by.append((OrderBy(self.query.annotations[col], descending=descending), False))
continue
if ('.' in field):
(table, col) = col.split('.', 1)
order_by.append((OrderBy(RawSQL(('%s.%s' % (self.quote_name_unless_alias(table), col)), []), descending=descending), False))
continue
if ((not self.query._extra) or (col not in self.query._extra)):
order_by.extend(self.find_ordering_name(field, self.query.get_meta(), default_order=asc))
elif (col not in self.query.extra_select):
order_by.append((OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False))
else:
order_by.append((OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True))
result = []
seen = set()
for (expr, is_ref) in order_by:
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
(sql, params) = self.compile(resolved)
without_ordering = self.ordering_parts.search(sql).group(1)
if ((without_ordering, tuple(params)) in seen):
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
|
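For reference, the 2-tuples produced by get_order_by() can be inspected straight off a compiler. A minimal sketch, assuming Django is installed; the in-memory sqlite settings and the choice of django.contrib.auth's User model are illustrative assumptions, not part of the record above:

import django
from django.conf import settings

# Stand-alone configuration so the sketch runs outside a project (assumption:
# sqlite backend; no tables are needed because nothing is executed).
settings.configure(
    DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}},
    INSTALLED_APPS=['django.contrib.contenttypes', 'django.contrib.auth'],
)
django.setup()

from django.contrib.auth.models import User

qs = User.objects.order_by('-date_joined', 'username')
compiler = qs.query.get_compiler('default')
for expr, (sql, params, is_ref) in compiler.get_order_by():
    # e.g. '"auth_user"."date_joined" DESC', [], False
    print(sql, params, is_ref)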
def quote_name_unless_alias(self, name):
"\n A wrapper around connection.ops.quote_name that doesn't quote aliases\n for table names. This avoids problems with some SQL dialects that treat\n quoted strings specially (e.g. PostgreSQL).\n "
if (name in self.quote_cache):
return self.quote_cache[name]
if (((name in self.query.alias_map) and (name not in self.query.table_map)) or (name in self.query.extra_select) or ((name in self.query.external_aliases) and (name not in self.query.table_map))):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
| -1,623,040,495,631,383,600
|
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
|
django/db/models/sql/compiler.py
|
quote_name_unless_alias
|
hottwaj/django
|
python
|
def quote_name_unless_alias(self, name):
"\n A wrapper around connection.ops.quote_name that doesn't quote aliases\n for table names. This avoids problems with some SQL dialects that treat\n quoted strings specially (e.g. PostgreSQL).\n "
if (name in self.quote_cache):
return self.quote_cache[name]
if (((name in self.query.alias_map) and (name not in self.query.table_map)) or (name in self.query.extra_select) or ((name in self.query.external_aliases) and (name not in self.query.table_map))):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
|
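The alias pass-through above can be illustrated without a database connection. A stand-alone re-implementation of the same wrapper-plus-cache pattern; the double-quoting function stands in for connection.ops.quote_name and is an assumption, not the real backend hook:

def make_quoter(aliases, quote=lambda name: '"%s"' % name):
    cache = {}
    def quote_unless_alias(name):
        # Aliases pass through untouched; real names are quoted once and cached.
        if name not in cache:
            cache[name] = name if name in aliases else quote(name)
        return cache[name]
    return quote_unless_alias

qn = make_quoter(aliases={'T2'})
print(qn('auth_user'))  # "auth_user" -- a real table name gets quoted
print(qn('T2'))         # T2          -- a join alias is left bare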
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
"\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n\n If 'with_limits' is False, any limit/offset information is not included\n in the query.\n "
if (with_limits and (self.query.low_mark == self.query.high_mark)):
return ('', ())
self.subquery = subquery
refcounts_before = self.query.alias_refcount.copy()
try:
(extra_select, order_by, group_by) = self.pre_sql_setup()
if (with_limits and (self.query.low_mark == self.query.high_mark)):
return ('', ())
distinct_fields = self.get_distinct()
(from_, f_params) = self.get_from_clause()
(where, w_params) = (self.compile(self.where) if (self.where is not None) else ('', []))
(having, h_params) = (self.compile(self.having) if (self.having is not None) else ('', []))
params = []
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for (_, (s_sql, s_params), alias) in (self.select + extra_select):
if alias:
s_sql = ('%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)))
elif with_col_aliases:
s_sql = ('%s AS %s' % (s_sql, ('Col%d' % col_idx)))
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append(('WHERE %s' % where))
params.extend(w_params)
grouping = []
for (g_sql, g_params) in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
if (not order_by):
order_by = self.connection.ops.force_no_ordering()
result.append(('GROUP BY %s' % ', '.join(grouping)))
if having:
result.append(('HAVING %s' % having))
params.extend(h_params)
if order_by:
ordering = []
for (_, (o_sql, o_params, _)) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append(('ORDER BY %s' % ', '.join(ordering)))
if with_limits:
if (self.query.high_mark is not None):
result.append(('LIMIT %d' % (self.query.high_mark - self.query.low_mark)))
if self.query.low_mark:
if (self.query.high_mark is None):
val = self.connection.ops.no_limit_value()
if val:
result.append(('LIMIT %d' % val))
result.append(('OFFSET %d' % self.query.low_mark))
if (self.query.select_for_update and self.connection.features.has_select_for_update):
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
nowait = self.query.select_for_update_nowait
if (nowait and (not self.connection.features.has_select_for_update_nowait)):
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
return (' '.join(result), tuple(params))
finally:
self.query.reset_refcounts(refcounts_before)
| -2,196,786,343,217,834,000
|
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
|
django/db/models/sql/compiler.py
|
as_sql
|
hottwaj/django
|
python
|
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
"\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n\n If 'with_limits' is False, any limit/offset information is not included\n in the query.\n "
if (with_limits and (self.query.low_mark == self.query.high_mark)):
        return ('', ())
self.subquery = subquery
refcounts_before = self.query.alias_refcount.copy()
try:
(extra_select, order_by, group_by) = self.pre_sql_setup()
if (with_limits and (self.query.low_mark == self.query.high_mark)):
            return ('', ())
distinct_fields = self.get_distinct()
(from_, f_params) = self.get_from_clause()
        (where, w_params) = (self.compile(self.where) if (self.where is not None) else ('', []))
        (having, h_params) = (self.compile(self.having) if (self.having is not None) else ('', []))
params = []
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for (_, (s_sql, s_params), alias) in (self.select + extra_select):
if alias:
s_sql = ('%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)))
elif with_col_aliases:
s_sql = ('%s AS %s' % (s_sql, ('Col%d' % col_idx)))
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append(('WHERE %s' % where))
params.extend(w_params)
grouping = []
for (g_sql, g_params) in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
if (not order_by):
order_by = self.connection.ops.force_no_ordering()
result.append(('GROUP BY %s' % ', '.join(grouping)))
if having:
result.append(('HAVING %s' % having))
params.extend(h_params)
if order_by:
ordering = []
for (_, (o_sql, o_params, _)) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append(('ORDER BY %s' % ', '.join(ordering)))
if with_limits:
if (self.query.high_mark is not None):
result.append(('LIMIT %d' % (self.query.high_mark - self.query.low_mark)))
if self.query.low_mark:
if (self.query.high_mark is None):
val = self.connection.ops.no_limit_value()
if val:
result.append(('LIMIT %d' % val))
result.append(('OFFSET %d' % self.query.low_mark))
if (self.query.select_for_update and self.connection.features.has_select_for_update):
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
nowait = self.query.select_for_update_nowait
if (nowait and (not self.connection.features.has_select_for_update_nowait)):
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
return (' '.join(result), tuple(params))
finally:
self.query.reset_refcounts(refcounts_before)
|
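Given the in-memory setup from the get_order_by sketch above, the compiled SQL and its parameter tuple come straight out of as_sql(); the LIMIT below is emitted by the with_limits branch:

from django.contrib.auth.models import User

qs = User.objects.filter(is_active=True)[:10]
sql, params = qs.query.get_compiler('default').as_sql()
print(sql)     # SELECT ... FROM "auth_user" WHERE "auth_user"."is_active" = %s LIMIT 10
print(params)  # (True,)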
def as_nested_sql(self):
"\n Perform the same functionality as the as_sql() method, returning an\n SQL string and parameters. However, the alias prefixes are bumped\n beforehand (in a copy -- the current query isn't changed), and any\n ordering is removed if the query is unsliced.\n\n Used when nesting this query inside another.\n "
obj = self.query.clone()
if ((obj.low_mark == 0) and (obj.high_mark is None) and (not self.query.distinct_fields)):
obj.clear_ordering(True)
nested_sql = obj.get_compiler(connection=self.connection).as_sql(subquery=True)
if (nested_sql == ('', ())):
raise EmptyResultSet
return nested_sql
| 4,420,097,920,054,420,500
|
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
|
django/db/models/sql/compiler.py
|
as_nested_sql
|
hottwaj/django
|
python
|
def as_nested_sql(self):
"\n Perform the same functionality as the as_sql() method, returning an\n SQL string and parameters. However, the alias prefixes are bumped\n beforehand (in a copy -- the current query isn't changed), and any\n ordering is removed if the query is unsliced.\n\n Used when nesting this query inside another.\n "
obj = self.query.clone()
if ((obj.low_mark == 0) and (obj.high_mark is None) and (not self.query.distinct_fields)):
obj.clear_ordering(True)
nested_sql = obj.get_compiler(connection=self.connection).as_sql(subquery=True)
    if (nested_sql == ('', ())):
raise EmptyResultSet
return nested_sql
|
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
'\n Computes the default columns for selecting every field in the base\n model. Will sometimes be called to pull in related models (e.g. via\n select_related), in which case "opts" and "start_alias" will be given\n to provide a starting point for the traversal.\n\n Returns a list of strings, quoted appropriately for use in SQL\n directly, as well as a set of aliases used in the select statement (if\n \'as_pairs\' is True, returns a list of (alias, col_name) pairs instead\n of strings as the first component and None as the second component).\n '
result = []
if (opts is None):
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if (not start_alias):
start_alias = self.query.get_initial_alias()
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
if (model == opts.model):
model = None
if (from_parent and (model is not None) and issubclass(from_parent._meta.concrete_model, model._meta.concrete_model)):
continue
if ((field.model in only_load) and (field.attname not in only_load[field.model])):
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
| -1,544,447,054,007,162,000
|
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
|
django/db/models/sql/compiler.py
|
get_default_columns
|
hottwaj/django
|
python
|
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
'\n Computes the default columns for selecting every field in the base\n model. Will sometimes be called to pull in related models (e.g. via\n select_related), in which case "opts" and "start_alias" will be given\n to provide a starting point for the traversal.\n\n Returns a list of strings, quoted appropriately for use in SQL\n directly, as well as a set of aliases used in the select statement (if\n \'as_pairs\' is True, returns a list of (alias, col_name) pairs instead\n of strings as the first component and None as the second component).\n '
result = []
if (opts is None):
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if (not start_alias):
start_alias = self.query.get_initial_alias()
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
if (model == opts.model):
model = None
if (from_parent and (model is not None) and issubclass(from_parent._meta.concrete_model, model._meta.concrete_model)):
continue
if ((field.model in only_load) and (field.attname not in only_load[field.model])):
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
|
def get_distinct(self):
'\n Returns a quoted list of fields to use in DISTINCT ON part of the query.\n\n Note that this method can alter the tables in the query, and thus it\n must be called before get_from_clause().\n '
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
(_, targets, alias, joins, path, _) = self._setup_joins(parts, opts, None)
(targets, alias, _) = self.query.trim_joins(targets, joins, path)
for target in targets:
if (name in self.query.annotation_select):
result.append(name)
else:
result.append(('%s.%s' % (qn(alias), qn2(target.column))))
return result
| 7,028,599,802,511,206,000
|
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
|
django/db/models/sql/compiler.py
|
get_distinct
|
hottwaj/django
|
python
|
def get_distinct(self):
'\n Returns a quoted list of fields to use in DISTINCT ON part of the query.\n\n Note that this method can alter the tables in the query, and thus it\n must be called before get_from_clause().\n '
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
(_, targets, alias, joins, path, _) = self._setup_joins(parts, opts, None)
(targets, alias, _) = self.query.trim_joins(targets, joins, path)
for target in targets:
if (name in self.query.annotation_select):
result.append(name)
else:
result.append(('%s.%s' % (qn(alias), qn2(target.column))))
return result
|
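get_distinct() can be called directly to see the quoted column list; note that the resulting DISTINCT ON clause only executes on PostgreSQL, so this sketch (same assumed setup as above) stops at inspection:

from django.contrib.auth.models import User

qs = User.objects.order_by('email').distinct('email')
compiler = qs.query.get_compiler('default')
print(compiler.get_distinct())  # ['"auth_user"."email"'] -- PostgreSQL-only at execution time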
def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None):
"\n Returns the table alias (the name might be ambiguous, the alias will\n not be) and column name for ordering by the given 'name' parameter.\n The 'name' is of the form 'field1__field2__...__fieldN'.\n "
(name, order) = get_order_dir(name, default_order)
descending = (True if (order == 'DESC') else False)
pieces = name.split(LOOKUP_SEP)
(field, targets, alias, joins, path, opts) = self._setup_joins(pieces, opts, alias)
if (field.is_relation and path and opts.ordering and (name != field.attname)):
if (not already_seen):
already_seen = set()
join_tuple = tuple((getattr(self.query.alias_map[j], 'join_cols', None) for j in joins))
if (join_tuple in already_seen):
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias, order, already_seen))
return results
(targets, alias, _) = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
| 5,748,303,804,264,708,000
|
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
|
django/db/models/sql/compiler.py
|
find_ordering_name
|
hottwaj/django
|
python
|
def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None):
"\n Returns the table alias (the name might be ambiguous, the alias will\n not be) and column name for ordering by the given 'name' parameter.\n The 'name' is of the form 'field1__field2__...__fieldN'.\n "
(name, order) = get_order_dir(name, default_order)
descending = (True if (order == 'DESC') else False)
pieces = name.split(LOOKUP_SEP)
(field, targets, alias, joins, path, opts) = self._setup_joins(pieces, opts, alias)
if (field.is_relation and path and opts.ordering and (name != field.attname)):
if (not already_seen):
already_seen = set()
join_tuple = tuple((getattr(self.query.alias_map[j], 'join_cols', None) for j in joins))
if (join_tuple in already_seen):
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias, order, already_seen))
return results
(targets, alias, _) = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
|
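Ordering across a relation exercises find_ordering_name()'s join setup. Under the same assumed setup, Permission's content_type foreign key pulls django_content_type into the FROM clause:

from django.contrib.auth.models import Permission

qs = Permission.objects.order_by('content_type__app_label')
sql, params = qs.query.get_compiler('default').as_sql()
print(sql)  # ... INNER JOIN "django_content_type" ... ORDER BY "django_content_type"."app_label" ASC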
def _setup_joins(self, pieces, opts, alias):
'\n A helper method for get_order_by and get_distinct.\n\n Note that get_ordering and get_distinct must produce same target\n columns on same input, as the prefixes of get_ordering and get_distinct\n must match. Executing SQL where this is not true is an error.\n '
if (not alias):
alias = self.query.get_initial_alias()
(field, targets, opts, joins, path) = self.query.setup_joins(pieces, opts, alias)
alias = joins[(- 1)]
return (field, targets, alias, joins, path, opts)
| -8,333,750,037,689,660,000
|
A helper method for get_order_by and get_distinct.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
|
django/db/models/sql/compiler.py
|
_setup_joins
|
hottwaj/django
|
python
|
def _setup_joins(self, pieces, opts, alias):
'\n A helper method for get_order_by and get_distinct.\n\n Note that get_ordering and get_distinct must produce same target\n columns on same input, as the prefixes of get_ordering and get_distinct\n must match. Executing SQL where this is not true is an error.\n '
if (not alias):
alias = self.query.get_initial_alias()
(field, targets, opts, joins, path) = self.query.setup_joins(pieces, opts, alias)
alias = joins[(- 1)]
return (field, targets, alias, joins, path, opts)
|
def get_from_clause(self):
    '\n Returns a list of strings that are joined together to go after the\n "FROM" part of the query, as well as a list of any extra parameters that\n need to be included. Subclasses can override this to create a\n from-clause via a "select".\n\n This should only be called after any SQL construction methods that\n might change the tables we need. This means the select columns,\n ordering and distinct must be done first.\n '
result = []
params = []
for alias in self.query.tables:
if (not self.query.alias_refcount[alias]):
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
continue
(clause_sql, clause_params) = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
(alias, _) = self.query.table_alias(t)
if ((alias not in self.query.alias_map) or (self.query.alias_refcount[alias] == 1)):
result.append((', %s' % self.quote_name_unless_alias(alias)))
return (result, params)
| -6,299,220,823,378,438,000
|
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
|
django/db/models/sql/compiler.py
|
get_from_clause
|
hottwaj/django
|
python
|
def get_from_clause(self):
    '\n Returns a list of strings that are joined together to go after the\n "FROM" part of the query, as well as a list of any extra parameters that\n need to be included. Subclasses can override this to create a\n from-clause via a "select".\n\n This should only be called after any SQL construction methods that\n might change the tables we need. This means the select columns,\n ordering and distinct must be done first.\n '
result = []
params = []
for alias in self.query.tables:
if (not self.query.alias_refcount[alias]):
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
continue
(clause_sql, clause_params) = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
(alias, _) = self.query.table_alias(t)
if ((alias not in self.query.alias_map) or (self.query.alias_refcount[alias] == 1)):
result.append((', %s' % self.quote_name_unless_alias(alias)))
return (result, params)
|
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None):
'\n Fill in the information needed for a select_related query. The current\n depth is measured as the number of connections away from the root model\n (for example, cur_depth=1 means we are looking at models with direct\n connections to the root model).\n '
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (f.field.related_query_name() for f in opts.related_objects if f.field.unique)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if ((not restricted) and self.query.max_depth and (cur_depth > self.query.max_depth)):
return related_klass_infos
if (not opts):
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
fields_found = set()
if (requested is None):
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if (not f.is_relation):
if (next or ((cur_depth == 1) and (f.name in requested))):
raise FieldError(("Non-relational field given in select_related: '%s'. Choices are: %s" % (f.name, (', '.join(_get_field_choices()) or '(none)'))))
else:
next = False
if (not select_related_descend(f, restricted, requested, only_load.get(field_model))):
continue
klass_info = {'model': f.remote_field.model, 'field': f, 'reverse': False, 'from_parent': False}
related_klass_infos.append(klass_info)
select_fields = []
(_, _, _, joins, _) = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[(- 1)]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(select, f.remote_field.model._meta, alias, (cur_depth + 1), next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [(o.field, o.related_model) for o in opts.related_objects if (o.field.unique and (not o.many_to_many))]
for (f, model) in related_fields:
if (not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True)):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
(_, _, _, joins, _) = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[(- 1)]
from_parent = issubclass(model, opts.model)
klass_info = {'model': model, 'field': f, 'reverse': True, 'from_parent': from_parent}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(select, model._meta, alias, (cur_depth + 1), next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = (("'%s'" % s) for s in fields_not_found)
raise FieldError(('Invalid field name(s) given in select_related: %s. Choices are: %s' % (', '.join(invalid_fields), (', '.join(_get_field_choices()) or '(none)'))))
return related_klass_infos
| 5,857,042,856,470,152,000
|
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
|
django/db/models/sql/compiler.py
|
get_related_selections
|
hottwaj/django
|
python
|
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None):
'\n Fill in the information needed for a select_related query. The current\n depth is measured as the number of connections away from the root model\n (for example, cur_depth=1 means we are looking at models with direct\n connections to the root model).\n '
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (f.field.related_query_name() for f in opts.related_objects if f.field.unique)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if ((not restricted) and self.query.max_depth and (cur_depth > self.query.max_depth)):
return related_klass_infos
if (not opts):
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
fields_found = set()
if (requested is None):
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if (not f.is_relation):
if (next or ((cur_depth == 1) and (f.name in requested))):
raise FieldError(("Non-relational field given in select_related: '%s'. Choices are: %s" % (f.name, (', '.join(_get_field_choices()) or '(none)'))))
else:
next = False
if (not select_related_descend(f, restricted, requested, only_load.get(field_model))):
continue
klass_info = {'model': f.remote_field.model, 'field': f, 'reverse': False, 'from_parent': False}
related_klass_infos.append(klass_info)
select_fields = []
(_, _, _, joins, _) = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[(- 1)]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(select, f.remote_field.model._meta, alias, (cur_depth + 1), next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [(o.field, o.related_model) for o in opts.related_objects if (o.field.unique and (not o.many_to_many))]
for (f, model) in related_fields:
if (not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True)):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
(_, _, _, joins, _) = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[(- 1)]
from_parent = issubclass(model, opts.model)
klass_info = {'model': model, 'field': f, 'reverse': True, 'from_parent': from_parent}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(select, model._meta, alias, (cur_depth + 1), next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = (("'%s'" % s) for s in fields_not_found)
raise FieldError(('Invalid field name(s) given in select_related: %s. Choices are: %s' % (', '.join(invalid_fields), (', '.join(_get_field_choices()) or '(none)'))))
return related_klass_infos
|
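get_related_selections() is what widens the SELECT list for select_related(). A sketch under the same assumed setup; the exact column order is backend-determined:

from django.contrib.auth.models import Permission

sql, _ = Permission.objects.select_related('content_type').query.get_compiler('default').as_sql()
# The SELECT list now carries django_content_type's columns alongside
# auth_permission's, and the klass_info structure describes how to stitch
# the rows back into related model instances.
print(sql)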
def deferred_to_columns(self):
    '\n Converts the self.deferred_loading data structure to a mapping of table\n names to sets of column names which are to be loaded. Returns the\n dictionary.\n '
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
| -7,688,170,534,660,855,000
|
Converts the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
|
django/db/models/sql/compiler.py
|
deferred_to_columns
|
hottwaj/django
|
python
|
def deferred_to_columns(self):
    '\n Converts the self.deferred_loading data structure to a mapping of table\n names to sets of column names which are to be loaded. Returns the\n dictionary.\n '
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
|
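only()/defer() feed the structure that deferred_to_columns() flattens. Same assumed setup; the exact set contents depend on the model's concrete fields (the pk is always loaded):

from django.contrib.auth.models import User

qs = User.objects.only('username')
print(qs.query.get_compiler('default').deferred_to_columns())
# {'auth_user': {'id', 'username'}} -- table name mapped to the columns to load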
def results_iter(self, results=None):
'\n Returns an iterator over the results from executing this query.\n '
converters = None
if (results is None):
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
(yield row)
| 3,676,796,479,780,158,000
|
Returns an iterator over the results from executing this query.
|
django/db/models/sql/compiler.py
|
results_iter
|
hottwaj/django
|
python
|
def results_iter(self, results=None):
'\n \n '
converters = None
if (results is None):
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
(yield row)
|
def has_results(self):
'\n Backends (e.g. NoSQL) can override this in order to use optimized\n versions of "query has any results."\n '
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
| -878,524,087,765,658,400
|
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
|
django/db/models/sql/compiler.py
|
has_results
|
hottwaj/django
|
python
|
def has_results(self):
'\n Backends (e.g. NoSQL) can override this in order to use optimized\n versions of "query has any results."\n '
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
|
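QuerySet.exists() ultimately lands in has_results(). Rebuilding the masked query by hand (same assumed setup) shows the single-column SELECT it executes; add_extra's six-argument signature matches the call above:

from django.contrib.auth.models import User

q = User.objects.filter(username='alice').query.clone()
q.add_extra({'a': 1}, None, None, None, None, None)  # SELECT (1) AS "a"
q.set_extra_mask(['a'])                              # hide every other column
q.set_limits(high=1)                                 # exists() also caps at one row
print(q.get_compiler('default').as_sql())
# ('SELECT (1) AS "a" FROM "auth_user" WHERE "auth_user"."username" = %s LIMIT 1', ('alice',))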
def execute_sql(self, result_type=MULTI):
"\n Run the query against the database and returns the result(s). The\n return value is a single data item if result_type is SINGLE, or an\n iterator over the results if the result_type is MULTI.\n\n result_type is either MULTI (use fetchmany() to retrieve all rows),\n SINGLE (only retrieve a single row), or None. In this last case, the\n cursor is returned if any query is executed, since it's used by\n subclasses such as InsertQuery). It's possible, however, that no query\n is needed, as the filters describe an empty set. In that case, None is\n returned, to avoid any unnecessary database interaction.\n "
if (not result_type):
result_type = NO_RESULTS
try:
(sql, params) = self.as_sql()
if (not sql):
raise EmptyResultSet
except EmptyResultSet:
if (result_type == MULTI):
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if (result_type == CURSOR):
return cursor
if (result_type == SINGLE):
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
cursor.close()
if (result_type == NO_RESULTS):
cursor.close()
return
result = cursor_iter(cursor, self.connection.features.empty_fetchmany_value, self.col_count)
if (not self.connection.features.can_use_chunked_reads):
try:
return list(result)
finally:
cursor.close()
return result
| 1,245,196,509,513,170,400
|
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed (since it's used by
subclasses such as InsertQuery). It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
|
django/db/models/sql/compiler.py
|
execute_sql
|
hottwaj/django
|
python
|
def execute_sql(self, result_type=MULTI):
"\n Run the query against the database and returns the result(s). The\n return value is a single data item if result_type is SINGLE, or an\n iterator over the results if the result_type is MULTI.\n\n result_type is either MULTI (use fetchmany() to retrieve all rows),\n SINGLE (only retrieve a single row), or None. In this last case, the\n cursor is returned if any query is executed, since it's used by\n subclasses such as InsertQuery). It's possible, however, that no query\n is needed, as the filters describe an empty set. In that case, None is\n returned, to avoid any unnecessary database interaction.\n "
if (not result_type):
result_type = NO_RESULTS
try:
(sql, params) = self.as_sql()
if (not sql):
raise EmptyResultSet
except EmptyResultSet:
if (result_type == MULTI):
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if (result_type == CURSOR):
return cursor
if (result_type == SINGLE):
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
cursor.close()
if (result_type == NO_RESULTS):
cursor.close()
return
result = cursor_iter(cursor, self.connection.features.empty_fetchmany_value, self.col_count)
if (not self.connection.features.can_use_chunked_reads):
try:
return list(result)
finally:
cursor.close()
return result
|
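The EmptyResultSet branch is easy to trigger without any tables: an empty __in filter compiles to no SQL, so execute_sql() short-circuits. Same assumed setup; MULTI is the real constant from django.db.models.sql.constants:

from django.db.models.sql.constants import MULTI
from django.contrib.auth.models import User

compiler = User.objects.filter(pk__in=[]).query.get_compiler('default')
result = compiler.execute_sql(MULTI)
print(list(result))  # [] -- as_sql() raised EmptyResultSet, so no cursor was opened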
def field_as_sql(self, field, val):
'\n Take a field and a value intended to be saved on that field, and\n return placeholder SQL and accompanying params. Checks for raw values,\n expressions and fields with get_placeholder() defined in that order.\n\n When field is None, the value is considered raw and is used as the\n placeholder, with no corresponding parameters returned.\n '
if (field is None):
(sql, params) = (val, [])
elif hasattr(val, 'as_sql'):
(sql, params) = self.compile(val)
elif hasattr(field, 'get_placeholder'):
(sql, params) = (field.get_placeholder(val, self, self.connection), [val])
else:
(sql, params) = ('%s', [val])
params = self.connection.ops.modify_insert_params(sql, params)
return (sql, params)
| -1,086,004,953,535,969,000
|
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Checks for raw values,
expressions and fields with get_placeholder() defined in that order.
When field is None, the value is considered raw and is used as the
placeholder, with no corresponding parameters returned.
|
django/db/models/sql/compiler.py
|
field_as_sql
|
hottwaj/django
|
python
|
def field_as_sql(self, field, val):
'\n Take a field and a value intended to be saved on that field, and\n return placeholder SQL and accompanying params. Checks for raw values,\n expressions and fields with get_placeholder() defined in that order.\n\n When field is None, the value is considered raw and is used as the\n placeholder, with no corresponding parameters returned.\n '
if (field is None):
(sql, params) = (val, [])
elif hasattr(val, 'as_sql'):
(sql, params) = self.compile(val)
elif hasattr(field, 'get_placeholder'):
(sql, params) = (field.get_placeholder(val, self, self.connection), [val])
else:
(sql, params) = ('%s', [val])
params = self.connection.ops.modify_insert_params(sql, params)
return (sql, params)
|
def prepare_value(self, field, value):
"\n Prepare a value to be used in a query by resolving it if it is an\n expression and otherwise calling the field's get_db_prep_save().\n "
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
if value.contains_column_references:
raise ValueError(('Failed to insert expression "%s" on %s. F() expressions can only be used to update, not to insert.' % (value, field)))
if value.contains_aggregate:
raise FieldError('Aggregate functions are not allowed in this query')
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
| 5,424,878,748,118,091,000
|
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
|
django/db/models/sql/compiler.py
|
prepare_value
|
hottwaj/django
|
python
|
def prepare_value(self, field, value):
"\n Prepare a value to be used in a query by resolving it if it is an\n expression and otherwise calling the field's get_db_prep_save().\n "
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
if value.contains_column_references:
raise ValueError(('Failed to insert expression "%s" on %s. F() expressions can only be used to update, not to insert.' % (value, field)))
if value.contains_aggregate:
raise FieldError('Aggregate functions are not allowed in this query')
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
|
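prepare_value()'s column-reference guard is what blocks F() expressions on insert. A sketch under the same assumed setup; the ValueError fires while the INSERT is being compiled, before anything reaches the database:

from django.db.models import F
from django.contrib.auth.models import User

try:
    User.objects.create(username=F('username'))
except ValueError as exc:
    print(exc)  # Failed to insert expression ... can only be used to update, not to insert.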
def pre_save_val(self, field, obj):
"\n Get the given field's value off the given obj. pre_save() is used for\n things like auto_now on DateTimeField. Skip it if this is a raw query.\n "
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
| -4,987,961,374,691,074,000
|
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
|
django/db/models/sql/compiler.py
|
pre_save_val
|
hottwaj/django
|
python
|
def pre_save_val(self, field, obj):
"\n Get the given field's value off the given obj. pre_save() is used for\n things like auto_now on DateTimeField. Skip it if this is a raw query.\n "
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
|
def assemble_as_sql(self, fields, value_rows):
"\n Take a sequence of N fields and a sequence of M rows of values,\n generate placeholder SQL and parameters for each field and value, and\n return a pair containing:\n * a sequence of M rows of N SQL placeholder strings, and\n * a sequence of M rows of corresponding parameter values.\n\n Each placeholder string may contain any number of '%s' interpolation\n strings, and each parameter row will contain exactly as many params\n as the total number of '%s's in the corresponding placeholder row.\n "
if (not value_rows):
return ([], [])
rows_of_fields_as_sql = ((self.field_as_sql(field, v) for (field, v) in zip(fields, row)) for row in value_rows)
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
(placeholder_rows, param_rows) = zip(*sql_and_param_pair_rows)
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return (placeholder_rows, param_rows)
| -6,079,551,684,280,164,000
|
Take a sequence of N fields and a sequence of M rows of values,
generate placeholder SQL and parameters for each field and value, and
return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
|
django/db/models/sql/compiler.py
|
assemble_as_sql
|
hottwaj/django
|
python
|
def assemble_as_sql(self, fields, value_rows):
"\n Take a sequence of N fields and a sequence of M rows of values,\n generate placeholder SQL and parameters for each field and value, and\n return a pair containing:\n * a sequence of M rows of N SQL placeholder strings, and\n * a sequence of M rows of corresponding parameter values.\n\n Each placeholder string may contain any number of '%s' interpolation\n strings, and each parameter row will contain exactly as many params\n as the total number of '%s's in the corresponding placeholder row.\n "
if (not value_rows):
return ([], [])
rows_of_fields_as_sql = ((self.field_as_sql(field, v) for (field, v) in zip(fields, row)) for row in value_rows)
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
(placeholder_rows, param_rows) = zip(*sql_and_param_pair_rows)
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return (placeholder_rows, param_rows)
|
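The double zip() above is just a transposition. A stand-alone illustration with field_as_sql stubbed to the plain '%s' placeholder (an assumption; real fields may contribute several params per placeholder):

fields = ['name', 'age']                   # N = 2 fields, mirrored for clarity
value_rows = [['alice', 30], ['bob', 25]]  # M = 2 rows of values
rows_of_pairs = ((('%s', [v]) for v in row) for row in value_rows)
pair_rows = (zip(*row) for row in rows_of_pairs)
placeholder_rows, param_rows = zip(*pair_rows)
param_rows = [[p for ps in row for p in ps] for row in param_rows]
print(placeholder_rows)  # (('%s', '%s'), ('%s', '%s'))
print(param_rows)        # [['alice', 30], ['bob', 25]]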
def as_sql(self):
'\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n '
assert (len([t for t in self.query.tables if (self.query.alias_refcount[t] > 0)]) == 1), 'Can only delete from one table at a time.'
qn = self.quote_name_unless_alias
result = [('DELETE FROM %s' % qn(self.query.tables[0]))]
(where, params) = self.compile(self.query.where)
if where:
result.append(('WHERE %s' % where))
return (' '.join(result), tuple(params))
| -8,388,625,242,359,966,000
|
Creates the SQL for this query. Returns the SQL string and list of
parameters.
|
django/db/models/sql/compiler.py
|
as_sql
|
hottwaj/django
|
python
|
def as_sql(self):
'\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n '
assert (len([t for t in self.query.tables if (self.query.alias_refcount[t] > 0)]) == 1), 'Can only delete from one table at a time.'
qn = self.quote_name_unless_alias
result = [('DELETE FROM %s' % qn(self.query.tables[0]))]
(where, params) = self.compile(self.query.where)
if where:
result.append(('WHERE %s' % where))
return (' '.join(result), tuple(params))
|
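SQLDeleteCompiler's single-table assertion is easiest to see on a bare DeleteQuery. Same assumed setup; get_initial_alias() registers the base table so the assert passes:

from django.db.models.sql.subqueries import DeleteQuery
from django.contrib.auth.models import User

dq = DeleteQuery(User)
dq.get_initial_alias()  # makes auth_user the one active table
print(dq.get_compiler('default').as_sql())
# ('DELETE FROM "auth_user"', ())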
def as_sql(self):
'\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n '
self.pre_sql_setup()
if (not self.query.values):
return ('', ())
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = [('UPDATE %s' % qn(table))]
result.append('SET')
(values, update_params) = ([], [])
for (field, model, val) in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError('Aggregate functions are not allowed in this query')
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(val.prepare_database_save(field), connection=self.connection)
else:
raise TypeError(('Tried to update field %s with a model instance, %r. Use a value compatible with %s.' % (field, val, field.__class__.__name__)))
else:
val = field.get_db_prep_save(val, connection=self.connection)
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
(sql, params) = self.compile(val)
values.append(('%s = %s' % (qn(name), sql)))
update_params.extend(params)
elif (val is not None):
values.append(('%s = %s' % (qn(name), placeholder)))
update_params.append(val)
else:
values.append(('%s = NULL' % qn(name)))
if (not values):
return ('', ())
result.append(', '.join(values))
(where, params) = self.compile(self.query.where)
if where:
result.append(('WHERE %s' % where))
return (' '.join(result), tuple((update_params + params)))
| 1,968,413,504,332,736,500
|
Creates the SQL for this query. Returns the SQL string and list of
parameters.
|
django/db/models/sql/compiler.py
|
as_sql
|
hottwaj/django
|
python
|
def as_sql(self):
'\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n '
self.pre_sql_setup()
if (not self.query.values):
        return ('', ())
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = [('UPDATE %s' % qn(table))]
result.append('SET')
(values, update_params) = ([], [])
for (field, model, val) in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError('Aggregate functions are not allowed in this query')
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(val.prepare_database_save(field), connection=self.connection)
else:
raise TypeError(('Tried to update field %s with a model instance, %r. Use a value compatible with %s.' % (field, val, field.__class__.__name__)))
else:
val = field.get_db_prep_save(val, connection=self.connection)
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
(sql, params) = self.compile(val)
values.append(('%s = %s' % (qn(name), sql)))
update_params.extend(params)
elif (val is not None):
values.append(('%s = %s' % (qn(name), placeholder)))
update_params.append(val)
else:
values.append(('%s = NULL' % qn(name)))
if (not values):
        return ('', ())
result.append(', '.join(values))
(where, params) = self.compile(self.query.where)
if where:
result.append(('WHERE %s' % where))
return (' '.join(result), tuple((update_params + params)))
|
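The SET-clause assembly above can be driven directly through UpdateQuery. Same assumed setup; add_update_values() populates self.query.values with (field, model, value) triples:

from django.db.models.sql.subqueries import UpdateQuery
from django.contrib.auth.models import User

uq = UpdateQuery(User)
uq.add_update_values({'first_name': 'Ada'})
print(uq.get_compiler('default').as_sql())
# ('UPDATE "auth_user" SET "first_name" = %s', ('Ada',))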
def execute_sql(self, result_type):
'\n Execute the specified update. Returns the number of rows affected by\n the primary update query. The "primary update query" is the first\n non-empty query that is executed. Row counts for any subsequent,\n related queries are not available.\n '
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = (cursor.rowcount if cursor else 0)
is_empty = (cursor is None)
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if (is_empty and aux_rows):
rows = aux_rows
is_empty = False
return rows
| 5,114,767,702,504,362,000
|
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
|
django/db/models/sql/compiler.py
|
execute_sql
|
hottwaj/django
|
python
|
def execute_sql(self, result_type):
'\n Execute the specified update. Returns the number of rows affected by\n the primary update query. The "primary update query" is the first\n non-empty query that is executed. Row counts for any subsequent,\n related queries are not available.\n '
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = (cursor.rowcount if cursor else 0)
is_empty = (cursor is None)
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if (is_empty and aux_rows):
rows = aux_rows
is_empty = False
return rows
|
def pre_sql_setup(self):
'\n If the update depends on results from other tables, we need to do some\n munging of the "where" conditions to match the format required for\n (portable) SQL updates. That is done here.\n\n Further, if we are going to be running multiple updates, we pull out\n the id values to update at this point so that they don\'t change as a\n result of the progressive updates.\n '
refcounts_before = self.query.alias_refcount.copy()
self.query.get_initial_alias()
count = self.query.count_active_tables()
if ((not self.query.related_updates) and (count == 1)):
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super(SQLUpdateCompiler, self).pre_sql_setup()
must_pre_select = ((count > 1) and (not self.connection.features.update_can_self_select))
self.query.where = self.query.where_class()
if (self.query.related_updates or must_pre_select):
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend((r[0] for r in rows))
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
| -1,691,562,841,568,250,000
|
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
|
django/db/models/sql/compiler.py
|
pre_sql_setup
|
hottwaj/django
|
python
|
def pre_sql_setup(self):
'\n If the update depends on results from other tables, we need to do some\n munging of the "where" conditions to match the format required for\n (portable) SQL updates. That is done here.\n\n Further, if we are going to be running multiple updates, we pull out\n the id values to update at this point so that they don\'t change as a\n result of the progressive updates.\n '
refcounts_before = self.query.alias_refcount.copy()
self.query.get_initial_alias()
count = self.query.count_active_tables()
if ((not self.query.related_updates) and (count == 1)):
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super(SQLUpdateCompiler, self).pre_sql_setup()
must_pre_select = ((count > 1) and (not self.connection.features.update_can_self_select))
self.query.where = self.query.where_class()
if (self.query.related_updates or must_pre_select):
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend((r[0] for r in rows))
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
|
def as_sql(self):
'\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n '
if (not self.query.subquery):
raise EmptyResultSet
(sql, params) = ([], [])
for annotation in self.query.annotation_select.values():
(ann_sql, ann_params) = self.compile(annotation, select_format=True)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = ('SELECT %s FROM (%s) subquery' % (sql, self.query.subquery))
params = (params + self.query.sub_params)
return (sql, params)
| -7,176,846,690,096,063,000
|
Creates the SQL for this query. Returns the SQL string and list of
parameters.
|
django/db/models/sql/compiler.py
|
as_sql
|
hottwaj/django
|
python
|
def as_sql(self):
'\n Creates the SQL for this query. Returns the SQL string and list of\n parameters.\n '
if (not self.query.subquery):
raise EmptyResultSet
(sql, params) = ([], [])
for annotation in self.query.annotation_select.values():
(ann_sql, ann_params) = self.compile(annotation, select_format=True)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = ('SELECT %s FROM (%s) subquery' % (sql, self.query.subquery))
params = (params + self.query.sub_params)
return (sql, params)
|
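SQLAggregateCompiler only kicks in when the inner query must be wrapped, e.g. when it is sliced. A sketch of the resulting shape; running the aggregate would need a migrated database, so execution is left as a comment:

from django.db.models import Count
from django.contrib.auth.models import User

qs = User.objects.order_by()[:5]  # slicing forces the subquery wrapping
# Against a migrated database, qs.aggregate(n=Count('pk')) would execute roughly:
#   SELECT COUNT("subquery"."id") AS "n" FROM (SELECT "auth_user"."id" ... LIMIT 5) subquery
print(qs.query)  # the inner query that as_sql() wraps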
def test_register():
'Just test that there is no crash'
plugin.register_plugins([feedback])
| 1,114,956,463,751,036,300
|
Just test that there is no crash
|
tests/test_feedback.py
|
test_register
|
slarse/repobee-feedback
|
python
|
def test_register():
plugin.register_plugins([feedback])
|
@pytest.fixture
def with_issues(tmp_path):
    'Create issue files in a temporary directory and return a list of\n (repo_name, issue) tuples.\n '
repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES)
existing_issues = []
for repo_name in repo_names:
issue_file = (tmp_path / '{}.md'.format(repo_name))
issue = random.choice(ISSUES)
_write_issue(issue, issue_file)
existing_issues.append((repo_name, issue))
return existing_issues
| 3,977,162,880,016,719,400
|
Create issue files in a temporary directory and return a list of
(repo_name, issue) tuples.
|
tests/test_feedback.py
|
with_issues
|
slarse/repobee-feedback
|
python
|
@pytest.fixture
def with_issues(tmp_path):
    'Create issue files in a temporary directory and return a list of\n (repo_name, issue) tuples.\n '
repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES)
existing_issues = []
for repo_name in repo_names:
issue_file = (tmp_path / '{}.md'.format(repo_name))
issue = random.choice(ISSUES)
_write_issue(issue, issue_file)
existing_issues.append((repo_name, issue))
return existing_issues
|
@pytest.fixture
def with_multi_issues_file(tmp_path):
'Create the multi issues file.'
repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES)
repos_and_issues = [(repo_name, random.choice(ISSUES)) for repo_name in repo_names]
issues_file = (tmp_path / 'issues.md')
_write_multi_issues_file(repos_and_issues, issues_file)
return (issues_file, repos_and_issues)
| -4,134,705,189,988,907,500
|
Create the multi issues file.
|
tests/test_feedback.py
|
with_multi_issues_file
|
slarse/repobee-feedback
|
python
|
@pytest.fixture
def with_multi_issues_file(tmp_path):
repo_names = plug.generate_repo_names(STUDENT_TEAM_NAMES, ASSIGNMENT_NAMES)
repos_and_issues = [(repo_name, random.choice(ISSUES)) for repo_name in repo_names]
issues_file = (tmp_path / 'issues.md')
_write_multi_issues_file(repos_and_issues, issues_file)
return (issues_file, repos_and_issues)
|
def test_opens_issues_from_issues_dir(self, with_issues, parsed_args_issues_dir, api_mock):
    'Test that the callback calls API.create_issue for the expected\n repos and issues, when the issues all exist and are well formed.\n '
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in with_issues]
feedback.callback(args=parsed_args_issues_dir, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
| 2,710,277,438,403,715,000
|
Test that the callback calls API.create_issue for the expected
repos and issues, when the issues all exist and are well formed.
|
tests/test_feedback.py
|
test_opens_issues_from_issues_dir
|
slarse/repobee-feedback
|
python
|
def test_opens_issues_from_issues_dir(self, with_issues, parsed_args_issues_dir, api_mock):
    'Test that the callback calls API.create_issue for the expected\n repos and issues, when the issues all exist and are well formed.\n '
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in with_issues]
feedback.callback(args=parsed_args_issues_dir, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
|
def test_aborts_if_issue_is_missing(self, with_issues, parsed_args_issues_dir, api_mock, tmp_path):
'Test that the callback exits with a plug.PlugError if any of the\n expected issues is not found.\n '
repo_without_issue = plug.generate_repo_name(STUDENT_TEAM_NAMES[(- 1)], ASSIGNMENT_NAMES[0])
missing_file = (tmp_path / '{}.md'.format(repo_without_issue))
missing_file.unlink()
with pytest.raises(plug.PlugError) as exc_info:
feedback.callback(args=parsed_args_issues_dir, api=api_mock)
assert (repo_without_issue in str(exc_info.value))
assert (not api_mock.create_issue.called)
| -8,800,887,472,667,265,000
|
Test that the callback exits with a plug.PlugError if any of the
expected issues is not found.
|
tests/test_feedback.py
|
test_aborts_if_issue_is_missing
|
slarse/repobee-feedback
|
python
|
def test_aborts_if_issue_is_missing(self, with_issues, parsed_args_issues_dir, api_mock, tmp_path):
'Test that the callback exits with a plug.PlugError if any of the\n expected issues is not found.\n '
repo_without_issue = plug.generate_repo_name(STUDENT_TEAM_NAMES[(- 1)], ASSIGNMENT_NAMES[0])
missing_file = (tmp_path / '{}.md'.format(repo_without_issue))
missing_file.unlink()
with pytest.raises(plug.PlugError) as exc_info:
feedback.callback(args=parsed_args_issues_dir, api=api_mock)
assert (repo_without_issue in str(exc_info.value))
assert (not api_mock.create_issue.called)
|
def test_ignores_missing_issue_if_allow_missing(self, with_issues, parsed_args_issues_dir, api_mock, tmp_path):
    'Test that missing issues are ignored if --allow-missing is set.'
repo_without_issue = plug.generate_repo_name(STUDENT_TEAM_NAMES[(- 1)], ASSIGNMENT_NAMES[0])
(tmp_path / '{}.md'.format(repo_without_issue)).unlink()
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in with_issues if (repo_name != repo_without_issue)]
args_dict = vars(parsed_args_issues_dir)
args_dict['allow_missing'] = True
args = argparse.Namespace(**args_dict)
feedback.callback(args=args, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
| 607,823,310,561,474,000
|
Test that missing issues are ignored if --allow-missing is set.
|
tests/test_feedback.py
|
test_ignores_missing_issue_if_allow_missing
|
slarse/repobee-feedback
|
python
|
def test_ignores_missing_issue_if_allow_missing(self, with_issues, parsed_args_issues_dir, api_mock, tmp_path):
repo_without_issue = plug.generate_repo_name(STUDENT_TEAM_NAMES[(- 1)], ASSIGNMENT_NAMES[0])
(tmp_path / '{}.md'.format(repo_without_issue)).unlink()
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in with_issues if (repo_name != repo_without_issue)]
args_dict = vars(parsed_args_issues_dir)
args_dict['allow_missing'] = True
args = argparse.Namespace(**args_dict)
feedback.callback(args=args, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
|
def test_opens_nothing_if_open_prompt_returns_false(self, with_issues, parsed_args_issues_dir, api_mock):
"Test that the callback does not attempt to open any issues if the\n 'may I open' prompt returns false.\n "
args_dict = vars(parsed_args_issues_dir)
args_dict['batch_mode'] = False
parsed_args_interactive = argparse.Namespace(**args_dict)
with mock.patch('builtins.input', return_value='n', autospec=True):
feedback.callback(args=parsed_args_interactive, api=api_mock)
assert (not api_mock.create_issue.called)
| -425,145,883,097,062,600
|
Test that the callback does not attempt to open any issues if the
'may I open' prompt returns false.
|
tests/test_feedback.py
|
test_opens_nothing_if_open_prompt_returns_false
|
slarse/repobee-feedback
|
python
|
def test_opens_nothing_if_open_prompt_returns_false(self, with_issues, parsed_args_issues_dir, api_mock):
"Test that the callback does not attempt to open any issues if the\n 'may I open' prompt returns false.\n "
args_dict = vars(parsed_args_issues_dir)
args_dict['batch_mode'] = False
parsed_args_interactive = argparse.Namespace(**args_dict)
with mock.patch('builtins.input', return_value='n', autospec=True):
feedback.callback(args=parsed_args_interactive, api=api_mock)
assert (not api_mock.create_issue.called)
|
def test_opens_issues_from_multi_issues_file(self, with_multi_issues_file, api_mock, parsed_args_multi_issues_file):
'Test that the callback opens issues correctly when they are all\n contained in a multi issues file.\n '
(issues_file, repos_and_issues) = with_multi_issues_file
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in repos_and_issues]
feedback.callback(args=parsed_args_multi_issues_file, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls)
| 3,622,953,374,141,381,000
|
Test that the callback opens issues correctly when they are all
contained in a multi issues file.
|
tests/test_feedback.py
|
test_opens_issues_from_multi_issues_file
|
slarse/repobee-feedback
|
python
|
def test_opens_issues_from_multi_issues_file(self, with_multi_issues_file, api_mock, parsed_args_multi_issues_file):
'Test that the callback opens issues correctly when they are all\n contained in a multi issues file.\n '
(issues_file, repos_and_issues) = with_multi_issues_file
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in repos_and_issues]
feedback.callback(args=parsed_args_multi_issues_file, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls)
|
def test_skips_unexpected_issues_in_multi_issues_file(self, with_multi_issues_file, parsed_args_multi_issues_file, api_mock):
    'Test that issues are skipped if they relate to student repos that\n are not in the product of (assignments, students).\n '
student_teams = parsed_args_multi_issues_file.students
args_dict = vars(parsed_args_multi_issues_file)
args_dict['students'] = student_teams[:(- 1)]
args = argparse.Namespace(**args_dict)
unexpected_repos = plug.generate_repo_names(student_teams[(- 1):], ASSIGNMENT_NAMES)
(_, repos_and_issues) = with_multi_issues_file
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in repos_and_issues if (repo_name not in unexpected_repos)]
feedback.callback(args=args, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
| 1,893,744,415,645,420,800
|
Test that issues are skipped if they relate to student repos that
are not in the product of (assignments, students).
|
tests/test_feedback.py
|
test_skips_unexpected_issues_in_multi_issues_file
|
slarse/repobee-feedback
|
python
|
def test_skips_unexpected_issues_in_multi_issues_file(self, with_multi_issues_file, parsed_args_multi_issues_file, api_mock):
    'Test that issues are skipped if they relate to student repos that\n are not in the product of (assignments, students).\n '
student_teams = parsed_args_multi_issues_file.students
args_dict = vars(parsed_args_multi_issues_file)
args_dict['students'] = student_teams[:(- 1)]
args = argparse.Namespace(**args_dict)
unexpected_repos = plug.generate_repo_names(student_teams[(- 1):], ASSIGNMENT_NAMES)
(_, repos_and_issues) = with_multi_issues_file
expected_calls = [mock.call(issue.title, issue.body, mock.ANY) for (repo_name, issue) in repos_and_issues if (repo_name not in unexpected_repos)]
feedback.callback(args=args, api=api_mock)
api_mock.create_issue.assert_has_calls(expected_calls, any_order=True)
|
def __init__(self, x: Union[(List[float], np.ndarray)], fval: float, variables: List[Variable], replacements: Dict[(str, Tuple[(str, int)])], history: Tuple[(List[MinimumEigenOptimizationResult], OptimizationResult)]) -> None:
    '\n Constructs an instance of the result class.\n\n Args:\n x: the optimal value found in the optimization.\n fval: the optimal function value.\n variables: the list of variables of the optimization problem.\n replacements: a dictionary of substituted variables. Key is a variable being\n substituted, value is a tuple of substituting variable and a weight, either 1 or -1.\n history: a tuple containing intermediate results. The first element is a list of\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizationResult` obtained by\n invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively,\n the second element is an instance of\n :class:`~qiskit.optimization.algorithms.OptimizationResult` obtained at the last step\n via `min_num_vars_optimizer`.\n '
super().__init__(x, fval, variables, None)
self._replacements = replacements
self._history = history
| 8,220,554,276,182,333,000
|
Constructs an instance of the result class.
Args:
x: the optimal value found in the optimization.
fval: the optimal function value.
variables: the list of variables of the optimization problem.
replacements: a dictionary of substituted variables. Key is a variable being
substituted, value is a tuple of substituting variable and a weight, either 1 or -1.
history: a tuple containing intermediate results. The first element is a list of
:class:`~qiskit.optimization.algorithms.MinimumEigenOptimizationResult` obtained by
invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively,
the second element is an instance of
:class:`~qiskit.optimization.algorithms.OptimizationResult` obtained at the last step
via `min_num_vars_optimizer`.
|
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
__init__
|
Cristian-Malinescu/qiskit-aqua
|
python
|
def __init__(self, x: Union[(List[float], np.ndarray)], fval: float, variables: List[Variable], replacements: Dict[(str, Tuple[(str, int)])], history: Tuple[(List[MinimumEigenOptimizationResult], OptimizationResult)]) -> None:
    '\n Constructs an instance of the result class.\n\n Args:\n x: the optimal value found in the optimization.\n fval: the optimal function value.\n variables: the list of variables of the optimization problem.\n replacements: a dictionary of substituted variables. Key is a variable being\n substituted, value is a tuple of substituting variable and a weight, either 1 or -1.\n history: a tuple containing intermediate results. The first element is a list of\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizationResult` obtained by\n invoking :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively,\n the second element is an instance of\n :class:`~qiskit.optimization.algorithms.OptimizationResult` obtained at the last step\n via `min_num_vars_optimizer`.\n '
super().__init__(x, fval, variables, None)
self._replacements = replacements
self._history = history
|
@property
def replacements(self) -> Dict[(str, Tuple[(str, int)])]:
'\n Returns a dictionary of substituted variables. Key is a variable being substituted, value\n is a tuple of substituting variable and a weight, either 1 or -1.'
return self._replacements
| 6,997,684,331,896,984,000
|
Returns a dictionary of substituted variables. Key is a variable being substituted, value
is a tuple of substituting variable and a weight, either 1 or -1.
|
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
replacements
|
Cristian-Malinescu/qiskit-aqua
|
python
|
@property
def replacements(self) -> Dict[(str, Tuple[(str, int)])]:
'\n Returns a dictionary of substituted variables. Key is a variable being substituted, value\n is a tuple of substituting variable and a weight, either 1 or -1.'
return self._replacements
|
@property
def history(self) -> Tuple[(List[MinimumEigenOptimizationResult], OptimizationResult)]:
    '\n Returns intermediate results. The first element is a list of\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizationResult` obtained by invoking\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second\n element is an instance of :class:`~qiskit.optimization.algorithms.OptimizationResult`\n obtained at the last step via `min_num_vars_optimizer`.\n '
return self._history
| 487,360,261,191,788,160
|
Returns intermediate results. The first element is a list of
:class:`~qiskit.optimization.algorithms.MinimumEigenOptimizationResult` obtained by invoking
:class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second
element is an instance of :class:`~qiskit.optimization.algorithms.OptimizationResult`
obtained at the last step via `min_num_vars_optimizer`.
|
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
history
|
Cristian-Malinescu/qiskit-aqua
|
python
|
@property
def history(self) -> Tuple[(List[MinimumEigenOptimizationResult], OptimizationResult)]:
    '\n Returns intermediate results. The first element is a list of\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizationResult` obtained by invoking\n :class:`~qiskit.optimization.algorithms.MinimumEigenOptimizer` iteratively, the second\n element is an instance of :class:`~qiskit.optimization.algorithms.OptimizationResult`\n obtained at the last step via `min_num_vars_optimizer`.\n '
return self._history
|
def __init__(self, min_eigen_optimizer: MinimumEigenOptimizer, min_num_vars: int=1, min_num_vars_optimizer: Optional[OptimizationAlgorithm]=None, penalty: Optional[float]=None, history: Optional[IntermediateResult]=IntermediateResult.LAST_ITERATION) -> None:
    ' Initializes the recursive minimum eigen optimizer.\n\n This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when\n to apply the iterative scheme, and the optimizer to be applied once the threshold number of\n variables is reached.\n\n Args:\n min_eigen_optimizer: The eigen optimizer to use in every iteration.\n min_num_vars: The minimum number of variables to apply the recursive scheme. If this\n threshold is reached, the min_num_vars_optimizer is used.\n min_num_vars_optimizer: This optimizer is used after the recursive scheme for the\n problem with the remaining variables.\n penalty: The factor that is used to scale the penalty terms corresponding to linear\n equality constraints.\n history: Whether the intermediate results are stored.\n Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`.\n\n Raises:\n QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1).\n '
validate_min('min_num_vars', min_num_vars, 1)
self._min_eigen_optimizer = min_eigen_optimizer
self._min_num_vars = min_num_vars
if min_num_vars_optimizer:
self._min_num_vars_optimizer = min_num_vars_optimizer
else:
self._min_num_vars_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
self._penalty = penalty
self._history = history
self._qubo_converter = QuadraticProgramToQubo()
| -677,792,891,465,104,500
|
Initializes the recursive minimum eigen optimizer.
This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when
to apply the iterative scheme, and the optimizer to be applied once the threshold number of
variables is reached.
Args:
min_eigen_optimizer: The eigen optimizer to use in every iteration.
min_num_vars: The minimum number of variables to apply the recursive scheme. If this
threshold is reached, the min_num_vars_optimizer is used.
min_num_vars_optimizer: This optimizer is used after the recursive scheme for the
problem with the remaining variables.
penalty: The factor that is used to scale the penalty terms corresponding to linear
equality constraints.
history: Whether the intermediate results are stored.
Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`.
Raises:
QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1).
|
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
__init__
|
Cristian-Malinescu/qiskit-aqua
|
python
|
def __init__(self, min_eigen_optimizer: MinimumEigenOptimizer, min_num_vars: int=1, min_num_vars_optimizer: Optional[OptimizationAlgorithm]=None, penalty: Optional[float]=None, history: Optional[IntermediateResult]=IntermediateResult.LAST_ITERATION) -> None:
    ' Initializes the recursive minimum eigen optimizer.\n\n This initializer takes a ``MinimumEigenOptimizer``, the parameters to specify until when\n to apply the iterative scheme, and the optimizer to be applied once the threshold number of\n variables is reached.\n\n Args:\n min_eigen_optimizer: The eigen optimizer to use in every iteration.\n min_num_vars: The minimum number of variables to apply the recursive scheme. If this\n threshold is reached, the min_num_vars_optimizer is used.\n min_num_vars_optimizer: This optimizer is used after the recursive scheme for the\n problem with the remaining variables.\n penalty: The factor that is used to scale the penalty terms corresponding to linear\n equality constraints.\n history: Whether the intermediate results are stored.\n Default value is :py:obj:`~IntermediateResult.LAST_ITERATION`.\n\n Raises:\n QiskitOptimizationError: In case of invalid parameters (num_min_vars < 1).\n '
validate_min('min_num_vars', min_num_vars, 1)
self._min_eigen_optimizer = min_eigen_optimizer
self._min_num_vars = min_num_vars
if min_num_vars_optimizer:
self._min_num_vars_optimizer = min_num_vars_optimizer
else:
self._min_num_vars_optimizer = MinimumEigenOptimizer(NumPyMinimumEigensolver())
self._penalty = penalty
self._history = history
self._qubo_converter = QuadraticProgramToQubo()
|
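A minimal usage sketch for the optimizer defined above. The toy QUBO, the QAOA sub-solver, and the simulator backend are illustrative assumptions from the qiskit-aqua era API, not taken from this file.

# Hedged sketch: solving a toy QUBO with RecursiveMinimumEigenOptimizer.
from qiskit import Aer
from qiskit.aqua.algorithms import QAOA
from qiskit.optimization import QuadraticProgram
from qiskit.optimization.algorithms import (MinimumEigenOptimizer,
                                            RecursiveMinimumEigenOptimizer)

qp = QuadraticProgram()
for name in ('x', 'y', 'z'):
    qp.binary_var(name)
qp.minimize(linear={'x': 1}, quadratic={('x', 'y'): 2, ('y', 'z'): -1})

sub_solver = MinimumEigenOptimizer(QAOA(quantum_instance=Aer.get_backend('qasm_simulator')))
rqaoa = RecursiveMinimumEigenOptimizer(sub_solver, min_num_vars=1)
result = rqaoa.solve(qp)
print(result.x, result.fval, result.replacements)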
def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
'Checks whether a given problem can be solved with this optimizer.\n\n Checks whether the given problem is compatible, i.e., whether the problem can be converted\n to a QUBO, and otherwise, returns a message explaining the incompatibility.\n\n Args:\n problem: The optimization problem to check compatibility.\n\n Returns:\n A message describing the incompatibility.\n '
return QuadraticProgramToQubo.get_compatibility_msg(problem)
| -6,303,538,141,527,137,000
|
Checks whether a given problem can be solved with this optimizer.
Checks whether the given problem is compatible, i.e., whether the problem can be converted
to a QUBO, and otherwise, returns a message explaining the incompatibility.
Args:
problem: The optimization problem to check compatibility.
Returns:
A message describing the incompatibility.
|
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
get_compatibility_msg
|
Cristian-Malinescu/qiskit-aqua
|
python
|
def get_compatibility_msg(self, problem: QuadraticProgram) -> str:
'Checks whether a given problem can be solved with this optimizer.\n\n Checks whether the given problem is compatible, i.e., whether the problem can be converted\n to a QUBO, and otherwise, returns a message explaining the incompatibility.\n\n Args:\n problem: The optimization problem to check compatibility.\n\n Returns:\n A message describing the incompatibility.\n '
return QuadraticProgramToQubo.get_compatibility_msg(problem)
|
def solve(self, problem: QuadraticProgram) -> OptimizationResult:
'Tries to solve the given problem using the recursive optimizer.\n\n Runs the optimizer to try to solve the optimization problem.\n\n Args:\n problem: The problem to be solved.\n\n Returns:\n The result of the optimizer applied to the problem.\n\n Raises:\n QiskitOptimizationError: Incompatible problem.\n QiskitOptimizationError: Infeasible due to variable substitution\n '
self._verify_compatibility(problem)
problem_ = self._qubo_converter.convert(problem)
problem_ref = deepcopy(problem_)
replacements = {}
min_eigen_results = []
while (problem_.get_num_vars() > self._min_num_vars):
res = self._min_eigen_optimizer.solve(problem_)
if (self._history == IntermediateResult.ALL_ITERATIONS):
min_eigen_results.append(res)
correlations = res.get_correlations()
(i, j) = self._find_strongest_correlation(correlations)
x_i = problem_.variables[i].name
x_j = problem_.variables[j].name
if (correlations[(i, j)] > 0):
problem_ = problem_.substitute_variables(variables={i: (j, 1)})
if (problem_.status == QuadraticProgram.Status.INFEASIBLE):
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, 1)
else:
constant = problem_.objective.constant
constant += problem_.objective.linear[i]
constant += problem_.objective.quadratic[(i, i)]
problem_.objective.constant = constant
for k in range(problem_.get_num_vars()):
coeff = problem_.objective.linear[k]
if (k == i):
coeff += (2 * problem_.objective.quadratic[(i, k)])
else:
coeff += problem_.objective.quadratic[(i, k)]
if (np.abs(coeff) > 1e-10):
problem_.objective.linear[k] = coeff
else:
problem_.objective.linear[k] = 0
problem_ = problem_.substitute_variables(variables={i: (j, (- 1))})
if (problem_.status == QuadraticProgram.Status.INFEASIBLE):
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, (- 1))
result = self._min_num_vars_optimizer.solve(problem_)
var_values = {}
for (i, x) in enumerate(problem_.variables):
var_values[x.name] = result.x[i]
def find_value(x, replacements, var_values):
if (x in var_values):
return var_values[x]
elif (x in replacements):
(y, sgn) = replacements[x]
value = find_value(y, replacements, var_values)
var_values[x] = (value if (sgn == 1) else (1 - value))
return var_values[x]
else:
raise QiskitOptimizationError('Invalid values!')
for x_i in problem_ref.variables:
if (x_i.name not in var_values):
find_value(x_i.name, replacements, var_values)
history = (min_eigen_results, (None if (self._history == IntermediateResult.NO_ITERATIONS) else result))
x_v = [var_values[x_aux.name] for x_aux in problem_ref.variables]
fval = result.fval
result = OptimizationResult(x=x_v, fval=fval, variables=problem_ref.variables)
result = self._qubo_converter.interpret(result)
return RecursiveMinimumEigenOptimizationResult(x=result.x, fval=result.fval, variables=result.variables, replacements=replacements, history=history)
| 8,254,324,393,457,410,000
|
Tries to solve the given problem using the recursive optimizer.
Runs the optimizer to try to solve the optimization problem.
Args:
problem: The problem to be solved.
Returns:
The result of the optimizer applied to the problem.
Raises:
QiskitOptimizationError: Incompatible problem.
QiskitOptimizationError: Infeasible due to variable substitution
|
qiskit/optimization/algorithms/recursive_minimum_eigen_optimizer.py
|
solve
|
Cristian-Malinescu/qiskit-aqua
|
python
|
def solve(self, problem: QuadraticProgram) -> OptimizationResult:
'Tries to solve the given problem using the recursive optimizer.\n\n Runs the optimizer to try to solve the optimization problem.\n\n Args:\n problem: The problem to be solved.\n\n Returns:\n The result of the optimizer applied to the problem.\n\n Raises:\n QiskitOptimizationError: Incompatible problem.\n QiskitOptimizationError: Infeasible due to variable substitution\n '
self._verify_compatibility(problem)
problem_ = self._qubo_converter.convert(problem)
problem_ref = deepcopy(problem_)
replacements = {}
min_eigen_results = []
while (problem_.get_num_vars() > self._min_num_vars):
res = self._min_eigen_optimizer.solve(problem_)
if (self._history == IntermediateResult.ALL_ITERATIONS):
min_eigen_results.append(res)
correlations = res.get_correlations()
(i, j) = self._find_strongest_correlation(correlations)
x_i = problem_.variables[i].name
x_j = problem_.variables[j].name
if (correlations[(i, j)] > 0):
problem_ = problem_.substitute_variables(variables={i: (j, 1)})
if (problem_.status == QuadraticProgram.Status.INFEASIBLE):
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, 1)
else:
constant = problem_.objective.constant
constant += problem_.objective.linear[i]
constant += problem_.objective.quadratic[(i, i)]
problem_.objective.constant = constant
for k in range(problem_.get_num_vars()):
coeff = problem_.objective.linear[k]
if (k == i):
coeff += (2 * problem_.objective.quadratic[(i, k)])
else:
coeff += problem_.objective.quadratic[(i, k)]
if (np.abs(coeff) > 1e-10):
problem_.objective.linear[k] = coeff
else:
problem_.objective.linear[k] = 0
problem_ = problem_.substitute_variables(variables={i: (j, (- 1))})
if (problem_.status == QuadraticProgram.Status.INFEASIBLE):
raise QiskitOptimizationError('Infeasible due to variable substitution')
replacements[x_i] = (x_j, (- 1))
result = self._min_num_vars_optimizer.solve(problem_)
var_values = {}
for (i, x) in enumerate(problem_.variables):
var_values[x.name] = result.x[i]
def find_value(x, replacements, var_values):
if (x in var_values):
return var_values[x]
elif (x in replacements):
(y, sgn) = replacements[x]
value = find_value(y, replacements, var_values)
var_values[x] = (value if (sgn == 1) else (1 - value))
return var_values[x]
else:
raise QiskitOptimizationError('Invalid values!')
for x_i in problem_ref.variables:
if (x_i.name not in var_values):
find_value(x_i.name, replacements, var_values)
history = (min_eigen_results, (None if (self._history == IntermediateResult.NO_ITERATIONS) else result))
x_v = [var_values[x_aux.name] for x_aux in problem_ref.variables]
fval = result.fval
result = OptimizationResult(x=x_v, fval=fval, variables=problem_ref.variables)
result = self._qubo_converter.interpret(result)
return RecursiveMinimumEigenOptimizationResult(x=result.x, fval=result.fval, variables=result.variables, replacements=replacements, history=history)
|
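To make the replacement bookkeeping in solve concrete, here is a standalone rerun of its find_value recursion; the variable names and values below are invented for illustration.

# Standalone illustration of the replacement resolution used in solve().
# replacements maps an eliminated variable to (surviving variable, sign);
# sign +1 copies the survivor's binary value, sign -1 flips it.
replacements = {'x0': ('x2', 1), 'x1': ('x2', -1)}
var_values = {'x2': 1}  # value found for the reduced one-variable problem

def find_value(x, replacements, var_values):
    if x in var_values:
        return var_values[x]
    y, sgn = replacements[x]
    value = find_value(y, replacements, var_values)
    var_values[x] = value if sgn == 1 else 1 - value
    return var_values[x]

for name in ('x0', 'x1', 'x2'):
    print(name, find_value(name, replacements, var_values))
# prints 1 for x0 (correlated with x2), 0 for x1 (anti-correlated), 1 for x2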
def generateEphemeris(orbits, observers, backend='MJOLNIR', backend_kwargs={}, test_orbit=None, threads=Config.NUM_THREADS, chunk_size=1):
"\n Generate ephemeris for the orbits and the given observatories. \n \n Parameters\n ----------\n orbits : `~numpy.ndarray` (N, 6)\n Orbits for which to generate ephemeris. If backend is 'THOR', then these orbits must be expressed\n as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be \n expressed in keplerian, cometary or cartesian elements.\n observers : dict\n A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values. \n Or a data frame with observatory codes, observation times (in UTC), and the observer's heliocentric ecliptic state.\n The expected data frame columns are obs_x, obs_y, obs_y and optionally the velocity columns obs_vx, obs_vy, obs_vz.\n If no velocities are not correctly given, then sky-plane velocities will all be zero.\n (See: `~thor.observatories.getObserverState`)\n backend : {'MJOLNIR', 'PYOORB'}, optional\n Which backend to use. \n backend_kwargs : dict, optional\n Settings and additional parameters to pass to selected \n backend.\n\n Returns\n -------\n ephemeris : `~pandas.DataFrame` (N x M, 21) or (N x M, 18)\n A DataFrame containing the generated ephemeris.\n "
if (backend == 'MJOLNIR'):
backend = MJOLNIR(**backend_kwargs)
elif (backend == 'PYOORB'):
backend = PYOORB(**backend_kwargs)
elif (backend == 'FINDORB'):
backend = FINDORB(**backend_kwargs)
elif isinstance(backend, Backend):
backend = backend
if (len(backend_kwargs) > 0):
            warnings.warn('backend_kwargs will be ignored since an instantiated backend class has been given.')
else:
err = "backend should be one of 'MJOLNIR', 'PYOORB', 'FINDORB' or an instantiated Backend class"
raise ValueError(err)
ephemeris = backend.generateEphemeris(orbits, observers, test_orbit=test_orbit, threads=threads, chunk_size=chunk_size)
ephemeris.sort_values(by=['orbit_id', 'observatory_code', 'mjd_utc'], inplace=True)
ephemeris.reset_index(inplace=True, drop=True)
return ephemeris
| 7,057,143,526,735,753,000
|
Generate ephemeris for the orbits and the given observatories.
Parameters
----------
orbits : `~numpy.ndarray` (N, 6)
Orbits for which to generate ephemeris. If backend is 'MJOLNIR', then these orbits must be expressed
as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be
expressed in keplerian, cometary or cartesian elements.
observers : dict
A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values.
Or a data frame with observatory codes, observation times (in UTC), and the observer's heliocentric ecliptic state.
The expected data frame columns are obs_x, obs_y, obs_z and optionally the velocity columns obs_vx, obs_vy, obs_vz.
If velocities are not correctly given, then sky-plane velocities will all be zero.
(See: `~thor.observatories.getObserverState`)
backend : {'MJOLNIR', 'PYOORB', 'FINDORB'}, optional
Which backend to use.
backend_kwargs : dict, optional
Settings and additional parameters to pass to selected
backend.
Returns
-------
ephemeris : `~pandas.DataFrame` (N x M, 21) or (N x M, 18)
A DataFrame containing the generated ephemeris.
|
thor/orbits/ephemeris.py
|
generateEphemeris
|
B612-Asteroid-Institute/thor
|
python
|
def generateEphemeris(orbits, observers, backend='MJOLNIR', backend_kwargs={}, test_orbit=None, threads=Config.NUM_THREADS, chunk_size=1):
"\n Generate ephemeris for the orbits and the given observatories. \n \n Parameters\n ----------\n orbits : `~numpy.ndarray` (N, 6)\n Orbits for which to generate ephemeris. If backend is 'THOR', then these orbits must be expressed\n as heliocentric ecliptic cartesian elements. If backend is 'PYOORB' orbits may be \n expressed in keplerian, cometary or cartesian elements.\n observers : dict\n A dictionary with observatory codes as keys and observation_times (`~astropy.time.core.Time`) as values. \n Or a data frame with observatory codes, observation times (in UTC), and the observer's heliocentric ecliptic state.\n The expected data frame columns are obs_x, obs_y, obs_y and optionally the velocity columns obs_vx, obs_vy, obs_vz.\n If no velocities are not correctly given, then sky-plane velocities will all be zero.\n (See: `~thor.observatories.getObserverState`)\n backend : {'MJOLNIR', 'PYOORB'}, optional\n Which backend to use. \n backend_kwargs : dict, optional\n Settings and additional parameters to pass to selected \n backend.\n\n Returns\n -------\n ephemeris : `~pandas.DataFrame` (N x M, 21) or (N x M, 18)\n A DataFrame containing the generated ephemeris.\n "
if (backend == 'MJOLNIR'):
backend = MJOLNIR(**backend_kwargs)
elif (backend == 'PYOORB'):
backend = PYOORB(**backend_kwargs)
elif (backend == 'FINDORB'):
backend = FINDORB(**backend_kwargs)
elif isinstance(backend, Backend):
backend = backend
if (len(backend_kwargs) > 0):
            warnings.warn('backend_kwargs will be ignored since an instantiated backend class has been given.')
else:
err = "backend should be one of 'MJOLNIR', 'PYOORB', 'FINDORB' or an instantiated Backend class"
raise ValueError(err)
ephemeris = backend.generateEphemeris(orbits, observers, test_orbit=test_orbit, threads=threads, chunk_size=chunk_size)
ephemeris.sort_values(by=['orbit_id', 'observatory_code', 'mjd_utc'], inplace=True)
ephemeris.reset_index(inplace=True, drop=True)
return ephemeris
|
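A hedged call sketch for generateEphemeris using the dict form of observers. The import path, the observatory code 'I11', the epochs, and the random orbital states are placeholder assumptions.

# Hedged sketch: ephemeris generation for two placeholder orbits.
import numpy as np
from astropy.time import Time
from thor.orbits import generateEphemeris  # assumed import path

orbits = np.random.rand(2, 6)  # (N, 6) heliocentric ecliptic cartesian states (placeholder)
observers = {'I11': Time(np.arange(59000, 59010), format='mjd', scale='utc')}
eph = generateEphemeris(orbits, observers, backend='PYOORB')
print(eph[['orbit_id', 'observatory_code', 'mjd_utc']].head())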
def get_data(self, verbose: bool):
'\n I: get data\n -----------\n :param verbose: [bool]\n :return: -\n '
url_base = 'https://raw.githubusercontent.com/patverga/torch-ner-nlp-from-scratch/master/data/conll2003/'
targets = ['eng.train', 'eng.testa', 'eng.testb']
for target in targets:
target_file = join(self.dataset_path, target)
if isfile(target_file):
if verbose:
print(f'.. file at {target_file} already exists')
else:
url = (url_base + target)
myfile = requests.get(url, allow_redirects=True)
open(target_file, 'wb').write(myfile.content)
if verbose:
print(f'.. file fetched from {url} and saved at {target_file}')
| -3,496,901,125,002,046,000
|
I: get data
-----------
:param verbose: [bool]
:return: -
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
get_data
|
af-ai-center/nerblackbox
|
python
|
def get_data(self, verbose: bool):
'\n I: get data\n -----------\n :param verbose: [bool]\n :return: -\n '
url_base = 'https://raw.githubusercontent.com/patverga/torch-ner-nlp-from-scratch/master/data/conll2003/'
targets = ['eng.train', 'eng.testa', 'eng.testb']
for target in targets:
target_file = join(self.dataset_path, target)
if isfile(target_file):
if verbose:
print(f'.. file at {target_file} already exists')
else:
url = (url_base + target)
myfile = requests.get(url, allow_redirects=True)
open(target_file, 'wb').write(myfile.content)
if verbose:
print(f'.. file fetched from {url} and saved at {target_file}')
|
def create_ner_tag_mapping(self):
'\n II: customize ner_training tag mapping if wanted\n -------------------------------------\n :return: ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data\n '
return dict()
| -1,371,010,697,111,993,300
|
II: customize ner_training tag mapping if wanted
-------------------------------------
:return: ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
create_ner_tag_mapping
|
af-ai-center/nerblackbox
|
python
|
def create_ner_tag_mapping(self):
'\n II: customize ner_training tag mapping if wanted\n -------------------------------------\n :return: ner_tag_mapping: [dict] w/ keys = tags in original data, values = tags in formatted data\n '
return dict()
|
def format_data(self):
'\n III: format data\n ----------------\n :return: -\n '
for phase in ['train', 'val', 'test']:
rows = self._read_original_file(phase)
self._write_formatted_csv(phase, rows)
| 6,290,795,515,144,693,000
|
III: format data
----------------
:return: -
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
format_data
|
af-ai-center/nerblackbox
|
python
|
def format_data(self):
'\n III: format data\n ----------------\n :return: -\n '
for phase in ['train', 'val', 'test']:
rows = self._read_original_file(phase)
self._write_formatted_csv(phase, rows)
|
def resplit_data(self, val_fraction: float):
'\n IV: resplit data\n ----------------\n :param val_fraction: [float]\n :return: -\n '
df_train = self._read_formatted_csvs(['train'])
self._write_final_csv('train', df_train)
df_val = self._read_formatted_csvs(['val'])
self._write_final_csv('val', df_val)
df_test = self._read_formatted_csvs(['test'])
self._write_final_csv('test', df_test)
| -2,747,583,459,563,875,300
|
IV: resplit data
----------------
:param val_fraction: [float]
:return: -
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
resplit_data
|
af-ai-center/nerblackbox
|
python
|
def resplit_data(self, val_fraction: float):
'\n IV: resplit data\n ----------------\n :param val_fraction: [float]\n :return: -\n '
df_train = self._read_formatted_csvs(['train'])
self._write_final_csv('train', df_train)
df_val = self._read_formatted_csvs(['val'])
self._write_final_csv('val', df_val)
df_test = self._read_formatted_csvs(['test'])
self._write_final_csv('test', df_test)
|
def _read_original_file(self, phase):
"\n III: format data\n ---------------------------------------------\n :param phase: [str] 'train' or 'test'\n :return: _rows: [list] of [list] of [str], e.g. [[], ['Inger', 'PER'], ['säger', '0'], ..]\n "
file_name = {'train': 'eng.train', 'val': 'eng.testa', 'test': 'eng.testb'}
file_path_original = join(self.dataset_path, file_name[phase])
_rows = list()
if os.path.isfile(file_path_original):
with open(file_path_original) as f:
for (i, row) in enumerate(f.readlines()):
_rows.append(row.strip().split())
print(f'''
> read {file_path_original}''')
_rows = [([row[0], row[(- 1)]] if ((len(row) == 4) and (row[0] != '-DOCSTART-')) else list()) for row in _rows]
return _rows
| 6,818,160,006,964,992,000
|
III: format data
---------------------------------------------
:param phase: [str] 'train', 'val' or 'test'
:return: _rows: [list] of [list] of [str], e.g. [[], ['Inger', 'PER'], ['säger', 'O'], ..]
|
nerblackbox/modules/datasets/formatter/conll2003_formatter.py
|
_read_original_file
|
af-ai-center/nerblackbox
|
python
|
def _read_original_file(self, phase):
"\n III: format data\n ---------------------------------------------\n :param phase: [str] 'train' or 'test'\n :return: _rows: [list] of [list] of [str], e.g. [[], ['Inger', 'PER'], ['säger', '0'], ..]\n "
file_name = {'train': 'eng.train', 'val': 'eng.testa', 'test': 'eng.testb'}
file_path_original = join(self.dataset_path, file_name[phase])
_rows = list()
if os.path.isfile(file_path_original):
with open(file_path_original) as f:
for (i, row) in enumerate(f.readlines()):
_rows.append(row.strip().split())
        print(f'''
 > read {file_path_original}''')
_rows = [([row[0], row[(- 1)]] if ((len(row) == 4) and (row[0] != '-DOCSTART-')) else list()) for row in _rows]
return _rows
|
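The one-line filter at the end of _read_original_file is dense; this toy rerun with made-up CoNLL-style rows shows exactly what survives it.

# Toy rerun of the filtering step in _read_original_file.
raw = [
    ['-DOCSTART-', '-X-', '-X-', 'O'],  # document marker -> dropped to empty row
    [],                                 # sentence boundary -> kept as empty row
    ['EU', 'NNP', 'B-NP', 'B-ORG'],     # 4 columns -> (token, tag)
    ['rejects', 'VBZ', 'B-VP', 'O'],
]
rows = [[r[0], r[-1]] if (len(r) == 4 and r[0] != '-DOCSTART-') else list()
        for r in raw]
print(rows)  # [[], [], ['EU', 'B-ORG'], ['rejects', 'O']]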
def main():
'Main routine\n '
print('\nTesting ADMM')
print('====================')
print('m = n : ', args.n)
print('dataset: ', args.dataset)
if (args.dataset == 'DOTmark'):
print('class : ', args.imageclass)
print('method : ', args.method)
print('====================')
(mu, nu, c) = get_params(args.n, args.dataset, args.imageclass)
start = time.time()
if (args.method == 'primal'):
ADMM_primal(mu, nu, c, args.iters, args.rho, args.alpha)
elif (args.method == 'dual'):
ADMM_dual(mu, nu, c, args.iters, args.rho, args.alpha)
t = (time.time() - start)
print(('time = %.5e' % t))
| 6,122,039,336,531,661,000
|
Main routine
|
test_ADMM.py
|
main
|
CrazyIvanPro/Optimal_Transport
|
python
|
def main():
'\n '
print('\nTesting ADMM')
print('====================')
print('m = n : ', args.n)
print('dataset: ', args.dataset)
if (args.dataset == 'DOTmark'):
print('class : ', args.imageclass)
print('method : ', args.method)
print('====================')
(mu, nu, c) = get_params(args.n, args.dataset, args.imageclass)
start = time.time()
if (args.method == 'primal'):
ADMM_primal(mu, nu, c, args.iters, args.rho, args.alpha)
elif (args.method == 'dual'):
ADMM_dual(mu, nu, c, args.iters, args.rho, args.alpha)
t = (time.time() - start)
print(('time = %.5e' % t))
|
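A hedged sketch of driving the two solver entry points directly, bypassing argparse. The grid size, DOTmark class, and ADMM hyper-parameters are placeholders, and the import assumes the functions are reachable from test_ADMM.py.

# Hedged sketch: calling the solvers without the CLI (placeholder arguments).
from test_ADMM import get_params, ADMM_primal, ADMM_dual  # assumed importable

mu, nu, c = get_params(32, 'DOTmark', 'ClassicImages')  # n, dataset, imageclass
ADMM_primal(mu, nu, c, 1000, 1.0, 1.618)  # iters, rho, alpha are illustrative
ADMM_dual(mu, nu, c, 1000, 1.0, 1.618)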
def create_security_token(api_key, stage):
    '\n Generates a security token for SBT API access.\n\n Args:\n api_key (string): API_KEY value provided by solutionsbytext\n stage (string): STAGE values (test or ui)\n\n Returns:\n string: SecurityToken returned by LoginAPIService\n\n Raises:\n CustomException: Raised when the token request returns an error.\n\n '
url = ''.join([_base_url.format(stage), 'LoginAPIService.svc/AuthenticateAPIKey?', parse.urlencode({'APIKey': api_key})])
response_data = json.loads(requests.get(url).text)
if (response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1402):
raise CustomException('Error in generating security key.')
if (response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1401):
        raise CustomException('SecurityToken generation failed.')
return response_data['AuthenticateAPIKeyResult'].get('SecurityToken')
| 6,539,444,712,881,635,000
|
Generates a security token for SBT API access.
Args:
api_key (string): API_KEY value provided by solutionsbytext
stage (string): STAGE values (test or ui)
Returns:
string: SecurityToken returned by LoginAPIService
Raises:
CustomException: Raised when the token request returns an error.
|
solutions_by_text/sbt_token_generator.py
|
create_security_token
|
sijanonly/sbt-python-client
|
python
|
def create_security_token(api_key, stage):
    '\n Generates a security token for SBT API access.\n\n Args:\n api_key (string): API_KEY value provided by solutionsbytext\n stage (string): STAGE values (test or ui)\n\n Returns:\n string: SecurityToken returned by LoginAPIService\n\n Raises:\n CustomException: Raised when the token request returns an error.\n\n '
    url = ''.join([_base_url.format(stage), 'LoginAPIService.svc/AuthenticateAPIKey?', parse.urlencode({'APIKey': api_key})])
response_data = json.loads(requests.get(url).text)
if (response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1402):
raise CustomException('Error in generating security key.')
if (response_data['AuthenticateAPIKeyResult'].get('ErrorCode') == 1401):
        raise CustomException('SecurityToken generation failed.')
return response_data['AuthenticateAPIKeyResult'].get('SecurityToken')
|
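A hedged usage sketch for create_security_token; the API key is a placeholder and the import path follows the file path above.

# Hedged sketch: fetching a security token before other SBT API calls.
from solutions_by_text.sbt_token_generator import create_security_token  # assumed import path

token = create_security_token(api_key='YOUR-API-KEY', stage='test')  # 'test' or 'ui'
print('SecurityToken:', token)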
def testChild(self):
    'Test Child\n This will fail because additional_properties_type is None in ChildAllOf; it must be defined as any type\n to allow the property radio_waves, which is not defined in ChildAllOf but in Grandparent\n '
radio_waves = True
tele_vision = True
inter_net = True
with self.assertRaises(petstore_api.exceptions.ApiValueError):
child = Child(radio_waves=radio_waves, tele_vision=tele_vision, inter_net=inter_net)
| 7,167,641,836,760,918,000
|
Test Child
This will fail because additional_properties_type is None in ChildAllOf; it must be defined as any type
to allow the property radio_waves, which is not defined in ChildAllOf but in Grandparent
|
samples/client/petstore/python_disallowAdditionalPropertiesIfNotPresent/test/test_child.py
|
testChild
|
0x0c/openapi-generator
|
python
|
def testChild(self):
    'Test Child\n This will fail because additional_properties_type is None in ChildAllOf; it must be defined as any type\n to allow the property radio_waves, which is not defined in ChildAllOf but in Grandparent\n '
radio_waves = True
tele_vision = True
inter_net = True
with self.assertRaises(petstore_api.exceptions.ApiValueError):
child = Child(radio_waves=radio_waves, tele_vision=tele_vision, inter_net=inter_net)
|
def attach(self, engine: Engine) -> None:
'\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n '
if (self._name is None):
self.logger = engine.logger
engine.add_event_handler(Events.STARTED, self)
| 7,773,029,528,368,912,000
|
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
|
monai/handlers/checkpoint_loader.py
|
attach
|
BRAINSia/MONAI
|
python
|
def attach(self, engine: Engine) -> None:
'\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n '
if (self._name is None):
self.logger = engine.logger
engine.add_event_handler(Events.STARTED, self)
|
def __call__(self, engine: Engine) -> None:
'\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n '
checkpoint = torch.load(self.load_path, map_location=self.map_location)
if (len(self.load_dict) == 1):
key = list(self.load_dict.keys())[0]
if (not (key in checkpoint)):
checkpoint = {key: checkpoint}
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
self.logger.info(f'Restored all variables from {self.load_path}')
| 8,321,460,817,494,644,000
|
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
|
monai/handlers/checkpoint_loader.py
|
__call__
|
BRAINSia/MONAI
|
python
|
def __call__(self, engine: Engine) -> None:
'\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n '
checkpoint = torch.load(self.load_path, map_location=self.map_location)
if (len(self.load_dict) == 1):
key = list(self.load_dict.keys())[0]
if (not (key in checkpoint)):
checkpoint = {key: checkpoint}
Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)
self.logger.info(f'Restored all variables from {self.load_path}')
|
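A hedged sketch of wiring the handler above into an Ignite engine. The constructor arguments follow the load_path/load_dict fields used in attach and __call__; the network, checkpoint path, and dummy process function are illustrative.

# Hedged sketch: restoring a network at Events.STARTED via CheckpointLoader.
import torch
from ignite.engine import Engine
from monai.handlers import CheckpointLoader

net = torch.nn.Linear(4, 2)
trainer = Engine(lambda engine, batch: None)  # dummy process function
loader = CheckpointLoader(load_path='./net.pt', load_dict={'net': net})
loader.attach(trainer)  # weights are restored when trainer.run() starts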
def __init__(self, xml_file=None):
'\n Given a well formed XML file (xml_file), read it and turn it into\n a big string.\n '
self.__root = None
self.__name = ''
self.__namespace = None
self.__include_header_files = []
self.__includes = []
self.__include_enum_files = []
self.__include_array_files = []
self.__comment = ''
self.__members = []
self.__type_id = None
if (os.path.isfile(xml_file) == False):
stri = ('ERROR: Could not find specified XML file %s.' % xml_file)
raise OSError(stri)
fd = open(xml_file)
xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
self.__config = ConfigManager.ConfigManager.getInstance()
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
rng_file = self.__config.get('schema', element_tree.getroot().tag.lower()).lstrip('/')
try:
rng_file = locate_build_root(rng_file)
except (BuildRootMissingException, BuildRootCollisionException) as bre:
stri = 'ERROR: Could not find specified RNG file {}. {}'.format(rng_file, str(bre))
raise OSError(stri)
file_handler = open(rng_file)
relax_parsed = etree.parse(file_handler)
file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
if (not relax_compiled.validate(element_tree)):
msg = 'XML file {} is not valid according to schema {}.'.format(xml_file, rng_file)
raise FprimeXmlException(msg)
serializable = element_tree.getroot()
if (serializable.tag != 'serializable'):
PRINT.info(('%s is not a serializable definition file' % xml_file))
sys.exit((- 1))
print(('Parsing Serializable %s' % serializable.attrib['name']))
self.__name = serializable.attrib['name']
if ('namespace' in serializable.attrib):
self.__namespace = serializable.attrib['namespace']
else:
self.__namespace = None
if ('typeid' in serializable.attrib):
self.__type_id = serializable.attrib['typeid']
else:
self.__type_id = None
for serializable_tag in serializable:
if (serializable_tag.tag == 'comment'):
self.__comment = serializable_tag.text.strip()
elif (serializable_tag.tag == 'include_header'):
self.__include_header_files.append(serializable_tag.text)
elif (serializable_tag.tag == 'import_serializable_type'):
self.__includes.append(serializable_tag.text)
elif (serializable_tag.tag == 'import_enum_type'):
self.__include_enum_files.append(serializable_tag.text)
elif (serializable_tag.tag == 'import_array_type'):
self.__include_array_files.append(serializable_tag.text)
elif (serializable_tag.tag == 'members'):
for member in serializable_tag:
if (member.tag != 'member'):
PRINT.info(('%s: Invalid tag %s in serializable member definition' % (xml_file, member.tag)))
sys.exit((- 1))
n = member.attrib['name']
t = member.attrib['type']
if ('size' in list(member.attrib.keys())):
if (t == 'ENUM'):
PRINT.info(('%s: Member %s: arrays of enums not supported yet!' % (xml_file, n)))
sys.exit((- 1))
s = member.attrib['size']
if (not s.isdigit()):
PRINT.info('{}: Member {}: size must be a number'.format(xml_file, n))
sys.exit((- 1))
else:
s = None
if ('format' in list(member.attrib.keys())):
f = member.attrib['format']
elif (t in list(format_dictionary.keys())):
f = format_dictionary[t]
else:
f = '%s'
if (t == 'string'):
if (s is None):
PRINT.info(('%s: member %s string must specify size tag' % (xml_file, member.tag)))
sys.exit((- 1))
if ('comment' in list(member.attrib.keys())):
c = member.attrib['comment']
else:
c = None
for member_tag in member:
if ((member_tag.tag == 'enum') and (t == 'ENUM')):
en = member_tag.attrib['name']
enum_members = []
for mem in member_tag:
mn = mem.attrib['name']
if ('value' in list(mem.attrib.keys())):
v = mem.attrib['value']
else:
v = None
if ('comment' in list(mem.attrib.keys())):
mc = mem.attrib['comment'].strip()
else:
mc = None
enum_members.append((mn, v, mc))
t = ((t, en), enum_members)
else:
PRINT.info(('%s: Invalid member tag %s in serializable member %s' % (xml_file, member_tag.tag, n)))
sys.exit((- 1))
self.__members.append((n, t, s, f, c))
if (not ('typeid' in serializable.attrib)):
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = ('0x' + n.upper()[(- 8):])
| 8,446,292,367,681,806,000
|
Given a well formed XML file (xml_file), read it and turn it into
a big string.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
__init__
|
1Blackdiamondsc/fprime
|
python
|
def __init__(self, xml_file=None):
'\n Given a well formed XML file (xml_file), read it and turn it into\n a big string.\n '
self.__root = None
    self.__name = ''
self.__namespace = None
self.__include_header_files = []
self.__includes = []
self.__include_enum_files = []
self.__include_array_files = []
    self.__comment = ''
self.__members = []
self.__type_id = None
if (os.path.isfile(xml_file) == False):
stri = ('ERROR: Could not find specified XML file %s.' % xml_file)
raise OSError(stri)
fd = open(xml_file)
xml_file = os.path.basename(xml_file)
self.__xml_filename = xml_file
self.__config = ConfigManager.ConfigManager.getInstance()
xml_parser = etree.XMLParser(remove_comments=True)
element_tree = etree.parse(fd, parser=xml_parser)
rng_file = self.__config.get('schema', element_tree.getroot().tag.lower()).lstrip('/')
try:
rng_file = locate_build_root(rng_file)
except (BuildRootMissingException, BuildRootCollisionException) as bre:
stri = 'ERROR: Could not find specified RNG file {}. {}'.format(rng_file, str(bre))
raise OSError(stri)
file_handler = open(rng_file)
relax_parsed = etree.parse(file_handler)
file_handler.close()
relax_compiled = etree.RelaxNG(relax_parsed)
if (not relax_compiled.validate(element_tree)):
msg = 'XML file {} is not valid according to schema {}.'.format(xml_file, rng_file)
raise FprimeXmlException(msg)
serializable = element_tree.getroot()
if (serializable.tag != 'serializable'):
PRINT.info(('%s is not a serializable definition file' % xml_file))
sys.exit((- 1))
print(('Parsing Serializable %s' % serializable.attrib['name']))
self.__name = serializable.attrib['name']
if ('namespace' in serializable.attrib):
self.__namespace = serializable.attrib['namespace']
else:
self.__namespace = None
if ('typeid' in serializable.attrib):
self.__type_id = serializable.attrib['typeid']
else:
self.__type_id = None
for serializable_tag in serializable:
if (serializable_tag.tag == 'comment'):
self.__comment = serializable_tag.text.strip()
elif (serializable_tag.tag == 'include_header'):
self.__include_header_files.append(serializable_tag.text)
elif (serializable_tag.tag == 'import_serializable_type'):
self.__includes.append(serializable_tag.text)
elif (serializable_tag.tag == 'import_enum_type'):
self.__include_enum_files.append(serializable_tag.text)
elif (serializable_tag.tag == 'import_array_type'):
self.__include_array_files.append(serializable_tag.text)
elif (serializable_tag.tag == 'members'):
for member in serializable_tag:
if (member.tag != 'member'):
PRINT.info(('%s: Invalid tag %s in serializable member definition' % (xml_file, member.tag)))
sys.exit((- 1))
n = member.attrib['name']
t = member.attrib['type']
if ('size' in list(member.attrib.keys())):
if (t == 'ENUM'):
PRINT.info(('%s: Member %s: arrays of enums not supported yet!' % (xml_file, n)))
sys.exit((- 1))
s = member.attrib['size']
if (not s.isdigit()):
PRINT.info('{}: Member {}: size must be a number'.format(xml_file, n))
sys.exit((- 1))
else:
s = None
if ('format' in list(member.attrib.keys())):
f = member.attrib['format']
elif (t in list(format_dictionary.keys())):
f = format_dictionary[t]
else:
f = '%s'
if (t == 'string'):
if (s is None):
PRINT.info(('%s: member %s string must specify size tag' % (xml_file, member.tag)))
sys.exit((- 1))
if ('comment' in list(member.attrib.keys())):
c = member.attrib['comment']
else:
c = None
for member_tag in member:
if ((member_tag.tag == 'enum') and (t == 'ENUM')):
en = member_tag.attrib['name']
enum_members = []
for mem in member_tag:
mn = mem.attrib['name']
if ('value' in list(mem.attrib.keys())):
v = mem.attrib['value']
else:
v = None
if ('comment' in list(mem.attrib.keys())):
mc = mem.attrib['comment'].strip()
else:
mc = None
enum_members.append((mn, v, mc))
t = ((t, en), enum_members)
else:
PRINT.info(('%s: Invalid member tag %s in serializable member %s' % (xml_file, member_tag.tag, n)))
sys.exit((- 1))
self.__members.append((n, t, s, f, c))
if (not ('typeid' in serializable.attrib)):
s = etree.tostring(element_tree.getroot())
h = hashlib.sha256(s)
n = h.hexdigest()
self.__type_id = ('0x' + n.upper()[(- 8):])
|
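A hedged usage sketch for the parser above. The class name is assumed to mirror its module, the XML path is a placeholder, and locating the RNG schema requires a valid BUILD_ROOT setup.

# Hedged sketch: parsing a serializable definition and reading it back.
from fprime_ac.parsers.XmlSerializeParser import XmlSerializeParser  # assumed class name

parser = XmlSerializeParser('MySerializableAi.xml')  # placeholder file
print(parser.get_xml_filename(), parser.get_typeid())
for name, mtype, size, fmt, comment in parser.get_members():
    print(name, mtype, size, fmt, comment)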
def get_typeid(self):
'\n Return a generated type ID from contents of XML file.\n '
return self.__type_id
| 5,982,048,283,816,331,000
|
Return a generated type ID from contents of XML file.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_typeid
|
1Blackdiamondsc/fprime
|
python
|
def get_typeid(self):
'\n \n '
return self.__type_id
|
def get_xml_filename(self):
'\n Return the original XML filename parsed.\n '
return self.__xml_filename
| -5,144,559,668,066,074,000
|
Return the original XML filename parsed.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_xml_filename
|
1Blackdiamondsc/fprime
|
python
|
def get_xml_filename(self):
'\n \n '
return self.__xml_filename
|
def get_include_header_files(self):
'\n Return a list of all imported Port type XML files.\n '
return self.__include_header_files
| 17,913,104,661,121,070
|
Return a list of all imported Port type XML files.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_include_header_files
|
1Blackdiamondsc/fprime
|
python
|
def get_include_header_files(self):
'\n \n '
return self.__include_header_files
|
def get_includes(self):
'\n Returns a list of all imported XML serializable files.\n '
return self.__includes
| 8,429,568,674,755,246,000
|
Returns a list of all imported XML serializable files.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_includes
|
1Blackdiamondsc/fprime
|
python
|
def get_includes(self):
'\n \n '
return self.__includes
|
def get_include_enums(self):
'\n Returns a list of all imported XML enum files.\n '
return self.__include_enum_files
| -8,368,547,394,618,953,000
|
Returns a list of all imported XML enum files.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_include_enums
|
1Blackdiamondsc/fprime
|
python
|
def get_include_enums(self):
'\n \n '
return self.__include_enum_files
|
def get_include_arrays(self):
'\n Returns a list of all imported XML array files.\n '
return self.__include_array_files
| 1,281,047,629,684,097,800
|
Returns a list of all imported XML array files.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_include_arrays
|
1Blackdiamondsc/fprime
|
python
|
def get_include_arrays(self):
'\n \n '
return self.__include_array_files
|
def get_comment(self):
'\n Return text block string of comment for serializable class.\n '
return self.__comment
| 502,717,888,823,918,800
|
Return text block string of comment for serializable class.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_comment
|
1Blackdiamondsc/fprime
|
python
|
def get_comment(self):
'\n \n '
return self.__comment
|
def get_members(self):
'\n Returns a list of member (name, type, optional size, optional format, optional comment) needed.\n '
return self.__members
| -6,281,081,859,112,102,000
|
Returns a list of member (name, type, optional size, optional format, optional comment) needed.
|
Autocoders/Python/src/fprime_ac/parsers/XmlSerializeParser.py
|
get_members
|
1Blackdiamondsc/fprime
|
python
|
def get_members(self):
'\n \n '
return self.__members
|
def eval_metric(label, approx, metric, weight=None, group_id=None, thread_count=(- 1)):
    '\n Evaluate metrics with raw approxes and labels.\n\n Parameters\n ----------\n label : list or numpy.arrays or pandas.DataFrame or pandas.Series\n Object labels.\n\n approx : list or numpy.arrays or pandas.DataFrame or pandas.Series\n Object approxes.\n\n metric : string\n The evaluation metric.\n\n weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Object weights.\n\n group_id : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Object group ids.\n\n thread_count : int, optional (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n metric results : list with metric values.\n '
if (len(approx) == 0):
approx = [[]]
if (not isinstance(approx[0], ARRAY_TYPES)):
approx = [approx]
return _eval_metric_util(label, approx, metric, weight, group_id, thread_count)
| 3,825,111,144,821,981,000
|
Evaluate metrics with raw approxes and labels.
Parameters
----------
label : list or numpy.arrays or pandas.DataFrame or pandas.Series
Object labels.
approx : list or numpy.arrays or pandas.DataFrame or pandas.Series
Object approxes.
metric : string
The evaluation metric.
weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)
Object weights.
group_id : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)
Object group ids.
thread_count : int, optional (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
metric results : list with metric values.
|
catboost/python-package/catboost/utils.py
|
eval_metric
|
infected-mushroom/catboost
|
python
|
def eval_metric(label, approx, metric, weight=None, group_id=None, thread_count=(- 1)):
    '\n Evaluate metrics with raw approxes and labels.\n\n Parameters\n ----------\n label : list or numpy.arrays or pandas.DataFrame or pandas.Series\n Object labels.\n\n approx : list or numpy.arrays or pandas.DataFrame or pandas.Series\n Object approxes.\n\n metric : string\n The evaluation metric.\n\n weight : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Object weights.\n\n group_id : list or numpy.array or pandas.DataFrame or pandas.Series, optional (default=None)\n Object group ids.\n\n thread_count : int, optional (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n metric results : list with metric values.\n '
if (len(approx) == 0):
approx = [[]]
if (not isinstance(approx[0], ARRAY_TYPES)):
approx = [approx]
return _eval_metric_util(label, approx, metric, weight, group_id, thread_count)
|
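A hedged usage sketch for eval_metric with toy labels and raw approxes.

# Hedged sketch: evaluating AUC from raw approxes (toy data).
from catboost.utils import eval_metric

labels = [0, 1, 1, 0, 1]
approx = [0.1, 0.8, 0.6, 0.4, 0.9]  # one-dimensional raw approxes
print(eval_metric(labels, approx, 'AUC'))  # -> list with one AUC value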
def get_roc_curve(model, data, thread_count=(- 1)):
'\n Build points of ROC curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of three arrays (fpr, tpr, thresholds)\n '
if (type(data) == Pool):
data = [data]
if (not isinstance(data, list)):
raise CatboostError('data must be a catboost.Pool or list of pools.')
for pool in data:
if (not isinstance(pool, Pool)):
raise CatboostError('one of data pools is not catboost.Pool')
return _get_roc_curve(model._object, data, thread_count)
| 2,717,702,817,336,356,000
|
Build points of ROC curve.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
A set of samples to build ROC curve with.
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
curve points : tuple of three arrays (fpr, tpr, thresholds)
|
catboost/python-package/catboost/utils.py
|
get_roc_curve
|
infected-mushroom/catboost
|
python
|
def get_roc_curve(model, data, thread_count=(- 1)):
'\n Build points of ROC curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of three arrays (fpr, tpr, thresholds)\n '
if (type(data) == Pool):
data = [data]
if (not isinstance(data, list)):
raise CatboostError('data must be a catboost.Pool or list of pools.')
for pool in data:
if (not isinstance(pool, Pool)):
raise CatboostError('one of data pools is not catboost.Pool')
return _get_roc_curve(model._object, data, thread_count)
|
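A sketch of get_roc_curve, again assuming catboost is installed; the training data below is a toy binary-classification Pool, not real data.

from catboost import CatBoostClassifier, Pool
from catboost.utils import get_roc_curve

pool = Pool([[0.0], [1.0], [2.0], [3.0]], label=[0, 0, 1, 1])
model = CatBoostClassifier(iterations=10, verbose=False).fit(pool)
fpr, tpr, thresholds = get_roc_curve(model, pool)   # three arrays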
def get_fpr_curve(model=None, data=None, curve=None, thread_count=(- 1)):
'\n Build points of FPR curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of two arrays (thresholds, fpr)\n '
if (curve is not None):
if (data is not None):
raise CatboostError('Only one of the parameters data and curve should be set.')
if ((not (isinstance(curve, list) or isinstance(curve, tuple))) or (len(curve) != 3)):
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
(fpr, thresholds) = (curve[0][:], curve[2][:])
else:
if ((model is None) or (data is None)):
raise CatboostError('model and data parameters should be set when curve parameter is None.')
(fpr, _, thresholds) = get_roc_curve(model, data, thread_count)
return (thresholds, fpr)
| 7,066,552,134,008,052,000
|
Build points of FPR curve.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
A set of samples to build ROC curve with.
curve : tuple of three arrays (fpr, tpr, thresholds)
ROC curve points in format of get_roc_curve returned value.
If set, data parameter must not be set.
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
curve points : tuple of two arrays (thresholds, fpr)
|
catboost/python-package/catboost/utils.py
|
get_fpr_curve
|
infected-mushroom/catboost
|
python
|
def get_fpr_curve(model=None, data=None, curve=None, thread_count=(- 1)):
'\n Build points of FPR curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of two arrays (thresholds, fpr)\n '
if (curve is not None):
if (data is not None):
raise CatboostError('Only one of the parameters data and curve should be set.')
if ((not (isinstance(curve, list) or isinstance(curve, tuple))) or (len(curve) != 3)):
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
(fpr, thresholds) = (curve[0][:], curve[2][:])
else:
if ((model is None) or (data is None)):
raise CatboostError('model and data parameters should be set when curve parameter is None.')
(fpr, _, thresholds) = get_roc_curve(model, data, thread_count)
return (thresholds, fpr)
|
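As the entry above shows, get_fpr_curve accepts either a model plus data or a precomputed ROC curve, but not both. A sketch under the same toy-data assumptions:

from catboost import CatBoostClassifier, Pool
from catboost.utils import get_roc_curve, get_fpr_curve

pool = Pool([[0.0], [1.0], [2.0], [3.0]], label=[0, 0, 1, 1])
model = CatBoostClassifier(iterations=10, verbose=False).fit(pool)
curve = get_roc_curve(model, pool)
thresholds, fpr = get_fpr_curve(curve=curve)   # reuse precomputed ROC points
# Equivalent, recomputing the ROC curve internally:
# thresholds, fpr = get_fpr_curve(model=model, data=pool)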
def get_fnr_curve(model=None, data=None, curve=None, thread_count=(- 1)):
'\n Build points of FNR curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of two arrays (thresholds, fnr)\n '
if (curve is not None):
if (data is not None):
raise CatboostError('Only one of the parameters data and curve should be set.')
if ((not (isinstance(curve, list) or isinstance(curve, tuple))) or (len(curve) != 3)):
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
(tpr, thresholds) = (curve[1], curve[2][:])
else:
if ((model is None) or (data is None)):
raise CatboostError('model and data parameters should be set when curve parameter is None.')
(_, tpr, thresholds) = get_roc_curve(model, data, thread_count)
fnr = np.array([(1 - x) for x in tpr])
return (thresholds, fnr)
| 1,644,376,199,734,561,300
|
Build points of FNR curve.
Parameters
----------
model : catboost.CatBoost
The trained model.
data : catboost.Pool or list of catboost.Pool
A set of samples to build ROC curve with.
curve : tuple of three arrays (fpr, tpr, thresholds)
ROC curve points in format of get_roc_curve returned value.
If set, data parameter must not be set.
thread_count : int (default=-1)
Number of threads to work with.
If -1, then the number of threads is set to the number of cores.
Returns
-------
curve points : tuple of two arrays (thresholds, fnr)
|
catboost/python-package/catboost/utils.py
|
get_fnr_curve
|
infected-mushroom/catboost
|
python
|
def get_fnr_curve(model=None, data=None, curve=None, thread_count=(- 1)):
'\n Build points of FNR curve.\n\n Parameters\n ----------\n model : catboost.CatBoost\n The trained model.\n\n data : catboost.Pool or list of catboost.Pool\n A set of samples to build ROC curve with.\n\n curve : tuple of three arrays (fpr, tpr, thresholds)\n ROC curve points in format of get_roc_curve returned value.\n If set, data parameter must not be set.\n\n thread_count : int (default=-1)\n Number of threads to work with.\n If -1, then the number of threads is set to the number of cores.\n\n Returns\n -------\n curve points : tuple of two arrays (thresholds, fnr)\n '
if (curve is not None):
if (data is not None):
raise CatboostError('Only one of the parameters data and curve should be set.')
if ((not (isinstance(curve, list) or isinstance(curve, tuple))) or (len(curve) != 3)):
raise CatboostError('curve must be list or tuple of three arrays (fpr, tpr, thresholds).')
(tpr, thresholds) = (curve[1], curve[2][:])
else:
if ((model is None) or (data is None)):
raise CatboostError('model and data parameters should be set when curve parameter is None.')
(_, tpr, thresholds) = get_roc_curve(model, data, thread_count)
fnr = np.array([(1 - x) for x in tpr])
return (thresholds, fnr)
|
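get_fnr_curve mirrors get_fpr_curve but returns fnr = 1 - tpr at each threshold, as the body above computes. A sketch under the same toy-data assumptions:

from catboost import CatBoostClassifier, Pool
from catboost.utils import get_roc_curve, get_fnr_curve

pool = Pool([[0.0], [1.0], [2.0], [3.0]], label=[0, 0, 1, 1])
model = CatBoostClassifier(iterations=10, verbose=False).fit(pool)
thresholds, fnr = get_fnr_curve(curve=get_roc_curve(model, pool))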