repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
atztogo/phono3py | phono3py/phonon3/displacement_fc3.py | get_bond_symmetry | python | def get_bond_symmetry(site_symmetry,
lattice,
positions,
atom_center,
atom_disp,
symprec=1e-5):
bond_sym = []
pos = positions
for rot in site_symmetry:
rot_pos = (np.dot(pos[atom_disp] - pos[atom_center], rot.T) +
pos[atom_center])
diff = pos[atom_disp] - rot_pos
diff -= np.rint(diff)
dist = np.linalg.norm(np.dot(lattice, diff))
if dist < symprec:
bond_sym.append(rot)
return np.array(bond_sym) | Bond symmetry is the symmetry operations that keep the symmetry
of the cell containing two fixed atoms. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/displacement_fc3.py#L194-L215 | null | import numpy as np
from phonopy.harmonic.displacement import (get_least_displacements,
directions_axis, get_displacement,
is_minus_displacement)
from phonopy.structure.cells import get_smallest_vectors
def direction_to_displacement(dataset,
distance,
supercell,
cutoff_distance=None):
lattice = supercell.get_cell().T
new_dataset = {}
new_dataset['natom'] = supercell.get_number_of_atoms()
if cutoff_distance is not None:
new_dataset['cutoff_distance'] = cutoff_distance
new_first_atoms = []
for first_atoms in dataset:
atom1 = first_atoms['number']
direction1 = first_atoms['direction']
disp_cart1 = np.dot(direction1, lattice.T)
disp_cart1 *= distance / np.linalg.norm(disp_cart1)
new_second_atoms = []
for second_atom in first_atoms['second_atoms']:
atom2 = second_atom['number']
pair_distance = second_atom['distance']
included = (cutoff_distance is None or
pair_distance < cutoff_distance)
for direction2 in second_atom['directions']:
disp_cart2 = np.dot(direction2, lattice.T)
disp_cart2 *= distance / np.linalg.norm(disp_cart2)
if cutoff_distance is None:
new_second_atoms.append({'number': atom2,
'direction': direction2,
'displacement': disp_cart2,
'pair_distance': pair_distance})
else:
new_second_atoms.append({'number': atom2,
'direction': direction2,
'displacement': disp_cart2,
'pair_distance': pair_distance,
'included': included})
new_first_atoms.append({'number': atom1,
'direction': direction1,
'displacement': disp_cart1,
'second_atoms': new_second_atoms})
new_dataset['first_atoms'] = new_first_atoms
return new_dataset
def get_third_order_displacements(cell,
symmetry,
is_plusminus='auto',
is_diagonal=False):
"""Create dispalcement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
Atom 3: Force is mesuared on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
Whether allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]}
"""
positions = cell.get_scaled_positions()
lattice = cell.get_cell().T
# Least displacements of first atoms (Atom 1) are searched by
# using respective site symmetries of the original crystal.
# 'is_diagonal=False' below is made intentionally to expect
# better accuracy.
disps_first = get_least_displacements(symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
symprec = symmetry.get_symmetry_tolerance()
dds = []
for disp in disps_first:
atom1 = disp[0]
disp1 = disp[1:4]
site_sym = symmetry.get_site_symmetry(atom1)
dds_atom1 = {'number': atom1,
'direction': disp1,
'second_atoms': []}
# Reduced site symmetry at the first atom with respect to
# the displacement of the first atoms.
reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
# Searching orbits (second atoms) with respect to
# the first atom and its reduced site symmetry.
second_atoms = get_least_orbits(atom1,
cell,
reduced_site_sym,
symprec)
for atom2 in second_atoms:
dds_atom2 = get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal)
min_vec = get_equivalent_smallest_vectors(atom1,
atom2,
cell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
dds_atom2['distance'] = min_distance
dds_atom1['second_atoms'].append(dds_atom2)
dds.append(dds_atom1)
return dds
def get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal):
# Bond symmetry between first and second atoms.
reduced_bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
# Since displacement of first atom breaks translation
# symmetry, the crystal symmetry is reduced to point
# symmetry and it is equivalent to the site symmetry
# on the first atom. Therefore site symmetry on the
# second atom with the displacement is equivalent to
# this bond symmetry.
if is_diagonal:
disps_second = get_displacement(reduced_bond_sym)
else:
disps_second = get_displacement(reduced_bond_sym, directions_axis)
dds_atom2 = {'number': atom2, 'directions': []}
for disp2 in disps_second:
dds_atom2['directions'].append(disp2)
if is_minus_displacement(disp2, reduced_bond_sym):
dds_atom2['directions'].append(-disp2)
return dds_atom2
def get_reduced_site_symmetry(site_sym, direction, symprec=1e-5):
reduced_site_sym = []
for rot in site_sym:
if (abs(direction - np.dot(direction, rot.T)) < symprec).all():
reduced_site_sym.append(rot)
return np.array(reduced_site_sym, dtype='intc')
def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
"""Find least orbits for a centering atom"""
orbits = _get_orbits(atom_index, cell, site_symmetry, symprec)
mapping = np.arange(cell.get_number_of_atoms())
for i, orb in enumerate(orbits):
for num in np.unique(orb):
if mapping[num] > mapping[i]:
mapping[num] = mapping[i]
return np.unique(mapping)
def _get_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
lattice = cell.get_cell().T
positions = cell.get_scaled_positions()
center = positions[atom_index]
# orbits[num_atoms, num_site_sym]
orbits = []
for pos in positions:
mapping = []
for rot in site_symmetry:
rot_pos = np.dot(pos - center, rot.T) + center
for i, pos in enumerate(positions):
diff = pos - rot_pos
diff -= np.rint(diff)
dist = np.linalg.norm(np.dot(lattice, diff))
if dist < symprec:
mapping.append(i)
break
if len(mapping) < len(site_symmetry):
print("Site symmetry is broken.")
raise ValueError
else:
orbits.append(mapping)
return np.array(orbits)
def get_equivalent_smallest_vectors(atom_number_supercell,
atom_number_primitive,
supercell,
symprec):
s_pos = supercell.get_scaled_positions()
svecs, multi = get_smallest_vectors(supercell.get_cell(),
[s_pos[atom_number_supercell]],
[s_pos[atom_number_primitive]],
symprec=symprec)
return svecs[0, 0]
|
atztogo/phono3py | phono3py/phonon3/displacement_fc3.py | get_least_orbits | python | def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
orbits = _get_orbits(atom_index, cell, site_symmetry, symprec)
mapping = np.arange(cell.get_number_of_atoms())
for i, orb in enumerate(orbits):
for num in np.unique(orb):
if mapping[num] > mapping[i]:
mapping[num] = mapping[i]
return np.unique(mapping) | Find least orbits for a centering atom | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/displacement_fc3.py#L218-L228 | [
"def _get_orbits(atom_index, cell, site_symmetry, symprec=1e-5):\n lattice = cell.get_cell().T\n positions = cell.get_scaled_positions()\n center = positions[atom_index]\n\n # orbits[num_atoms, num_site_sym]\n orbits = []\n for pos in positions:\n mapping = []\n\n for rot in site_symmetry:\n rot_pos = np.dot(pos - center, rot.T) + center\n\n for i, pos in enumerate(positions):\n diff = pos - rot_pos\n diff -= np.rint(diff)\n dist = np.linalg.norm(np.dot(lattice, diff))\n if dist < symprec:\n mapping.append(i)\n break\n\n if len(mapping) < len(site_symmetry):\n print(\"Site symmetry is broken.\")\n raise ValueError\n else:\n orbits.append(mapping)\n\n return np.array(orbits)\n"
] | import numpy as np
from phonopy.harmonic.displacement import (get_least_displacements,
directions_axis, get_displacement,
is_minus_displacement)
from phonopy.structure.cells import get_smallest_vectors
def direction_to_displacement(dataset,
distance,
supercell,
cutoff_distance=None):
lattice = supercell.get_cell().T
new_dataset = {}
new_dataset['natom'] = supercell.get_number_of_atoms()
if cutoff_distance is not None:
new_dataset['cutoff_distance'] = cutoff_distance
new_first_atoms = []
for first_atoms in dataset:
atom1 = first_atoms['number']
direction1 = first_atoms['direction']
disp_cart1 = np.dot(direction1, lattice.T)
disp_cart1 *= distance / np.linalg.norm(disp_cart1)
new_second_atoms = []
for second_atom in first_atoms['second_atoms']:
atom2 = second_atom['number']
pair_distance = second_atom['distance']
included = (cutoff_distance is None or
pair_distance < cutoff_distance)
for direction2 in second_atom['directions']:
disp_cart2 = np.dot(direction2, lattice.T)
disp_cart2 *= distance / np.linalg.norm(disp_cart2)
if cutoff_distance is None:
new_second_atoms.append({'number': atom2,
'direction': direction2,
'displacement': disp_cart2,
'pair_distance': pair_distance})
else:
new_second_atoms.append({'number': atom2,
'direction': direction2,
'displacement': disp_cart2,
'pair_distance': pair_distance,
'included': included})
new_first_atoms.append({'number': atom1,
'direction': direction1,
'displacement': disp_cart1,
'second_atoms': new_second_atoms})
new_dataset['first_atoms'] = new_first_atoms
return new_dataset
def get_third_order_displacements(cell,
symmetry,
is_plusminus='auto',
is_diagonal=False):
"""Create dispalcement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
Atom 3: Force is mesuared on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
Whether allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]}
"""
positions = cell.get_scaled_positions()
lattice = cell.get_cell().T
# Least displacements of first atoms (Atom 1) are searched by
# using respective site symmetries of the original crystal.
# 'is_diagonal=False' below is made intentionally to expect
# better accuracy.
disps_first = get_least_displacements(symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
symprec = symmetry.get_symmetry_tolerance()
dds = []
for disp in disps_first:
atom1 = disp[0]
disp1 = disp[1:4]
site_sym = symmetry.get_site_symmetry(atom1)
dds_atom1 = {'number': atom1,
'direction': disp1,
'second_atoms': []}
# Reduced site symmetry at the first atom with respect to
# the displacement of the first atoms.
reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
# Searching orbits (second atoms) with respect to
# the first atom and its reduced site symmetry.
second_atoms = get_least_orbits(atom1,
cell,
reduced_site_sym,
symprec)
for atom2 in second_atoms:
dds_atom2 = get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal)
min_vec = get_equivalent_smallest_vectors(atom1,
atom2,
cell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
dds_atom2['distance'] = min_distance
dds_atom1['second_atoms'].append(dds_atom2)
dds.append(dds_atom1)
return dds
def get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal):
# Bond symmetry between first and second atoms.
reduced_bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
# Since displacement of first atom breaks translation
# symmetry, the crystal symmetry is reduced to point
# symmetry and it is equivalent to the site symmetry
# on the first atom. Therefore site symmetry on the
# second atom with the displacement is equivalent to
# this bond symmetry.
if is_diagonal:
disps_second = get_displacement(reduced_bond_sym)
else:
disps_second = get_displacement(reduced_bond_sym, directions_axis)
dds_atom2 = {'number': atom2, 'directions': []}
for disp2 in disps_second:
dds_atom2['directions'].append(disp2)
if is_minus_displacement(disp2, reduced_bond_sym):
dds_atom2['directions'].append(-disp2)
return dds_atom2
def get_reduced_site_symmetry(site_sym, direction, symprec=1e-5):
reduced_site_sym = []
for rot in site_sym:
if (abs(direction - np.dot(direction, rot.T)) < symprec).all():
reduced_site_sym.append(rot)
return np.array(reduced_site_sym, dtype='intc')
def get_bond_symmetry(site_symmetry,
lattice,
positions,
atom_center,
atom_disp,
symprec=1e-5):
"""
Bond symmetry is the symmetry operations that keep the symmetry
of the cell containing two fixed atoms.
"""
bond_sym = []
pos = positions
for rot in site_symmetry:
rot_pos = (np.dot(pos[atom_disp] - pos[atom_center], rot.T) +
pos[atom_center])
diff = pos[atom_disp] - rot_pos
diff -= np.rint(diff)
dist = np.linalg.norm(np.dot(lattice, diff))
if dist < symprec:
bond_sym.append(rot)
return np.array(bond_sym)
def _get_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
lattice = cell.get_cell().T
positions = cell.get_scaled_positions()
center = positions[atom_index]
# orbits[num_atoms, num_site_sym]
orbits = []
for pos in positions:
mapping = []
for rot in site_symmetry:
rot_pos = np.dot(pos - center, rot.T) + center
for i, pos in enumerate(positions):
diff = pos - rot_pos
diff -= np.rint(diff)
dist = np.linalg.norm(np.dot(lattice, diff))
if dist < symprec:
mapping.append(i)
break
if len(mapping) < len(site_symmetry):
print("Site symmetry is broken.")
raise ValueError
else:
orbits.append(mapping)
return np.array(orbits)
def get_equivalent_smallest_vectors(atom_number_supercell,
atom_number_primitive,
supercell,
symprec):
s_pos = supercell.get_scaled_positions()
svecs, multi = get_smallest_vectors(supercell.get_cell(),
[s_pos[atom_number_supercell]],
[s_pos[atom_number_primitive]],
symprec=symprec)
return svecs[0, 0]
|
atztogo/phono3py | phono3py/file_IO.py | write_fc3_to_hdf5 | python | def write_fc3_to_hdf5(fc3,
filename='fc3.hdf5',
p2s_map=None,
compression=None):
with h5py.File(filename, 'w') as w:
w.create_dataset('fc3', data=fc3, compression=compression)
if p2s_map is not None:
w.create_dataset('p2s_map', data=p2s_map) | Write third-order force constants in hdf5 format.
Parameters
----------
force_constants : ndarray
Force constants
shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
(n_patom, n_satom, n_satom,3,3,3), dtype=double
filename : str
Filename to be used.
p2s_map : ndarray, optional
Primitive atom indices in supercell index system
shape=(n_patom,), dtype=intc
compression : str or int, optional
h5py's lossless compression filters (e.g., "gzip", "lzf").
See the detail at docstring of h5py.Group.create_dataset. Default is
None. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/file_IO.py#L184-L211 | null | import os
import numpy as np
import h5py
from phonopy.file_IO import (write_force_constants_to_hdf5,
check_force_constants_indices,
get_cell_from_disp_yaml)
from phonopy.cui.load_helper import read_force_constants_from_hdf5
def write_cell_yaml(w, supercell):
w.write("lattice:\n")
for axis in supercell.get_cell():
w.write("- [ %20.15f,%20.15f,%20.15f ]\n" % tuple(axis))
symbols = supercell.get_chemical_symbols()
positions = supercell.get_scaled_positions()
w.write("atoms:\n")
for i, (s, v) in enumerate(zip(symbols, positions)):
w.write("- symbol: %-2s # %d\n" % (s, i+1))
w.write(" position: [ %18.14f,%18.14f,%18.14f ]\n" % tuple(v))
def write_disp_fc3_yaml(dataset, supercell, filename='disp_fc3.yaml'):
w = open(filename, 'w')
w.write("natom: %d\n" % dataset['natom'])
num_first = len(dataset['first_atoms'])
w.write("num_first_displacements: %d\n" % num_first)
if 'cutoff_distance' in dataset:
w.write("cutoff_distance: %f\n" % dataset['cutoff_distance'])
num_second = 0
num_disp_files = 0
for d1 in dataset['first_atoms']:
num_disp_files += 1
num_second += len(d1['second_atoms'])
for d2 in d1['second_atoms']:
if 'included' in d2:
if d2['included']:
num_disp_files += 1
else:
num_disp_files += 1
w.write("num_second_displacements: %d\n" % num_second)
w.write("num_displacements_created: %d\n" % num_disp_files)
w.write("first_atoms:\n")
count1 = 1
count2 = num_first + 1
for disp1 in dataset['first_atoms']:
disp_cart1 = disp1['displacement']
w.write("- number: %5d\n" % (disp1['number'] + 1))
w.write(" displacement:\n")
w.write(" [%20.16f,%20.16f,%20.16f ] # %05d\n" %
(disp_cart1[0], disp_cart1[1], disp_cart1[2], count1))
w.write(" second_atoms:\n")
count1 += 1
included = None
atom2 = -1
for disp2 in disp1['second_atoms']:
if atom2 != disp2['number']:
atom2 = disp2['number']
if 'included' in disp2:
included = disp2['included']
pair_distance = disp2['pair_distance']
w.write(" - number: %5d\n" % (atom2 + 1))
w.write(" distance: %f\n" % pair_distance)
if included is not None:
if included:
w.write(" included: %s\n" % "true")
else:
w.write(" included: %s\n" % "false")
w.write(" displacements:\n")
disp_cart2 = disp2['displacement']
w.write(" - [%20.16f,%20.16f,%20.16f ] # %05d\n" %
(disp_cart2[0], disp_cart2[1], disp_cart2[2], count2))
count2 += 1
write_cell_yaml(w, supercell)
w.close()
return num_first + num_second, num_disp_files
def write_disp_fc2_yaml(dataset, supercell, filename='disp_fc2.yaml'):
w = open(filename, 'w')
w.write("natom: %d\n" % dataset['natom'])
num_first = len(dataset['first_atoms'])
w.write("num_first_displacements: %d\n" % num_first)
w.write("first_atoms:\n")
for i, disp1 in enumerate(dataset['first_atoms']):
disp_cart1 = disp1['displacement']
w.write("- number: %5d\n" % (disp1['number'] + 1))
w.write(" displacement:\n")
w.write(" [%20.16f,%20.16f,%20.16f ] # %05d\n" %
(disp_cart1[0], disp_cart1[1], disp_cart1[2], i + 1))
if supercell is not None:
write_cell_yaml(w, supercell)
w.close()
return num_first
def write_FORCES_FC2(disp_dataset,
forces_fc2=None,
fp=None,
filename="FORCES_FC2"):
if fp is None:
w = open(filename, 'w')
else:
w = fp
for i, disp1 in enumerate(disp_dataset['first_atoms']):
w.write("# File: %-5d\n" % (i + 1))
w.write("# %-5d " % (disp1['number'] + 1))
w.write("%20.16f %20.16f %20.16f\n" % tuple(disp1['displacement']))
if forces_fc2 is None:
force_set = disp1['forces']
else:
force_set = forces_fc2[i]
for forces in force_set:
w.write("%15.10f %15.10f %15.10f\n" % tuple(forces))
def write_FORCES_FC3(disp_dataset, forces_fc3, fp=None, filename="FORCES_FC3"):
if fp is None:
w = open(filename, 'w')
else:
w = fp
natom = disp_dataset['natom']
num_disp1 = len(disp_dataset['first_atoms'])
count = num_disp1
file_count = num_disp1
write_FORCES_FC2(disp_dataset, forces_fc2=forces_fc3, fp=w)
for i, disp1 in enumerate(disp_dataset['first_atoms']):
atom1 = disp1['number']
for disp2 in disp1['second_atoms']:
atom2 = disp2['number']
w.write("# File: %-5d\n" % (count + 1))
w.write("# %-5d " % (atom1 + 1))
w.write("%20.16f %20.16f %20.16f\n" % tuple(disp1['displacement']))
w.write("# %-5d " % (atom2 + 1))
w.write("%20.16f %20.16f %20.16f\n" % tuple(disp2['displacement']))
# For supercell calculation reduction
included = True
if 'included' in disp2:
included = disp2['included']
if included:
for forces in forces_fc3[file_count]:
w.write("%15.10f %15.10f %15.10f\n" % tuple(forces))
file_count += 1
else:
# for forces in forces_fc3[i]:
# w.write("%15.10f %15.10f %15.10f\n" % (tuple(forces)))
for j in range(natom):
w.write("%15.10f %15.10f %15.10f\n" % (0, 0, 0))
count += 1
def write_fc3_dat(force_constants_third, filename='fc3.dat'):
w = open(filename, 'w')
for i in range(force_constants_third.shape[0]):
for j in range(force_constants_third.shape[1]):
for k in range(force_constants_third.shape[2]):
tensor3 = force_constants_third[i, j, k]
w.write(" %d - %d - %d (%f)\n" % (i + 1, j + 1, k + 1,
np.abs(tensor3).sum()))
for tensor2 in tensor3:
for vec in tensor2:
w.write("%20.14f %20.14f %20.14f\n" % tuple(vec))
w.write("\n")
def read_fc3_from_hdf5(filename='fc3.hdf5', p2s_map=None):
with h5py.File(filename, 'r') as f:
fc3 = f['fc3'][:]
if 'p2s_map' in f:
p2s_map_in_file = f['p2s_map'][:]
check_force_constants_indices(fc3.shape[:2],
p2s_map_in_file,
p2s_map,
filename)
if fc3.dtype == np.double and fc3.flags.c_contiguous:
return fc3
else:
msg = ("%s has to be read by h5py as numpy ndarray of "
"dtype='double' and c_contiguous." % filename)
raise TypeError(msg)
return None
def write_fc2_dat(force_constants, filename='fc2.dat'):
w = open(filename, 'w')
for i, fcs in enumerate(force_constants):
for j, fcb in enumerate(fcs):
w.write(" %d - %d\n" % (i+1, j+1))
for vec in fcb:
w.write("%20.14f %20.14f %20.14f\n" % tuple(vec))
w.write("\n")
def write_fc2_to_hdf5(force_constants,
filename='fc2.hdf5',
p2s_map=None,
physical_unit=None,
compression=None):
write_force_constants_to_hdf5(force_constants,
filename=filename,
p2s_map=p2s_map,
physical_unit=physical_unit,
compression=compression)
def read_fc2_from_hdf5(filename='fc2.hdf5',
p2s_map=None):
return read_force_constants_from_hdf5(filename=filename,
p2s_map=p2s_map,
calculator='vasp')
def write_triplets(triplets,
weights,
mesh,
grid_address,
grid_point=None,
filename=None):
triplets_filename = "triplets"
suffix = "-m%d%d%d" % tuple(mesh)
if grid_point is not None:
suffix += ("-g%d" % grid_point)
if filename is not None:
suffix += "." + filename
suffix += ".dat"
triplets_filename += suffix
w = open(triplets_filename, 'w')
for weight, g3 in zip(weights, triplets):
w.write("%4d " % weight)
for q3 in grid_address[g3]:
w.write("%4d %4d %4d " % tuple(q3))
w.write("\n")
w.close()
def write_grid_address(grid_address, mesh, filename=None):
grid_address_filename = "grid_address"
suffix = "-m%d%d%d" % tuple(mesh)
if filename is not None:
suffix += "." + filename
suffix += ".dat"
grid_address_filename += suffix
w = open(grid_address_filename, 'w')
w.write("# Grid addresses for %dx%dx%d mesh\n" % tuple(mesh))
w.write("#%9s %8s %8s %8s %8s %8s %8s\n" %
("index", "a", "b", "c",
("a%%%d" % mesh[0]), ("b%%%d" % mesh[1]), ("c%%%d" % mesh[2])))
for i, bz_q in enumerate(grid_address):
if i == np.prod(mesh):
w.write("#" + "-" * 78 + "\n")
q = bz_q % mesh
w.write("%10d %8d %8d %8d " % (i, bz_q[0], bz_q[1], bz_q[2]))
w.write("%8d %8d %8d\n" % tuple(q))
return grid_address_filename
def write_grid_address_to_hdf5(grid_address,
mesh,
grid_mapping_table,
compression=None,
filename=None):
suffix = _get_filename_suffix(mesh, filename=filename)
full_filename = "grid_address" + suffix + ".hdf5"
with h5py.File(full_filename, 'w') as w:
w.create_dataset('mesh', data=mesh)
w.create_dataset('grid_address', data=grid_address,
compression=compression)
w.create_dataset('grid_mapping_table', data=grid_mapping_table,
compression=compression)
return full_filename
return None
def write_freq_shifts_to_hdf5(freq_shifts, filename='freq_shifts.hdf5'):
with h5py.File(filename, 'w') as w:
w.create_dataset('shift', data=freq_shifts)
def write_imag_self_energy_at_grid_point(gp,
band_indices,
mesh,
frequencies,
gammas,
sigma=None,
temperature=None,
scattering_event_class=None,
filename=None,
is_mesh_symmetry=True):
gammas_filename = "gammas"
gammas_filename += "-m%d%d%d-g%d-" % (mesh[0],
mesh[1],
mesh[2],
gp)
if sigma is not None:
gammas_filename += ("s%f" % sigma).rstrip('0').rstrip('\.') + "-"
if temperature is not None:
gammas_filename += ("t%f" % temperature).rstrip('0').rstrip('\.') + "-"
for i in band_indices:
gammas_filename += "b%d" % (i + 1)
if scattering_event_class is not None:
gammas_filename += "-c%d" % scattering_event_class
if filename is not None:
gammas_filename += ".%s" % filename
elif not is_mesh_symmetry:
gammas_filename += ".nosym"
gammas_filename += ".dat"
w = open(gammas_filename, 'w')
for freq, g in zip(frequencies, gammas):
w.write("%15.7f %20.15e\n" % (freq, g))
w.close()
def write_joint_dos(gp,
mesh,
frequencies,
jdos,
sigma=None,
temperatures=None,
filename=None,
is_mesh_symmetry=True):
if temperatures is None:
return _write_joint_dos_at_t(gp,
mesh,
frequencies,
jdos,
sigma=sigma,
temperature=None,
filename=filename,
is_mesh_symmetry=is_mesh_symmetry)
else:
for jdos_at_t, t in zip(jdos, temperatures):
return _write_joint_dos_at_t(gp,
mesh,
frequencies,
jdos_at_t,
sigma=sigma,
temperature=t,
filename=filename,
is_mesh_symmetry=is_mesh_symmetry)
def _write_joint_dos_at_t(grid_point,
mesh,
frequencies,
jdos,
sigma=None,
temperature=None,
filename=None,
is_mesh_symmetry=True):
suffix = _get_filename_suffix(mesh,
grid_point=grid_point,
sigma=sigma,
filename=filename)
jdos_filename = "jdos%s" % suffix
if temperature is not None:
jdos_filename += ("-t%f" % temperature).rstrip('0').rstrip('\.')
if not is_mesh_symmetry:
jdos_filename += ".nosym"
if filename is not None:
jdos_filename += ".%s" % filename
jdos_filename += ".dat"
with open(jdos_filename, 'w') as w:
for omega, vals in zip(frequencies, jdos):
w.write("%15.7f" % omega)
w.write((" %20.15e" * len(vals)) % tuple(vals))
w.write("\n")
return jdos_filename
def write_linewidth_at_grid_point(gp,
band_indices,
temperatures,
gamma,
mesh,
sigma=None,
filename=None,
is_mesh_symmetry=True):
lw_filename = "linewidth"
lw_filename += "-m%d%d%d-g%d-" % (mesh[0], mesh[1], mesh[2], gp)
if sigma is not None:
lw_filename += ("s%f" % sigma).rstrip('0') + "-"
for i in band_indices:
lw_filename += "b%d" % (i + 1)
if filename is not None:
lw_filename += ".%s" % filename
elif not is_mesh_symmetry:
lw_filename += ".nosym"
lw_filename += ".dat"
w = open(lw_filename, 'w')
for v, t in zip(gamma.sum(axis=1) * 2 / gamma.shape[1], temperatures):
w.write("%15.7f %20.15e\n" % (t, v))
w.close()
def write_frequency_shift(gp,
band_indices,
temperatures,
delta,
mesh,
epsilon=None,
filename=None,
is_mesh_symmetry=True):
fst_filename = "frequency_shift"
fst_filename += "-m%d%d%d-g%d-" % (mesh[0], mesh[1], mesh[2], gp)
if epsilon is not None:
if epsilon > 1e-5:
fst_filename += ("s%f" % epsilon).rstrip('0') + "-"
else:
fst_filename += ("s%.3e" % epsilon) + "-"
for i in band_indices:
fst_filename += "b%d" % (i + 1)
if filename is not None:
fst_filename += ".%s" % filename
elif not is_mesh_symmetry:
fst_filename += ".nosym"
fst_filename += ".dat"
w = open(fst_filename, 'w')
for v, t in zip(delta.sum(axis=1) / delta.shape[1], temperatures):
w.write("%15.7f %20.15e\n" % (t, v))
w.close()
def write_collision_to_hdf5(temperature,
mesh,
gamma=None,
gamma_isotope=None,
collision_matrix=None,
grid_point=None,
band_index=None,
sigma=None,
sigma_cutoff=None,
filename=None):
if band_index is None:
band_indices = None
else:
band_indices = [band_index]
suffix = _get_filename_suffix(mesh,
grid_point=grid_point,
band_indices=band_indices,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
full_filename = "collision" + suffix + ".hdf5"
with h5py.File(full_filename, 'w') as w:
w.create_dataset('temperature', data=temperature)
if gamma is not None:
w.create_dataset('gamma', data=gamma)
if gamma_isotope is not None:
w.create_dataset('gamma_isotope', data=gamma_isotope)
if collision_matrix is not None:
w.create_dataset('collision_matrix', data=collision_matrix)
if grid_point is not None:
w.create_dataset('grid_point', data=grid_point)
if band_index is not None:
w.create_dataset('band_index', data=(band_index + 1))
if sigma is not None:
w.create_dataset('sigma', data=sigma)
if sigma_cutoff is not None:
w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
text = "Collisions "
if grid_point is not None:
text += "at grid adress %d " % grid_point
if sigma is not None:
if grid_point is not None:
text += "and "
else:
text += "at "
text += "sigma %s " % _del_zeros(sigma)
text += "were written into "
if sigma is not None:
text += "\n"
text += "\"%s\"." % ("collision" + suffix + ".hdf5")
print(text)
return full_filename
def write_full_collision_matrix(collision_matrix, filename='fcm.hdf5'):
with h5py.File(filename, 'w') as w:
w.create_dataset('collision_matrix', data=collision_matrix)
def write_unitary_matrix_to_hdf5(temperature,
mesh,
unitary_matrix=None,
sigma=None,
sigma_cutoff=None,
solver=None,
filename=None,
verbose=False):
"""Write eigenvectors of collision matrices at temperatures.
Depending on the choice of the solver, eigenvectors are sotred in
either column-wise or row-wise.
"""
suffix = _get_filename_suffix(mesh,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
hdf5_filename = "unitary" + suffix + ".hdf5"
with h5py.File(hdf5_filename, 'w') as w:
w.create_dataset('temperature', data=temperature)
if unitary_matrix is not None:
w.create_dataset('unitary_matrix', data=unitary_matrix)
if solver is not None:
w.create_dataset('solver', data=solver)
if verbose:
if len(temperature) > 1:
text = "Unitary matrices "
else:
text = "Unitary matrix "
if sigma is not None:
text += "at sigma %s " % _del_zeros(sigma)
if sigma_cutoff is not None:
text += "(%4.2f SD) " % sigma_cutoff
if len(temperature) > 1:
text += "were written into "
else:
text += "was written into "
if sigma is not None:
text += "\n"
text += "\"%s\"." % hdf5_filename
print(text)
def write_collision_eigenvalues_to_hdf5(temperatures,
mesh,
collision_eigenvalues,
sigma=None,
sigma_cutoff=None,
filename=None,
verbose=True):
suffix = _get_filename_suffix(mesh,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
with h5py.File("coleigs" + suffix + ".hdf5", 'w') as w:
w.create_dataset('temperature', data=temperatures)
w.create_dataset('collision_eigenvalues', data=collision_eigenvalues)
w.close()
if verbose:
text = "Eigenvalues of collision matrix "
if sigma is not None:
text += "with sigma %s\n" % sigma
text += "were written into "
text += "\"%s\"" % ("coleigs" + suffix + ".hdf5")
print(text)
def write_kappa_to_hdf5(temperature,
                        mesh,
                        frequency=None,
                        group_velocity=None,
                        gv_by_gv=None,
                        mean_free_path=None,
                        heat_capacity=None,
                        kappa=None,
                        mode_kappa=None,
                        kappa_RTA=None,  # RTA calculated in LBTE
                        mode_kappa_RTA=None,  # RTA calculated in LBTE
                        f_vector=None,
                        gamma=None,
                        gamma_isotope=None,
                        gamma_N=None,
                        gamma_U=None,
                        averaged_pp_interaction=None,
                        qpoint=None,
                        weight=None,
                        mesh_divisors=None,
                        grid_point=None,
                        band_index=None,
                        sigma=None,
                        sigma_cutoff=None,
                        kappa_unit_conversion=None,
                        compression=None,
                        filename=None,
                        verbose=True):
    """Write thermal conductivity and related properties to "kappa-*.hdf5".

    Every keyword that is not None becomes one hdf5 dataset of the same
    name (``averaged_pp_interaction`` is stored as 'ave_pp').  The file
    suffix encodes mesh, mesh divisors, grid point, band index, sigma,
    sigma_cutoff and filename.  Returns the name of the file written.
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  mesh_divisors=mesh_divisors,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "kappa" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        w.create_dataset('mesh', data=mesh)
        if frequency is not None:
            w.create_dataset('frequency', data=frequency,
                             compression=compression)
        if group_velocity is not None:
            w.create_dataset('group_velocity', data=group_velocity,
                             compression=compression)
        if gv_by_gv is not None:
            w.create_dataset('gv_by_gv', data=gv_by_gv)
        if mean_free_path is not None:
            w.create_dataset('mean_free_path', data=mean_free_path,
                             compression=compression)
        if heat_capacity is not None:
            w.create_dataset('heat_capacity', data=heat_capacity,
                             compression=compression)
        if kappa is not None:
            w.create_dataset('kappa', data=kappa)
        if mode_kappa is not None:
            w.create_dataset('mode_kappa', data=mode_kappa,
                             compression=compression)
        if kappa_RTA is not None:
            w.create_dataset('kappa_RTA', data=kappa_RTA)
        if mode_kappa_RTA is not None:
            w.create_dataset('mode_kappa_RTA', data=mode_kappa_RTA,
                             compression=compression)
        if f_vector is not None:
            w.create_dataset('f_vector', data=f_vector,
                             compression=compression)
        if gamma is not None:
            w.create_dataset('gamma', data=gamma,
                             compression=compression)
        if gamma_isotope is not None:
            w.create_dataset('gamma_isotope', data=gamma_isotope,
                             compression=compression)
        if gamma_N is not None:
            w.create_dataset('gamma_N', data=gamma_N,
                             compression=compression)
        if gamma_U is not None:
            w.create_dataset('gamma_U', data=gamma_U,
                             compression=compression)
        if averaged_pp_interaction is not None:
            w.create_dataset('ave_pp', data=averaged_pp_interaction,
                             compression=compression)
        if qpoint is not None:
            w.create_dataset('qpoint', data=qpoint,
                             compression=compression)
        if weight is not None:
            w.create_dataset('weight', data=weight,
                             compression=compression)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based, matching the filename suffix convention.
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
        if kappa_unit_conversion is not None:
            w.create_dataset('kappa_unit_conversion',
                             data=kappa_unit_conversion)

        if verbose:
            text = ""
            if kappa is not None:
                text += "Thermal conductivity and related properties "
            else:
                text += "Thermal conductivity related properties "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
                if band_index is not None:
                    text += "and band_index-%d\n" % (band_index + 1)
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                if band_index is None:
                    text += "\n"
            text += "\"%s\"." % full_filename
            print(text)

        return full_filename
def read_gamma_from_hdf5(mesh,
                         mesh_divisors=None,
                         grid_point=None,
                         band_index=None,
                         sigma=None,
                         sigma_cutoff=None,
                         filename=None,
                         verbose=True):
    """Read gamma and related datasets back from a "kappa-*.hdf5" file.

    Returns a dict containing 'gamma' and whichever of 'gamma_isotope',
    'ave_pp', 'gamma_N', 'gamma_U' exist in the file, or None when the
    file does not exist.
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  mesh_divisors=mesh_divisors,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "kappa" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    read_data = {}

    with h5py.File(full_filename, 'r') as f:
        read_data['gamma'] = f['gamma'][:]
        for key in ('gamma_isotope',
                    'ave_pp',
                    'gamma_N',
                    'gamma_U'):
            if key in f.keys():
                # Scalar datasets have an empty shape and must be read
                # with [()] instead of slicing.
                if len(f[key].shape) > 0:
                    read_data[key] = f[key][:]
                else:
                    read_data[key] = f[key][()]
        if verbose:
            print("Read data from %s." % full_filename)

    return read_data
def read_collision_from_hdf5(mesh,
                             indices=None,
                             grid_point=None,
                             band_index=None,
                             sigma=None,
                             sigma_cutoff=None,
                             filename=None,
                             verbose=True):
    """Read collision matrix, gamma and temperatures from "collision-*.hdf5".

    When ``indices == 'all'`` every stored temperature is read; otherwise
    *indices* selects temperature entries.  Returns
    (collision_matrix, gamma, temperatures) or None when the file is
    missing.  (The unreachable trailing ``return None`` of the original
    was removed.)
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "collision" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    with h5py.File(full_filename, 'r') as f:
        if indices == 'all':
            colmat_shape = (1,) + f['collision_matrix'].shape
            collision_matrix = np.zeros(colmat_shape,
                                        dtype='double', order='C')
            gamma = np.array(f['gamma'][:], dtype='double', order='C')
            collision_matrix[0] = f['collision_matrix'][:]
            temperatures = np.array(f['temperature'][:], dtype='double')
        else:
            colmat_shape = (1, len(indices)) + f['collision_matrix'].shape[1:]
            collision_matrix = np.zeros(colmat_shape, dtype='double')
            gamma = np.array(f['gamma'][indices], dtype='double', order='C')
            collision_matrix[0] = f['collision_matrix'][indices]
            temperatures = np.array(f['temperature'][indices], dtype='double')

        if verbose:
            text = "Collisions "
            if band_index is None:
                if grid_point is not None:
                    text += "at grid point %d " % grid_point
            else:
                if grid_point is not None:
                    text += ("at (grid point %d, band index %d) " %
                             (grid_point, band_index))
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % _del_zeros(sigma)
                if sigma_cutoff is not None:
                    text += "(%4.2f SD)" % sigma_cutoff
            if band_index is None and grid_point is not None:
                text += " were read from "
                text += "\n"
            else:
                text += "\n"
                text += "were read from "
            text += "\"%s\"." % full_filename
            print(text)

        return collision_matrix, gamma, temperatures
def write_pp_to_hdf5(mesh,
                     pp=None,
                     g_zero=None,
                     grid_point=None,
                     triplet=None,
                     weight=None,
                     triplet_map=None,
                     triplet_all=None,
                     sigma=None,
                     sigma_cutoff=None,
                     filename=None,
                     verbose=True,
                     check_consistency=False,
                     compression=None):
    """Write ph-ph interaction strength to "pp-*.hdf5".

    When *g_zero* is given, only the pp elements where g_zero == 0 are
    stored together with a bit-packed copy of g_zero, which lets
    ``read_pp_from_hdf5`` reconstruct the full array.  Returns the name
    of the file written.
    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "pp" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        if pp is not None:
            if g_zero is None:
                # Dense storage: write pp and triplet bookkeeping as-is.
                w.create_dataset('pp', data=pp,
                                 compression=compression)
                if triplet is not None:
                    w.create_dataset('triplet', data=triplet,
                                     compression=compression)
                if weight is not None:
                    w.create_dataset('weight', data=weight,
                                     compression=compression)
                if triplet_map is not None:
                    w.create_dataset('triplet_map', data=triplet_map,
                                     compression=compression)
                if triplet_all is not None:
                    w.create_dataset('triplet_all', data=triplet_all,
                                     compression=compression)
            else:
                # Sparse storage: keep only pp where g_zero == 0 and pack
                # the g_zero mask 8 flags per byte with np.packbits.
                x = g_zero.ravel()
                nonzero_pp = np.array(pp.ravel()[x == 0], dtype='double')
                bytelen = len(x) // 8
                remlen = len(x) % 8
                y = x[:bytelen * 8].reshape(-1, 8)
                z = np.packbits(y)
                if remlen != 0:
                    # Leftover flags that do not fill a whole byte.
                    z_rem = np.packbits(x[bytelen * 8:])

                w.create_dataset('nonzero_pp', data=nonzero_pp,
                                 compression=compression)
                w.create_dataset('pp_shape', data=pp.shape,
                                 compression=compression)
                w.create_dataset('g_zero_bits', data=z,
                                 compression=compression)
                if remlen != 0:
                    w.create_dataset('g_zero_bits_reminder', data=z_rem)

                # This is only for the test and coupled with read_pp_from_hdf5.
                if check_consistency:
                    w.create_dataset('pp', data=pp,
                                     compression=compression)
                    w.create_dataset('g_zero', data=g_zero,
                                     compression=compression)

        if verbose:
            text = ""
            text += "Ph-ph interaction strength "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                text += "\n"
            text += "\"%s\"." % full_filename
            print(text)

        return full_filename
def read_pp_from_hdf5(mesh,
                      grid_point=None,
                      sigma=None,
                      sigma_cutoff=None,
                      filename=None,
                      verbose=True,
                      check_consistency=False):
    """Read ph-ph interaction strength written by ``write_pp_to_hdf5``.

    Reconstructs the dense pp array from the bit-packed sparse layout
    when present.  Returns (pp, g_zero) — g_zero is None for densely
    stored files — or None when the file does not exist.  (The
    unreachable trailing ``return None`` of the original was removed.)
    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "pp" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    with h5py.File(full_filename, 'r') as f:
        if 'nonzero_pp' in f:
            nonzero_pp = f['nonzero_pp'][:]
            pp_shape = f['pp_shape'][:]
            z = f['g_zero_bits'][:]
            bytelen = np.prod(pp_shape) // 8
            remlen = 0
            if 'g_zero_bits_reminder' in f:
                z_rem = f['g_zero_bits_reminder'][:]
                remlen = np.prod(pp_shape) - bytelen * 8

            # Unpack the g_zero mask (8 flags per byte).
            bits = np.unpackbits(z)
            if not bits.flags['C_CONTIGUOUS']:
                bits = np.array(bits, dtype='uint8')

            g_zero = np.zeros(pp_shape, dtype='byte', order='C')
            b = g_zero.ravel()
            b[:(bytelen * 8)] = bits
            if remlen != 0:
                b[-remlen:] = np.unpackbits(z_rem)[:remlen]

            # Scatter the stored elements back to where g_zero == 0.
            pp = np.zeros(pp_shape, dtype='double', order='C')
            pp_ravel = pp.ravel()
            pp_ravel[g_zero.ravel() == 0] = nonzero_pp

            # check_consistency==True in write_pp_to_hdf5 required.
            if check_consistency and g_zero is not None:
                if verbose:
                    print("Checking consistency of ph-ph interanction "
                          "strength.")
                assert (g_zero == f['g_zero'][:]).all()
                assert np.allclose(pp, f['pp'][:])
        else:
            pp = np.zeros(f['pp'].shape, dtype='double', order='C')
            pp[:] = f['pp'][:]
            g_zero = None

        if verbose:
            print("Ph-ph interaction strength was read from \"%s\"." %
                  full_filename)

        return pp, g_zero
def write_gamma_detail_to_hdf5(temperature,
                               mesh,
                               gamma_detail=None,
                               grid_point=None,
                               triplet=None,
                               weight=None,
                               triplet_map=None,
                               triplet_all=None,
                               frequency_points=None,
                               band_index=None,
                               sigma=None,
                               sigma_cutoff=None,
                               compression=None,
                               filename=None,
                               verbose=True):
    """Write per-triplet contributions to gamma into "gamma_detail-*.hdf5".

    Every keyword that is not None becomes one dataset.  Returns the name
    of the file written.  (The unreachable trailing ``return None`` of
    the original was removed.)
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "gamma_detail" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        w.create_dataset('mesh', data=mesh)
        if gamma_detail is not None:
            w.create_dataset('gamma_detail', data=gamma_detail,
                             compression=compression)
        if triplet is not None:
            w.create_dataset('triplet', data=triplet,
                             compression=compression)
        if weight is not None:
            w.create_dataset('weight', data=weight,
                             compression=compression)
        if triplet_map is not None:
            w.create_dataset('triplet_map', data=triplet_map,
                             compression=compression)
        if triplet_all is not None:
            w.create_dataset('triplet_all', data=triplet_all,
                             compression=compression)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based, matching the filename suffix convention.
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
        if frequency_points is not None:
            w.create_dataset('frequency_point', data=frequency_points)

        if verbose:
            text = ""
            text += "Phonon triplets contributions to Gamma "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
                if band_index is not None:
                    text += "and band_index-%d\n" % (band_index + 1)
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                if band_index is None:
                    text += "\n"
            text += "\"%s\"." % full_filename
            print(text)

        return full_filename
def write_phonon_to_hdf5(frequency,
                         eigenvector,
                         grid_address,
                         mesh,
                         compression=None,
                         filename=None):
    """Write phonon frequencies and eigenvectors on a mesh to "phonon-*.hdf5".

    Returns the name of the file written.  (The unreachable trailing
    ``return None`` of the original was removed.)
    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "phonon" + suffix + ".hdf5"

    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('mesh', data=mesh)
        w.create_dataset('grid_address', data=grid_address,
                         compression=compression)
        w.create_dataset('frequency', data=frequency,
                         compression=compression)
        w.create_dataset('eigenvector', data=eigenvector,
                         compression=compression)
        return full_filename
def read_phonon_from_hdf5(mesh,
                          filename=None,
                          verbose=True):
    """Read phonons written by ``write_phonon_to_hdf5``.

    Returns (frequencies, eigenvectors, grid_address) or None when the
    file does not exist; raises AssertionError when the stored mesh does
    not match *mesh*.  (The unreachable trailing ``return None`` of the
    original was removed.)
    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "phonon" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    with h5py.File(full_filename, 'r') as f:
        frequencies = np.array(f['frequency'][:], dtype='double', order='C')
        itemsize = frequencies.itemsize
        # Complex dtype matching the float width (e.g. float64 -> "c16").
        eigenvectors = np.array(f['eigenvector'][:],
                                dtype=("c%d" % (itemsize * 2)), order='C')
        mesh_in_file = np.array(f['mesh'][:], dtype='intc')
        grid_address = np.array(f['grid_address'][:], dtype='intc', order='C')

        assert (mesh_in_file == mesh).all(), "Mesh numbers are inconsistent."

        if verbose:
            print("Phonons are read from \"%s\"." % full_filename)

        return frequencies, eigenvectors, grid_address
def write_ir_grid_points(mesh,
                         mesh_divs,
                         grid_points,
                         coarse_grid_weights,
                         grid_address,
                         primitive_lattice):
    """Write irreducible grid points to "ir_grid_points.yaml".

    The original left the file handle open; the output is now written
    under a context manager so the file is always closed and flushed.
    """
    with open("ir_grid_points.yaml", 'w') as w:
        w.write("mesh: [ %d, %d, %d ]\n" % tuple(mesh))
        if mesh_divs is not None:
            w.write("mesh_divisors: [ %d, %d, %d ]\n" % tuple(mesh_divs))
        w.write("reciprocal_lattice:\n")
        for vec, axis in zip(primitive_lattice.T, ('a*', 'b*', 'c*')):
            w.write("- [ %12.8f, %12.8f, %12.8f ] # %2s\n"
                    % (tuple(vec) + (axis,)))
        w.write("num_reduced_ir_grid_points: %d\n" % len(grid_points))
        w.write("ir_grid_points:  # [address, weight]\n")
        for g, weight in zip(grid_points, coarse_grid_weights):
            w.write("- grid_point: %d\n" % g)
            w.write("  weight: %d\n" % weight)
            w.write("  grid_address: [ %12d, %12d, %12d ]\n" %
                    tuple(grid_address[g]))
            w.write("  q-point:      [ %12.7f, %12.7f, %12.7f ]\n" %
                    tuple(grid_address[g].astype('double') / mesh))
def parse_disp_fc2_yaml(filename="disp_fc2.yaml", return_cell=False):
    """Parse disp_fc2.yaml into a displacement dataset.

    Atom numbers are converted from the file's 1-based indexing to
    0-based.  When *return_cell* is True, also returns the supercell
    parsed from the same file.
    """
    dataset = _parse_yaml(filename)
    natom = dataset['natom']
    new_dataset = {}
    new_dataset['natom'] = natom
    new_first_atoms = []
    for first_atoms in dataset['first_atoms']:
        # Shift to 0-based atom index (mutates the parsed dict in place).
        first_atoms['number'] -= 1
        atom1 = first_atoms['number']
        disp1 = first_atoms['displacement']
        new_first_atoms.append({'number': atom1, 'displacement': disp1})
    new_dataset['first_atoms'] = new_first_atoms
    if return_cell:
        cell = get_cell_from_disp_yaml(dataset)
        return new_dataset, cell
    else:
        return new_dataset
def parse_disp_fc3_yaml(filename="disp_fc3.yaml", return_cell=False):
    """Parse disp_fc3.yaml into a displacement dataset.

    Atom numbers are converted to 0-based indexing and each displacement
    of a second atom becomes its own entry in 'second_atoms'.  When
    *return_cell* is True, also returns the supercell parsed from the
    same file.
    """
    dataset = _parse_yaml(filename)
    natom = dataset['natom']
    new_dataset = {}
    new_dataset['natom'] = natom
    if 'cutoff_distance' in dataset:
        new_dataset['cutoff_distance'] = dataset['cutoff_distance']
    new_first_atoms = []
    for first_atoms in dataset['first_atoms']:
        atom1 = first_atoms['number'] - 1
        disp1 = first_atoms['displacement']
        new_second_atoms = []
        for second_atom in first_atoms['second_atoms']:
            disp2_dataset = {'number': second_atom['number'] - 1}
            if 'included' in second_atom:
                disp2_dataset.update({'included': second_atom['included']})
            if 'distance' in second_atom:
                disp2_dataset.update(
                    {'pair_distance': second_atom['distance']})
            # Flatten: one entry per displacement of the second atom.
            for disp2 in second_atom['displacements']:
                disp2_dataset.update({'displacement': disp2})
                new_second_atoms.append(disp2_dataset.copy())
        new_first_atoms.append({'number': atom1,
                                'displacement': disp1,
                                'second_atoms': new_second_atoms})
    new_dataset['first_atoms'] = new_first_atoms
    if return_cell:
        cell = get_cell_from_disp_yaml(dataset)
        return new_dataset, cell
    else:
        return new_dataset
def parse_FORCES_FC2(disp_dataset, filename="FORCES_FC2"):
    """Read one force set per fc2 displacement from a FORCES_FC2 file.

    Returns a list of (natom, 3) arrays, or an empty list when the file
    runs out of data before all displacements are read.
    """
    natom = disp_dataset['natom']
    force_sets = []
    with open(filename, 'r') as stream:
        for _ in disp_dataset['first_atoms']:
            forces = _parse_force_lines(stream, natom)
            if forces is None:
                return []
            force_sets.append(forces)
    return force_sets
def parse_FORCES_FC3(disp_dataset, filename="FORCES_FC3", use_loadtxt=False):
    """Read forces of all fc3 displaced supercells from FORCES_FC3.

    The number of force sets equals the number of first displacements
    plus the number of second displacements.  Returns an ndarray of
    shape (num_disp, num_atom, 3).  With *use_loadtxt* the whole file is
    read by np.loadtxt (no comment/blank-line handling beyond loadtxt's).
    """
    num_atom = disp_dataset['natom']
    num_disp = len(disp_dataset['first_atoms'])
    for disp1 in disp_dataset['first_atoms']:
        num_disp += len(disp1['second_atoms'])

    if use_loadtxt:
        forces_fc3 = np.loadtxt(filename)
        return forces_fc3.reshape((num_disp, -1, 3))
    else:
        forces_fc3 = np.zeros((num_disp, num_atom, 3),
                              dtype='double', order='C')
        with open(filename, 'r') as f3:
            for i in range(num_disp):
                forces = _parse_force_lines(f3, num_atom)
                if forces is None:
                    raise RuntimeError("Failed to parse %s." % filename)
                else:
                    forces_fc3[i] = forces
        return forces_fc3
def parse_QPOINTS3(filename='QPOINTS3'):
    """Read q-point triplets from a QPOINTS3 file.

    The first line holds the number of triplets; each following line
    must contain at least nine numbers (three q-points).  Returns an
    (num, 9) ndarray.  The original never closed the file; it is now
    opened under a context manager.
    """
    qpoints3 = []
    with open(filename) as f:
        num = int(f.readline().strip())
        for line in f:
            values = [float(x) for x in line.strip().split()]
            if len(values) < 9:
                raise RuntimeError("Failed to parse %s." % filename)
            qpoints3.append(values[0:9])
            if len(qpoints3) == num:
                break
    return np.array(qpoints3)
def parse_fc3(num_atom, filename='fc3.dat'):
    """Read third-order force constants in the write_fc3_dat text layout.

    Per atom triplet (i, j, k): one header line, then three 3x3 blocks,
    then one trailing line.  Returns an ndarray of shape
    (num_atom, num_atom, num_atom, 3, 3, 3).  The original never closed
    the file; it is now opened under a context manager.
    """
    fc3 = np.zeros((num_atom, num_atom, num_atom, 3, 3, 3), dtype=float)
    with open(filename) as f:
        for i in range(num_atom):
            for j in range(num_atom):
                for k in range(num_atom):
                    f.readline()  # header " i - j - k (abs sum)"
                    for l in range(3):
                        fc3[i, j, k, l] = [
                            [float(x) for x in f.readline().split()],
                            [float(x) for x in f.readline().split()],
                            [float(x) for x in f.readline().split()]]
                    f.readline()  # trailing separator line
    return fc3
def parse_fc2(num_atom, filename='fc2.dat'):
    """Read second-order force constants in the write_fc2_dat text layout.

    Per atom pair (i, j): one header line, a 3x3 block, then one trailing
    line.  Returns an ndarray of shape (num_atom, num_atom, 3, 3).  The
    original never closed the file; it is now opened under a context
    manager.
    """
    fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype=float)
    with open(filename) as f:
        for i in range(num_atom):
            for j in range(num_atom):
                f.readline()  # header " i - j"
                fc2[i, j] = [[float(x) for x in f.readline().split()],
                             [float(x) for x in f.readline().split()],
                             [float(x) for x in f.readline().split()]]
                f.readline()  # trailing separator line
    return fc2
def parse_triplets(filename):
    """Read triplet indices and weights from a "triplets-*.dat" file.

    Each data line holds three grid indices followed by a weight; lines
    starting with '#' are ignored.  Returns (triplets, weights) arrays.
    Fixes of the original: the file is now closed (context manager) and
    blank lines no longer raise IndexError from ``line.strip()[0]``.
    """
    triplets = []
    weights = []
    with open(filename) as f:
        for line in f:
            stripped = line.strip()
            if not stripped or stripped.startswith("#"):
                continue
            values = [int(x) for x in stripped.split()]
            triplets.append(values[:3])
            weights.append(values[3])
    return np.array(triplets), np.array(weights)
def parse_grid_address(filename):
    """Read grid addresses from a "grid_address-*.dat" file.

    Columns 2-4 of each data line are the address; '#' lines are
    ignored.  Fixes of the original: the file is now closed (context
    manager) and blank lines no longer raise IndexError.
    """
    grid_address = []
    with open(filename, 'r') as f:
        for line in f:
            stripped = line.strip()
            if not stripped or stripped.startswith("#"):
                continue
            values = [int(x) for x in stripped.split()]
            grid_address.append(values[1:4])
    return np.array(grid_address)
def _get_filename_suffix(mesh,
mesh_divisors=None,
grid_point=None,
band_indices=None,
sigma=None,
sigma_cutoff=None,
filename=None):
suffix = "-m%d%d%d" % tuple(mesh)
if mesh_divisors is not None:
if (np.array(mesh_divisors, dtype=int) != 1).any():
suffix += "-d%d%d%d" % tuple(mesh_divisors)
if grid_point is not None:
suffix += ("-g%d" % grid_point)
if band_indices is not None:
suffix += "-"
for bi in band_indices:
suffix += "b%d" % (bi + 1)
if sigma is not None:
suffix += "-s" + _del_zeros(sigma)
if sigma_cutoff is not None:
sigma_cutoff_str = _del_zeros(sigma_cutoff)
suffix += "-sd" + sigma_cutoff_str
if filename is not None:
suffix += "." + filename
return suffix
def _del_zeros(val):
return ("%f" % val).rstrip('0').rstrip('\.')
def _parse_yaml(file_yaml):
    """Load a YAML file, preferring the fast C loader when available.

    NOTE(review): this uses the full (unsafe) yaml Loader, which can
    construct arbitrary Python objects — only call it on trusted,
    phono3py-generated files, never on untrusted input.
    """
    import yaml
    try:
        from yaml import CLoader as Loader
        from yaml import CDumper as Dumper
    except ImportError:
        from yaml import Loader, Dumper
    with open(file_yaml) as f:
        string = f.read()
    data = yaml.load(string, Loader=Loader)
    return data
def _parse_force_lines(forcefile, num_atom):
forces = []
for line in forcefile:
if line.strip() == '':
continue
if line.strip()[0] == '#':
continue
forces.append([float(x) for x in line.strip().split()])
if len(forces) == num_atom:
break
if not len(forces) == num_atom:
return None
else:
return np.array(forces)
def _parse_force_constants_lines(fcthird_file, num_atom):
fc2 = []
for line in fcthird_file:
if line.strip() == '':
continue
if line.strip()[0] == '#':
continue
fc2.append([float(x) for x in line.strip().split()])
if len(fc2) == num_atom ** 2 * 3:
break
if not len(fc2) == num_atom ** 2 * 3:
return None
else:
return np.array(fc2).reshape(num_atom, num_atom, 3, 3)
|
atztogo/phono3py | phono3py/file_IO.py | write_unitary_matrix_to_hdf5 | python | def write_unitary_matrix_to_hdf5(temperature,
mesh,
unitary_matrix=None,
sigma=None,
sigma_cutoff=None,
solver=None,
filename=None,
verbose=False):
suffix = _get_filename_suffix(mesh,
sigma=sigma,
sigma_cutoff=sigma_cutoff,
filename=filename)
hdf5_filename = "unitary" + suffix + ".hdf5"
with h5py.File(hdf5_filename, 'w') as w:
w.create_dataset('temperature', data=temperature)
if unitary_matrix is not None:
w.create_dataset('unitary_matrix', data=unitary_matrix)
if solver is not None:
w.create_dataset('solver', data=solver)
if verbose:
if len(temperature) > 1:
text = "Unitary matrices "
else:
text = "Unitary matrix "
if sigma is not None:
text += "at sigma %s " % _del_zeros(sigma)
if sigma_cutoff is not None:
text += "(%4.2f SD) " % sigma_cutoff
if len(temperature) > 1:
text += "were written into "
else:
text += "was written into "
if sigma is not None:
text += "\n"
text += "\"%s\"." % hdf5_filename
print(text) | Write eigenvectors of collision matrices at temperatures.
Depending on the choice of the solver, eigenvectors are sotred in
either column-wise or row-wise. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/file_IO.py#L546-L589 | [
"def _get_filename_suffix(mesh,\n mesh_divisors=None,\n grid_point=None,\n band_indices=None,\n sigma=None,\n sigma_cutoff=None,\n filename=None):\n suffix = \"-m%d%d%d\" % tuple(mesh)\n if mesh_divisors is not None:\n if (np.array(mesh_divisors, dtype=int) != 1).any():\n suffix += \"-d%d%d%d\" % tuple(mesh_divisors)\n if grid_point is not None:\n suffix += (\"-g%d\" % grid_point)\n if band_indices is not None:\n suffix += \"-\"\n for bi in band_indices:\n suffix += \"b%d\" % (bi + 1)\n if sigma is not None:\n suffix += \"-s\" + _del_zeros(sigma)\n if sigma_cutoff is not None:\n sigma_cutoff_str = _del_zeros(sigma_cutoff)\n suffix += \"-sd\" + sigma_cutoff_str\n if filename is not None:\n suffix += \".\" + filename\n\n return suffix\n",
"def _del_zeros(val):\n return (\"%f\" % val).rstrip('0').rstrip('\\.')\n"
] | import os
import numpy as np
import h5py
from phonopy.file_IO import (write_force_constants_to_hdf5,
check_force_constants_indices,
get_cell_from_disp_yaml)
from phonopy.cui.load_helper import read_force_constants_from_hdf5
def write_cell_yaml(w, supercell):
    """Append the cell lattice and atoms of *supercell* to stream *w* in YAML.

    NOTE(review): *supercell* is assumed to provide get_cell(),
    get_chemical_symbols() and get_scaled_positions() (phonopy Atoms
    API) — confirm against callers.
    """
    w.write("lattice:\n")
    for axis in supercell.get_cell():
        w.write("- [ %20.15f,%20.15f,%20.15f ]\n" % tuple(axis))
    symbols = supercell.get_chemical_symbols()
    positions = supercell.get_scaled_positions()
    w.write("atoms:\n")
    for i, (s, v) in enumerate(zip(symbols, positions)):
        w.write("- symbol: %-2s # %d\n" % (s, i+1))
        w.write("  position: [ %18.14f,%18.14f,%18.14f ]\n" % tuple(v))
def write_disp_fc3_yaml(dataset, supercell, filename='disp_fc3.yaml'):
    """Write the fc3 displacement dataset and supercell to disp_fc3.yaml.

    Returns (total number of displacements, number of displacement
    supercell files to be created).  Second-atom displacements excluded
    by a cutoff ('included' False) are listed but counted as not
    producing a file.
    """
    w = open(filename, 'w')
    w.write("natom: %d\n" % dataset['natom'])

    num_first = len(dataset['first_atoms'])
    w.write("num_first_displacements: %d\n" % num_first)
    if 'cutoff_distance' in dataset:
        w.write("cutoff_distance: %f\n" % dataset['cutoff_distance'])

    # Count second displacements and how many supercell files they need.
    num_second = 0
    num_disp_files = 0
    for d1 in dataset['first_atoms']:
        num_disp_files += 1
        num_second += len(d1['second_atoms'])
        for d2 in d1['second_atoms']:
            if 'included' in d2:
                if d2['included']:
                    num_disp_files += 1
            else:
                num_disp_files += 1

    w.write("num_second_displacements: %d\n" % num_second)
    w.write("num_displacements_created: %d\n" % num_disp_files)

    w.write("first_atoms:\n")
    count1 = 1                # running index of first-displacement files
    count2 = num_first + 1    # running index of second-displacement files
    for disp1 in dataset['first_atoms']:
        disp_cart1 = disp1['displacement']
        w.write("- number: %5d\n" % (disp1['number'] + 1))
        w.write("  displacement:\n")
        w.write("    [%20.16f,%20.16f,%20.16f ] # %05d\n" %
                (disp_cart1[0], disp_cart1[1], disp_cart1[2], count1))
        w.write("  second_atoms:\n")
        count1 += 1

        # Entries for the same second atom are grouped; the header
        # (number/distance/included) is emitted once per atom change.
        included = None
        atom2 = -1
        for disp2 in disp1['second_atoms']:
            if atom2 != disp2['number']:
                atom2 = disp2['number']
                if 'included' in disp2:
                    included = disp2['included']
                pair_distance = disp2['pair_distance']
                w.write("  - number: %5d\n" % (atom2 + 1))
                w.write("    distance: %f\n" % pair_distance)
                if included is not None:
                    if included:
                        w.write("    included: %s\n" % "true")
                    else:
                        w.write("    included: %s\n" % "false")
                w.write("    displacements:\n")

            disp_cart2 = disp2['displacement']
            w.write("    - [%20.16f,%20.16f,%20.16f ] # %05d\n" %
                    (disp_cart2[0], disp_cart2[1], disp_cart2[2], count2))
            count2 += 1

    write_cell_yaml(w, supercell)
    w.close()

    return num_first + num_second, num_disp_files
def write_disp_fc2_yaml(dataset, supercell, filename='disp_fc2.yaml'):
    """Write the fc2 displacement dataset (and optionally the supercell)
    to disp_fc2.yaml.  Returns the number of first displacements."""
    w = open(filename, 'w')
    w.write("natom: %d\n" % dataset['natom'])

    num_first = len(dataset['first_atoms'])
    w.write("num_first_displacements: %d\n" % num_first)
    w.write("first_atoms:\n")
    for i, disp1 in enumerate(dataset['first_atoms']):
        disp_cart1 = disp1['displacement']
        w.write("- number: %5d\n" % (disp1['number'] + 1))
        w.write("  displacement:\n")
        w.write("    [%20.16f,%20.16f,%20.16f ] # %05d\n" %
                (disp_cart1[0], disp_cart1[1], disp_cart1[2], i + 1))

    if supercell is not None:
        write_cell_yaml(w, supercell)

    w.close()

    return num_first
def write_FORCES_FC2(disp_dataset,
                     forces_fc2=None,
                     fp=None,
                     filename="FORCES_FC2"):
    """Write the FORCES_FC2 text file.

    When *fp* is given, output is written to that stream and the caller
    keeps ownership of it; otherwise *filename* is created here and —
    unlike the original, which leaked the handle — closed on return.
    Forces come from *forces_fc2* when given, else from each entry's
    'forces' key.
    """
    if fp is None:
        with open(filename, 'w') as w:
            _write_FORCES_FC2_blocks(w, disp_dataset, forces_fc2)
    else:
        _write_FORCES_FC2_blocks(fp, disp_dataset, forces_fc2)


def _write_FORCES_FC2_blocks(w, disp_dataset, forces_fc2):
    """Write one '# File:' block per displaced supercell to stream *w*."""
    for i, disp1 in enumerate(disp_dataset['first_atoms']):
        w.write("# File: %-5d\n" % (i + 1))
        w.write("# %-5d " % (disp1['number'] + 1))
        w.write("%20.16f %20.16f %20.16f\n" % tuple(disp1['displacement']))
        if forces_fc2 is None:
            force_set = disp1['forces']
        else:
            force_set = forces_fc2[i]
        for forces in force_set:
            w.write("%15.10f %15.10f %15.10f\n" % tuple(forces))
def write_FORCES_FC3(disp_dataset, forces_fc3, fp=None, filename="FORCES_FC3"):
    """Write the FORCES_FC3 text file (fc2 blocks followed by pair blocks).

    When *fp* is given, output goes to that stream (caller keeps
    ownership); otherwise *filename* is created here and — unlike the
    original, which leaked the handle — closed on return.
    """
    if fp is None:
        with open(filename, 'w') as w:
            _write_FORCES_FC3_blocks(w, disp_dataset, forces_fc3)
    else:
        _write_FORCES_FC3_blocks(fp, disp_dataset, forces_fc3)


def _write_FORCES_FC3_blocks(w, disp_dataset, forces_fc3):
    """Write the first-displacement blocks then one block per atom pair."""
    natom = disp_dataset['natom']
    num_disp1 = len(disp_dataset['first_atoms'])
    count = num_disp1
    file_count = num_disp1
    write_FORCES_FC2(disp_dataset, forces_fc2=forces_fc3, fp=w)
    for disp1 in disp_dataset['first_atoms']:
        atom1 = disp1['number']
        for disp2 in disp1['second_atoms']:
            atom2 = disp2['number']
            w.write("# File: %-5d\n" % (count + 1))
            w.write("# %-5d " % (atom1 + 1))
            w.write("%20.16f %20.16f %20.16f\n" % tuple(disp1['displacement']))
            w.write("# %-5d " % (atom2 + 1))
            w.write("%20.16f %20.16f %20.16f\n" % tuple(disp2['displacement']))

            # Pairs excluded by the cutoff ('included' False) get zero
            # forces so the block layout stays aligned with disp_fc3.yaml.
            included = disp2.get('included', True)
            if included:
                for forces in forces_fc3[file_count]:
                    w.write("%15.10f %15.10f %15.10f\n" % tuple(forces))
                file_count += 1
            else:
                for _ in range(natom):
                    w.write("%15.10f %15.10f %15.10f\n" % (0, 0, 0))
            count += 1
def write_fc3_dat(force_constants_third, filename='fc3.dat'):
    """Write fc3 as plain text: one 3x3x3 tensor per atom triplet.

    The original never closed the file; it is now written under a
    context manager.
    """
    with open(filename, 'w') as w:
        for i in range(force_constants_third.shape[0]):
            for j in range(force_constants_third.shape[1]):
                for k in range(force_constants_third.shape[2]):
                    tensor3 = force_constants_third[i, j, k]
                    # Header carries 1-based indices and the tensor's
                    # absolute sum as a quick magnitude indicator.
                    w.write(" %d - %d - %d (%f)\n" % (i + 1, j + 1, k + 1,
                                                      np.abs(tensor3).sum()))
                    for tensor2 in tensor3:
                        for vec in tensor2:
                            w.write("%20.14f %20.14f %20.14f\n" % tuple(vec))
                        w.write("\n")
def write_fc3_to_hdf5(fc3,
                      filename='fc3.hdf5',
                      p2s_map=None,
                      compression=None):
    """Write third-order force constants in hdf5 format.

    Parameters
    ----------
    fc3 : ndarray
        Third-order force constants
        shape=(n_satom, n_satom, n_satom, 3, 3, 3) or
        (n_patom, n_satom, n_satom, 3, 3, 3), dtype=double
    filename : str
        Filename to be used.
    p2s_map : ndarray, optional
        Primitive atom indices in supercell index system
        shape=(n_patom,), dtype=intc
    compression : str or int, optional
        h5py's lossless compression filters (e.g., "gzip", "lzf").
        See the detail at docstring of h5py.Group.create_dataset. Default is
        None.

    """
    with h5py.File(filename, 'w') as w:
        w.create_dataset('fc3', data=fc3, compression=compression)
        if p2s_map is not None:
            w.create_dataset('p2s_map', data=p2s_map)
def read_fc3_from_hdf5(filename='fc3.hdf5', p2s_map=None):
    """Read third-order force constants from an hdf5 file.

    When the file stores 'p2s_map', its consistency with *p2s_map* is
    checked.  Raises TypeError unless the array was read as C-contiguous
    double.  (The unreachable trailing ``return None`` of the original
    was removed.)
    """
    with h5py.File(filename, 'r') as f:
        fc3 = f['fc3'][:]
        if 'p2s_map' in f:
            p2s_map_in_file = f['p2s_map'][:]
            check_force_constants_indices(fc3.shape[:2],
                                          p2s_map_in_file,
                                          p2s_map,
                                          filename)
        if fc3.dtype == np.double and fc3.flags.c_contiguous:
            return fc3
        else:
            msg = ("%s has to be read by h5py as numpy ndarray of "
                   "dtype='double' and c_contiguous." % filename)
            raise TypeError(msg)
def write_fc2_dat(force_constants, filename='fc2.dat'):
    """Write fc2 as plain text: one 3x3 block per atom pair.

    The original never closed the file; it is now written under a
    context manager.
    """
    with open(filename, 'w') as w:
        for i, fcs in enumerate(force_constants):
            for j, fcb in enumerate(fcs):
                w.write(" %d - %d\n" % (i+1, j+1))
                for vec in fcb:
                    w.write("%20.14f %20.14f %20.14f\n" % tuple(vec))
                w.write("\n")
def write_fc2_to_hdf5(force_constants,
                      filename='fc2.hdf5',
                      p2s_map=None,
                      physical_unit=None,
                      compression=None):
    """Write second-order force constants to hdf5.

    Thin wrapper delegating to phonopy's write_force_constants_to_hdf5.
    """
    write_force_constants_to_hdf5(force_constants,
                                  filename=filename,
                                  p2s_map=p2s_map,
                                  physical_unit=physical_unit,
                                  compression=compression)
def read_fc2_from_hdf5(filename='fc2.hdf5',
                      p2s_map=None):
    """Read second-order force constants from hdf5.

    Thin wrapper delegating to phonopy's read_force_constants_from_hdf5
    with the 'vasp' calculator convention.
    """
    return read_force_constants_from_hdf5(filename=filename,
                                          p2s_map=p2s_map,
                                          calculator='vasp')
def write_triplets(triplets,
                   weights,
                   mesh,
                   grid_address,
                   grid_point=None,
                   filename=None):
    """Write q-point triplets and their weights to "triplets-*.dat".

    Each line holds the weight followed by the three grid addresses of
    the triplet (looked up via grid_address[g3]).
    """
    triplets_filename = "triplets"
    suffix = "-m%d%d%d" % tuple(mesh)
    if grid_point is not None:
        suffix += ("-g%d" % grid_point)
    if filename is not None:
        suffix += "." + filename
    suffix += ".dat"
    triplets_filename += suffix
    w = open(triplets_filename, 'w')
    for weight, g3 in zip(weights, triplets):
        w.write("%4d    " % weight)
        for q3 in grid_address[g3]:
            w.write("%4d %4d %4d   " % tuple(q3))
        w.write("\n")
    w.close()
def write_grid_address(grid_address, mesh, filename=None):
    """Write grid addresses (BZ and folded-to-mesh) to "grid_address-*.dat".

    Returns the name of the file written.  The original never closed the
    file; it is now written under a context manager.
    """
    grid_address_filename = "grid_address"
    suffix = "-m%d%d%d" % tuple(mesh)
    if filename is not None:
        suffix += "." + filename
    suffix += ".dat"
    grid_address_filename += suffix

    with open(grid_address_filename, 'w') as w:
        w.write("# Grid addresses for %dx%dx%d mesh\n" % tuple(mesh))
        w.write("#%9s    %8s %8s %8s     %8s %8s %8s\n" %
                ("index", "a", "b", "c",
                 ("a%%%d" % mesh[0]), ("b%%%d" % mesh[1]),
                 ("c%%%d" % mesh[2])))
        for i, bz_q in enumerate(grid_address):
            # Entries beyond the regular mesh belong to the BZ boundary
            # duplicates; mark the transition with a separator line.
            if i == np.prod(mesh):
                w.write("#" + "-" * 78 + "\n")
            q = bz_q % mesh
            w.write("%10d    %8d %8d %8d    " % (i, bz_q[0], bz_q[1], bz_q[2]))
            w.write("%8d %8d %8d\n" % tuple(q))

    return grid_address_filename
def write_grid_address_to_hdf5(grid_address,
                               mesh,
                               grid_mapping_table,
                               compression=None,
                               filename=None):
    """Write grid addresses and the ir-grid mapping table to
    "grid_address-*.hdf5".  Returns the name of the file written.
    (The unreachable trailing ``return None`` of the original was
    removed.)
    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "grid_address" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('mesh', data=mesh)
        w.create_dataset('grid_address', data=grid_address,
                         compression=compression)
        w.create_dataset('grid_mapping_table', data=grid_mapping_table,
                         compression=compression)
        return full_filename
def write_freq_shifts_to_hdf5(freq_shifts, filename='freq_shifts.hdf5'):
    """Store frequency shifts under the 'shift' key of an hdf5 file."""
    with h5py.File(filename, 'w') as hdf5_file:
        hdf5_file.create_dataset('shift', data=freq_shifts)
def write_imag_self_energy_at_grid_point(gp,
                                         band_indices,
                                         mesh,
                                         frequencies,
                                         gammas,
                                         sigma=None,
                                         temperature=None,
                                         scattering_event_class=None,
                                         filename=None,
                                         is_mesh_symmetry=True):
    """Write (frequency, gamma) pairs for one grid point to "gammas-*.dat".

    Fixes of the original: the malformed rstrip escape set (backslash
    plus dot) is replaced by a plain '.', which strips the same
    characters without the invalid-escape warning, and the output file
    is now closed via a context manager.
    """
    gammas_filename = "gammas"
    gammas_filename += "-m%d%d%d-g%d-" % (mesh[0],
                                          mesh[1],
                                          mesh[2],
                                          gp)
    if sigma is not None:
        gammas_filename += ("s%f" % sigma).rstrip('0').rstrip('.') + "-"
    if temperature is not None:
        gammas_filename += ("t%f" % temperature).rstrip('0').rstrip('.') + "-"
    for i in band_indices:
        gammas_filename += "b%d" % (i + 1)
    if scattering_event_class is not None:
        gammas_filename += "-c%d" % scattering_event_class
    if filename is not None:
        gammas_filename += ".%s" % filename
    elif not is_mesh_symmetry:
        gammas_filename += ".nosym"
    gammas_filename += ".dat"

    with open(gammas_filename, 'w') as w:
        for freq, g in zip(frequencies, gammas):
            w.write("%15.7f %20.15e\n" % (freq, g))
def write_joint_dos(gp,
                    mesh,
                    frequencies,
                    jdos,
                    sigma=None,
                    temperatures=None,
                    filename=None,
                    is_mesh_symmetry=True):
    """Write joint DOS to text file(s).

    Without temperatures one file is written and its name returned.  With
    temperatures one file per temperature is written and the name of the
    last file is returned.  (Bug fix: the loop previously returned after
    the first temperature, so only one file was ever written.)
    """
    if temperatures is None:
        return _write_joint_dos_at_t(gp,
                                     mesh,
                                     frequencies,
                                     jdos,
                                     sigma=sigma,
                                     temperature=None,
                                     filename=filename,
                                     is_mesh_symmetry=is_mesh_symmetry)
    else:
        jdos_filename = None
        for jdos_at_t, t in zip(jdos, temperatures):
            jdos_filename = _write_joint_dos_at_t(
                gp,
                mesh,
                frequencies,
                jdos_at_t,
                sigma=sigma,
                temperature=t,
                filename=filename,
                is_mesh_symmetry=is_mesh_symmetry)
        return jdos_filename
def _write_joint_dos_at_t(grid_point,
                          mesh,
                          frequencies,
                          jdos,
                          sigma=None,
                          temperature=None,
                          filename=None,
                          is_mesh_symmetry=True):
    """Write one joint DOS table (frequency + jdos columns) to a text file.

    Returns the filename written.
    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  filename=filename)
    jdos_filename = "jdos%s" % suffix
    if temperature is not None:
        # rstrip('.') — was rstrip('\.'), an invalid escape sequence that
        # also stripped backslashes ('%f' output never contains one).
        jdos_filename += ("-t%f" % temperature).rstrip('0').rstrip('.')
    if not is_mesh_symmetry:
        jdos_filename += ".nosym"
    if filename is not None:
        jdos_filename += ".%s" % filename
    jdos_filename += ".dat"
    with open(jdos_filename, 'w') as w:
        for omega, vals in zip(frequencies, jdos):
            w.write("%15.7f" % omega)
            w.write((" %20.15e" * len(vals)) % tuple(vals))
            w.write("\n")
    return jdos_filename
def write_linewidth_at_grid_point(gp,
                                  band_indices,
                                  temperatures,
                                  gamma,
                                  mesh,
                                  sigma=None,
                                  filename=None,
                                  is_mesh_symmetry=True):
    """Write temperature vs linewidth (2*gamma averaged over bands).

    `gamma` is a 2D array with shape (temperatures, bands).
    """
    lw_filename = "linewidth"
    lw_filename += "-m%d%d%d-g%d-" % (mesh[0], mesh[1], mesh[2], gp)
    if sigma is not None:
        # Also strip the trailing '.' so e.g. "s1.-" becomes "s1-",
        # consistent with _del_zeros used elsewhere in this module.
        lw_filename += ("s%f" % sigma).rstrip('0').rstrip('.') + "-"
    for i in band_indices:
        lw_filename += "b%d" % (i + 1)
    if filename is not None:
        lw_filename += ".%s" % filename
    elif not is_mesh_symmetry:
        lw_filename += ".nosym"
    lw_filename += ".dat"
    # linewidth = 2 * gamma, averaged over the band axis.
    with open(lw_filename, 'w') as w:
        for v, t in zip(gamma.sum(axis=1) * 2 / gamma.shape[1], temperatures):
            w.write("%15.7f %20.15e\n" % (t, v))
def write_frequency_shift(gp,
                          band_indices,
                          temperatures,
                          delta,
                          mesh,
                          epsilon=None,
                          filename=None,
                          is_mesh_symmetry=True):
    """Write temperature vs frequency shift (delta averaged over bands).

    `delta` is a 2D array with shape (temperatures, bands).
    """
    fst_filename = "frequency_shift"
    fst_filename += "-m%d%d%d-g%d-" % (mesh[0], mesh[1], mesh[2], gp)
    if epsilon is not None:
        if epsilon > 1e-5:
            # Also strip the trailing '.' for consistency with _del_zeros.
            fst_filename += ("s%f" % epsilon).rstrip('0').rstrip('.') + "-"
        else:
            # Very small epsilon: scientific notation avoids "s0.000000".
            fst_filename += ("s%.3e" % epsilon) + "-"
    for i in band_indices:
        fst_filename += "b%d" % (i + 1)
    if filename is not None:
        fst_filename += ".%s" % filename
    elif not is_mesh_symmetry:
        fst_filename += ".nosym"
    fst_filename += ".dat"
    with open(fst_filename, 'w') as w:
        for v, t in zip(delta.sum(axis=1) / delta.shape[1], temperatures):
            w.write("%15.7f %20.15e\n" % (t, v))
def write_collision_to_hdf5(temperature,
                            mesh,
                            gamma=None,
                            gamma_isotope=None,
                            collision_matrix=None,
                            grid_point=None,
                            band_index=None,
                            sigma=None,
                            sigma_cutoff=None,
                            filename=None):
    """Write collision matrix and related quantities to an hdf5 file.

    Only the datasets whose arguments are not None are stored.  Returns
    the full filename written.
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "collision" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        if gamma is not None:
            w.create_dataset('gamma', data=gamma)
        if gamma_isotope is not None:
            w.create_dataset('gamma_isotope', data=gamma_isotope)
        if collision_matrix is not None:
            w.create_dataset('collision_matrix', data=collision_matrix)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based, matching the user-facing band numbering.
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)

        text = "Collisions "
        if grid_point is not None:
            # Typo fix: "adress" -> "address".
            text += "at grid address %d " % grid_point
        if sigma is not None:
            if grid_point is not None:
                text += "and "
            else:
                text += "at "
            text += "sigma %s " % _del_zeros(sigma)
        text += "were written into "
        if sigma is not None:
            text += "\n"
        text += "\"%s\"." % ("collision" + suffix + ".hdf5")
        print(text)

    return full_filename
def write_full_collision_matrix(collision_matrix, filename='fcm.hdf5'):
    """Dump the full collision matrix into a single hdf5 dataset."""
    with h5py.File(filename, 'w') as out:
        out.create_dataset('collision_matrix', data=collision_matrix)
def write_collision_eigenvalues_to_hdf5(temperatures,
                                        mesh,
                                        collision_eigenvalues,
                                        sigma=None,
                                        sigma_cutoff=None,
                                        filename=None,
                                        verbose=True):
    """Write eigenvalues of the collision matrix to "coleigs-*.hdf5".

    The redundant explicit ``w.close()`` inside the ``with`` block was
    removed; closing is handled by the context manager.
    """
    suffix = _get_filename_suffix(mesh,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    with h5py.File("coleigs" + suffix + ".hdf5", 'w') as w:
        w.create_dataset('temperature', data=temperatures)
        w.create_dataset('collision_eigenvalues', data=collision_eigenvalues)
    if verbose:
        text = "Eigenvalues of collision matrix "
        if sigma is not None:
            text += "with sigma %s\n" % sigma
        text += "were written into "
        text += "\"%s\"" % ("coleigs" + suffix + ".hdf5")
        print(text)
def write_kappa_to_hdf5(temperature,
                        mesh,
                        frequency=None,
                        group_velocity=None,
                        gv_by_gv=None,
                        mean_free_path=None,
                        heat_capacity=None,
                        kappa=None,
                        mode_kappa=None,
                        kappa_RTA=None, # RTA calculated in LBTE
                        mode_kappa_RTA=None, # RTA calculated in LBTE
                        f_vector=None,
                        gamma=None,
                        gamma_isotope=None,
                        gamma_N=None,
                        gamma_U=None,
                        averaged_pp_interaction=None,
                        qpoint=None,
                        weight=None,
                        mesh_divisors=None,
                        grid_point=None,
                        band_index=None,
                        sigma=None,
                        sigma_cutoff=None,
                        kappa_unit_conversion=None,
                        compression=None,
                        filename=None,
                        verbose=True):
    """Write thermal conductivity and related quantities to "kappa-*.hdf5".

    Only the datasets whose arguments are not None are stored.  The
    filename suffix encodes mesh, optional mesh divisors, grid point,
    band index, sigma and sigma cutoff.  Returns the full filename; when
    ``verbose`` is True a summary of what was written is printed.
    """
    # The suffix helper expects a list of band indices; wrap the single one.
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  mesh_divisors=mesh_divisors,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "kappa" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        w.create_dataset('mesh', data=mesh)
        if frequency is not None:
            w.create_dataset('frequency', data=frequency,
                             compression=compression)
        if group_velocity is not None:
            w.create_dataset('group_velocity', data=group_velocity,
                             compression=compression)
        if gv_by_gv is not None:
            w.create_dataset('gv_by_gv', data=gv_by_gv)
        if mean_free_path is not None:
            w.create_dataset('mean_free_path', data=mean_free_path,
                             compression=compression)
        if heat_capacity is not None:
            w.create_dataset('heat_capacity', data=heat_capacity,
                             compression=compression)
        if kappa is not None:
            w.create_dataset('kappa', data=kappa)
        if mode_kappa is not None:
            w.create_dataset('mode_kappa', data=mode_kappa,
                             compression=compression)
        if kappa_RTA is not None:
            w.create_dataset('kappa_RTA', data=kappa_RTA)
        if mode_kappa_RTA is not None:
            w.create_dataset('mode_kappa_RTA', data=mode_kappa_RTA,
                             compression=compression)
        if f_vector is not None:
            w.create_dataset('f_vector', data=f_vector,
                             compression=compression)
        if gamma is not None:
            w.create_dataset('gamma', data=gamma,
                             compression=compression)
        if gamma_isotope is not None:
            w.create_dataset('gamma_isotope', data=gamma_isotope,
                             compression=compression)
        if gamma_N is not None:
            w.create_dataset('gamma_N', data=gamma_N,
                             compression=compression)
        if gamma_U is not None:
            w.create_dataset('gamma_U', data=gamma_U,
                             compression=compression)
        if averaged_pp_interaction is not None:
            w.create_dataset('ave_pp', data=averaged_pp_interaction,
                             compression=compression)
        if qpoint is not None:
            w.create_dataset('qpoint', data=qpoint,
                             compression=compression)
        if weight is not None:
            w.create_dataset('weight', data=weight,
                             compression=compression)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based, matching the user-facing band numbering.
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
        if kappa_unit_conversion is not None:
            w.create_dataset('kappa_unit_conversion',
                             data=kappa_unit_conversion)

        if verbose:
            # Build the human-readable summary of what was written.
            text = ""
            if kappa is not None:
                text += "Thermal conductivity and related properties "
            else:
                text += "Thermal conductivity related properties "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
                if band_index is not None:
                    text += "and band_index-%d\n" % (band_index + 1)
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                if band_index is None:
                    text += "\n"
            text += "\"%s\"." % full_filename
            print(text)

        return full_filename
def read_gamma_from_hdf5(mesh,
                         mesh_divisors=None,
                         grid_point=None,
                         band_index=None,
                         sigma=None,
                         sigma_cutoff=None,
                         filename=None,
                         verbose=True):
    """Read gamma and related datasets back from a "kappa-*.hdf5" file.

    Returns a dict with key 'gamma' (always) plus any of 'gamma_isotope',
    'ave_pp', 'gamma_N', 'gamma_U' found in the file, or None when the
    file does not exist.
    """
    # The suffix helper expects a list of band indices; wrap the single one.
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  mesh_divisors=mesh_divisors,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "kappa" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    read_data = {}

    with h5py.File(full_filename, 'r') as f:
        read_data['gamma'] = f['gamma'][:]
        for key in ('gamma_isotope',
                    'ave_pp',
                    'gamma_N',
                    'gamma_U'):
            if key in f.keys():
                # Scalar datasets have empty shape and need [()] access.
                if len(f[key].shape) > 0:
                    read_data[key] = f[key][:]
                else:
                    read_data[key] = f[key][()]
        if verbose:
            print("Read data from %s." % full_filename)

    return read_data
def read_collision_from_hdf5(mesh,
                             indices=None,
                             grid_point=None,
                             band_index=None,
                             sigma=None,
                             sigma_cutoff=None,
                             filename=None,
                             verbose=True):
    """Read collision matrix, gamma and temperatures from "collision-*.hdf5".

    `indices` is either the string 'all' or a sequence of temperature
    indices to select.  Returns (collision_matrix, gamma, temperatures)
    or None when the file does not exist.  (An unreachable trailing
    ``return None`` was removed.)
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "collision" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    with h5py.File(full_filename, 'r') as f:
        if indices == 'all':
            colmat_shape = (1,) + f['collision_matrix'].shape
            collision_matrix = np.zeros(colmat_shape,
                                        dtype='double', order='C')
            gamma = np.array(f['gamma'][:], dtype='double', order='C')
            collision_matrix[0] = f['collision_matrix'][:]
            temperatures = np.array(f['temperature'][:], dtype='double')
        else:
            # Select only the requested temperature indices.
            colmat_shape = (1, len(indices)) + f['collision_matrix'].shape[1:]
            collision_matrix = np.zeros(colmat_shape, dtype='double')
            gamma = np.array(f['gamma'][indices], dtype='double', order='C')
            collision_matrix[0] = f['collision_matrix'][indices]
            temperatures = np.array(f['temperature'][indices], dtype='double')

        if verbose:
            text = "Collisions "
            if band_index is None:
                if grid_point is not None:
                    text += "at grid point %d " % grid_point
            else:
                if grid_point is not None:
                    text += ("at (grid point %d, band index %d) " %
                             (grid_point, band_index))
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % _del_zeros(sigma)
                if sigma_cutoff is not None:
                    text += "(%4.2f SD)" % sigma_cutoff
            if band_index is None and grid_point is not None:
                text += " were read from "
                text += "\n"
            else:
                text += "\n"
                text += "were read from "
            text += "\"%s\"." % full_filename
            print(text)

        return collision_matrix, gamma, temperatures
def write_pp_to_hdf5(mesh,
                     pp=None,
                     g_zero=None,
                     grid_point=None,
                     triplet=None,
                     weight=None,
                     triplet_map=None,
                     triplet_all=None,
                     sigma=None,
                     sigma_cutoff=None,
                     filename=None,
                     verbose=True,
                     check_consistency=False,
                     compression=None):
    """Write ph-ph interaction strength to "pp-*.hdf5".

    When ``g_zero`` is given, only the pp elements where g_zero == 0 are
    stored, together with a bit-packed representation of the g_zero mask,
    which greatly reduces file size.  ``check_consistency`` additionally
    stores the raw pp and g_zero arrays so read_pp_from_hdf5 can verify
    the packed data round-trips.  Returns the full filename.
    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "pp" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        if pp is not None:
            if g_zero is None:
                # No mask: store the full pp array and triplet metadata.
                w.create_dataset('pp', data=pp,
                                 compression=compression)
                if triplet is not None:
                    w.create_dataset('triplet', data=triplet,
                                     compression=compression)
                if weight is not None:
                    w.create_dataset('weight', data=weight,
                                     compression=compression)
                if triplet_map is not None:
                    w.create_dataset('triplet_map', data=triplet_map,
                                     compression=compression)
                if triplet_all is not None:
                    w.create_dataset('triplet_all', data=triplet_all,
                                     compression=compression)
            else:
                # Keep only pp values where the g_zero mask is 0, and pack
                # the mask itself 8 flags per byte with np.packbits.
                x = g_zero.ravel()
                nonzero_pp = np.array(pp.ravel()[x == 0], dtype='double')
                bytelen = len(x) // 8
                remlen = len(x) % 8
                y = x[:bytelen * 8].reshape(-1, 8)
                z = np.packbits(y)
                if remlen != 0:
                    # Leftover flags that do not fill a whole byte.
                    z_rem = np.packbits(x[bytelen * 8:])

                w.create_dataset('nonzero_pp', data=nonzero_pp,
                                 compression=compression)
                w.create_dataset('pp_shape', data=pp.shape,
                                 compression=compression)
                w.create_dataset('g_zero_bits', data=z,
                                 compression=compression)
                if remlen != 0:
                    w.create_dataset('g_zero_bits_reminder', data=z_rem)

                # This is only for the test and coupled with read_pp_from_hdf5.
                if check_consistency:
                    w.create_dataset('pp', data=pp,
                                     compression=compression)
                    w.create_dataset('g_zero', data=g_zero,
                                     compression=compression)

        if verbose:
            text = ""
            text += "Ph-ph interaction strength "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                text += "\n"
            text += "\"%s\"." % full_filename
            print(text)

        return full_filename
def read_pp_from_hdf5(mesh,
                      grid_point=None,
                      sigma=None,
                      sigma_cutoff=None,
                      filename=None,
                      verbose=True,
                      check_consistency=False):
    """Read ph-ph interaction strength from "pp-*.hdf5".

    Reconstructs the full pp array from the bit-packed g_zero mask when
    present (see write_pp_to_hdf5).  Returns (pp, g_zero) or None when
    the file does not exist.  (Fixed "interanction" typo in the
    consistency-check message; removed an unreachable trailing
    ``return None``.)
    """
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "pp" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    with h5py.File(full_filename, 'r') as f:
        if 'nonzero_pp' in f:
            nonzero_pp = f['nonzero_pp'][:]
            pp_shape = f['pp_shape'][:]
            z = f['g_zero_bits'][:]
            bytelen = np.prod(pp_shape) // 8
            remlen = 0
            if 'g_zero_bits_reminder' in f:
                z_rem = f['g_zero_bits_reminder'][:]
                remlen = np.prod(pp_shape) - bytelen * 8

            # Unpack the 8-flags-per-byte g_zero mask.
            bits = np.unpackbits(z)
            if not bits.flags['C_CONTIGUOUS']:
                bits = np.array(bits, dtype='uint8')

            g_zero = np.zeros(pp_shape, dtype='byte', order='C')
            b = g_zero.ravel()
            b[:(bytelen * 8)] = bits
            if remlen != 0:
                b[-remlen:] = np.unpackbits(z_rem)[:remlen]

            # Scatter the stored non-zero values back into the full array.
            pp = np.zeros(pp_shape, dtype='double', order='C')
            pp_ravel = pp.ravel()
            pp_ravel[g_zero.ravel() == 0] = nonzero_pp

            # check_consistency==True in write_pp_to_hdf5 required.
            if check_consistency and g_zero is not None:
                if verbose:
                    print("Checking consistency of ph-ph interaction "
                          "strength.")
                assert (g_zero == f['g_zero'][:]).all()
                assert np.allclose(pp, f['pp'][:])
        else:
            pp = np.zeros(f['pp'].shape, dtype='double', order='C')
            pp[:] = f['pp'][:]
            g_zero = None

        if verbose:
            print("Ph-ph interaction strength was read from \"%s\"." %
                  full_filename)

        return pp, g_zero
def write_gamma_detail_to_hdf5(temperature,
                               mesh,
                               gamma_detail=None,
                               grid_point=None,
                               triplet=None,
                               weight=None,
                               triplet_map=None,
                               triplet_all=None,
                               frequency_points=None,
                               band_index=None,
                               sigma=None,
                               sigma_cutoff=None,
                               compression=None,
                               filename=None,
                               verbose=True):
    """Write triplet-resolved gamma contributions to "gamma_detail-*.hdf5".

    Only the datasets whose arguments are not None are stored.  Returns
    the full filename.  (An unreachable trailing ``return None`` was
    removed.)
    """
    if band_index is None:
        band_indices = None
    else:
        band_indices = [band_index]
    suffix = _get_filename_suffix(mesh,
                                  grid_point=grid_point,
                                  band_indices=band_indices,
                                  sigma=sigma,
                                  sigma_cutoff=sigma_cutoff,
                                  filename=filename)
    full_filename = "gamma_detail" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('temperature', data=temperature)
        w.create_dataset('mesh', data=mesh)
        if gamma_detail is not None:
            w.create_dataset('gamma_detail', data=gamma_detail,
                             compression=compression)
        if triplet is not None:
            w.create_dataset('triplet', data=triplet,
                             compression=compression)
        if weight is not None:
            w.create_dataset('weight', data=weight,
                             compression=compression)
        if triplet_map is not None:
            w.create_dataset('triplet_map', data=triplet_map,
                             compression=compression)
        if triplet_all is not None:
            w.create_dataset('triplet_all', data=triplet_all,
                             compression=compression)
        if grid_point is not None:
            w.create_dataset('grid_point', data=grid_point)
        if band_index is not None:
            # Stored 1-based, matching the user-facing band numbering.
            w.create_dataset('band_index', data=(band_index + 1))
        if sigma is not None:
            w.create_dataset('sigma', data=sigma)
        if sigma_cutoff is not None:
            w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)
        if frequency_points is not None:
            w.create_dataset('frequency_point', data=frequency_points)

        if verbose:
            text = ""
            text += "Phonon triplets contributions to Gamma "
            if grid_point is not None:
                text += "at gp-%d " % grid_point
                if band_index is not None:
                    text += "and band_index-%d\n" % (band_index + 1)
            if sigma is not None:
                if grid_point is not None:
                    text += "and "
                else:
                    text += "at "
                text += "sigma %s" % sigma
                if sigma_cutoff is None:
                    text += "\n"
                else:
                    text += "(%4.2f SD)\n" % sigma_cutoff
                text += "were written into "
            else:
                text += "were written into "
                if band_index is None:
                    text += "\n"
            text += "\"%s\"." % full_filename
            print(text)

        return full_filename
def write_phonon_to_hdf5(frequency,
                         eigenvector,
                         grid_address,
                         mesh,
                         compression=None,
                         filename=None):
    """Write phonon frequencies/eigenvectors/grid addresses to hdf5.

    Returns the full filename.  (An unreachable trailing ``return None``
    was removed.)
    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "phonon" + suffix + ".hdf5"
    with h5py.File(full_filename, 'w') as w:
        w.create_dataset('mesh', data=mesh)
        w.create_dataset('grid_address', data=grid_address,
                         compression=compression)
        w.create_dataset('frequency', data=frequency,
                         compression=compression)
        w.create_dataset('eigenvector', data=eigenvector,
                         compression=compression)
    return full_filename
def read_phonon_from_hdf5(mesh,
                          filename=None,
                          verbose=True):
    """Read phonons back from a "phonon-*.hdf5" file.

    Returns (frequencies, eigenvectors, grid_address) or None when the
    file does not exist.  Raises AssertionError when the mesh stored in
    the file disagrees with `mesh`.  (An unreachable trailing
    ``return None`` was removed.)
    """
    suffix = _get_filename_suffix(mesh, filename=filename)
    full_filename = "phonon" + suffix + ".hdf5"
    if not os.path.exists(full_filename):
        if verbose:
            print("%s not found." % full_filename)
        return None

    with h5py.File(full_filename, 'r') as f:
        frequencies = np.array(f['frequency'][:], dtype='double', order='C')
        # Complex dtype matching the float precision of the frequencies.
        itemsize = frequencies.itemsize
        eigenvectors = np.array(f['eigenvector'][:],
                                dtype=("c%d" % (itemsize * 2)), order='C')
        mesh_in_file = np.array(f['mesh'][:], dtype='intc')
        grid_address = np.array(f['grid_address'][:], dtype='intc', order='C')

        assert (mesh_in_file == mesh).all(), "Mesh numbers are inconsistent."

        if verbose:
            print("Phonons are read from \"%s\"." % full_filename)

        return frequencies, eigenvectors, grid_address
def write_ir_grid_points(mesh,
                         mesh_divs,
                         grid_points,
                         coarse_grid_weights,
                         grid_address,
                         primitive_lattice):
    """Write irreducible grid points and weights to "ir_grid_points.yaml".

    The file handle is now managed by a ``with`` statement; it was
    previously never closed.
    """
    with open("ir_grid_points.yaml", 'w') as w:
        w.write("mesh: [ %d, %d, %d ]\n" % tuple(mesh))
        if mesh_divs is not None:
            w.write("mesh_divisors: [ %d, %d, %d ]\n" % tuple(mesh_divs))
        w.write("reciprocal_lattice:\n")
        for vec, axis in zip(primitive_lattice.T, ('a*', 'b*', 'c*')):
            w.write("- [ %12.8f, %12.8f, %12.8f ] # %2s\n"
                    % (tuple(vec) + (axis,)))
        w.write("num_reduced_ir_grid_points: %d\n" % len(grid_points))
        w.write("ir_grid_points:  # [address, weight]\n")
        for g, weight in zip(grid_points, coarse_grid_weights):
            w.write("- grid_point: %d\n" % g)
            w.write("  weight: %d\n" % weight)
            w.write("  grid_address: [ %12d, %12d, %12d ]\n" %
                    tuple(grid_address[g]))
            w.write("  q-point:      [ %12.7f, %12.7f, %12.7f ]\n" %
                    tuple(grid_address[g].astype('double') / mesh))
def parse_disp_fc2_yaml(filename="disp_fc2.yaml", return_cell=False):
    """Read a disp_fc2.yaml file.

    Atom numbers are converted from the 1-based numbers in the file to
    0-based indices.  When `return_cell` is True, the supercell described
    in the file is returned as well.
    """
    dataset = _parse_yaml(filename)
    first_atoms = []
    for entry in dataset['first_atoms']:
        entry['number'] -= 1  # file stores 1-based atom numbers
        first_atoms.append({'number': entry['number'],
                            'displacement': entry['displacement']})
    new_dataset = {'natom': dataset['natom'],
                   'first_atoms': first_atoms}
    if return_cell:
        return new_dataset, get_cell_from_disp_yaml(dataset)
    return new_dataset
def parse_disp_fc3_yaml(filename="disp_fc3.yaml", return_cell=False):
    """Read a disp_fc3.yaml file (pair displacements for fc3).

    Atom numbers are converted from the 1-based numbers in the file to
    0-based indices.  Each second-atom entry is expanded so that every
    displacement of the second atom becomes its own dict.  When
    `return_cell` is True, the supercell described in the file is
    returned as well.
    """
    dataset = _parse_yaml(filename)
    natom = dataset['natom']
    new_dataset = {}
    new_dataset['natom'] = natom
    if 'cutoff_distance' in dataset:
        new_dataset['cutoff_distance'] = dataset['cutoff_distance']
    new_first_atoms = []
    for first_atoms in dataset['first_atoms']:
        atom1 = first_atoms['number'] - 1
        disp1 = first_atoms['displacement']
        new_second_atoms = []
        for second_atom in first_atoms['second_atoms']:
            disp2_dataset = {'number': second_atom['number'] - 1}
            if 'included' in second_atom:
                disp2_dataset.update({'included': second_atom['included']})
            if 'distance' in second_atom:
                disp2_dataset.update(
                    {'pair_distance': second_atom['distance']})
            # One entry per displacement of the second atom; copy() so the
            # appended dicts do not alias each other.
            for disp2 in second_atom['displacements']:
                disp2_dataset.update({'displacement': disp2})
                new_second_atoms.append(disp2_dataset.copy())
        new_first_atoms.append({'number': atom1,
                                'displacement': disp1,
                                'second_atoms': new_second_atoms})
    new_dataset['first_atoms'] = new_first_atoms

    if return_cell:
        cell = get_cell_from_disp_yaml(dataset)
        return new_dataset, cell
    else:
        return new_dataset
def parse_FORCES_FC2(disp_dataset, filename="FORCES_FC2"):
    """Read FORCES_FC2: one force set per displaced supercell.

    Returns a list of (natom, 3) arrays, or an empty list when the file
    runs out before all displacements are covered.
    """
    natom = disp_dataset['natom']
    num_disps = len(disp_dataset['first_atoms'])
    force_sets = []
    with open(filename, 'r') as forces_file:
        for _ in range(num_disps):
            one_set = _parse_force_lines(forces_file, natom)
            if one_set is None:
                return []
            force_sets.append(one_set)
    return force_sets
def parse_FORCES_FC3(disp_dataset, filename="FORCES_FC3", use_loadtxt=False):
    """Read FORCES_FC3 and return a (num_disp, natom, 3) array.

    The number of force sets equals the number of first displacements
    plus the number of all (first, second) displacement pairs.  With
    `use_loadtxt` the whole file is read by numpy in one call; otherwise
    it is parsed line by line and a RuntimeError is raised when the file
    ends early.
    """
    num_atom = disp_dataset['natom']
    # Count first displacements plus every second displacement.
    num_disp = len(disp_dataset['first_atoms'])
    for disp1 in disp_dataset['first_atoms']:
        num_disp += len(disp1['second_atoms'])

    if use_loadtxt:
        forces_fc3 = np.loadtxt(filename)
        return forces_fc3.reshape((num_disp, -1, 3))
    else:
        forces_fc3 = np.zeros((num_disp, num_atom, 3),
                              dtype='double', order='C')
        with open(filename, 'r') as f3:
            for i in range(num_disp):
                forces = _parse_force_lines(f3, num_atom)
                if forces is None:
                    raise RuntimeError("Failed to parse %s." % filename)
                else:
                    forces_fc3[i] = forces
        return forces_fc3
def parse_QPOINTS3(filename='QPOINTS3'):
    """Read q-point triplets: first line is the count, then 9 floats per
    line (three q-points).  Returns a (num, 9) array.

    The file handle is now closed via a ``with`` statement; it was
    previously leaked.
    """
    with open(filename) as f:
        num = int(f.readline().strip())
        count = 0
        qpoints3 = []
        for line in f:
            line_array = [float(x) for x in line.strip().split()]
            if len(line_array) < 9:
                raise RuntimeError("Failed to parse %s." % filename)
            qpoints3.append(line_array[0:9])
            count += 1
            if count == num:
                break
    return np.array(qpoints3)
def parse_fc3(num_atom, filename='fc3.dat'):
    """Read third-order force constants from the phono3py text format.

    Layout per (i, j, k) atom triplet: one header line, nine rows of
    three floats (the 3x3x3 tensor, three rows per first Cartesian
    index), then one separator line.  Returns a
    (num_atom, num_atom, num_atom, 3, 3, 3) array.

    The file handle is now closed via a ``with`` statement; it was
    previously leaked.
    """
    fc3 = np.zeros((num_atom, num_atom, num_atom, 3, 3, 3), dtype=float)
    with open(filename) as f:
        for i in range(num_atom):
            for j in range(num_atom):
                for k in range(num_atom):
                    f.readline()  # header line of the (i, j, k) element
                    for l in range(3):
                        fc3[i, j, k, l] = [
                            [float(x) for x in f.readline().split()],
                            [float(x) for x in f.readline().split()],
                            [float(x) for x in f.readline().split()]]
                    f.readline()  # separator line
    return fc3
def parse_fc2(num_atom, filename='fc2.dat'):
    """Read second-order force constants from the phono3py text format.

    Layout per (i, j) atom pair: one header line, three rows of three
    floats (the 3x3 tensor), then one separator line.  Returns a
    (num_atom, num_atom, 3, 3) array.

    The file handle is now closed via a ``with`` statement; it was
    previously leaked.
    """
    fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype=float)
    with open(filename) as f:
        for i in range(num_atom):
            for j in range(num_atom):
                f.readline()  # header line of the (i, j) element
                fc2[i, j] = [[float(x) for x in f.readline().split()],
                             [float(x) for x in f.readline().split()],
                             [float(x) for x in f.readline().split()]]
                f.readline()  # separator line
    return fc2
def parse_triplets(filename):
    """Read q-point triplets and weights: four integers per line,
    '#' lines are comments.  Returns (triplets, weights) arrays.

    Fixes: the file handle is now closed via ``with``; empty lines no
    longer raise IndexError (the old ``line.strip()[0]`` crashed on
    blank lines).
    """
    triplets = []
    weights = []
    with open(filename) as f:
        for line in f:
            stripped = line.strip()
            if not stripped or stripped[0] == "#":
                continue
            line_array = [int(x) for x in line.split()]
            triplets.append(line_array[:3])
            weights.append(line_array[3])
    return np.array(triplets), np.array(weights)
def parse_grid_address(filename):
    """Read grid addresses (columns 2-4 of each data line); '#' lines are
    comments.  Returns an (n, 3) array.

    Fixes: the file handle is now closed via ``with``; empty lines no
    longer raise IndexError (the old ``line.strip()[0]`` crashed on
    blank lines).
    """
    grid_address = []
    with open(filename, 'r') as f:
        for line in f:
            stripped = line.strip()
            if not stripped or stripped[0] == "#":
                continue
            line_array = [int(x) for x in line.split()]
            grid_address.append(line_array[1:4])
    return np.array(grid_address)
def _get_filename_suffix(mesh,
mesh_divisors=None,
grid_point=None,
band_indices=None,
sigma=None,
sigma_cutoff=None,
filename=None):
suffix = "-m%d%d%d" % tuple(mesh)
if mesh_divisors is not None:
if (np.array(mesh_divisors, dtype=int) != 1).any():
suffix += "-d%d%d%d" % tuple(mesh_divisors)
if grid_point is not None:
suffix += ("-g%d" % grid_point)
if band_indices is not None:
suffix += "-"
for bi in band_indices:
suffix += "b%d" % (bi + 1)
if sigma is not None:
suffix += "-s" + _del_zeros(sigma)
if sigma_cutoff is not None:
sigma_cutoff_str = _del_zeros(sigma_cutoff)
suffix += "-sd" + sigma_cutoff_str
if filename is not None:
suffix += "." + filename
return suffix
def _del_zeros(val):
return ("%f" % val).rstrip('0').rstrip('\.')
def _parse_yaml(file_yaml):
    """Load a yaml file, preferring the fast C loader when available.

    The unused CDumper/Dumper imports were removed.  NOTE: yaml.load with
    a non-safe Loader executes arbitrary tags; input files are expected
    to be trusted phono3py output.
    """
    import yaml
    try:
        from yaml import CLoader as Loader
    except ImportError:
        from yaml import Loader

    with open(file_yaml) as f:
        string = f.read()
    data = yaml.load(string, Loader=Loader)
    return data
def _parse_force_lines(forcefile, num_atom):
forces = []
for line in forcefile:
if line.strip() == '':
continue
if line.strip()[0] == '#':
continue
forces.append([float(x) for x in line.strip().split()])
if len(forces) == num_atom:
break
if not len(forces) == num_atom:
return None
else:
return np.array(forces)
def _parse_force_constants_lines(fcthird_file, num_atom):
fc2 = []
for line in fcthird_file:
if line.strip() == '':
continue
if line.strip()[0] == '#':
continue
fc2.append([float(x) for x in line.strip().split()])
if len(fc2) == num_atom ** 2 * 3:
break
if not len(fc2) == num_atom ** 2 * 3:
return None
else:
return np.array(fc2).reshape(num_atom, num_atom, 3, 3)
|
atztogo/phono3py | phono3py/phonon3/__init__.py | Phono3py.get_frequency_shift | python | def get_frequency_shift(
self,
grid_points,
temperatures=np.arange(0, 1001, 10, dtype='double'),
epsilons=None,
output_filename=None):
if self._interaction is None:
self.set_phph_interaction()
if epsilons is None:
_epsilons = [0.1]
else:
_epsilons = epsilons
self._grid_points = grid_points
get_frequency_shift(self._interaction,
self._grid_points,
self._band_indices,
_epsilons,
temperatures,
output_filename=output_filename,
log_level=self._log_level) | Frequency shift from lowest order diagram is calculated.
Args:
epsilons(list of float):
The value to avoid divergence. When multiple values are given
frequency shifts for those values are returned. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/__init__.py#L667-L695 | [
"def get_frequency_shift(interaction,\n grid_points,\n band_indices,\n epsilons,\n temperatures=None,\n output_filename=None,\n log_level=0):\n if temperatures is None:\n temperatures = [0.0, 300.0]\n fst = FrequencyShift(interaction)\n band_indices_flatten = interaction.get_band_indices()\n mesh = interaction.get_mesh_numbers()\n for gp in grid_points:\n fst.set_grid_point(gp)\n if log_level:\n weights = interaction.get_triplets_at_q()[1]\n print(\"------ Frequency shift -o- ------\")\n print(\"Number of ir-triplets: \"\n \"%d / %d\" % (len(weights), weights.sum()))\n fst.run_interaction()\n\n for epsilon in epsilons:\n fst.set_epsilon(epsilon)\n delta = np.zeros((len(temperatures),\n len(band_indices_flatten)),\n dtype='double')\n for i, t in enumerate(temperatures):\n fst.set_temperature(t)\n fst.run()\n delta[i] = fst.get_frequency_shift()\n\n for i, bi in enumerate(band_indices):\n pos = 0\n for j in range(i):\n pos += len(band_indices[j])\n\n write_frequency_shift(gp,\n bi,\n temperatures,\n delta[:, pos:(pos+len(bi))],\n mesh,\n epsilon=epsilon,\n filename=output_filename)\n",
"def set_phph_interaction(self,\n nac_params=None,\n nac_q_direction=None,\n constant_averaged_interaction=None,\n frequency_scale_factor=None,\n unit_conversion=None,\n solve_dynamical_matrices=True):\n if self._mesh_numbers is None:\n print(\"'mesh' has to be set in Phono3py instantiation.\")\n raise RuntimeError\n\n self._nac_params = nac_params\n self._interaction = Interaction(\n self._supercell,\n self._primitive,\n self._mesh_numbers,\n self._primitive_symmetry,\n fc3=self._fc3,\n band_indices=self._band_indices_flatten,\n constant_averaged_interaction=constant_averaged_interaction,\n frequency_factor_to_THz=self._frequency_factor_to_THz,\n frequency_scale_factor=frequency_scale_factor,\n unit_conversion=unit_conversion,\n cutoff_frequency=self._cutoff_frequency,\n is_mesh_symmetry=self._is_mesh_symmetry,\n symmetrize_fc3q=self._symmetrize_fc3q,\n lapack_zheev_uplo=self._lapack_zheev_uplo)\n self._interaction.set_nac_q_direction(nac_q_direction=nac_q_direction)\n self._interaction.set_dynamical_matrix(\n self._fc2,\n self._phonon_supercell,\n self._phonon_primitive,\n nac_params=self._nac_params,\n solve_dynamical_matrices=solve_dynamical_matrices,\n verbose=self._log_level)\n"
] | class Phono3py(object):
    def __init__(self,
                 unitcell,
                 supercell_matrix,
                 primitive_matrix=None,
                 phonon_supercell_matrix=None,
                 masses=None,
                 mesh=None,
                 band_indices=None,
                 sigmas=None,
                 sigma_cutoff=None,
                 cutoff_frequency=1e-4,
                 frequency_factor_to_THz=VaspToTHz,
                 is_symmetry=True,
                 is_mesh_symmetry=True,
                 symmetrize_fc3q=False,
                 symprec=1e-5,
                 log_level=0,
                 lapack_zheev_uplo='L'):
        """Set up supercell, primitive cell, symmetries and bookkeeping.

        `unitcell` with `supercell_matrix` (and optionally
        `primitive_matrix`, which may be the string 'auto') define the
        cells.  `phonon_supercell_matrix` optionally selects a different
        supercell for fc2.  All calculation results are initialized to
        None and filled in by later method calls.
        """
        if sigmas is None:
            # A single None sigma means "use the tetrahedron method".
            self._sigmas = [None]
        else:
            self._sigmas = sigmas
        self._sigma_cutoff = sigma_cutoff
        self._symprec = symprec
        self._frequency_factor_to_THz = frequency_factor_to_THz
        self._is_symmetry = is_symmetry
        self._is_mesh_symmetry = is_mesh_symmetry
        self._lapack_zheev_uplo = lapack_zheev_uplo
        self._symmetrize_fc3q = symmetrize_fc3q
        self._cutoff_frequency = cutoff_frequency
        self._log_level = log_level

        # Create supercell and primitive cell
        self._unitcell = unitcell
        self._supercell_matrix = supercell_matrix
        if type(primitive_matrix) is str and primitive_matrix == 'auto':
            self._primitive_matrix = self._guess_primitive_matrix()
        else:
            self._primitive_matrix = primitive_matrix
        self._phonon_supercell_matrix = phonon_supercell_matrix  # optional
        self._supercell = None
        self._primitive = None
        self._phonon_supercell = None
        self._phonon_primitive = None
        self._build_supercell()
        self._build_primitive_cell()
        self._build_phonon_supercell()
        self._build_phonon_primitive_cell()

        if masses is not None:
            self._set_masses(masses)

        # Set supercell, primitive, and phonon supercell symmetries
        self._symmetry = None
        self._primitive_symmetry = None
        self._phonon_supercell_symmetry = None
        self._search_symmetry()
        self._search_primitive_symmetry()
        self._search_phonon_supercell_symmetry()

        # Displacements and supercells
        self._supercells_with_displacements = None
        self._displacement_dataset = None
        self._phonon_displacement_dataset = None
        self._phonon_supercells_with_displacements = None

        # Thermal conductivity
        self._thermal_conductivity = None  # conductivity_RTA object

        # Imaginary part of self energy at frequency points
        self._imag_self_energy = None
        self._scattering_event_class = None

        self._grid_points = None
        self._frequency_points = None
        self._temperatures = None

        # Other variables
        self._fc2 = None
        self._fc3 = None
        self._nac_params = None

        # Setup interaction
        self._interaction = None
        self._mesh_numbers = None
        self._band_indices = None
        self._band_indices_flatten = None
        if mesh is not None:
            self._set_mesh_numbers(mesh)
        self.set_band_indices(band_indices)
def set_band_indices(self, band_indices):
if band_indices is None:
num_band = self._primitive.get_number_of_atoms() * 3
self._band_indices = [np.arange(num_band, dtype='intc')]
else:
self._band_indices = band_indices
self._band_indices_flatten = np.hstack(
self._band_indices).astype('intc')
    def set_phph_interaction(self,
                             nac_params=None,
                             nac_q_direction=None,
                             constant_averaged_interaction=None,
                             frequency_scale_factor=None,
                             unit_conversion=None,
                             solve_dynamical_matrices=True):
        """Create the ph-ph Interaction instance from fc3 and prepare its
        dynamical matrix from fc2.

        Requires 'mesh' to have been set at Phono3py instantiation;
        raises RuntimeError otherwise.  `nac_params`/`nac_q_direction`
        configure the non-analytical term correction.
        """
        if self._mesh_numbers is None:
            print("'mesh' has to be set in Phono3py instantiation.")
            raise RuntimeError

        self._nac_params = nac_params
        self._interaction = Interaction(
            self._supercell,
            self._primitive,
            self._mesh_numbers,
            self._primitive_symmetry,
            fc3=self._fc3,
            band_indices=self._band_indices_flatten,
            constant_averaged_interaction=constant_averaged_interaction,
            frequency_factor_to_THz=self._frequency_factor_to_THz,
            frequency_scale_factor=frequency_scale_factor,
            unit_conversion=unit_conversion,
            cutoff_frequency=self._cutoff_frequency,
            is_mesh_symmetry=self._is_mesh_symmetry,
            symmetrize_fc3q=self._symmetrize_fc3q,
            lapack_zheev_uplo=self._lapack_zheev_uplo)
        self._interaction.set_nac_q_direction(nac_q_direction=nac_q_direction)
        self._interaction.set_dynamical_matrix(
            self._fc2,
            self._phonon_supercell,
            self._phonon_primitive,
            nac_params=self._nac_params,
            solve_dynamical_matrices=solve_dynamical_matrices,
            verbose=self._log_level)
def set_phonon_data(self, frequencies, eigenvectors, grid_address):
if self._interaction is not None:
return self._interaction.set_phonon_data(frequencies,
eigenvectors,
grid_address)
else:
return False
def get_phonon_data(self):
if self._interaction is not None:
grid_address = self._interaction.get_grid_address()
freqs, eigvecs, _ = self._interaction.get_phonons()
return freqs, eigvecs, grid_address
else:
msg = "set_phph_interaction has to be done."
raise RuntimeError(msg)
def generate_displacements(self,
                           distance=0.03,
                           cutoff_pair_distance=None,
                           is_plusminus='auto',
                           is_diagonal=True):
    """Generate displacement datasets.

    The fc3 dataset (pairs of displaced atoms) is always generated.
    When a separate phonon supercell is used, a single-displacement
    dataset for fc2 is generated as well.

    distance : float
        Displacement amplitude.
    cutoff_pair_distance : float or None
        Pairs farther apart than this are excluded from the fc3 dataset.
    """
    direction_dataset = get_third_order_displacements(
        self._supercell,
        self._symmetry,
        is_plusminus=is_plusminus,
        is_diagonal=is_diagonal)
    self._displacement_dataset = direction_to_displacement(
        direction_dataset,
        distance,
        self._supercell,
        cutoff_distance=cutoff_pair_distance)

    if self._phonon_supercell_matrix is not None:
        # NOTE: is_diagonal=False is intentional here.  It matches the
        # choice made inside get_third_order_displacements() for the
        # first displacement, where less symmetry breaking can reduce
        # the number of second displacements, and non-diagonal
        # directions are expected to help the accuracy of fitting fc
        # for ph-ph interaction strength.  Whether this really matters
        # in practice is an open question (see original author's note
        # in the repository history).
        phonon_directions = get_least_displacements(
            self._phonon_supercell_symmetry,
            is_plusminus=is_plusminus,
            is_diagonal=False)
        self._phonon_displacement_dataset = \
            directions_to_displacement_dataset(
                phonon_directions,
                distance,
                self._phonon_supercell)
def produce_fc2(self,
                forces_fc2,
                displacement_dataset=None,
                symmetrize_fc2=False,
                is_compact_fc=False,
                use_alm=False,
                alm_options=None):
    """Compute second-order force constants (fc2) from forces.

    forces_fc2 : sequence
        Force sets, one per displaced supercell, ordered like the
        dataset's 'first_atoms' entries.
    displacement_dataset : dict or None
        When None, the fc2-specific (phonon) dataset is preferred if
        it exists, otherwise the fc3 dataset is reused.
    symmetrize_fc2 : bool
        Apply symmetrization to the resulting fc2.
    is_compact_fc : bool
        Restrict the fc2 atom list to the primitive-cell atoms
        (p2s_map), i.e. the compact format.
    use_alm : bool
        Fit with the external ALM solver instead of phonopy's builtin.
    """
    # Choose the dataset: explicit argument wins; otherwise prefer the
    # phonon (fc2) dataset when a separate one was generated.
    if displacement_dataset is None:
        if self._phonon_displacement_dataset is None:
            disp_dataset = self._displacement_dataset
        else:
            disp_dataset = self._phonon_displacement_dataset
    else:
        disp_dataset = displacement_dataset

    # Attach forces to the dataset entries, in order.
    for forces, disp1 in zip(forces_fc2, disp_dataset['first_atoms']):
        disp1['forces'] = forces

    if is_compact_fc:
        p2s_map = self._phonon_primitive.p2s_map
    else:
        p2s_map = None

    if use_alm:
        from phonopy.interface.alm import get_fc2 as get_fc2_alm
        self._fc2 = get_fc2_alm(self._phonon_supercell,
                                self._phonon_primitive,
                                disp_dataset,
                                atom_list=p2s_map,
                                alm_options=alm_options,
                                log_level=self._log_level)
    else:
        self._fc2 = get_fc2(self._phonon_supercell,
                            self._phonon_supercell_symmetry,
                            disp_dataset,
                            atom_list=p2s_map)

    if symmetrize_fc2:
        if is_compact_fc:
            symmetrize_compact_force_constants(
                self._fc2, self._phonon_primitive)
        else:
            symmetrize_force_constants(self._fc2)
def produce_fc3(self,
                forces_fc3,
                displacement_dataset=None,
                cutoff_distance=None,  # set fc3 zero
                symmetrize_fc3r=False,
                is_compact_fc=False,
                use_alm=False,
                alm_options=None):
    """Compute third-order force constants (fc3); fc2 as a by-product.

    NOTE(review): ``cutoff_distance`` is accepted but never referenced
    in this body; applying a cutoff appears to go through the separate
    ``cutoff_fc3_by_zero`` method instead — confirm before relying on
    this parameter.

    forces_fc3 : sequence
        Force sets for single-displacement supercells first, then for
        pair-displacement supercells (see _get_fc3).
    symmetrize_fc3r : bool
        Apply translational invariance and permutation symmetry in
        real space.
    """
    if displacement_dataset is None:
        disp_dataset = self._displacement_dataset
    else:
        disp_dataset = displacement_dataset

    if use_alm:
        from phono3py.other.alm_wrapper import get_fc3 as get_fc3_alm
        fc2, fc3 = get_fc3_alm(self._supercell,
                               self._primitive,
                               forces_fc3,
                               disp_dataset,
                               self._symmetry,
                               alm_options=alm_options,
                               is_compact_fc=is_compact_fc,
                               log_level=self._log_level)
    else:
        fc2, fc3 = self._get_fc3(forces_fc3,
                                 disp_dataset,
                                 is_compact_fc=is_compact_fc)

    if symmetrize_fc3r:
        if is_compact_fc:
            set_translational_invariance_compact_fc3(
                fc3, self._primitive)
            set_permutation_symmetry_compact_fc3(fc3, self._primitive)
            # fc2 is only symmetrized when it has not been produced
            # separately (produce_fc2 would normally own self._fc2).
            if self._fc2 is None:
                symmetrize_compact_force_constants(fc2,
                                                   self._primitive)
        else:
            set_translational_invariance_fc3(fc3)
            set_permutation_symmetry_fc3(fc3)
            if self._fc2 is None:
                symmetrize_force_constants(fc2)

    # Set fc2 and fc3
    self._fc3 = fc3

    # Normally self._fc2 is overwritten in produce_fc2
    if self._fc2 is None:
        self._fc2 = fc2
def cutoff_fc3_by_zero(self, cutoff_distance, fc3=None):
    """Zero out fc3 elements beyond cutoff_distance, in place.

    Operates on ``self._fc3`` unless an explicit ``fc3`` array is
    given; the array is overwritten.
    """
    target = self._fc3 if fc3 is None else fc3
    # Module-level helper of the same name does the actual work.
    cutoff_fc3_by_zero(target,
                       self._supercell,
                       cutoff_distance,
                       self._symprec)
def set_permutation_symmetry(self):
    """Apply permutation symmetry to fc2 and fc3 in place, if present."""
    if self._fc2 is None:
        pass
    else:
        set_permutation_symmetry(self._fc2)
    if self._fc3 is None:
        pass
    else:
        set_permutation_symmetry_fc3(self._fc3)
def set_translational_invariance(self):
    """Impose translational invariance on fc2 and fc3 in place, if present."""
    if self._fc2 is None:
        pass
    else:
        set_translational_invariance(self._fc2)
    if self._fc3 is None:
        pass
    else:
        set_translational_invariance_fc3(self._fc3)
# --- Simple accessors -------------------------------------------------
# Property/getter pairs: the get_*/set_* methods are the legacy API,
# the properties are the newer spelling; both read/write the same
# private attributes.
@property
def version(self):
    return __version__

def get_version(self):
    return self.version

def get_interaction_strength(self):
    # The Interaction object created by set_phph_interaction().
    return self._interaction

def get_fc2(self):
    return self._fc2

def set_fc2(self, fc2):
    self._fc2 = fc2

def get_fc3(self):
    return self._fc3

def set_fc3(self, fc3):
    self._fc3 = fc3

@property
def nac_params(self):
    return self._nac_params

def get_nac_params(self):
    return self.nac_params

@property
def primitive(self):
    return self._primitive

def get_primitive(self):
    return self.primitive

@property
def unitcell(self):
    return self._unitcell

def get_unitcell(self):
    return self.unitcell

@property
def supercell(self):
    return self._supercell

def get_supercell(self):
    return self.supercell

@property
def phonon_supercell(self):
    return self._phonon_supercell

def get_phonon_supercell(self):
    return self.phonon_supercell

@property
def phonon_primitive(self):
    return self._phonon_primitive

def get_phonon_primitive(self):
    return self.phonon_primitive

@property
def symmetry(self):
    """return symmetry of supercell"""
    return self._symmetry

def get_symmetry(self):
    return self.symmetry

@property
def primitive_symmetry(self):
    """return symmetry of primitive cell"""
    return self._primitive_symmetry

def get_primitive_symmetry(self):
    """return symmetry of primitive cell"""
    return self.primitive_symmetry

def get_phonon_supercell_symmetry(self):
    return self._phonon_supercell_symmetry

@property
def supercell_matrix(self):
    return self._supercell_matrix

def get_supercell_matrix(self):
    return self.supercell_matrix

@property
def phonon_supercell_matrix(self):
    return self._phonon_supercell_matrix

def get_phonon_supercell_matrix(self):
    return self.phonon_supercell_matrix

@property
def primitive_matrix(self):
    return self._primitive_matrix

def get_primitive_matrix(self):
    return self.primitive_matrix

@property
def unit_conversion_factor(self):
    # Frequency conversion factor to THz given at instantiation.
    return self._frequency_factor_to_THz

def set_displacement_dataset(self, dataset):
    self._displacement_dataset = dataset

@property
def displacement_dataset(self):
    return self._displacement_dataset

def get_displacement_dataset(self):
    return self.displacement_dataset

def get_phonon_displacement_dataset(self):
    return self._phonon_displacement_dataset

def get_supercells_with_displacements(self):
    # Lazily built on first access.
    if self._supercells_with_displacements is None:
        self._build_supercells_with_displacements()
    return self._supercells_with_displacements

def get_phonon_supercells_with_displacements(self):
    # Lazily built; stays None when no phonon displacement dataset
    # was generated (i.e. no separate phonon supercell).
    if self._phonon_supercells_with_displacements is None:
        if self._phonon_displacement_dataset is not None:
            self._phonon_supercells_with_displacements = \
                self._build_phonon_supercells_with_displacements(
                    self._phonon_supercell,
                    self._phonon_displacement_dataset)
    return self._phonon_supercells_with_displacements

@property
def mesh_numbers(self):
    return self._mesh_numbers
def run_imag_self_energy(self,
                         grid_points,
                         frequency_step=None,
                         num_frequency_points=None,
                         temperatures=None,
                         scattering_event_class=None,
                         write_gamma_detail=False,
                         output_filename=None):
    """Compute the imaginary part of the ph-ph self energy.

    Results are stored on the instance (``self._imag_self_energy`` and
    ``self._frequency_points``) for later output via
    write_imag_self_energy().

    grid_points : sequence of grid point indices to compute at.
    temperatures : sequence of float or None
        Defaults to [0.0, 300.0] when None.
    """
    if self._interaction is None:
        # Build the interaction with default settings if not done yet.
        self.set_phph_interaction()
    if temperatures is None:
        temperatures = [0.0, 300.0]
    self._grid_points = grid_points
    self._temperatures = temperatures
    self._scattering_event_class = scattering_event_class
    self._imag_self_energy, self._frequency_points = get_imag_self_energy(
        self._interaction,
        grid_points,
        self._sigmas,
        frequency_step=frequency_step,
        num_frequency_points=num_frequency_points,
        temperatures=temperatures,
        scattering_event_class=scattering_event_class,
        write_detail=write_gamma_detail,
        output_filename=output_filename,
        log_level=self._log_level)
def write_imag_self_energy(self, filename=None):
    """Write results of the last run_imag_self_energy() call to file.

    Must be called after run_imag_self_energy(); the stored grid
    points, frequency points, temperatures, and sigmas are passed
    through to the writer.
    """
    write_imag_self_energy(
        self._imag_self_energy,
        self._mesh_numbers,
        self._grid_points,
        self._band_indices,
        self._frequency_points,
        self._temperatures,
        self._sigmas,
        scattering_event_class=self._scattering_event_class,
        filename=filename,
        is_mesh_symmetry=self._is_mesh_symmetry)
def run_thermal_conductivity(
        self,
        is_LBTE=False,
        temperatures=np.arange(0, 1001, 10, dtype='double'),
        is_isotope=False,
        mass_variances=None,
        grid_points=None,
        boundary_mfp=None,  # in micrometre
        solve_collective_phonon=False,
        use_ave_pp=False,
        gamma_unit_conversion=None,
        mesh_divisors=None,
        coarse_mesh_shifts=None,
        is_reducible_collision_matrix=False,
        is_kappa_star=True,
        gv_delta_q=None,  # for group velocity
        is_full_pp=False,
        pinv_cutoff=1.0e-8,  # for pseudo-inversion of collision matrix
        pinv_solver=0,  # solver of pseudo-inversion of collision matrix
        write_gamma=False,
        read_gamma=False,
        is_N_U=False,
        write_kappa=False,
        write_gamma_detail=False,
        write_collision=False,
        read_collision=False,
        write_pp=False,
        read_pp=False,
        write_LBTE_solution=False,
        compression=None,
        input_filename=None,
        output_filename=None):
    """Run the lattice thermal conductivity calculation.

    Dispatches to the direct-solution linearized BTE solver
    (``is_LBTE=True``) or the RTA solver (default), storing the
    resulting conductivity object in ``self._thermal_conductivity``
    (retrieve with get_thermal_conductivity()).

    NOTE(review): the ``temperatures`` default is a single ndarray
    shared across calls — safe only as long as callees do not mutate
    it; confirm downstream usage.
    """
    if self._interaction is None:
        # Build the interaction with default settings if not done yet.
        self.set_phph_interaction()
    if is_LBTE:
        # Direct solution of the linearized BTE (collision matrix).
        self._thermal_conductivity = get_thermal_conductivity_LBTE(
            self._interaction,
            self._primitive_symmetry,
            temperatures=temperatures,
            sigmas=self._sigmas,
            sigma_cutoff=self._sigma_cutoff,
            is_isotope=is_isotope,
            mass_variances=mass_variances,
            grid_points=grid_points,
            boundary_mfp=boundary_mfp,
            solve_collective_phonon=solve_collective_phonon,
            is_reducible_collision_matrix=is_reducible_collision_matrix,
            is_kappa_star=is_kappa_star,
            gv_delta_q=gv_delta_q,
            is_full_pp=is_full_pp,
            pinv_cutoff=pinv_cutoff,
            pinv_solver=pinv_solver,
            write_collision=write_collision,
            read_collision=read_collision,
            write_kappa=write_kappa,
            write_pp=write_pp,
            read_pp=read_pp,
            write_LBTE_solution=write_LBTE_solution,
            compression=compression,
            input_filename=input_filename,
            output_filename=output_filename,
            log_level=self._log_level)
    else:
        # Relaxation time approximation.
        self._thermal_conductivity = get_thermal_conductivity_RTA(
            self._interaction,
            self._primitive_symmetry,
            temperatures=temperatures,
            sigmas=self._sigmas,
            sigma_cutoff=self._sigma_cutoff,
            is_isotope=is_isotope,
            mass_variances=mass_variances,
            grid_points=grid_points,
            boundary_mfp=boundary_mfp,
            use_ave_pp=use_ave_pp,
            gamma_unit_conversion=gamma_unit_conversion,
            mesh_divisors=mesh_divisors,
            coarse_mesh_shifts=coarse_mesh_shifts,
            is_kappa_star=is_kappa_star,
            gv_delta_q=gv_delta_q,
            is_full_pp=is_full_pp,
            write_gamma=write_gamma,
            read_gamma=read_gamma,
            is_N_U=is_N_U,
            write_kappa=write_kappa,
            write_pp=write_pp,
            read_pp=read_pp,
            write_gamma_detail=write_gamma_detail,
            compression=compression,
            input_filename=input_filename,
            output_filename=output_filename,
            log_level=self._log_level)
def get_thermal_conductivity(self):
    # Conductivity object produced by run_thermal_conductivity().
    return self._thermal_conductivity

def _search_symmetry(self):
    """Set the symmetry operations of the (fc3) supercell."""
    self._symmetry = Symmetry(self._supercell,
                              self._symprec,
                              self._is_symmetry)
def _search_primitive_symmetry(self):
    """Set primitive-cell symmetry; warn on point-group mismatch.

    A differing number of point-group operations between supercell and
    primitive cell usually indicates an inconsistent cell choice.
    """
    self._primitive_symmetry = Symmetry(self._primitive,
                                        self._symprec,
                                        self._is_symmetry)
    if (len(self._symmetry.get_pointgroup_operations()) !=
        len(self._primitive_symmetry.get_pointgroup_operations())):
        # Bug fix: the adjacent string literals previously concatenated
        # to "...supercell and primitivecell..." — add the missing space.
        print("Warning: point group symmetries of supercell and primitive "
              "cell are different.")
def _search_phonon_supercell_symmetry(self):
    """Set symmetry of the fc2 (phonon) supercell.

    When no separate phonon supercell matrix is given, the fc3
    supercell symmetry is reused as-is.
    """
    if self._phonon_supercell_matrix is None:
        self._phonon_supercell_symmetry = self._symmetry
    else:
        self._phonon_supercell_symmetry = Symmetry(self._phonon_supercell,
                                                   self._symprec,
                                                   self._is_symmetry)

def _build_supercell(self):
    """Create the fc3 supercell from the unit cell and supercell matrix."""
    self._supercell = get_supercell(self._unitcell,
                                    self._supercell_matrix,
                                    self._symprec)
def _build_primitive_cell(self):
    """Create the primitive cell for fc3.

    primitive_matrix:
      Relative axes of primitive cell to the input unit cell.
      Relative axes to the supercell is calculated by:
         supercell_matrix^-1 * primitive_matrix
      Therefore primitive cell lattice is finally calculated by:
         (supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
    """
    self._primitive = self._get_primitive_cell(
        self._supercell, self._supercell_matrix, self._primitive_matrix)
def _build_phonon_supercell(self):
    """Create the supercell used for harmonic phonons.

    phonon_supercell:
      This supercell is used for harmonic phonons (frequencies,
      eigenvectors, group velocities, ...)
    phonon_supercell_matrix:
      Different supercell size can be specified.  When None, the fc3
      supercell is reused.
    """
    if self._phonon_supercell_matrix is None:
        self._phonon_supercell = self._supercell
    else:
        self._phonon_supercell = get_supercell(
            self._unitcell, self._phonon_supercell_matrix, self._symprec)
def _build_phonon_primitive_cell(self):
if self._phonon_supercell_matrix is None:
self._phonon_primitive = self._primitive
else:
self._phonon_primitive = self._get_primitive_cell(
self._phonon_supercell,
self._phonon_supercell_matrix,
self._primitive_matrix)
if (self._primitive is not None and
(self._primitive.get_atomic_numbers() !=
self._phonon_primitive.get_atomic_numbers()).any()):
print(" Primitive cells for fc2 and fc3 can be different.")
raise RuntimeError
def _build_phonon_supercells_with_displacements(self,
                                                supercell,
                                                displacement_dataset):
    """Return Atoms objects, one per single-displacement configuration.

    Each cell is a copy of ``supercell`` with one atom shifted by its
    Cartesian displacement from the dataset.
    """
    magmoms = supercell.get_magnetic_moments()
    masses = supercell.get_masses()
    numbers = supercell.get_atomic_numbers()
    lattice = supercell.get_cell()

    cells = []
    for disp1 in displacement_dataset['first_atoms']:
        # Fresh positions each time so displacements don't accumulate.
        positions = supercell.get_positions()
        positions[disp1['number']] += disp1['displacement']
        cells.append(Atoms(numbers=numbers,
                           masses=masses,
                           magmoms=magmoms,
                           positions=positions,
                           cell=lattice,
                           pbc=True))
    return cells
def _build_supercells_with_displacements(self):
    """Build all displaced supercells for the fc3 dataset.

    Single-displacement supercells (shared with fc2) come first,
    followed by pair-displacement supercells.  Pair configurations
    excluded by the cutoff ('included' == False) are represented by
    None placeholders so list indices stay aligned with the dataset.

    Fix: removed a dead ``supercells = []`` initialization that was
    immediately overwritten by the helper call.
    """
    magmoms = self._supercell.get_magnetic_moments()
    masses = self._supercell.get_masses()
    numbers = self._supercell.get_atomic_numbers()
    lattice = self._supercell.get_cell()

    supercells = self._build_phonon_supercells_with_displacements(
        self._supercell,
        self._displacement_dataset)

    for disp1 in self._displacement_dataset['first_atoms']:
        disp_cart1 = disp1['displacement']
        for disp2 in disp1['second_atoms']:
            # 'included' is absent when no pair cutoff was requested.
            if disp2.get('included', True):
                positions = self._supercell.get_positions()
                positions[disp1['number']] += disp_cart1
                positions[disp2['number']] += disp2['displacement']
                supercells.append(Atoms(numbers=numbers,
                                        masses=masses,
                                        magmoms=magmoms,
                                        positions=positions,
                                        cell=lattice,
                                        pbc=True))
            else:
                supercells.append(None)

    self._supercells_with_displacements = supercells
def _get_primitive_cell(self,
                        supercell,
                        supercell_matrix,
                        primitive_matrix):
    """Return the primitive cell derived from ``supercell``.

    The transformation matrix relative to the supercell is
    supercell_matrix^-1 (optionally right-multiplied by
    primitive_matrix).
    """
    t_mat = np.linalg.inv(supercell_matrix)
    if primitive_matrix is not None:
        t_mat = np.dot(t_mat, primitive_matrix)
    return get_primitive(supercell, t_mat, self._symprec)
def _guess_primitive_matrix(self):
    # Delegates to phonopy's guesser (used for primitive_matrix='auto').
    return guess_primitive_matrix(self._unitcell, symprec=self._symprec)

def _set_masses(self, masses):
    """Propagate per-primitive-atom masses to all cell variants.

    ``masses`` is given per primitive-cell atom; supercell and
    unit-cell masses are derived through the atom index maps, for both
    the fc3 cells and the phonon (fc2) cells.
    """
    p_masses = np.array(masses)
    self._primitive.set_masses(p_masses)
    p2p_map = self._primitive.get_primitive_to_primitive_map()
    # Map each supercell atom back to its primitive-cell counterpart.
    s_masses = p_masses[[p2p_map[x] for x in
                         self._primitive.get_supercell_to_primitive_map()]]
    self._supercell.set_masses(s_masses)
    u2s_map = self._supercell.get_unitcell_to_supercell_map()
    u_masses = s_masses[u2s_map]
    self._unitcell.set_masses(u_masses)
    # Same again for the phonon cells (may be identical objects when
    # no separate phonon supercell is used).
    self._phonon_primitive.set_masses(p_masses)
    p2p_map = self._phonon_primitive.get_primitive_to_primitive_map()
    s_masses = p_masses[
        [p2p_map[x] for x in
         self._phonon_primitive.get_supercell_to_primitive_map()]]
    self._phonon_supercell.set_masses(s_masses)
def _set_mesh_numbers(self, mesh):
    """Interpret ``mesh`` and store sampling mesh numbers.

    mesh : array_like
        Either three integers (used directly) or a length value that
        is converted to mesh numbers via length2mesh, using the
        primitive point-group rotations when available.

    Raises
    ------
    TypeError
        When ``mesh`` has an unusable shape (e.g. a scalar).
    """
    _mesh = np.array(mesh)
    mesh_nums = None
    # A scalar input has shape (), which is falsy -> TypeError below.
    if _mesh.shape:
        if _mesh.shape == (3,):
            mesh_nums = mesh
        elif self._primitive_symmetry is None:
            mesh_nums = length2mesh(mesh, self._primitive.get_cell())
        else:
            rotations = self._primitive_symmetry.get_pointgroup_operations()
            mesh_nums = length2mesh(mesh, self._primitive.get_cell(),
                                    rotations=rotations)
    if mesh_nums is None:
        msg = "mesh has inappropriate type."
        raise TypeError(msg)
    self._mesh_numbers = mesh_nums
def _get_fc3(self,
             forces_fc3,
             disp_dataset,
             is_compact_fc=False):
    """Attach forces to ``disp_dataset`` and compute (fc2, fc3).

    ``forces_fc3`` holds force sets for the single-displacement
    supercells first, then for the pair-displacement supercells, in
    dataset order.
    """
    force_sets = iter(forces_fc3)
    # Single-displacement supercells: store raw forces.
    for disp1 in disp_dataset['first_atoms']:
        disp1['forces'] = next(force_sets)
    # Pair-displacement supercells: store forces relative to the
    # corresponding single-displacement forces.
    for disp1 in disp_dataset['first_atoms']:
        for disp2 in disp1['second_atoms']:
            disp2['delta_forces'] = next(force_sets) - disp1['forces']
    fc2, fc3 = get_fc3(self._supercell,
                       self._primitive,
                       disp_dataset,
                       self._symmetry,
                       is_compact_fc=is_compact_fc,
                       verbose=self._log_level)
    return fc2, fc3
|
atztogo/phono3py | phono3py/phonon3/__init__.py | Phono3py._build_primitive_cell | python | def _build_primitive_cell(self):
self._primitive = self._get_primitive_cell(
self._supercell, self._supercell_matrix, self._primitive_matrix) | primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/__init__.py#L724-L734 | [
"def _get_primitive_cell(self,\n supercell,\n supercell_matrix,\n primitive_matrix):\n inv_supercell_matrix = np.linalg.inv(supercell_matrix)\n if primitive_matrix is None:\n t_mat = inv_supercell_matrix\n else:\n t_mat = np.dot(inv_supercell_matrix, primitive_matrix)\n\n return get_primitive(supercell, t_mat, self._symprec)\n"
] | class Phono3py(object):
def __init__(self,
unitcell,
supercell_matrix,
primitive_matrix=None,
phonon_supercell_matrix=None,
masses=None,
mesh=None,
band_indices=None,
sigmas=None,
sigma_cutoff=None,
cutoff_frequency=1e-4,
frequency_factor_to_THz=VaspToTHz,
is_symmetry=True,
is_mesh_symmetry=True,
symmetrize_fc3q=False,
symprec=1e-5,
log_level=0,
lapack_zheev_uplo='L'):
if sigmas is None:
self._sigmas = [None]
else:
self._sigmas = sigmas
self._sigma_cutoff = sigma_cutoff
self._symprec = symprec
self._frequency_factor_to_THz = frequency_factor_to_THz
self._is_symmetry = is_symmetry
self._is_mesh_symmetry = is_mesh_symmetry
self._lapack_zheev_uplo = lapack_zheev_uplo
self._symmetrize_fc3q = symmetrize_fc3q
self._cutoff_frequency = cutoff_frequency
self._log_level = log_level
# Create supercell and primitive cell
self._unitcell = unitcell
self._supercell_matrix = supercell_matrix
if type(primitive_matrix) is str and primitive_matrix == 'auto':
self._primitive_matrix = self._guess_primitive_matrix()
else:
self._primitive_matrix = primitive_matrix
self._phonon_supercell_matrix = phonon_supercell_matrix # optional
self._supercell = None
self._primitive = None
self._phonon_supercell = None
self._phonon_primitive = None
self._build_supercell()
self._build_primitive_cell()
self._build_phonon_supercell()
self._build_phonon_primitive_cell()
if masses is not None:
self._set_masses(masses)
# Set supercell, primitive, and phonon supercell symmetries
self._symmetry = None
self._primitive_symmetry = None
self._phonon_supercell_symmetry = None
self._search_symmetry()
self._search_primitive_symmetry()
self._search_phonon_supercell_symmetry()
# Displacements and supercells
self._supercells_with_displacements = None
self._displacement_dataset = None
self._phonon_displacement_dataset = None
self._phonon_supercells_with_displacements = None
# Thermal conductivity
self._thermal_conductivity = None # conductivity_RTA object
# Imaginary part of self energy at frequency points
self._imag_self_energy = None
self._scattering_event_class = None
self._grid_points = None
self._frequency_points = None
self._temperatures = None
# Other variables
self._fc2 = None
self._fc3 = None
self._nac_params = None
# Setup interaction
self._interaction = None
self._mesh_numbers = None
self._band_indices = None
self._band_indices_flatten = None
if mesh is not None:
self._set_mesh_numbers(mesh)
self.set_band_indices(band_indices)
def set_band_indices(self, band_indices):
if band_indices is None:
num_band = self._primitive.get_number_of_atoms() * 3
self._band_indices = [np.arange(num_band, dtype='intc')]
else:
self._band_indices = band_indices
self._band_indices_flatten = np.hstack(
self._band_indices).astype('intc')
def set_phph_interaction(self,
nac_params=None,
nac_q_direction=None,
constant_averaged_interaction=None,
frequency_scale_factor=None,
unit_conversion=None,
solve_dynamical_matrices=True):
if self._mesh_numbers is None:
print("'mesh' has to be set in Phono3py instantiation.")
raise RuntimeError
self._nac_params = nac_params
self._interaction = Interaction(
self._supercell,
self._primitive,
self._mesh_numbers,
self._primitive_symmetry,
fc3=self._fc3,
band_indices=self._band_indices_flatten,
constant_averaged_interaction=constant_averaged_interaction,
frequency_factor_to_THz=self._frequency_factor_to_THz,
frequency_scale_factor=frequency_scale_factor,
unit_conversion=unit_conversion,
cutoff_frequency=self._cutoff_frequency,
is_mesh_symmetry=self._is_mesh_symmetry,
symmetrize_fc3q=self._symmetrize_fc3q,
lapack_zheev_uplo=self._lapack_zheev_uplo)
self._interaction.set_nac_q_direction(nac_q_direction=nac_q_direction)
self._interaction.set_dynamical_matrix(
self._fc2,
self._phonon_supercell,
self._phonon_primitive,
nac_params=self._nac_params,
solve_dynamical_matrices=solve_dynamical_matrices,
verbose=self._log_level)
def set_phonon_data(self, frequencies, eigenvectors, grid_address):
if self._interaction is not None:
return self._interaction.set_phonon_data(frequencies,
eigenvectors,
grid_address)
else:
return False
def get_phonon_data(self):
if self._interaction is not None:
grid_address = self._interaction.get_grid_address()
freqs, eigvecs, _ = self._interaction.get_phonons()
return freqs, eigvecs, grid_address
else:
msg = "set_phph_interaction has to be done."
raise RuntimeError(msg)
def generate_displacements(self,
distance=0.03,
cutoff_pair_distance=None,
is_plusminus='auto',
is_diagonal=True):
direction_dataset = get_third_order_displacements(
self._supercell,
self._symmetry,
is_plusminus=is_plusminus,
is_diagonal=is_diagonal)
self._displacement_dataset = direction_to_displacement(
direction_dataset,
distance,
self._supercell,
cutoff_distance=cutoff_pair_distance)
if self._phonon_supercell_matrix is not None:
# 'is_diagonal=False' below is made intentionally. For
# third-order force constants, we need better accuracy,
# and I expect this choice is better for it, but not very
# sure.
# In phono3py, two atoms are displaced for each
# configuration and the displacements are chosen, first
# displacement from the perfect supercell, then second
# displacement, considering symmetry. If I choose
# is_diagonal=False for the first displacement, the
# symmetry is less broken and the number of second
# displacements can be smaller than in the case of
# is_diagonal=True for the first displacement. This is
# done in the call get_least_displacements() in
# phonon3.displacement_fc3.get_third_order_displacements().
#
# The call get_least_displacements() is only for the
# second order force constants, but 'is_diagonal=False' to
# be consistent with the above function call, and also for
# the accuracy when calculating ph-ph interaction
# strength because displacement directions are better to be
# close to perpendicular each other to fit force constants.
#
# On the discussion of the accuracy, these are just my
# expectation when I designed phono3py in the early time,
# and in fact now I guess not very different. If these are
# little different, then I should not surprise users to
# change the default behaviour. At this moment, this is
# open question and we will have more advance and should
# have better specificy external software on this.
phonon_displacement_directions = get_least_displacements(
self._phonon_supercell_symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
self._phonon_displacement_dataset = directions_to_displacement_dataset(
phonon_displacement_directions,
distance,
self._phonon_supercell)
def produce_fc2(self,
forces_fc2,
displacement_dataset=None,
symmetrize_fc2=False,
is_compact_fc=False,
use_alm=False,
alm_options=None):
if displacement_dataset is None:
if self._phonon_displacement_dataset is None:
disp_dataset = self._displacement_dataset
else:
disp_dataset = self._phonon_displacement_dataset
else:
disp_dataset = displacement_dataset
for forces, disp1 in zip(forces_fc2, disp_dataset['first_atoms']):
disp1['forces'] = forces
if is_compact_fc:
p2s_map = self._phonon_primitive.p2s_map
else:
p2s_map = None
if use_alm:
from phonopy.interface.alm import get_fc2 as get_fc2_alm
self._fc2 = get_fc2_alm(self._phonon_supercell,
self._phonon_primitive,
disp_dataset,
atom_list=p2s_map,
alm_options=alm_options,
log_level=self._log_level)
else:
self._fc2 = get_fc2(self._phonon_supercell,
self._phonon_supercell_symmetry,
disp_dataset,
atom_list=p2s_map)
if symmetrize_fc2:
if is_compact_fc:
symmetrize_compact_force_constants(
self._fc2, self._phonon_primitive)
else:
symmetrize_force_constants(self._fc2)
def produce_fc3(self,
forces_fc3,
displacement_dataset=None,
cutoff_distance=None, # set fc3 zero
symmetrize_fc3r=False,
is_compact_fc=False,
use_alm=False,
alm_options=None):
if displacement_dataset is None:
disp_dataset = self._displacement_dataset
else:
disp_dataset = displacement_dataset
if use_alm:
from phono3py.other.alm_wrapper import get_fc3 as get_fc3_alm
fc2, fc3 = get_fc3_alm(self._supercell,
self._primitive,
forces_fc3,
disp_dataset,
self._symmetry,
alm_options=alm_options,
is_compact_fc=is_compact_fc,
log_level=self._log_level)
else:
fc2, fc3 = self._get_fc3(forces_fc3,
disp_dataset,
is_compact_fc=is_compact_fc)
if symmetrize_fc3r:
if is_compact_fc:
set_translational_invariance_compact_fc3(
fc3, self._primitive)
set_permutation_symmetry_compact_fc3(fc3, self._primitive)
if self._fc2 is None:
symmetrize_compact_force_constants(fc2,
self._primitive)
else:
set_translational_invariance_fc3(fc3)
set_permutation_symmetry_fc3(fc3)
if self._fc2 is None:
symmetrize_force_constants(fc2)
# Set fc2 and fc3
self._fc3 = fc3
# Normally self._fc2 is overwritten in produce_fc2
if self._fc2 is None:
self._fc2 = fc2
def cutoff_fc3_by_zero(self, cutoff_distance, fc3=None):
if fc3 is None:
_fc3 = self._fc3
else:
_fc3 = fc3
cutoff_fc3_by_zero(_fc3, # overwritten
self._supercell,
cutoff_distance,
self._symprec)
def set_permutation_symmetry(self):
if self._fc2 is not None:
set_permutation_symmetry(self._fc2)
if self._fc3 is not None:
set_permutation_symmetry_fc3(self._fc3)
def set_translational_invariance(self):
if self._fc2 is not None:
set_translational_invariance(self._fc2)
if self._fc3 is not None:
set_translational_invariance_fc3(self._fc3)
@property
def version(self):
return __version__
def get_version(self):
return self.version
def get_interaction_strength(self):
return self._interaction
def get_fc2(self):
return self._fc2
def set_fc2(self, fc2):
self._fc2 = fc2
def get_fc3(self):
return self._fc3
def set_fc3(self, fc3):
self._fc3 = fc3
@property
def nac_params(self):
return self._nac_params
def get_nac_params(self):
return self.nac_params
@property
def primitive(self):
return self._primitive
def get_primitive(self):
return self.primitive
@property
def unitcell(self):
return self._unitcell
def get_unitcell(self):
return self.unitcell
@property
def supercell(self):
return self._supercell
def get_supercell(self):
return self.supercell
@property
def phonon_supercell(self):
return self._phonon_supercell
def get_phonon_supercell(self):
return self.phonon_supercell
@property
def phonon_primitive(self):
return self._phonon_primitive
def get_phonon_primitive(self):
return self.phonon_primitive
@property
def symmetry(self):
"""return symmetry of supercell"""
return self._symmetry
def get_symmetry(self):
return self.symmetry
@property
def primitive_symmetry(self):
"""return symmetry of primitive cell"""
return self._primitive_symmetry
def get_primitive_symmetry(self):
"""return symmetry of primitive cell"""
return self.primitive_symmetry
def get_phonon_supercell_symmetry(self):
return self._phonon_supercell_symmetry
@property
def supercell_matrix(self):
return self._supercell_matrix
def get_supercell_matrix(self):
return self.supercell_matrix
@property
def phonon_supercell_matrix(self):
return self._phonon_supercell_matrix
def get_phonon_supercell_matrix(self):
return self.phonon_supercell_matrix
@property
def primitive_matrix(self):
return self._primitive_matrix
def get_primitive_matrix(self):
return self.primitive_matrix
@property
def unit_conversion_factor(self):
return self._frequency_factor_to_THz
def set_displacement_dataset(self, dataset):
self._displacement_dataset = dataset
@property
def displacement_dataset(self):
return self._displacement_dataset
def get_displacement_dataset(self):
return self.displacement_dataset
def get_phonon_displacement_dataset(self):
return self._phonon_displacement_dataset
def get_supercells_with_displacements(self):
if self._supercells_with_displacements is None:
self._build_supercells_with_displacements()
return self._supercells_with_displacements
def get_phonon_supercells_with_displacements(self):
if self._phonon_supercells_with_displacements is None:
if self._phonon_displacement_dataset is not None:
self._phonon_supercells_with_displacements = \
self._build_phonon_supercells_with_displacements(
self._phonon_supercell,
self._phonon_displacement_dataset)
return self._phonon_supercells_with_displacements
@property
def mesh_numbers(self):
return self._mesh_numbers
def run_imag_self_energy(self,
grid_points,
frequency_step=None,
num_frequency_points=None,
temperatures=None,
scattering_event_class=None,
write_gamma_detail=False,
output_filename=None):
if self._interaction is None:
self.set_phph_interaction()
if temperatures is None:
temperatures = [0.0, 300.0]
self._grid_points = grid_points
self._temperatures = temperatures
self._scattering_event_class = scattering_event_class
self._imag_self_energy, self._frequency_points = get_imag_self_energy(
self._interaction,
grid_points,
self._sigmas,
frequency_step=frequency_step,
num_frequency_points=num_frequency_points,
temperatures=temperatures,
scattering_event_class=scattering_event_class,
write_detail=write_gamma_detail,
output_filename=output_filename,
log_level=self._log_level)
def write_imag_self_energy(self, filename=None):
write_imag_self_energy(
self._imag_self_energy,
self._mesh_numbers,
self._grid_points,
self._band_indices,
self._frequency_points,
self._temperatures,
self._sigmas,
scattering_event_class=self._scattering_event_class,
filename=filename,
is_mesh_symmetry=self._is_mesh_symmetry)
def run_thermal_conductivity(
        self,
        is_LBTE=False,
        temperatures=np.arange(0, 1001, 10, dtype='double'),
        is_isotope=False,
        mass_variances=None,
        grid_points=None,
        boundary_mfp=None,  # in micrometre
        solve_collective_phonon=False,
        use_ave_pp=False,
        gamma_unit_conversion=None,
        mesh_divisors=None,
        coarse_mesh_shifts=None,
        is_reducible_collision_matrix=False,
        is_kappa_star=True,
        gv_delta_q=None,  # for group velocity
        is_full_pp=False,
        pinv_cutoff=1.0e-8,  # for pseudo-inversion of collision matrix
        pinv_solver=0,  # solver of pseudo-inversion of collision matrix
        write_gamma=False,
        read_gamma=False,
        is_N_U=False,
        write_kappa=False,
        write_gamma_detail=False,
        write_collision=False,
        read_collision=False,
        write_pp=False,
        read_pp=False,
        write_LBTE_solution=False,
        compression=None,
        input_filename=None,
        output_filename=None):
    """Run a lattice thermal conductivity calculation.

    The solver is chosen by ``is_LBTE``: direct solution of the
    linearized Boltzmann transport equation when True, otherwise the
    relaxation-time approximation. The resulting conductivity object
    is stored in ``self._thermal_conductivity``.

    NOTE(review): ``temperatures`` uses a mutable ndarray default,
    shared across calls — safe only if never mutated downstream;
    consider a ``None`` sentinel.
    """
    # The ph-ph interaction must exist before this calculation.
    if self._interaction is None:
        self.set_phph_interaction()
    if is_LBTE:
        self._thermal_conductivity = get_thermal_conductivity_LBTE(
            self._interaction,
            self._primitive_symmetry,
            temperatures=temperatures,
            sigmas=self._sigmas,
            sigma_cutoff=self._sigma_cutoff,
            is_isotope=is_isotope,
            mass_variances=mass_variances,
            grid_points=grid_points,
            boundary_mfp=boundary_mfp,
            solve_collective_phonon=solve_collective_phonon,
            is_reducible_collision_matrix=is_reducible_collision_matrix,
            is_kappa_star=is_kappa_star,
            gv_delta_q=gv_delta_q,
            is_full_pp=is_full_pp,
            pinv_cutoff=pinv_cutoff,
            pinv_solver=pinv_solver,
            write_collision=write_collision,
            read_collision=read_collision,
            write_kappa=write_kappa,
            write_pp=write_pp,
            read_pp=read_pp,
            write_LBTE_solution=write_LBTE_solution,
            compression=compression,
            input_filename=input_filename,
            output_filename=output_filename,
            log_level=self._log_level)
    else:
        self._thermal_conductivity = get_thermal_conductivity_RTA(
            self._interaction,
            self._primitive_symmetry,
            temperatures=temperatures,
            sigmas=self._sigmas,
            sigma_cutoff=self._sigma_cutoff,
            is_isotope=is_isotope,
            mass_variances=mass_variances,
            grid_points=grid_points,
            boundary_mfp=boundary_mfp,
            use_ave_pp=use_ave_pp,
            gamma_unit_conversion=gamma_unit_conversion,
            mesh_divisors=mesh_divisors,
            coarse_mesh_shifts=coarse_mesh_shifts,
            is_kappa_star=is_kappa_star,
            gv_delta_q=gv_delta_q,
            is_full_pp=is_full_pp,
            write_gamma=write_gamma,
            read_gamma=read_gamma,
            is_N_U=is_N_U,
            write_kappa=write_kappa,
            write_pp=write_pp,
            read_pp=read_pp,
            write_gamma_detail=write_gamma_detail,
            compression=compression,
            input_filename=input_filename,
            output_filename=output_filename,
            log_level=self._log_level)
def get_thermal_conductivity(self):
    """Return the conductivity object set by ``run_thermal_conductivity``."""
    return self._thermal_conductivity
def get_frequency_shift(
        self,
        grid_points,
        temperatures=np.arange(0, 1001, 10, dtype='double'),
        epsilons=None,
        output_filename=None):
    """Frequency shift from lowest order diagram is calculated.

    Args:
        epsilons(list of float):
            The values to avoid divergence. When multiple values are
            given, frequency shifts for those values are returned.
            Defaults to [0.1].
    """
    # The ph-ph interaction must exist before this calculation.
    if self._interaction is None:
        self.set_phph_interaction()
    if epsilons is None:
        _epsilons = [0.1]
    else:
        _epsilons = epsilons
    self._grid_points = grid_points
    get_frequency_shift(self._interaction,
                        self._grid_points,
                        self._band_indices,
                        _epsilons,
                        temperatures,
                        output_filename=output_filename,
                        log_level=self._log_level)
def _search_symmetry(self):
    """Find symmetry operations of the (fc3) supercell."""
    self._symmetry = Symmetry(self._supercell,
                              self._symprec,
                              self._is_symmetry)
def _search_primitive_symmetry(self):
    """Find symmetry of the primitive cell and warn on mismatch.

    The numbers of point-group operations of the supercell and the
    primitive cell are compared; a difference is reported as a warning.
    """
    self._primitive_symmetry = Symmetry(self._primitive,
                                        self._symprec,
                                        self._is_symmetry)
    if (len(self._symmetry.get_pointgroup_operations()) !=
        len(self._primitive_symmetry.get_pointgroup_operations())):
        # Bug fix: the two adjacent string literals lacked a separating
        # space, printing "...primitivecell are different."
        print("Warning: point group symmetries of supercell and "
              "primitive cell are different.")
def _search_phonon_supercell_symmetry(self):
    """Set symmetry of the phonon (fc2) supercell.

    When no separate phonon supercell matrix is given, the fc3
    supercell symmetry is reused as-is.
    """
    if self._phonon_supercell_matrix is None:
        self._phonon_supercell_symmetry = self._symmetry
        return
    self._phonon_supercell_symmetry = Symmetry(self._phonon_supercell,
                                               self._symprec,
                                               self._is_symmetry)
def _build_supercell(self):
    """Create the fc3 supercell from the unit cell and supercell matrix."""
    self._supercell = get_supercell(self._unitcell,
                                    self._supercell_matrix,
                                    self._symprec)
def _build_phonon_supercell(self):
    """Create the supercell used for harmonic phonon properties.

    phonon_supercell:
        This supercell is used for harmonic phonons (frequencies,
        eigenvectors, group velocities, ...)
    phonon_supercell_matrix:
        Different supercell size can be specified.
    """
    if self._phonon_supercell_matrix is None:
        # No dedicated fc2 supercell requested; reuse the fc3 one.
        self._phonon_supercell = self._supercell
    else:
        self._phonon_supercell = get_supercell(self._unitcell,
                                               self._phonon_supercell_matrix,
                                               self._symprec)
def _build_phonon_primitive_cell(self):
    """Create the primitive cell for fc2; it must match the fc3 primitive.

    Raises
    ------
    RuntimeError
        When atomic numbers of the fc2 and fc3 primitive cells differ.
    """
    if self._phonon_supercell_matrix is None:
        self._phonon_primitive = self._primitive
    else:
        self._phonon_primitive = self._get_primitive_cell(
            self._phonon_supercell,
            self._phonon_supercell_matrix,
            self._primitive_matrix)
    if (self._primitive is not None and
        (self._primitive.get_atomic_numbers() !=
         self._phonon_primitive.get_atomic_numbers()).any()):
        # Bug fix: the original printed a misleading message
        # ("can be different") and raised a bare RuntimeError; raise
        # with an accurate message instead.
        raise RuntimeError(
            "Primitive cells for fc2 and fc3 are inconsistent: "
            "their atomic numbers differ.")
def _build_phonon_supercells_with_displacements(self,
                                                supercell,
                                                displacement_dataset):
    """Return supercells each with one displaced atom.

    One supercell is created per entry of
    ``displacement_dataset['first_atoms']``.
    """
    supercells = []
    magmoms = supercell.get_magnetic_moments()
    masses = supercell.get_masses()
    numbers = supercell.get_atomic_numbers()
    lattice = supercell.get_cell()
    for disp1 in displacement_dataset['first_atoms']:
        disp_cart1 = disp1['displacement']
        # assumes get_positions() returns a fresh copy per call, so the
        # in-place displacement does not leak between iterations — TODO confirm
        positions = supercell.get_positions()
        positions[disp1['number']] += disp_cart1
        supercells.append(
            Atoms(numbers=numbers,
                  masses=masses,
                  magmoms=magmoms,
                  positions=positions,
                  cell=lattice,
                  pbc=True))
    return supercells
def _build_supercells_with_displacements(self):
    """Build all supercells with single and pair displacements.

    The first entries are the single-displacement supercells (shared
    builder with the phonon case); the pair-displacement supercells
    follow. Pairs excluded by the cutoff-pair criterion
    (``disp2['included']`` is False) are appended as None placeholders
    to keep indices aligned with the displacement dataset.
    """
    magmoms = self._supercell.get_magnetic_moments()
    masses = self._supercell.get_masses()
    numbers = self._supercell.get_atomic_numbers()
    lattice = self._supercell.get_cell()
    # Single-displacement supercells. (Bug fix: removed the dead
    # ``supercells = []`` assignment that this call overwrote.)
    supercells = self._build_phonon_supercells_with_displacements(
        self._supercell,
        self._displacement_dataset)
    # Pair-displacement supercells.
    for disp1 in self._displacement_dataset['first_atoms']:
        disp_cart1 = disp1['displacement']
        for disp2 in disp1['second_atoms']:
            # 'included' is absent when no cutoff was applied; default True.
            if disp2.get('included', True):
                positions = self._supercell.get_positions()
                positions[disp1['number']] += disp_cart1
                positions[disp2['number']] += disp2['displacement']
                supercells.append(Atoms(numbers=numbers,
                                        masses=masses,
                                        magmoms=magmoms,
                                        positions=positions,
                                        cell=lattice,
                                        pbc=True))
            else:
                supercells.append(None)
    self._supercells_with_displacements = supercells
def _get_primitive_cell(self,
                        supercell,
                        supercell_matrix,
                        primitive_matrix):
    """Return the primitive cell of ``supercell``.

    The transformation matrix relative to the supercell is
    supercell_matrix^-1, multiplied by primitive_matrix when given.
    """
    t_mat = np.linalg.inv(supercell_matrix)
    if primitive_matrix is not None:
        t_mat = np.dot(t_mat, primitive_matrix)
    return get_primitive(supercell, t_mat, self._symprec)
def _guess_primitive_matrix(self):
    """Guess the primitive matrix from the input unit cell."""
    return guess_primitive_matrix(self._unitcell, symprec=self._symprec)
def _set_masses(self, masses):
    """Propagate primitive-cell masses to all stored cells.

    ``masses`` is given per primitive-cell atom; supercell and unit
    cell masses are derived through the stored atom-index maps.
    """
    p_masses = np.array(masses)
    self._primitive.set_masses(p_masses)
    p2p_map = self._primitive.get_primitive_to_primitive_map()
    # Expand primitive masses onto the fc3 supercell atoms.
    s_masses = p_masses[[p2p_map[x] for x in
                         self._primitive.get_supercell_to_primitive_map()]]
    self._supercell.set_masses(s_masses)
    u2s_map = self._supercell.get_unitcell_to_supercell_map()
    u_masses = s_masses[u2s_map]
    self._unitcell.set_masses(u_masses)
    # Repeat the expansion for the phonon (fc2) cells.
    self._phonon_primitive.set_masses(p_masses)
    p2p_map = self._phonon_primitive.get_primitive_to_primitive_map()
    s_masses = p_masses[
        [p2p_map[x] for x in
         self._phonon_primitive.get_supercell_to_primitive_map()]]
    self._phonon_supercell.set_masses(s_masses)
def _set_mesh_numbers(self, mesh):
    """Validate and store sampling mesh numbers.

    ``mesh`` may be three integers, or another array-like value that
    is handed to ``length2mesh``.
    """
    _mesh = np.array(mesh)
    mesh_nums = None
    # NOTE(review): a scalar mesh (0-d array) has shape (), which is
    # falsy, so it falls through to the TypeError below — confirm that
    # this is intended.
    if _mesh.shape:
        if _mesh.shape == (3,):
            mesh_nums = mesh
        elif self._primitive_symmetry is None:
            mesh_nums = length2mesh(mesh, self._primitive.get_cell())
        else:
            rotations = self._primitive_symmetry.get_pointgroup_operations()
            mesh_nums = length2mesh(mesh, self._primitive.get_cell(),
                                    rotations=rotations)
    if mesh_nums is None:
        msg = "mesh has inappropriate type."
        raise TypeError(msg)
    self._mesh_numbers = mesh_nums
def _get_fc3(self,
             forces_fc3,
             disp_dataset,
             is_compact_fc=False):
    """Compute fc2 and fc3 from forces of displaced supercells.

    ``forces_fc3`` is ordered with all single-displacement force sets
    first, followed by the pair-displacement force sets.
    ``disp_dataset`` is modified in place: 'forces' and 'delta_forces'
    entries are attached before calling ``get_fc3``.
    """
    count = 0
    # Forces of single-displacement supercells.
    for disp1 in disp_dataset['first_atoms']:
        disp1['forces'] = forces_fc3[count]
        count += 1
    # Force change induced by the second displacement only.
    for disp1 in disp_dataset['first_atoms']:
        for disp2 in disp1['second_atoms']:
            disp2['delta_forces'] = forces_fc3[count] - disp1['forces']
            count += 1
    fc2, fc3 = get_fc3(self._supercell,
                       self._primitive,
                       disp_dataset,
                       self._symmetry,
                       is_compact_fc=is_compact_fc,
                       verbose=self._log_level)
    return fc2, fc3
|
atztogo/phono3py | phono3py/phonon3/__init__.py | Phono3py._build_phonon_supercell | python | def _build_phonon_supercell(self):
if self._phonon_supercell_matrix is None:
self._phonon_supercell = self._supercell
else:
self._phonon_supercell = get_supercell(
self._unitcell, self._phonon_supercell_matrix, self._symprec) | phonon_supercell:
This supercell is used for harmonic phonons (frequencies,
eigenvectors, group velocities, ...)
phonon_supercell_matrix:
Different supercell size can be specified. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/__init__.py#L736-L748 | null | class Phono3py(object):
def __init__(self,
unitcell,
supercell_matrix,
primitive_matrix=None,
phonon_supercell_matrix=None,
masses=None,
mesh=None,
band_indices=None,
sigmas=None,
sigma_cutoff=None,
cutoff_frequency=1e-4,
frequency_factor_to_THz=VaspToTHz,
is_symmetry=True,
is_mesh_symmetry=True,
symmetrize_fc3q=False,
symprec=1e-5,
log_level=0,
lapack_zheev_uplo='L'):
if sigmas is None:
self._sigmas = [None]
else:
self._sigmas = sigmas
self._sigma_cutoff = sigma_cutoff
self._symprec = symprec
self._frequency_factor_to_THz = frequency_factor_to_THz
self._is_symmetry = is_symmetry
self._is_mesh_symmetry = is_mesh_symmetry
self._lapack_zheev_uplo = lapack_zheev_uplo
self._symmetrize_fc3q = symmetrize_fc3q
self._cutoff_frequency = cutoff_frequency
self._log_level = log_level
# Create supercell and primitive cell
self._unitcell = unitcell
self._supercell_matrix = supercell_matrix
if type(primitive_matrix) is str and primitive_matrix == 'auto':
self._primitive_matrix = self._guess_primitive_matrix()
else:
self._primitive_matrix = primitive_matrix
self._phonon_supercell_matrix = phonon_supercell_matrix # optional
self._supercell = None
self._primitive = None
self._phonon_supercell = None
self._phonon_primitive = None
self._build_supercell()
self._build_primitive_cell()
self._build_phonon_supercell()
self._build_phonon_primitive_cell()
if masses is not None:
self._set_masses(masses)
# Set supercell, primitive, and phonon supercell symmetries
self._symmetry = None
self._primitive_symmetry = None
self._phonon_supercell_symmetry = None
self._search_symmetry()
self._search_primitive_symmetry()
self._search_phonon_supercell_symmetry()
# Displacements and supercells
self._supercells_with_displacements = None
self._displacement_dataset = None
self._phonon_displacement_dataset = None
self._phonon_supercells_with_displacements = None
# Thermal conductivity
self._thermal_conductivity = None # conductivity_RTA object
# Imaginary part of self energy at frequency points
self._imag_self_energy = None
self._scattering_event_class = None
self._grid_points = None
self._frequency_points = None
self._temperatures = None
# Other variables
self._fc2 = None
self._fc3 = None
self._nac_params = None
# Setup interaction
self._interaction = None
self._mesh_numbers = None
self._band_indices = None
self._band_indices_flatten = None
if mesh is not None:
self._set_mesh_numbers(mesh)
self.set_band_indices(band_indices)
def set_band_indices(self, band_indices):
if band_indices is None:
num_band = self._primitive.get_number_of_atoms() * 3
self._band_indices = [np.arange(num_band, dtype='intc')]
else:
self._band_indices = band_indices
self._band_indices_flatten = np.hstack(
self._band_indices).astype('intc')
def set_phph_interaction(self,
nac_params=None,
nac_q_direction=None,
constant_averaged_interaction=None,
frequency_scale_factor=None,
unit_conversion=None,
solve_dynamical_matrices=True):
if self._mesh_numbers is None:
print("'mesh' has to be set in Phono3py instantiation.")
raise RuntimeError
self._nac_params = nac_params
self._interaction = Interaction(
self._supercell,
self._primitive,
self._mesh_numbers,
self._primitive_symmetry,
fc3=self._fc3,
band_indices=self._band_indices_flatten,
constant_averaged_interaction=constant_averaged_interaction,
frequency_factor_to_THz=self._frequency_factor_to_THz,
frequency_scale_factor=frequency_scale_factor,
unit_conversion=unit_conversion,
cutoff_frequency=self._cutoff_frequency,
is_mesh_symmetry=self._is_mesh_symmetry,
symmetrize_fc3q=self._symmetrize_fc3q,
lapack_zheev_uplo=self._lapack_zheev_uplo)
self._interaction.set_nac_q_direction(nac_q_direction=nac_q_direction)
self._interaction.set_dynamical_matrix(
self._fc2,
self._phonon_supercell,
self._phonon_primitive,
nac_params=self._nac_params,
solve_dynamical_matrices=solve_dynamical_matrices,
verbose=self._log_level)
def set_phonon_data(self, frequencies, eigenvectors, grid_address):
if self._interaction is not None:
return self._interaction.set_phonon_data(frequencies,
eigenvectors,
grid_address)
else:
return False
def get_phonon_data(self):
if self._interaction is not None:
grid_address = self._interaction.get_grid_address()
freqs, eigvecs, _ = self._interaction.get_phonons()
return freqs, eigvecs, grid_address
else:
msg = "set_phph_interaction has to be done."
raise RuntimeError(msg)
def generate_displacements(self,
distance=0.03,
cutoff_pair_distance=None,
is_plusminus='auto',
is_diagonal=True):
direction_dataset = get_third_order_displacements(
self._supercell,
self._symmetry,
is_plusminus=is_plusminus,
is_diagonal=is_diagonal)
self._displacement_dataset = direction_to_displacement(
direction_dataset,
distance,
self._supercell,
cutoff_distance=cutoff_pair_distance)
if self._phonon_supercell_matrix is not None:
# 'is_diagonal=False' below is made intentionally. For
# third-order force constants, we need better accuracy,
# and I expect this choice is better for it, but not very
# sure.
# In phono3py, two atoms are displaced for each
# configuration and the displacements are chosen, first
# displacement from the perfect supercell, then second
# displacement, considering symmetry. If I choose
# is_diagonal=False for the first displacement, the
# symmetry is less broken and the number of second
# displacements can be smaller than in the case of
# is_diagonal=True for the first displacement. This is
# done in the call get_least_displacements() in
# phonon3.displacement_fc3.get_third_order_displacements().
#
# The call get_least_displacements() is only for the
# second order force constants, but 'is_diagonal=False' to
# be consistent with the above function call, and also for
# the accuracy when calculating ph-ph interaction
# strength because displacement directions are better to be
# close to perpendicular each other to fit force constants.
#
# On the discussion of the accuracy, these are just my
# expectation when I designed phono3py in the early time,
# and in fact now I guess not very different. If these are
# little different, then I should not surprise users to
# change the default behaviour. At this moment, this is
# open question and we will have more advance and should
# have better specificy external software on this.
phonon_displacement_directions = get_least_displacements(
self._phonon_supercell_symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
self._phonon_displacement_dataset = directions_to_displacement_dataset(
phonon_displacement_directions,
distance,
self._phonon_supercell)
def produce_fc2(self,
forces_fc2,
displacement_dataset=None,
symmetrize_fc2=False,
is_compact_fc=False,
use_alm=False,
alm_options=None):
if displacement_dataset is None:
if self._phonon_displacement_dataset is None:
disp_dataset = self._displacement_dataset
else:
disp_dataset = self._phonon_displacement_dataset
else:
disp_dataset = displacement_dataset
for forces, disp1 in zip(forces_fc2, disp_dataset['first_atoms']):
disp1['forces'] = forces
if is_compact_fc:
p2s_map = self._phonon_primitive.p2s_map
else:
p2s_map = None
if use_alm:
from phonopy.interface.alm import get_fc2 as get_fc2_alm
self._fc2 = get_fc2_alm(self._phonon_supercell,
self._phonon_primitive,
disp_dataset,
atom_list=p2s_map,
alm_options=alm_options,
log_level=self._log_level)
else:
self._fc2 = get_fc2(self._phonon_supercell,
self._phonon_supercell_symmetry,
disp_dataset,
atom_list=p2s_map)
if symmetrize_fc2:
if is_compact_fc:
symmetrize_compact_force_constants(
self._fc2, self._phonon_primitive)
else:
symmetrize_force_constants(self._fc2)
def produce_fc3(self,
forces_fc3,
displacement_dataset=None,
cutoff_distance=None, # set fc3 zero
symmetrize_fc3r=False,
is_compact_fc=False,
use_alm=False,
alm_options=None):
if displacement_dataset is None:
disp_dataset = self._displacement_dataset
else:
disp_dataset = displacement_dataset
if use_alm:
from phono3py.other.alm_wrapper import get_fc3 as get_fc3_alm
fc2, fc3 = get_fc3_alm(self._supercell,
self._primitive,
forces_fc3,
disp_dataset,
self._symmetry,
alm_options=alm_options,
is_compact_fc=is_compact_fc,
log_level=self._log_level)
else:
fc2, fc3 = self._get_fc3(forces_fc3,
disp_dataset,
is_compact_fc=is_compact_fc)
if symmetrize_fc3r:
if is_compact_fc:
set_translational_invariance_compact_fc3(
fc3, self._primitive)
set_permutation_symmetry_compact_fc3(fc3, self._primitive)
if self._fc2 is None:
symmetrize_compact_force_constants(fc2,
self._primitive)
else:
set_translational_invariance_fc3(fc3)
set_permutation_symmetry_fc3(fc3)
if self._fc2 is None:
symmetrize_force_constants(fc2)
# Set fc2 and fc3
self._fc3 = fc3
# Normally self._fc2 is overwritten in produce_fc2
if self._fc2 is None:
self._fc2 = fc2
def cutoff_fc3_by_zero(self, cutoff_distance, fc3=None):
if fc3 is None:
_fc3 = self._fc3
else:
_fc3 = fc3
cutoff_fc3_by_zero(_fc3, # overwritten
self._supercell,
cutoff_distance,
self._symprec)
def set_permutation_symmetry(self):
if self._fc2 is not None:
set_permutation_symmetry(self._fc2)
if self._fc3 is not None:
set_permutation_symmetry_fc3(self._fc3)
def set_translational_invariance(self):
if self._fc2 is not None:
set_translational_invariance(self._fc2)
if self._fc3 is not None:
set_translational_invariance_fc3(self._fc3)
@property
def version(self):
return __version__
def get_version(self):
return self.version
def get_interaction_strength(self):
return self._interaction
def get_fc2(self):
return self._fc2
def set_fc2(self, fc2):
self._fc2 = fc2
def get_fc3(self):
return self._fc3
def set_fc3(self, fc3):
self._fc3 = fc3
@property
def nac_params(self):
return self._nac_params
def get_nac_params(self):
return self.nac_params
@property
def primitive(self):
return self._primitive
def get_primitive(self):
return self.primitive
@property
def unitcell(self):
return self._unitcell
def get_unitcell(self):
return self.unitcell
@property
def supercell(self):
return self._supercell
def get_supercell(self):
return self.supercell
@property
def phonon_supercell(self):
return self._phonon_supercell
def get_phonon_supercell(self):
return self.phonon_supercell
@property
def phonon_primitive(self):
return self._phonon_primitive
def get_phonon_primitive(self):
return self.phonon_primitive
@property
def symmetry(self):
"""return symmetry of supercell"""
return self._symmetry
def get_symmetry(self):
return self.symmetry
@property
def primitive_symmetry(self):
"""return symmetry of primitive cell"""
return self._primitive_symmetry
def get_primitive_symmetry(self):
"""return symmetry of primitive cell"""
return self.primitive_symmetry
def get_phonon_supercell_symmetry(self):
return self._phonon_supercell_symmetry
@property
def supercell_matrix(self):
return self._supercell_matrix
def get_supercell_matrix(self):
return self.supercell_matrix
@property
def phonon_supercell_matrix(self):
return self._phonon_supercell_matrix
def get_phonon_supercell_matrix(self):
return self.phonon_supercell_matrix
@property
def primitive_matrix(self):
return self._primitive_matrix
def get_primitive_matrix(self):
return self.primitive_matrix
@property
def unit_conversion_factor(self):
return self._frequency_factor_to_THz
def set_displacement_dataset(self, dataset):
self._displacement_dataset = dataset
@property
def displacement_dataset(self):
return self._displacement_dataset
def get_displacement_dataset(self):
return self.displacement_dataset
def get_phonon_displacement_dataset(self):
return self._phonon_displacement_dataset
def get_supercells_with_displacements(self):
if self._supercells_with_displacements is None:
self._build_supercells_with_displacements()
return self._supercells_with_displacements
def get_phonon_supercells_with_displacements(self):
if self._phonon_supercells_with_displacements is None:
if self._phonon_displacement_dataset is not None:
self._phonon_supercells_with_displacements = \
self._build_phonon_supercells_with_displacements(
self._phonon_supercell,
self._phonon_displacement_dataset)
return self._phonon_supercells_with_displacements
@property
def mesh_numbers(self):
return self._mesh_numbers
def run_imag_self_energy(self,
grid_points,
frequency_step=None,
num_frequency_points=None,
temperatures=None,
scattering_event_class=None,
write_gamma_detail=False,
output_filename=None):
if self._interaction is None:
self.set_phph_interaction()
if temperatures is None:
temperatures = [0.0, 300.0]
self._grid_points = grid_points
self._temperatures = temperatures
self._scattering_event_class = scattering_event_class
self._imag_self_energy, self._frequency_points = get_imag_self_energy(
self._interaction,
grid_points,
self._sigmas,
frequency_step=frequency_step,
num_frequency_points=num_frequency_points,
temperatures=temperatures,
scattering_event_class=scattering_event_class,
write_detail=write_gamma_detail,
output_filename=output_filename,
log_level=self._log_level)
def write_imag_self_energy(self, filename=None):
write_imag_self_energy(
self._imag_self_energy,
self._mesh_numbers,
self._grid_points,
self._band_indices,
self._frequency_points,
self._temperatures,
self._sigmas,
scattering_event_class=self._scattering_event_class,
filename=filename,
is_mesh_symmetry=self._is_mesh_symmetry)
def run_thermal_conductivity(
self,
is_LBTE=False,
temperatures=np.arange(0, 1001, 10, dtype='double'),
is_isotope=False,
mass_variances=None,
grid_points=None,
boundary_mfp=None, # in micrometre
solve_collective_phonon=False,
use_ave_pp=False,
gamma_unit_conversion=None,
mesh_divisors=None,
coarse_mesh_shifts=None,
is_reducible_collision_matrix=False,
is_kappa_star=True,
gv_delta_q=None, # for group velocity
is_full_pp=False,
pinv_cutoff=1.0e-8, # for pseudo-inversion of collision matrix
pinv_solver=0, # solver of pseudo-inversion of collision matrix
write_gamma=False,
read_gamma=False,
is_N_U=False,
write_kappa=False,
write_gamma_detail=False,
write_collision=False,
read_collision=False,
write_pp=False,
read_pp=False,
write_LBTE_solution=False,
compression=None,
input_filename=None,
output_filename=None):
if self._interaction is None:
self.set_phph_interaction()
if is_LBTE:
self._thermal_conductivity = get_thermal_conductivity_LBTE(
self._interaction,
self._primitive_symmetry,
temperatures=temperatures,
sigmas=self._sigmas,
sigma_cutoff=self._sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
grid_points=grid_points,
boundary_mfp=boundary_mfp,
solve_collective_phonon=solve_collective_phonon,
is_reducible_collision_matrix=is_reducible_collision_matrix,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
pinv_cutoff=pinv_cutoff,
pinv_solver=pinv_solver,
write_collision=write_collision,
read_collision=read_collision,
write_kappa=write_kappa,
write_pp=write_pp,
read_pp=read_pp,
write_LBTE_solution=write_LBTE_solution,
compression=compression,
input_filename=input_filename,
output_filename=output_filename,
log_level=self._log_level)
else:
self._thermal_conductivity = get_thermal_conductivity_RTA(
self._interaction,
self._primitive_symmetry,
temperatures=temperatures,
sigmas=self._sigmas,
sigma_cutoff=self._sigma_cutoff,
is_isotope=is_isotope,
mass_variances=mass_variances,
grid_points=grid_points,
boundary_mfp=boundary_mfp,
use_ave_pp=use_ave_pp,
gamma_unit_conversion=gamma_unit_conversion,
mesh_divisors=mesh_divisors,
coarse_mesh_shifts=coarse_mesh_shifts,
is_kappa_star=is_kappa_star,
gv_delta_q=gv_delta_q,
is_full_pp=is_full_pp,
write_gamma=write_gamma,
read_gamma=read_gamma,
is_N_U=is_N_U,
write_kappa=write_kappa,
write_pp=write_pp,
read_pp=read_pp,
write_gamma_detail=write_gamma_detail,
compression=compression,
input_filename=input_filename,
output_filename=output_filename,
log_level=self._log_level)
def get_thermal_conductivity(self):
return self._thermal_conductivity
def get_frequency_shift(
self,
grid_points,
temperatures=np.arange(0, 1001, 10, dtype='double'),
epsilons=None,
output_filename=None):
"""Frequency shift from lowest order diagram is calculated.
Args:
epslins(list of float):
The value to avoid divergence. When multiple values are given
frequency shifts for those values are returned.
"""
if self._interaction is None:
self.set_phph_interaction()
if epsilons is None:
_epsilons = [0.1]
else:
_epsilons = epsilons
self._grid_points = grid_points
get_frequency_shift(self._interaction,
self._grid_points,
self._band_indices,
_epsilons,
temperatures,
output_filename=output_filename,
log_level=self._log_level)
def _search_symmetry(self):
self._symmetry = Symmetry(self._supercell,
self._symprec,
self._is_symmetry)
def _search_primitive_symmetry(self):
    """Find symmetry of the primitive cell and warn on mismatch.

    The numbers of point-group operations of the supercell and the
    primitive cell are compared; a difference is reported as a warning.
    """
    self._primitive_symmetry = Symmetry(self._primitive,
                                        self._symprec,
                                        self._is_symmetry)
    if (len(self._symmetry.get_pointgroup_operations()) !=
        len(self._primitive_symmetry.get_pointgroup_operations())):
        # Bug fix: the two adjacent string literals lacked a separating
        # space, printing "...primitivecell are different."
        print("Warning: point group symmetries of supercell and "
              "primitive cell are different.")
def _search_phonon_supercell_symmetry(self):
if self._phonon_supercell_matrix is None:
self._phonon_supercell_symmetry = self._symmetry
else:
self._phonon_supercell_symmetry = Symmetry(self._phonon_supercell,
self._symprec,
self._is_symmetry)
def _build_supercell(self):
self._supercell = get_supercell(self._unitcell,
self._supercell_matrix,
self._symprec)
def _build_primitive_cell(self):
"""
primitive_matrix:
Relative axes of primitive cell to the input unit cell.
Relative axes to the supercell is calculated by:
supercell_matrix^-1 * primitive_matrix
Therefore primitive cell lattice is finally calculated by:
(supercell_lattice * (supercell_matrix)^-1 * primitive_matrix)^T
"""
self._primitive = self._get_primitive_cell(
self._supercell, self._supercell_matrix, self._primitive_matrix)
def _build_phonon_primitive_cell(self):
    """Create the primitive cell for fc2; it must match the fc3 primitive.

    Raises
    ------
    RuntimeError
        When atomic numbers of the fc2 and fc3 primitive cells differ.
    """
    if self._phonon_supercell_matrix is None:
        self._phonon_primitive = self._primitive
    else:
        self._phonon_primitive = self._get_primitive_cell(
            self._phonon_supercell,
            self._phonon_supercell_matrix,
            self._primitive_matrix)
    if (self._primitive is not None and
        (self._primitive.get_atomic_numbers() !=
         self._phonon_primitive.get_atomic_numbers()).any()):
        # Bug fix: the original printed a misleading message
        # ("can be different") and raised a bare RuntimeError; raise
        # with an accurate message instead.
        raise RuntimeError(
            "Primitive cells for fc2 and fc3 are inconsistent: "
            "their atomic numbers differ.")
def _build_phonon_supercells_with_displacements(self,
supercell,
displacement_dataset):
supercells = []
magmoms = supercell.get_magnetic_moments()
masses = supercell.get_masses()
numbers = supercell.get_atomic_numbers()
lattice = supercell.get_cell()
for disp1 in displacement_dataset['first_atoms']:
disp_cart1 = disp1['displacement']
positions = supercell.get_positions()
positions[disp1['number']] += disp_cart1
supercells.append(
Atoms(numbers=numbers,
masses=masses,
magmoms=magmoms,
positions=positions,
cell=lattice,
pbc=True))
return supercells
def _build_supercells_with_displacements(self):
    """Build all supercells with single and pair displacements.

    The first entries are the single-displacement supercells (shared
    builder with the phonon case); the pair-displacement supercells
    follow. Pairs excluded by the cutoff-pair criterion
    (``disp2['included']`` is False) are appended as None placeholders
    to keep indices aligned with the displacement dataset.
    """
    magmoms = self._supercell.get_magnetic_moments()
    masses = self._supercell.get_masses()
    numbers = self._supercell.get_atomic_numbers()
    lattice = self._supercell.get_cell()
    # Single-displacement supercells. (Bug fix: removed the dead
    # ``supercells = []`` assignment that this call overwrote.)
    supercells = self._build_phonon_supercells_with_displacements(
        self._supercell,
        self._displacement_dataset)
    # Pair-displacement supercells.
    for disp1 in self._displacement_dataset['first_atoms']:
        disp_cart1 = disp1['displacement']
        for disp2 in disp1['second_atoms']:
            # 'included' is absent when no cutoff was applied; default True.
            if disp2.get('included', True):
                positions = self._supercell.get_positions()
                positions[disp1['number']] += disp_cart1
                positions[disp2['number']] += disp2['displacement']
                supercells.append(Atoms(numbers=numbers,
                                        masses=masses,
                                        magmoms=magmoms,
                                        positions=positions,
                                        cell=lattice,
                                        pbc=True))
            else:
                supercells.append(None)
    self._supercells_with_displacements = supercells
def _get_primitive_cell(self,
supercell,
supercell_matrix,
primitive_matrix):
inv_supercell_matrix = np.linalg.inv(supercell_matrix)
if primitive_matrix is None:
t_mat = inv_supercell_matrix
else:
t_mat = np.dot(inv_supercell_matrix, primitive_matrix)
return get_primitive(supercell, t_mat, self._symprec)
def _guess_primitive_matrix(self):
return guess_primitive_matrix(self._unitcell, symprec=self._symprec)
def _set_masses(self, masses):
p_masses = np.array(masses)
self._primitive.set_masses(p_masses)
p2p_map = self._primitive.get_primitive_to_primitive_map()
s_masses = p_masses[[p2p_map[x] for x in
self._primitive.get_supercell_to_primitive_map()]]
self._supercell.set_masses(s_masses)
u2s_map = self._supercell.get_unitcell_to_supercell_map()
u_masses = s_masses[u2s_map]
self._unitcell.set_masses(u_masses)
self._phonon_primitive.set_masses(p_masses)
p2p_map = self._phonon_primitive.get_primitive_to_primitive_map()
s_masses = p_masses[
[p2p_map[x] for x in
self._phonon_primitive.get_supercell_to_primitive_map()]]
self._phonon_supercell.set_masses(s_masses)
def _set_mesh_numbers(self, mesh):
_mesh = np.array(mesh)
mesh_nums = None
if _mesh.shape:
if _mesh.shape == (3,):
mesh_nums = mesh
elif self._primitive_symmetry is None:
mesh_nums = length2mesh(mesh, self._primitive.get_cell())
else:
rotations = self._primitive_symmetry.get_pointgroup_operations()
mesh_nums = length2mesh(mesh, self._primitive.get_cell(),
rotations=rotations)
if mesh_nums is None:
msg = "mesh has inappropriate type."
raise TypeError(msg)
self._mesh_numbers = mesh_nums
def _get_fc3(self,
forces_fc3,
disp_dataset,
is_compact_fc=False):
count = 0
for disp1 in disp_dataset['first_atoms']:
disp1['forces'] = forces_fc3[count]
count += 1
for disp1 in disp_dataset['first_atoms']:
for disp2 in disp1['second_atoms']:
disp2['delta_forces'] = forces_fc3[count] - disp1['forces']
count += 1
fc2, fc3 = get_fc3(self._supercell,
self._primitive,
disp_dataset,
self._symmetry,
is_compact_fc=is_compact_fc,
verbose=self._log_level)
return fc2, fc3
|
atztogo/phono3py | phono3py/phonon3/triplets.py | get_triplets_at_q | python | def get_triplets_at_q(grid_point,
mesh,
point_group, # real space point group of space group
reciprocal_lattice, # column vectors
is_time_reversal=True,
swappable=True,
stores_triplets_map=False):
map_triplets, map_q, grid_address = _get_triplets_reciprocal_mesh_at_q(
grid_point,
mesh,
point_group,
is_time_reversal=is_time_reversal,
swappable=swappable)
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
triplets_at_q, weights = _get_BZ_triplets_at_q(
grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh)
assert np.prod(mesh) == weights.sum(), \
"Num grid points %d, sum of weight %d" % (
np.prod(mesh), weights.sum())
# These maps are required for collision matrix calculation.
if not stores_triplets_map:
map_triplets = None
map_q = None
return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q | Parameters
----------
grid_point : int
A grid point
mesh : array_like
Mesh numbers
dtype='intc'
shape=(3,)
point_group : array_like
Rotation matrices in real space. Note that those in reciprocal space
mean these matrices transposed (local terminology).
dtype='intc'
shape=(n_rot, 3, 3)
reciprocal_lattice : array_like
Reciprocal primitive basis vectors given as column vectors
dtype='double'
shape=(3, 3)
is_time_reversal : bool
Inversion symmetry is added if it doesn't exist.
swappable : bool
q1 and q2 can be swapped. By this number of triplets decreases.
Returns
-------
triplets_at_q : ndarray
Symmetry reduced number of triplets are stored as grid point
integer numbers.
dtype='uintp'
shape=(n_triplets, 3)
weights : ndarray
Weights of triplets in Brillouin zone
dtype='intc'
shape=(n_triplets,)
bz_grid_address : ndarray
Integer grid address of the points in Brillouin zone including
surface. The first prod(mesh) numbers of points are
independent. But the rest of points are
translational-symmetrically equivalent to some other points.
dtype='intc'
shape=(n_grid_points, 3)
bz_map : ndarray
Grid point mapping table containing BZ surface. See more
detail in spglib docstring.
dtype='uintp'
shape=(prod(mesh*2),)
map_triplets : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Mapping table of all triplets to symmetrically
independent triplets. More precisely, this gives a list of
index mapping from all q-points to independent q' of
q+q'+q''=G. Considering q' is enough because q is fixed and
q''=G-q-q' where G is automatically determined to choose
smallest |G|.
dtype='uintp'
shape=(prod(mesh),)
map_q : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Irreducible q-points stabilized by q-point of
specified grid_point.
dtype='uintp'
shape=(prod(mesh),) | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/triplets.py#L17-L114 | [
"def _get_triplets_reciprocal_mesh_at_q(fixed_grid_number,\n mesh,\n rotations,\n is_time_reversal=True,\n swappable=True):\n \"\"\"Search symmetry reduced triplets fixing one q-point\n\n Triplets of (q0, q1, q2) are searched.\n\n Parameters\n ----------\n fixed_grid_number : int\n Grid point of q0\n mesh : array_like\n Mesh numbers\n dtype='intc'\n shape=(3,)\n rotations : array_like\n Rotation matrices in real space. Note that those in reciprocal space\n mean these matrices transposed (local terminology).\n dtype='intc'\n shape=(n_rot, 3, 3)\n is_time_reversal : bool\n Inversion symemtry is added if it doesn't exist.\n swappable : bool\n q1 and q2 can be swapped. By this number of triplets decreases.\n\n \"\"\"\n\n import phono3py._phono3py as phono3c\n\n map_triplets = np.zeros(np.prod(mesh), dtype='uintp')\n map_q = np.zeros(np.prod(mesh), dtype='uintp')\n grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')\n\n phono3c.triplets_reciprocal_mesh_at_q(\n map_triplets,\n map_q,\n grid_address,\n fixed_grid_number,\n np.array(mesh, dtype='intc'),\n is_time_reversal * 1,\n np.array(rotations, dtype='intc', order='C'),\n swappable * 1)\n\n return map_triplets, map_q, grid_address\n",
"def _get_BZ_triplets_at_q(grid_point,\n bz_grid_address,\n bz_map,\n map_triplets,\n mesh):\n import phono3py._phono3py as phono3c\n\n weights = np.zeros(len(map_triplets), dtype='intc')\n for g in map_triplets:\n weights[g] += 1\n ir_weights = np.extract(weights > 0, weights)\n triplets = np.zeros((len(ir_weights), 3), dtype=bz_map.dtype)\n # triplets are overwritten.\n num_ir_ret = phono3c.BZ_triplets_at_q(triplets,\n grid_point,\n bz_grid_address,\n bz_map,\n map_triplets,\n np.array(mesh, dtype='intc'))\n assert num_ir_ret == len(ir_weights)\n\n return triplets, np.array(ir_weights, dtype='intc')\n"
] | import numpy as np
from phonopy.units import THzToEv, Kb
import phonopy.structure.spglib as spg
from phonopy.structure.symmetry import Symmetry
from phonopy.structure.tetrahedron_method import TetrahedronMethod
from phonopy.structure.grid_points import extract_ir_grid_points
def gaussian(x, sigma):
return 1.0 / np.sqrt(2 * np.pi) / sigma * np.exp(-x**2 / 2 / sigma**2)
def occupation(x, t):
return 1.0 / (np.exp(THzToEv * x / (Kb * t)) - 1)
def get_all_triplets(grid_point,
bz_grid_address,
bz_map,
mesh):
triplets_at_q, _ = _get_BZ_triplets_at_q(
grid_point,
bz_grid_address,
bz_map,
np.arange(np.prod(mesh), dtype=bz_map.dtype),
mesh)
return triplets_at_q
def get_nosym_triplets_at_q(grid_point,
mesh,
reciprocal_lattice,
stores_triplets_map=False):
grid_address = get_grid_address(mesh)
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
map_triplets = np.arange(len(grid_address), dtype=bz_map.dtype)
triplets_at_q, weights = _get_BZ_triplets_at_q(
grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh)
if not stores_triplets_map:
map_triplets = None
map_q = None
else:
map_q = map_triplets.copy()
return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q
def get_grid_address(mesh):
grid_mapping_table, grid_address = spg.get_stabilized_reciprocal_mesh(
mesh,
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]]],
is_time_reversal=False,
is_dense=True)
return grid_address
def get_bz_grid_address(mesh, reciprocal_lattice, with_boundary=False):
grid_address = get_grid_address(mesh)
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
if with_boundary:
return bz_grid_address, bz_map
else:
return bz_grid_address[:np.prod(mesh)]
def get_grid_point_from_address_py(address, mesh):
# X runs first in XYZ
# (*In spglib, Z first is possible with MACRO setting.)
m = mesh
return (address[0] % m[0] +
(address[1] % m[1]) * m[0] +
(address[2] % m[2]) * m[0] * m[1])
def get_grid_point_from_address(address, mesh):
"""Grid point number is given by grid address.
Parameters
----------
address : array_like
Grid address.
dtype='intc'
shape=(3,)
mesh : array_like
Mesh numbers.
dtype='intc'
shape=(3,)
Returns
-------
int
Grid point number.
"""
return spg.get_grid_point_from_address(address, mesh)
def get_bz_grid_point_from_address(address, mesh, bz_map):
# X runs first in XYZ
# (*In spglib, Z first is possible with MACRO setting.)
# 2m is defined in kpoint.c of spglib.
m = 2 * np.array(mesh, dtype='intc')
return bz_map[get_grid_point_from_address(address, m)]
def invert_grid_point(grid_point, mesh, grid_address, bz_map):
# gp --> [address] --> [-address] --> inv_gp
address = grid_address[grid_point]
return get_bz_grid_point_from_address(-address, mesh, bz_map)
def get_ir_grid_points(mesh, rotations, mesh_shifts=None):
if mesh_shifts is None:
mesh_shifts = [False, False, False]
grid_mapping_table, grid_address = spg.get_stabilized_reciprocal_mesh(
mesh,
rotations,
is_shift=np.where(mesh_shifts, 1, 0),
is_dense=True)
(ir_grid_points,
ir_grid_weights) = extract_ir_grid_points(grid_mapping_table)
return ir_grid_points, ir_grid_weights, grid_address, grid_mapping_table
def get_grid_points_by_rotations(grid_point,
reciprocal_rotations,
mesh,
mesh_shifts=None):
if mesh_shifts is None:
mesh_shifts = [False, False, False]
return spg.get_grid_points_by_rotations(
grid_point,
reciprocal_rotations,
mesh,
is_shift=np.where(mesh_shifts, 1, 0),
is_dense=True)
def get_BZ_grid_points_by_rotations(grid_point,
reciprocal_rotations,
mesh,
bz_map,
mesh_shifts=None):
if mesh_shifts is None:
mesh_shifts = [False, False, False]
return spg.get_BZ_grid_points_by_rotations(
grid_point,
reciprocal_rotations,
mesh,
bz_map,
is_shift=np.where(mesh_shifts, 1, 0),
is_dense=True)
def reduce_grid_points(mesh_divisors,
grid_address,
dense_grid_points,
dense_grid_weights=None,
coarse_mesh_shifts=None):
divisors = np.array(mesh_divisors, dtype='intc')
if (divisors == 1).all():
coarse_grid_points = np.array(dense_grid_points, dtype='uintp')
if dense_grid_weights is not None:
coarse_grid_weights = np.array(dense_grid_weights, dtype='intc')
else:
if coarse_mesh_shifts is None:
shift = [0, 0, 0]
else:
shift = np.where(coarse_mesh_shifts, divisors // 2, [0, 0, 0])
modulo = grid_address[dense_grid_points] % divisors
condition = (modulo == shift).all(axis=1)
coarse_grid_points = np.extract(condition, dense_grid_points)
if dense_grid_weights is not None:
coarse_grid_weights = np.extract(condition, dense_grid_weights)
if dense_grid_weights is None:
return coarse_grid_points
else:
return coarse_grid_points, coarse_grid_weights
def from_coarse_to_dense_grid_points(dense_mesh,
mesh_divisors,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=None):
if coarse_mesh_shifts is None:
coarse_mesh_shifts = [False, False, False]
shifts = np.where(coarse_mesh_shifts, 1, 0)
dense_grid_points = []
for cga in coarse_grid_address[coarse_grid_points]:
dense_address = cga * mesh_divisors + shifts * (mesh_divisors // 2)
dense_grid_points.append(get_grid_point_from_address(dense_address,
dense_mesh))
return np.array(dense_grid_points, dtype='uintp')
def get_coarse_ir_grid_points(primitive,
mesh,
mesh_divisors,
coarse_mesh_shifts,
is_kappa_star=True,
symprec=1e-5):
mesh = np.array(mesh, dtype='intc')
symmetry = Symmetry(primitive, symprec)
point_group = symmetry.get_pointgroup_operations()
if mesh_divisors is None:
(ir_grid_points,
ir_grid_weights,
grid_address,
grid_mapping_table) = get_ir_grid_points(mesh, point_group)
else:
mesh_divs = np.array(mesh_divisors, dtype='intc')
coarse_mesh = mesh // mesh_divs
if coarse_mesh_shifts is None:
coarse_mesh_shifts = [False, False, False]
if not is_kappa_star:
coarse_grid_address = get_grid_address(coarse_mesh)
coarse_grid_points = np.arange(np.prod(coarse_mesh), dtype='uintp')
else:
(coarse_ir_grid_points,
coarse_ir_grid_weights,
coarse_grid_address,
coarse_grid_mapping_table) = get_ir_grid_points(
coarse_mesh,
point_group,
mesh_shifts=coarse_mesh_shifts)
ir_grid_points = from_coarse_to_dense_grid_points(
mesh,
mesh_divs,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=coarse_mesh_shifts)
grid_address = get_grid_address(mesh)
ir_grid_weights = ir_grid_weights
reciprocal_lattice = np.linalg.inv(primitive.get_cell())
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
return (ir_grid_points,
ir_grid_weights,
bz_grid_address,
grid_mapping_table)
def get_number_of_triplets(primitive,
mesh,
grid_point,
swappable=True,
symprec=1e-5):
mesh = np.array(mesh, dtype='intc')
symmetry = Symmetry(primitive, symprec)
point_group = symmetry.get_pointgroup_operations()
reciprocal_lattice = np.linalg.inv(primitive.get_cell())
triplets_at_q, _, _, _, _, _ = get_triplets_at_q(
grid_point,
mesh,
point_group,
reciprocal_lattice,
swappable=swappable)
return len(triplets_at_q)
def get_triplets_integration_weights(interaction,
frequency_points,
sigma,
sigma_cutoff=None,
is_collision_matrix=False,
neighboring_phonons=False,
lang='C'):
triplets = interaction.get_triplets_at_q()[0]
frequencies = interaction.get_phonons()[0]
num_band = frequencies.shape[1]
g_zero = None
if is_collision_matrix:
g = np.empty(
(3, len(triplets), len(frequency_points), num_band, num_band),
dtype='double', order='C')
else:
g = np.empty(
(2, len(triplets), len(frequency_points), num_band, num_band),
dtype='double', order='C')
g[:] = 0
if sigma:
if lang == 'C':
import phono3py._phono3py as phono3c
g_zero = np.zeros(g.shape[1:], dtype='byte', order='C')
if sigma_cutoff is None:
cutoff = -1
else:
cutoff = float(sigma_cutoff)
# cutoff < 0 disables g_zero feature.
phono3c.triplets_integration_weights_with_sigma(
g,
g_zero,
frequency_points,
triplets,
frequencies,
sigma,
cutoff)
else:
for i, tp in enumerate(triplets):
f1s = frequencies[tp[1]]
f2s = frequencies[tp[2]]
for j, k in list(np.ndindex((num_band, num_band))):
f1 = f1s[j]
f2 = f2s[k]
g0 = gaussian(frequency_points - f1 - f2, sigma)
g[0, i, :, j, k] = g0
g1 = gaussian(frequency_points + f1 - f2, sigma)
g2 = gaussian(frequency_points - f1 + f2, sigma)
g[1, i, :, j, k] = g1 - g2
if len(g) == 3:
g[2, i, :, j, k] = g0 + g1 + g2
else:
if lang == 'C':
g_zero = np.zeros(g.shape[1:], dtype='byte', order='C')
_set_triplets_integration_weights_c(
g,
g_zero,
interaction,
frequency_points,
neighboring_phonons=neighboring_phonons)
else:
_set_triplets_integration_weights_py(
g, interaction, frequency_points)
return g, g_zero
def get_tetrahedra_vertices(relative_address,
mesh,
triplets_at_q,
bz_grid_address,
bz_map):
bzmesh = mesh * 2
grid_order = [1, mesh[0], mesh[0] * mesh[1]]
bz_grid_order = [1, bzmesh[0], bzmesh[0] * bzmesh[1]]
num_triplets = len(triplets_at_q)
vertices = np.zeros((num_triplets, 2, 24, 4), dtype='uintp')
for i, tp in enumerate(triplets_at_q):
for j, adrs_shift in enumerate(
(relative_address, -relative_address)):
adrs = bz_grid_address[tp[j + 1]] + adrs_shift
bz_gp = np.dot(adrs % bzmesh, bz_grid_order)
gp = np.dot(adrs % mesh, grid_order)
vgp = bz_map[bz_gp]
vertices[i, j] = vgp + (vgp == -1) * (gp + 1)
return vertices
def _get_triplets_reciprocal_mesh_at_q(fixed_grid_number,
mesh,
rotations,
is_time_reversal=True,
swappable=True):
"""Search symmetry reduced triplets fixing one q-point
Triplets of (q0, q1, q2) are searched.
Parameters
----------
fixed_grid_number : int
Grid point of q0
mesh : array_like
Mesh numbers
dtype='intc'
shape=(3,)
rotations : array_like
Rotation matrices in real space. Note that those in reciprocal space
mean these matrices transposed (local terminology).
dtype='intc'
shape=(n_rot, 3, 3)
is_time_reversal : bool
Inversion symemtry is added if it doesn't exist.
swappable : bool
q1 and q2 can be swapped. By this number of triplets decreases.
"""
import phono3py._phono3py as phono3c
map_triplets = np.zeros(np.prod(mesh), dtype='uintp')
map_q = np.zeros(np.prod(mesh), dtype='uintp')
grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')
phono3c.triplets_reciprocal_mesh_at_q(
map_triplets,
map_q,
grid_address,
fixed_grid_number,
np.array(mesh, dtype='intc'),
is_time_reversal * 1,
np.array(rotations, dtype='intc', order='C'),
swappable * 1)
return map_triplets, map_q, grid_address
def _get_BZ_triplets_at_q(grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh):
import phono3py._phono3py as phono3c
weights = np.zeros(len(map_triplets), dtype='intc')
for g in map_triplets:
weights[g] += 1
ir_weights = np.extract(weights > 0, weights)
triplets = np.zeros((len(ir_weights), 3), dtype=bz_map.dtype)
# triplets are overwritten.
num_ir_ret = phono3c.BZ_triplets_at_q(triplets,
grid_point,
bz_grid_address,
bz_map,
map_triplets,
np.array(mesh, dtype='intc'))
assert num_ir_ret == len(ir_weights)
return triplets, np.array(ir_weights, dtype='intc')
def _set_triplets_integration_weights_c(g,
g_zero,
interaction,
frequency_points,
neighboring_phonons=False):
import phono3py._phono3py as phono3c
reciprocal_lattice = np.linalg.inv(interaction.get_primitive().get_cell())
mesh = interaction.get_mesh_numbers()
thm = TetrahedronMethod(reciprocal_lattice, mesh=mesh)
grid_address = interaction.get_grid_address()
bz_map = interaction.get_bz_map()
triplets_at_q = interaction.get_triplets_at_q()[0]
if neighboring_phonons:
unique_vertices = thm.get_unique_tetrahedra_vertices()
for i, j in zip((1, 2), (1, -1)):
neighboring_grid_points = np.zeros(
len(unique_vertices) * len(triplets_at_q), dtype=bz_map.dtype)
phono3c.neighboring_grid_points(
neighboring_grid_points,
np.array(triplets_at_q[:, i], dtype='uintp').ravel(),
j * unique_vertices,
mesh,
grid_address,
bz_map)
interaction.set_phonons(np.unique(neighboring_grid_points))
phono3c.triplets_integration_weights(
g,
g_zero,
frequency_points,
thm.get_tetrahedra(),
mesh,
triplets_at_q,
interaction.get_phonons()[0],
grid_address,
bz_map)
def _set_triplets_integration_weights_py(g, interaction, frequency_points):
reciprocal_lattice = np.linalg.inv(interaction.get_primitive().get_cell())
mesh = interaction.get_mesh_numbers()
thm = TetrahedronMethod(reciprocal_lattice, mesh=mesh)
grid_address = interaction.get_grid_address()
bz_map = interaction.get_bz_map()
triplets_at_q = interaction.get_triplets_at_q()[0]
tetrahedra_vertices = get_tetrahedra_vertices(
thm.get_tetrahedra(),
mesh,
triplets_at_q,
grid_address,
bz_map)
interaction.set_phonons(np.unique(tetrahedra_vertices))
frequencies = interaction.get_phonons()[0]
num_band = frequencies.shape[1]
for i, vertices in enumerate(tetrahedra_vertices):
for j, k in list(np.ndindex((num_band, num_band))):
f1_v = frequencies[vertices[0], j]
f2_v = frequencies[vertices[1], k]
thm.set_tetrahedra_omegas(f1_v + f2_v)
thm.run(frequency_points)
g0 = thm.get_integration_weight()
g[0, i, :, j, k] = g0
thm.set_tetrahedra_omegas(-f1_v + f2_v)
thm.run(frequency_points)
g1 = thm.get_integration_weight()
thm.set_tetrahedra_omegas(f1_v - f2_v)
thm.run(frequency_points)
g2 = thm.get_integration_weight()
g[1, i, :, j, k] = g1 - g2
if len(g) == 3:
g[2, i, :, j, k] = g0 + g1 + g2
|
atztogo/phono3py | phono3py/phonon3/triplets.py | _get_triplets_reciprocal_mesh_at_q | python | def _get_triplets_reciprocal_mesh_at_q(fixed_grid_number,
mesh,
rotations,
is_time_reversal=True,
swappable=True):
import phono3py._phono3py as phono3c
map_triplets = np.zeros(np.prod(mesh), dtype='uintp')
map_q = np.zeros(np.prod(mesh), dtype='uintp')
grid_address = np.zeros((np.prod(mesh), 3), dtype='intc')
phono3c.triplets_reciprocal_mesh_at_q(
map_triplets,
map_q,
grid_address,
fixed_grid_number,
np.array(mesh, dtype='intc'),
is_time_reversal * 1,
np.array(rotations, dtype='intc', order='C'),
swappable * 1)
return map_triplets, map_q, grid_address | Search symmetry reduced triplets fixing one q-point
Triplets of (q0, q1, q2) are searched.
Parameters
----------
fixed_grid_number : int
Grid point of q0
mesh : array_like
Mesh numbers
dtype='intc'
shape=(3,)
rotations : array_like
Rotation matrices in real space. Note that those in reciprocal space
mean these matrices transposed (local terminology).
dtype='intc'
shape=(n_rot, 3, 3)
is_time_reversal : bool
Inversion symemtry is added if it doesn't exist.
swappable : bool
q1 and q2 can be swapped. By this number of triplets decreases. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/triplets.py#L476-L521 | null | import numpy as np
from phonopy.units import THzToEv, Kb
import phonopy.structure.spglib as spg
from phonopy.structure.symmetry import Symmetry
from phonopy.structure.tetrahedron_method import TetrahedronMethod
from phonopy.structure.grid_points import extract_ir_grid_points
def gaussian(x, sigma):
return 1.0 / np.sqrt(2 * np.pi) / sigma * np.exp(-x**2 / 2 / sigma**2)
def occupation(x, t):
return 1.0 / (np.exp(THzToEv * x / (Kb * t)) - 1)
def get_triplets_at_q(grid_point,
mesh,
point_group, # real space point group of space group
reciprocal_lattice, # column vectors
is_time_reversal=True,
swappable=True,
stores_triplets_map=False):
"""Parameters
----------
grid_point : int
A grid point
mesh : array_like
Mesh numbers
dtype='intc'
shape=(3,)
point_group : array_like
Rotation matrices in real space. Note that those in reciprocal space
mean these matrices transposed (local terminology).
dtype='intc'
shape=(n_rot, 3, 3)
reciprocal_lattice : array_like
Reciprocal primitive basis vectors given as column vectors
dtype='double'
shape=(3, 3)
is_time_reversal : bool
Inversion symemtry is added if it doesn't exist.
swappable : bool
q1 and q2 can be swapped. By this number of triplets decreases.
Returns
-------
triplets_at_q : ndarray
Symmetry reduced number of triplets are stored as grid point
integer numbers.
dtype='uintp'
shape=(n_triplets, 3)
weights : ndarray
Weights of triplets in Brillouin zone
dtype='intc'
shape=(n_triplets,)
bz_grid_address : ndarray
Integer grid address of the points in Brillouin zone including
surface. The first prod(mesh) numbers of points are
independent. But the rest of points are
translational-symmetrically equivalent to some other points.
dtype='intc'
shape=(n_grid_points, 3)
bz_map : ndarray
Grid point mapping table containing BZ surface. See more
detail in spglib docstring.
dtype='uintp'
shape=(prod(mesh*2),)
map_tripelts : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Mapping table of all triplets to symmetrically
independent tripelts. More precisely, this gives a list of
index mapping from all q-points to independent q' of
q+q'+q''=G. Considering q' is enough because q is fixed and
q''=G-q-q' where G is automatically determined to choose
smallest |G|.
dtype='uintp'
shape=(prod(mesh),)
map_q : ndarray or None
Returns when stores_triplets_map=True, otherwise None is
returned. Irreducible q-points stabilized by q-point of
specified grid_point.
dtype='uintp'
shape=(prod(mesh),)
"""
map_triplets, map_q, grid_address = _get_triplets_reciprocal_mesh_at_q(
grid_point,
mesh,
point_group,
is_time_reversal=is_time_reversal,
swappable=swappable)
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
triplets_at_q, weights = _get_BZ_triplets_at_q(
grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh)
assert np.prod(mesh) == weights.sum(), \
"Num grid points %d, sum of weight %d" % (
np.prod(mesh), weights.sum())
# These maps are required for collision matrix calculation.
if not stores_triplets_map:
map_triplets = None
map_q = None
return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q
def get_all_triplets(grid_point,
bz_grid_address,
bz_map,
mesh):
triplets_at_q, _ = _get_BZ_triplets_at_q(
grid_point,
bz_grid_address,
bz_map,
np.arange(np.prod(mesh), dtype=bz_map.dtype),
mesh)
return triplets_at_q
def get_nosym_triplets_at_q(grid_point,
mesh,
reciprocal_lattice,
stores_triplets_map=False):
grid_address = get_grid_address(mesh)
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
map_triplets = np.arange(len(grid_address), dtype=bz_map.dtype)
triplets_at_q, weights = _get_BZ_triplets_at_q(
grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh)
if not stores_triplets_map:
map_triplets = None
map_q = None
else:
map_q = map_triplets.copy()
return triplets_at_q, weights, bz_grid_address, bz_map, map_triplets, map_q
def get_grid_address(mesh):
grid_mapping_table, grid_address = spg.get_stabilized_reciprocal_mesh(
mesh,
[[[1, 0, 0], [0, 1, 0], [0, 0, 1]]],
is_time_reversal=False,
is_dense=True)
return grid_address
def get_bz_grid_address(mesh, reciprocal_lattice, with_boundary=False):
grid_address = get_grid_address(mesh)
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
if with_boundary:
return bz_grid_address, bz_map
else:
return bz_grid_address[:np.prod(mesh)]
def get_grid_point_from_address_py(address, mesh):
# X runs first in XYZ
# (*In spglib, Z first is possible with MACRO setting.)
m = mesh
return (address[0] % m[0] +
(address[1] % m[1]) * m[0] +
(address[2] % m[2]) * m[0] * m[1])
def get_grid_point_from_address(address, mesh):
"""Grid point number is given by grid address.
Parameters
----------
address : array_like
Grid address.
dtype='intc'
shape=(3,)
mesh : array_like
Mesh numbers.
dtype='intc'
shape=(3,)
Returns
-------
int
Grid point number.
"""
return spg.get_grid_point_from_address(address, mesh)
def get_bz_grid_point_from_address(address, mesh, bz_map):
# X runs first in XYZ
# (*In spglib, Z first is possible with MACRO setting.)
# 2m is defined in kpoint.c of spglib.
m = 2 * np.array(mesh, dtype='intc')
return bz_map[get_grid_point_from_address(address, m)]
def invert_grid_point(grid_point, mesh, grid_address, bz_map):
# gp --> [address] --> [-address] --> inv_gp
address = grid_address[grid_point]
return get_bz_grid_point_from_address(-address, mesh, bz_map)
def get_ir_grid_points(mesh, rotations, mesh_shifts=None):
if mesh_shifts is None:
mesh_shifts = [False, False, False]
grid_mapping_table, grid_address = spg.get_stabilized_reciprocal_mesh(
mesh,
rotations,
is_shift=np.where(mesh_shifts, 1, 0),
is_dense=True)
(ir_grid_points,
ir_grid_weights) = extract_ir_grid_points(grid_mapping_table)
return ir_grid_points, ir_grid_weights, grid_address, grid_mapping_table
def get_grid_points_by_rotations(grid_point,
reciprocal_rotations,
mesh,
mesh_shifts=None):
if mesh_shifts is None:
mesh_shifts = [False, False, False]
return spg.get_grid_points_by_rotations(
grid_point,
reciprocal_rotations,
mesh,
is_shift=np.where(mesh_shifts, 1, 0),
is_dense=True)
def get_BZ_grid_points_by_rotations(grid_point,
reciprocal_rotations,
mesh,
bz_map,
mesh_shifts=None):
if mesh_shifts is None:
mesh_shifts = [False, False, False]
return spg.get_BZ_grid_points_by_rotations(
grid_point,
reciprocal_rotations,
mesh,
bz_map,
is_shift=np.where(mesh_shifts, 1, 0),
is_dense=True)
def reduce_grid_points(mesh_divisors,
grid_address,
dense_grid_points,
dense_grid_weights=None,
coarse_mesh_shifts=None):
divisors = np.array(mesh_divisors, dtype='intc')
if (divisors == 1).all():
coarse_grid_points = np.array(dense_grid_points, dtype='uintp')
if dense_grid_weights is not None:
coarse_grid_weights = np.array(dense_grid_weights, dtype='intc')
else:
if coarse_mesh_shifts is None:
shift = [0, 0, 0]
else:
shift = np.where(coarse_mesh_shifts, divisors // 2, [0, 0, 0])
modulo = grid_address[dense_grid_points] % divisors
condition = (modulo == shift).all(axis=1)
coarse_grid_points = np.extract(condition, dense_grid_points)
if dense_grid_weights is not None:
coarse_grid_weights = np.extract(condition, dense_grid_weights)
if dense_grid_weights is None:
return coarse_grid_points
else:
return coarse_grid_points, coarse_grid_weights
def from_coarse_to_dense_grid_points(dense_mesh,
mesh_divisors,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=None):
if coarse_mesh_shifts is None:
coarse_mesh_shifts = [False, False, False]
shifts = np.where(coarse_mesh_shifts, 1, 0)
dense_grid_points = []
for cga in coarse_grid_address[coarse_grid_points]:
dense_address = cga * mesh_divisors + shifts * (mesh_divisors // 2)
dense_grid_points.append(get_grid_point_from_address(dense_address,
dense_mesh))
return np.array(dense_grid_points, dtype='uintp')
def get_coarse_ir_grid_points(primitive,
mesh,
mesh_divisors,
coarse_mesh_shifts,
is_kappa_star=True,
symprec=1e-5):
mesh = np.array(mesh, dtype='intc')
symmetry = Symmetry(primitive, symprec)
point_group = symmetry.get_pointgroup_operations()
if mesh_divisors is None:
(ir_grid_points,
ir_grid_weights,
grid_address,
grid_mapping_table) = get_ir_grid_points(mesh, point_group)
else:
mesh_divs = np.array(mesh_divisors, dtype='intc')
coarse_mesh = mesh // mesh_divs
if coarse_mesh_shifts is None:
coarse_mesh_shifts = [False, False, False]
if not is_kappa_star:
coarse_grid_address = get_grid_address(coarse_mesh)
coarse_grid_points = np.arange(np.prod(coarse_mesh), dtype='uintp')
else:
(coarse_ir_grid_points,
coarse_ir_grid_weights,
coarse_grid_address,
coarse_grid_mapping_table) = get_ir_grid_points(
coarse_mesh,
point_group,
mesh_shifts=coarse_mesh_shifts)
ir_grid_points = from_coarse_to_dense_grid_points(
mesh,
mesh_divs,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=coarse_mesh_shifts)
grid_address = get_grid_address(mesh)
ir_grid_weights = ir_grid_weights
reciprocal_lattice = np.linalg.inv(primitive.get_cell())
bz_grid_address, bz_map = spg.relocate_BZ_grid_address(grid_address,
mesh,
reciprocal_lattice,
is_dense=True)
return (ir_grid_points,
ir_grid_weights,
bz_grid_address,
grid_mapping_table)
def get_number_of_triplets(primitive,
mesh,
grid_point,
swappable=True,
symprec=1e-5):
mesh = np.array(mesh, dtype='intc')
symmetry = Symmetry(primitive, symprec)
point_group = symmetry.get_pointgroup_operations()
reciprocal_lattice = np.linalg.inv(primitive.get_cell())
triplets_at_q, _, _, _, _, _ = get_triplets_at_q(
grid_point,
mesh,
point_group,
reciprocal_lattice,
swappable=swappable)
return len(triplets_at_q)
def get_triplets_integration_weights(interaction,
frequency_points,
sigma,
sigma_cutoff=None,
is_collision_matrix=False,
neighboring_phonons=False,
lang='C'):
triplets = interaction.get_triplets_at_q()[0]
frequencies = interaction.get_phonons()[0]
num_band = frequencies.shape[1]
g_zero = None
if is_collision_matrix:
g = np.empty(
(3, len(triplets), len(frequency_points), num_band, num_band),
dtype='double', order='C')
else:
g = np.empty(
(2, len(triplets), len(frequency_points), num_band, num_band),
dtype='double', order='C')
g[:] = 0
if sigma:
if lang == 'C':
import phono3py._phono3py as phono3c
g_zero = np.zeros(g.shape[1:], dtype='byte', order='C')
if sigma_cutoff is None:
cutoff = -1
else:
cutoff = float(sigma_cutoff)
# cutoff < 0 disables g_zero feature.
phono3c.triplets_integration_weights_with_sigma(
g,
g_zero,
frequency_points,
triplets,
frequencies,
sigma,
cutoff)
else:
for i, tp in enumerate(triplets):
f1s = frequencies[tp[1]]
f2s = frequencies[tp[2]]
for j, k in list(np.ndindex((num_band, num_band))):
f1 = f1s[j]
f2 = f2s[k]
g0 = gaussian(frequency_points - f1 - f2, sigma)
g[0, i, :, j, k] = g0
g1 = gaussian(frequency_points + f1 - f2, sigma)
g2 = gaussian(frequency_points - f1 + f2, sigma)
g[1, i, :, j, k] = g1 - g2
if len(g) == 3:
g[2, i, :, j, k] = g0 + g1 + g2
else:
if lang == 'C':
g_zero = np.zeros(g.shape[1:], dtype='byte', order='C')
_set_triplets_integration_weights_c(
g,
g_zero,
interaction,
frequency_points,
neighboring_phonons=neighboring_phonons)
else:
_set_triplets_integration_weights_py(
g, interaction, frequency_points)
return g, g_zero
def get_tetrahedra_vertices(relative_address,
mesh,
triplets_at_q,
bz_grid_address,
bz_map):
bzmesh = mesh * 2
grid_order = [1, mesh[0], mesh[0] * mesh[1]]
bz_grid_order = [1, bzmesh[0], bzmesh[0] * bzmesh[1]]
num_triplets = len(triplets_at_q)
vertices = np.zeros((num_triplets, 2, 24, 4), dtype='uintp')
for i, tp in enumerate(triplets_at_q):
for j, adrs_shift in enumerate(
(relative_address, -relative_address)):
adrs = bz_grid_address[tp[j + 1]] + adrs_shift
bz_gp = np.dot(adrs % bzmesh, bz_grid_order)
gp = np.dot(adrs % mesh, grid_order)
vgp = bz_map[bz_gp]
vertices[i, j] = vgp + (vgp == -1) * (gp + 1)
return vertices
def _get_BZ_triplets_at_q(grid_point,
bz_grid_address,
bz_map,
map_triplets,
mesh):
import phono3py._phono3py as phono3c
weights = np.zeros(len(map_triplets), dtype='intc')
for g in map_triplets:
weights[g] += 1
ir_weights = np.extract(weights > 0, weights)
triplets = np.zeros((len(ir_weights), 3), dtype=bz_map.dtype)
# triplets are overwritten.
num_ir_ret = phono3c.BZ_triplets_at_q(triplets,
grid_point,
bz_grid_address,
bz_map,
map_triplets,
np.array(mesh, dtype='intc'))
assert num_ir_ret == len(ir_weights)
return triplets, np.array(ir_weights, dtype='intc')
def _set_triplets_integration_weights_c(g,
g_zero,
interaction,
frequency_points,
neighboring_phonons=False):
import phono3py._phono3py as phono3c
reciprocal_lattice = np.linalg.inv(interaction.get_primitive().get_cell())
mesh = interaction.get_mesh_numbers()
thm = TetrahedronMethod(reciprocal_lattice, mesh=mesh)
grid_address = interaction.get_grid_address()
bz_map = interaction.get_bz_map()
triplets_at_q = interaction.get_triplets_at_q()[0]
if neighboring_phonons:
unique_vertices = thm.get_unique_tetrahedra_vertices()
for i, j in zip((1, 2), (1, -1)):
neighboring_grid_points = np.zeros(
len(unique_vertices) * len(triplets_at_q), dtype=bz_map.dtype)
phono3c.neighboring_grid_points(
neighboring_grid_points,
np.array(triplets_at_q[:, i], dtype='uintp').ravel(),
j * unique_vertices,
mesh,
grid_address,
bz_map)
interaction.set_phonons(np.unique(neighboring_grid_points))
phono3c.triplets_integration_weights(
g,
g_zero,
frequency_points,
thm.get_tetrahedra(),
mesh,
triplets_at_q,
interaction.get_phonons()[0],
grid_address,
bz_map)
def _set_triplets_integration_weights_py(g, interaction, frequency_points):
reciprocal_lattice = np.linalg.inv(interaction.get_primitive().get_cell())
mesh = interaction.get_mesh_numbers()
thm = TetrahedronMethod(reciprocal_lattice, mesh=mesh)
grid_address = interaction.get_grid_address()
bz_map = interaction.get_bz_map()
triplets_at_q = interaction.get_triplets_at_q()[0]
tetrahedra_vertices = get_tetrahedra_vertices(
thm.get_tetrahedra(),
mesh,
triplets_at_q,
grid_address,
bz_map)
interaction.set_phonons(np.unique(tetrahedra_vertices))
frequencies = interaction.get_phonons()[0]
num_band = frequencies.shape[1]
for i, vertices in enumerate(tetrahedra_vertices):
for j, k in list(np.ndindex((num_band, num_band))):
f1_v = frequencies[vertices[0], j]
f2_v = frequencies[vertices[1], k]
thm.set_tetrahedra_omegas(f1_v + f2_v)
thm.run(frequency_points)
g0 = thm.get_integration_weight()
g[0, i, :, j, k] = g0
thm.set_tetrahedra_omegas(-f1_v + f2_v)
thm.run(frequency_points)
g1 = thm.get_integration_weight()
thm.set_tetrahedra_omegas(f1_v - f2_v)
thm.run(frequency_points)
g2 = thm.get_integration_weight()
g[1, i, :, j, k] = g1 - g2
if len(g) == 3:
g[2, i, :, j, k] = g0 + g1 + g2
|
atztogo/phono3py | phono3py/phonon3/interaction.py | Interaction.get_averaged_interaction | python | def get_averaged_interaction(self):
# v[triplet, band0, band, band]
v = self._interaction_strength
w = self._weights_at_q
v_sum = np.dot(w, v.sum(axis=2).sum(axis=2))
return v_sum / np.prod(v.shape[2:]) | Return sum over phonon triplets of interaction strength
See Eq.(21) of PRB 91, 094306 (2015) | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/interaction.py#L160-L171 | null | class Interaction(object):
def __init__(self,
supercell,
primitive,
mesh,
symmetry,
fc3=None,
band_indices=None,
constant_averaged_interaction=None,
frequency_factor_to_THz=VaspToTHz,
frequency_scale_factor=None,
unit_conversion=None,
is_mesh_symmetry=True,
symmetrize_fc3q=False,
cutoff_frequency=None,
lapack_zheev_uplo='L'):
if frequency_scale_factor is None:
self._set_fc3(fc3)
else:
self._set_fc3(fc3 * frequency_scale_factor ** 2)
self._supercell = supercell
self._primitive = primitive
self._mesh = np.array(mesh, dtype='intc')
self._symmetry = symmetry
self._band_indices = None
self._set_band_indices(band_indices)
self._constant_averaged_interaction = constant_averaged_interaction
self._frequency_factor_to_THz = frequency_factor_to_THz
self._frequency_scale_factor = frequency_scale_factor
# Unit to eV^2
if unit_conversion is None:
num_grid = np.prod(self._mesh)
self._unit_conversion = ((Hbar * EV) ** 3 / 36 / 8
* EV ** 2 / Angstrom ** 6
/ (2 * np.pi * THz) ** 3
/ AMU ** 3 / num_grid
/ EV ** 2)
else:
self._unit_conversion = unit_conversion
if cutoff_frequency is None:
self._cutoff_frequency = 0
else:
self._cutoff_frequency = cutoff_frequency
self._is_mesh_symmetry = is_mesh_symmetry
self._symmetrize_fc3q = symmetrize_fc3q
self._lapack_zheev_uplo = lapack_zheev_uplo
self._symprec = symmetry.get_symmetry_tolerance()
self._grid_point = None
self._triplets_at_q = None
self._weights_at_q = None
self._triplets_map_at_q = None
self._ir_map_at_q = None
self._grid_address = None
self._bz_map = None
self._interaction_strength = None
self._g_zero = None
self._phonon_done = None
self._frequencies = None
self._eigenvectors = None
self._dm = None
self._nac_q_direction = None
self._band_index_count = 0
svecs, multiplicity = self._primitive.get_smallest_vectors()
self._smallest_vectors = svecs
self._multiplicity = multiplicity
self._masses = np.array(self._primitive.get_masses(), dtype='double')
self._p2s = self._primitive.get_primitive_to_supercell_map()
self._s2p = self._primitive.get_supercell_to_primitive_map()
self._allocate_phonon()
def run(self, lang='C', g_zero=None):
num_band = self._primitive.get_number_of_atoms() * 3
num_triplets = len(self._triplets_at_q)
self._interaction_strength = np.empty(
(num_triplets, len(self._band_indices), num_band, num_band),
dtype='double')
if self._constant_averaged_interaction is None:
self._interaction_strength[:] = 0
if lang == 'C':
self._run_c(g_zero)
else:
self._run_py()
else:
num_grid = np.prod(self._mesh)
self._interaction_strength[:] = (
self._constant_averaged_interaction / num_grid)
def get_interaction_strength(self):
return self._interaction_strength
def set_interaction_strength(self, pp_strength, g_zero=None):
self._interaction_strength = pp_strength
self._g_zero = g_zero
def get_zero_value_positions(self):
return self._g_zero
def get_mesh_numbers(self):
return self._mesh
def get_phonons(self):
return self._frequencies, self._eigenvectors, self._phonon_done
def get_fc3(self):
return self._fc3
def get_dynamical_matrix(self):
return self._dm
def get_primitive(self):
return self._primitive
def get_supercell(self):
return self._supercell
def get_triplets_at_q(self):
return (self._triplets_at_q,
self._weights_at_q,
self._triplets_map_at_q,
self._ir_map_at_q)
def get_grid_address(self):
return self._grid_address
def get_bz_map(self):
return self._bz_map
def get_band_indices(self):
return self._band_indices
def get_frequency_factor_to_THz(self):
return self._frequency_factor_to_THz
def get_lapack_zheev_uplo(self):
return self._lapack_zheev_uplo
def get_cutoff_frequency(self):
return self._cutoff_frequency
def get_primitive_and_supercell_correspondence(self):
return (self._smallest_vectors,
self._multiplicity,
self._p2s,
self._s2p,
self._masses)
def get_nac_q_direction(self):
return self._nac_q_direction
def get_unit_conversion_factor(self):
return self._unit_conversion
def get_constant_averaged_interaction(self):
return self._constant_averaged_interaction
def set_grid_point(self, grid_point, stores_triplets_map=False):
"""Set the q-point (as a grid-point index) and collect its triplets.

Triplets, their weights and maps are obtained either without mesh
symmetry or with the point-group operations.  When a non-analytical
q-direction is set and the grid point is Gamma, the triplets are
re-generated using only the rotations that leave that direction
invariant, without time reversal.
"""
reciprocal_lattice = np.linalg.inv(self._primitive.get_cell())
if not self._is_mesh_symmetry:
(triplets_at_q,
weights_at_q,
grid_address,
bz_map,
triplets_map_at_q,
ir_map_at_q) = get_nosym_triplets_at_q(
grid_point,
self._mesh,
reciprocal_lattice,
stores_triplets_map=stores_triplets_map)
else:
(triplets_at_q,
weights_at_q,
grid_address,
bz_map,
triplets_map_at_q,
ir_map_at_q) = get_triplets_at_q(
grid_point,
self._mesh,
self._symmetry.get_pointgroup_operations(),
reciprocal_lattice,
stores_triplets_map=stores_triplets_map)
# Special treatment of symmetry is applied when q_direction is used.
if self._nac_q_direction is not None:
# Gamma point: grid address of this grid point is all zero.
if (grid_address[grid_point] == 0).all():
# Force re-solving phonons at Gamma so the NAC direction
# is taken into account.
self._phonon_done[grid_point] = 0
self.set_phonons(np.array([grid_point], dtype='uintp'))
# Keep only rotations that leave the NAC q-direction invariant.
rotations = []
for r in self._symmetry.get_pointgroup_operations():
dq = self._nac_q_direction
# NOTE(review): ``dq`` aliases self._nac_q_direction, so this
# normalizes the stored array in place on every iteration
# (idempotent after the first) — confirm this is intended.
dq /= np.linalg.norm(dq)
diff = np.dot(dq, r) - dq
if (abs(diff) < 1e-5).all():
rotations.append(r)
(triplets_at_q,
weights_at_q,
grid_address,
bz_map,
triplets_map_at_q,
ir_map_at_q) = get_triplets_at_q(
grid_point,
self._mesh,
np.array(rotations, dtype='intc', order='C'),
reciprocal_lattice,
is_time_reversal=False,
stores_triplets_map=stores_triplets_map)
# Sanity check: q1 + q2 + q3 of each triplet must be a reciprocal
# lattice point (i.e., divisible by the mesh numbers).
for triplet in triplets_at_q:
sum_q = (grid_address[triplet]).sum(axis=0)
if (sum_q % self._mesh != 0).any():
print("============= Warning ==================")
print("%s" % triplet)
for tp in triplet:
print("%s %s" %
(grid_address[tp],
np.linalg.norm(
np.dot(reciprocal_lattice,
grid_address[tp] /
self._mesh.astype('double')))))
print("%s" % sum_q)
print("============= Warning ==================")
self._grid_point = grid_point
self._triplets_at_q = triplets_at_q
self._weights_at_q = weights_at_q
self._triplets_map_at_q = triplets_map_at_q
# self._grid_address = grid_address
# self._bz_map = bz_map
self._ir_map_at_q = ir_map_at_q
# set_phonons is unnecessary now because all phonons are calculated in
# set_dynamical_matrix, though Gamma-point is an exception.
# self.set_phonons(self._triplets_at_q.ravel())
def set_dynamical_matrix(self,
fc2,
supercell,
primitive,
nac_params=None,
solve_dynamical_matrices=True,
decimals=None,
verbose=False):
"""Build the dynamical matrix from fc2 and solve phonons.

When ``solve_dynamical_matrices`` is False only grid point 0 is
solved; otherwise phonons at all grid points are computed here.
"""
self._dm = get_dynamical_matrix(
fc2,
supercell,
primitive,
nac_params=nac_params,
frequency_scale_factor=self._frequency_scale_factor,
decimals=decimals,
symprec=self._symprec)
if solve_dynamical_matrices:
self.set_phonons(verbose=verbose)
else:
self.set_phonons(np.array([0], dtype='uintp'), verbose=verbose)
# At Gamma (grid address all zero): if fewer than three modes fall
# below the cutoff frequency, some acoustic frequencies came out
# numerically non-zero; clamp the offending ones to zero and warn.
if (self._grid_address[0] == 0).all():
if np.sum(self._frequencies[0] < self._cutoff_frequency) < 3:
for i, f in enumerate(self._frequencies[0, :3]):
if not (f < self._cutoff_frequency):
self._frequencies[0, i] = 0
print("=" * 26 + " Warning " + "=" * 26)
print(" Phonon frequency of band index %d at Gamma "
"is calculated to be %f." % (i + 1, f))
print(" But this frequency is forced to be zero.")
print("=" * 61)
def set_nac_q_direction(self, nac_q_direction=None):
if nac_q_direction is not None:
self._nac_q_direction = np.array(nac_q_direction, dtype='double')
def set_phonon_data(self, frequencies, eigenvectors, grid_address):
    """Adopt externally computed phonons instead of solving them here.

    Parameters
    ----------
    frequencies, eigenvectors : array_like
        Phonon data for all grid points; copied into internal storage.
    grid_address : ndarray
        Grid addresses the input data was computed on; must match the
        internally generated addresses exactly.

    Returns
    -------
    bool
        True on success, False (with a printed warning) when the grid
        addresses are inconsistent with the internal ones.
    """
    if grid_address.shape != self._grid_address.shape:
        print("=" * 26 + " Warning " + "=" * 26)
        print("Input grid address size is inconsistent. "
              "Setting phonons failed.")
        print("=" * 26 + " Warning " + "=" * 26)
        return False

    # Bug fix: the original condition ``(self._grid_address -
    # grid_address).all()`` is True only when *every* element differs,
    # so a partial mismatch was silently accepted as consistent.  Any
    # differing element must reject the input.
    if (self._grid_address != grid_address).any():
        print("=" * 26 + " Warning " + "=" * 26)
        print("Input grid addresses are inconsistent. "
              "Setting phonons failed.")
        print("=" * 26 + " Warning " + "=" * 26)
        return False

    self._phonon_done[:] = 1
    self._frequencies[:] = frequencies
    self._eigenvectors[:] = eigenvectors
    return True
def set_phonons(self, grid_points=None, verbose=False):
if grid_points is None:
_grid_points = np.arange(len(self._grid_address), dtype='uintp')
else:
_grid_points = grid_points
self._set_phonon_c(_grid_points, verbose=verbose)
def delete_interaction_strength(self):
self._interaction_strength = None
self._g_zero = None
def _set_fc3(self, fc3):
if (type(fc3) == np.ndarray and
fc3.dtype == np.dtype('double') and
fc3.flags.aligned and
fc3.flags.owndata and
fc3.flags.c_contiguous):
self._fc3 = fc3
else:
self._fc3 = np.array(fc3, dtype='double', order='C')
def _set_band_indices(self, band_indices):
num_band = self._primitive.get_number_of_atoms() * 3
if band_indices is None:
self._band_indices = np.arange(num_band, dtype='intc')
else:
self._band_indices = np.array(band_indices, dtype='intc')
def _run_c(self, g_zero):
import phono3py._phono3py as phono3c
if g_zero is None or self._symmetrize_fc3q:
_g_zero = np.zeros(self._interaction_strength.shape,
dtype='byte', order='C')
else:
_g_zero = g_zero
phono3c.interaction(self._interaction_strength,
_g_zero,
self._frequencies,
self._eigenvectors,
self._triplets_at_q,
self._grid_address,
self._mesh,
self._fc3,
self._smallest_vectors,
self._multiplicity,
self._masses,
self._p2s,
self._s2p,
self._band_indices,
self._symmetrize_fc3q,
self._cutoff_frequency)
self._interaction_strength *= self._unit_conversion
self._g_zero = g_zero
def _set_phonon_c(self, grid_points, verbose=False):
set_phonon_c(self._dm,
self._frequencies,
self._eigenvectors,
self._phonon_done,
grid_points,
self._grid_address,
self._mesh,
self._frequency_factor_to_THz,
self._nac_q_direction,
self._lapack_zheev_uplo,
verbose=verbose)
def _run_py(self):
r2r = RealToReciprocal(self._fc3,
self._supercell,
self._primitive,
self._mesh,
symprec=self._symprec)
r2n = ReciprocalToNormal(self._primitive,
self._frequencies,
self._eigenvectors,
self._band_indices,
cutoff_frequency=self._cutoff_frequency)
for i, grid_triplet in enumerate(self._triplets_at_q):
print("%d / %d" % (i + 1, len(self._triplets_at_q)))
r2r.run(self._grid_address[grid_triplet])
fc3_reciprocal = r2r.get_fc3_reciprocal()
for gp in grid_triplet:
self._set_phonon_py(gp)
r2n.run(fc3_reciprocal, grid_triplet)
self._interaction_strength[i] = np.abs(
r2n.get_reciprocal_to_normal()) ** 2 * self._unit_conversion
def _set_phonon_py(self, grid_point):
set_phonon_py(grid_point,
self._phonon_done,
self._frequencies,
self._eigenvectors,
self._grid_address,
self._mesh,
self._dm,
self._frequency_factor_to_THz,
self._lapack_zheev_uplo)
def _allocate_phonon(self):
"""Allocate phonon storage for all BZ grid points (incl. boundary).

Also initializes self._grid_address and self._bz_map from the mesh.
"""
primitive_lattice = np.linalg.inv(self._primitive.get_cell())
self._grid_address, self._bz_map = get_bz_grid_address(
self._mesh, primitive_lattice, with_boundary=True)
num_band = self._primitive.get_number_of_atoms() * 3
num_grid = len(self._grid_address)
# 1 once phonons at a grid point have been solved, 0 otherwise.
self._phonon_done = np.zeros(num_grid, dtype='byte')
self._frequencies = np.zeros((num_grid, num_band), dtype='double')
itemsize = self._frequencies.itemsize
# Complex dtype derived from the float itemsize, e.g. 'c16' for double.
self._eigenvectors = np.zeros((num_grid, num_band, num_band),
dtype=("c%d" % (itemsize * 2)))
|
atztogo/phono3py | phono3py/other/alm_wrapper.py | optimize | python | def optimize(lattice,
positions,
numbers,
displacements,
forces,
alm_options=None,
p2s_map=None,
p2p_map=None,
log_level=0):
from alm import ALM
with ALM(lattice, positions, numbers) as alm:
natom = len(numbers)
alm.set_verbosity(log_level)
nkd = len(np.unique(numbers))
if 'cutoff_distance' not in alm_options:
rcs = -np.ones((2, nkd, nkd), dtype='double')
elif type(alm_options['cutoff_distance']) is float:
rcs = np.ones((2, nkd, nkd), dtype='double')
rcs[0] *= -1
rcs[1] *= alm_options['cutoff_distance']
alm.define(2, rcs)
alm.set_displacement_and_force(displacements, forces)
if 'solver' in alm_options:
solver = alm_options['solver']
else:
solver = 'SimplicialLDLT'
info = alm.optimize(solver=solver)
fc2 = extract_fc2_from_alm(alm,
natom,
atom_list=p2s_map,
p2s_map=p2s_map,
p2p_map=p2p_map)
fc3 = _extract_fc3_from_alm(alm,
natom,
p2s_map=p2s_map,
p2p_map=p2p_map)
return fc2, fc3 | Calculate force constants
lattice : array_like
Basis vectors. a, b, c are given as column vectors.
shape=(3, 3), dtype='double'
positions : array_like
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : array_like
Atomic numbers.
shape=(num_atoms,), dtype='intc'
displacements : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double', shape=(supercells, num_atoms, 3)
forces : array_like
Forces in supercells.
dtype='double', shape=(supercells, num_atoms, 3)
alm_options : dict, optional
Default is None.
List of keys
cutoff_distance : float
solver : str
Either 'SimplicialLDLT' or 'dense'. Default is
'SimplicialLDLT'. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/other/alm_wrapper.py#L94-L158 | [
"def _extract_fc3_from_alm(alm,\n natom,\n p2s_map=None,\n p2p_map=None):\n p2s_map_alm = alm.getmap_primitive_to_supercell()[0]\n if (p2s_map is not None and\n len(p2s_map_alm) == len(p2s_map) and\n (p2s_map_alm == p2s_map).all()):\n fc3 = np.zeros((len(p2s_map), natom, natom, 3, 3, 3),\n dtype='double', order='C')\n for (fc, indices) in zip(*alm.get_fc(2, mode='origin')):\n v1, v2, v3 = indices // 3\n c1, c2, c3 = indices % 3\n fc3[p2p_map[v1], v2, v3, c1, c2, c3] = fc\n fc3[p2p_map[v1], v3, v2, c1, c3, c2] = fc\n else:\n fc3 = np.zeros((natom, natom, natom, 3, 3, 3),\n dtype='double', order='C')\n for (fc, indices) in zip(*alm.get_fc(2, mode='all')):\n v1, v2, v3 = indices // 3\n c1, c2, c3 = indices % 3\n fc3[v1, v2, v3, c1, c2, c3] = fc\n fc3[v1, v3, v2, c1, c3, c2] = fc\n\n return fc3\n"
] | # Copyright (C) 2016 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
from phonopy.interface.alm import extract_fc2_from_alm
def get_fc3(supercell,
primitive,
forces_fc3,
disp_dataset,
symmetry,
alm_options=None,
is_compact_fc=False,
log_level=0):
assert supercell.get_number_of_atoms() == disp_dataset['natom']
force = np.array(forces_fc3, dtype='double', order='C')
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
numbers = supercell.get_atomic_numbers()
disp, indices = _get_alm_disp_fc3(disp_dataset)
if is_compact_fc:
p2s_map = primitive.p2s_map
p2p_map = primitive.p2p_map
else:
p2s_map = None
p2p_map = None
if log_level:
print("------------------------------"
" ALM FC3 start "
"------------------------------")
print("ALM by T. Tadano, https://github.com/ttadano/ALM")
if log_level == 1:
print("Use -v option to watch detailed ALM log.")
print("")
sys.stdout.flush()
_alm_options = {}
if alm_options is not None:
_alm_options.update(alm_options)
if 'cutoff_distance' in disp_dataset:
_alm_options['cutoff_distance'] = disp_dataset['cutoff_distance']
fc2, fc3 = optimize(lattice, positions, numbers,
disp[indices],
force[indices],
alm_options=_alm_options,
p2s_map=p2s_map,
p2p_map=p2p_map,
log_level=log_level)
if log_level:
print("-------------------------------"
" ALM FC3 end "
"-------------------------------")
return fc2, fc3
def _extract_fc3_from_alm(alm,
natom,
p2s_map=None,
p2p_map=None):
p2s_map_alm = alm.getmap_primitive_to_supercell()[0]
if (p2s_map is not None and
len(p2s_map_alm) == len(p2s_map) and
(p2s_map_alm == p2s_map).all()):
fc3 = np.zeros((len(p2s_map), natom, natom, 3, 3, 3),
dtype='double', order='C')
for (fc, indices) in zip(*alm.get_fc(2, mode='origin')):
v1, v2, v3 = indices // 3
c1, c2, c3 = indices % 3
fc3[p2p_map[v1], v2, v3, c1, c2, c3] = fc
fc3[p2p_map[v1], v3, v2, c1, c3, c2] = fc
else:
fc3 = np.zeros((natom, natom, natom, 3, 3, 3),
dtype='double', order='C')
for (fc, indices) in zip(*alm.get_fc(2, mode='all')):
v1, v2, v3 = indices // 3
c1, c2, c3 = indices % 3
fc3[v1, v2, v3, c1, c2, c3] = fc
fc3[v1, v3, v2, c1, c3, c2] = fc
return fc3
def _get_alm_disp_fc3(disp_dataset):
"""Create displacements of atoms for ALM input
Note
----
Dipslacements of all atoms in supercells for all displacement
configurations in phono3py are returned, i.e., most of
displacements are zero. Only the configurations with 'included' ==
True are included in the list of indices that is returned, too.
Parameters
----------
disp_dataset : dict
Displacement dataset that may be obtained by
file_IO.parse_disp_fc3_yaml.
Returns
-------
disp : ndarray
Displacements of atoms in supercells of all displacement
configurations.
shape=(ndisp, natom, 3)
dtype='double'
indices : list of int
The indices of the displacement configurations with 'included' == True.
"""
natom = disp_dataset['natom']
ndisp = len(disp_dataset['first_atoms'])
for disp1 in disp_dataset['first_atoms']:
ndisp += len(disp1['second_atoms'])
disp = np.zeros((ndisp, natom, 3), dtype='double', order='C')
indices = []
count = 0
for disp1 in disp_dataset['first_atoms']:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
count += 1
for disp1 in disp_dataset['first_atoms']:
for disp2 in disp1['second_atoms']:
if 'included' in disp2:
if disp2['included']:
indices.append(count)
else:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
disp[count, disp2['number']] = disp2['displacement']
count += 1
return disp, indices
|
atztogo/phono3py | phono3py/other/alm_wrapper.py | _get_alm_disp_fc3 | python | def _get_alm_disp_fc3(disp_dataset):
natom = disp_dataset['natom']
ndisp = len(disp_dataset['first_atoms'])
for disp1 in disp_dataset['first_atoms']:
ndisp += len(disp1['second_atoms'])
disp = np.zeros((ndisp, natom, 3), dtype='double', order='C')
indices = []
count = 0
for disp1 in disp_dataset['first_atoms']:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
count += 1
for disp1 in disp_dataset['first_atoms']:
for disp2 in disp1['second_atoms']:
if 'included' in disp2:
if disp2['included']:
indices.append(count)
else:
indices.append(count)
disp[count, disp1['number']] = disp1['displacement']
disp[count, disp2['number']] = disp2['displacement']
count += 1
return disp, indices | Create displacements of atoms for ALM input
Note
----
Displacements of all atoms in supercells for all displacement
configurations in phono3py are returned, i.e., most of
displacements are zero. Only the configurations with 'included' ==
True are included in the list of indices that is returned, too.
Parameters
----------
disp_dataset : dict
Displacement dataset that may be obtained by
file_IO.parse_disp_fc3_yaml.
Returns
-------
disp : ndarray
Displacements of atoms in supercells of all displacement
configurations.
shape=(ndisp, natom, 3)
dtype='double'
indices : list of int
The indices of the displacement configurations with 'included' == True. | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/other/alm_wrapper.py#L188-L239 | null | # Copyright (C) 2016 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import numpy as np
from phonopy.interface.alm import extract_fc2_from_alm
def get_fc3(supercell,
primitive,
forces_fc3,
disp_dataset,
symmetry,
alm_options=None,
is_compact_fc=False,
log_level=0):
assert supercell.get_number_of_atoms() == disp_dataset['natom']
force = np.array(forces_fc3, dtype='double', order='C')
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
numbers = supercell.get_atomic_numbers()
disp, indices = _get_alm_disp_fc3(disp_dataset)
if is_compact_fc:
p2s_map = primitive.p2s_map
p2p_map = primitive.p2p_map
else:
p2s_map = None
p2p_map = None
if log_level:
print("------------------------------"
" ALM FC3 start "
"------------------------------")
print("ALM by T. Tadano, https://github.com/ttadano/ALM")
if log_level == 1:
print("Use -v option to watch detailed ALM log.")
print("")
sys.stdout.flush()
_alm_options = {}
if alm_options is not None:
_alm_options.update(alm_options)
if 'cutoff_distance' in disp_dataset:
_alm_options['cutoff_distance'] = disp_dataset['cutoff_distance']
fc2, fc3 = optimize(lattice, positions, numbers,
disp[indices],
force[indices],
alm_options=_alm_options,
p2s_map=p2s_map,
p2p_map=p2p_map,
log_level=log_level)
if log_level:
print("-------------------------------"
" ALM FC3 end "
"-------------------------------")
return fc2, fc3
def optimize(lattice,
             positions,
             numbers,
             displacements,
             forces,
             alm_options=None,
             p2s_map=None,
             p2p_map=None,
             log_level=0):
    """Calculate fc2 and fc3 with ALM.

    lattice : array_like
        Basis vectors. a, b, c are given as column vectors.
        shape=(3, 3), dtype='double'
    positions : array_like
        Fractional coordinates of atomic points.
        shape=(num_atoms, 3), dtype='double'
    numbers : array_like
        Atomic numbers.
        shape=(num_atoms,), dtype='intc'
    displacements : array_like
        Atomic displacement patterns in supercells in Cartesian.
        dtype='double', shape=(supercells, num_atoms, 3)
    forces : array_like
        Forces in supercells.
        dtype='double', shape=(supercells, num_atoms, 3)
    alm_options : dict, optional
        Default is None (treated as empty).
        List of keys
            cutoff_distance : float
            solver : str
                Either 'SimplicialLDLT' or 'dense'. Default is
                'SimplicialLDLT'.

    Returns
    -------
    (fc2, fc3) extracted from the optimized ALM model.
    """
    from alm import ALM

    # Bug fix: ``alm_options`` is documented to default to None, but the
    # original code dereferenced it directly ("'cutoff_distance' not in
    # alm_options"), raising TypeError for the default.  Normalize first.
    if alm_options is None:
        alm_options = {}

    with ALM(lattice, positions, numbers) as alm:
        natom = len(numbers)
        alm.set_verbosity(log_level)
        nkd = len(np.unique(numbers))
        cutoff = alm_options.get('cutoff_distance')
        if isinstance(cutoff, float):
            # rcs[0] < 0 disables the cutoff for the 2nd order; rcs[1]
            # carries the 3rd-order cutoff distance.
            rcs = np.ones((2, nkd, nkd), dtype='double')
            rcs[0] *= -1
            rcs[1] *= cutoff
        else:
            # Bug fix: the original left ``rcs`` undefined (NameError at
            # alm.define) when 'cutoff_distance' was present but not a
            # float.  Fall back to "no cutoff" (-1) in that case too.
            rcs = -np.ones((2, nkd, nkd), dtype='double')
        alm.define(2, rcs)
        alm.set_displacement_and_force(displacements, forces)
        solver = alm_options.get('solver', 'SimplicialLDLT')
        alm.optimize(solver=solver)
        fc2 = extract_fc2_from_alm(alm,
                                   natom,
                                   atom_list=p2s_map,
                                   p2s_map=p2s_map,
                                   p2p_map=p2p_map)
        fc3 = _extract_fc3_from_alm(alm,
                                    natom,
                                    p2s_map=p2s_map,
                                    p2p_map=p2p_map)
    return fc2, fc3
def _extract_fc3_from_alm(alm,
natom,
p2s_map=None,
p2p_map=None):
"""Collect third-order force constants from an optimized ALM instance.

Returns a compact (n_patom, natom, natom, 3, 3, 3) array when the
given ``p2s_map`` agrees with ALM's primitive-to-supercell map,
otherwise a full (natom, natom, natom, 3, 3, 3) array.
"""
p2s_map_alm = alm.getmap_primitive_to_supercell()[0]
if (p2s_map is not None and
len(p2s_map_alm) == len(p2s_map) and
(p2s_map_alm == p2s_map).all()):
# Compact form: first axis runs over primitive-cell atoms only.
fc3 = np.zeros((len(p2s_map), natom, natom, 3, 3, 3),
dtype='double', order='C')
for (fc, indices) in zip(*alm.get_fc(2, mode='origin')):
# indices encode (atom, Cartesian) pairs as atom * 3 + cart.
v1, v2, v3 = indices // 3
c1, c2, c3 = indices % 3
fc3[p2p_map[v1], v2, v3, c1, c2, c3] = fc
# Permutation symmetry: swapping the last two atoms together
# with their Cartesian indices gives the same force constant.
fc3[p2p_map[v1], v3, v2, c1, c3, c2] = fc
else:
fc3 = np.zeros((natom, natom, natom, 3, 3, 3),
dtype='double', order='C')
for (fc, indices) in zip(*alm.get_fc(2, mode='all')):
v1, v2, v3 = indices // 3
c1, c2, c3 = indices % 3
fc3[v1, v2, v3, c1, c2, c3] = fc
fc3[v1, v3, v2, c1, c3, c2] = fc
return fc3
|
atztogo/phono3py | phono3py/phonon3/imag_self_energy.py | get_imag_self_energy | python | def get_imag_self_energy(interaction,
grid_points,
sigmas,
frequency_step=None,
num_frequency_points=None,
temperatures=None,
scattering_event_class=None, # class 1 or 2
write_detail=False,
output_filename=None,
log_level=0):
if temperatures is None:
temperatures = [0.0, 300.0]
if temperatures is None:
print("Temperatures have to be set.")
return False
mesh = interaction.get_mesh_numbers()
ise = ImagSelfEnergy(interaction, with_detail=write_detail)
imag_self_energy = []
frequency_points = []
for i, gp in enumerate(grid_points):
ise.set_grid_point(gp)
if log_level:
weights = interaction.get_triplets_at_q()[1]
print("------------------- Imaginary part of self energy (%d/%d) "
"-------------------" % (i + 1, len(grid_points)))
print("Grid point: %d" % gp)
print("Number of ir-triplets: "
"%d / %d" % (len(weights), weights.sum()))
ise.run_interaction()
frequencies = interaction.get_phonons()[0]
max_phonon_freq = np.amax(frequencies)
if log_level:
adrs = interaction.get_grid_address()[gp]
q = adrs.astype('double') / mesh
print("q-point: %s" % q)
print("Phonon frequency:")
text = "[ "
for i, freq in enumerate(frequencies[gp]):
if i % 6 == 0 and i != 0:
text += "\n"
text += "%8.4f " % freq
text += "]"
print(text)
sys.stdout.flush()
gamma_sigmas = []
fp_sigmas = []
if write_detail:
(triplets,
weights,
map_triplets, _) = interaction.get_triplets_at_q()
for j, sigma in enumerate(sigmas):
if log_level:
if sigma:
print("Sigma: %s" % sigma)
else:
print("Tetrahedron method")
ise.set_sigma(sigma)
if sigma:
fmax = max_phonon_freq * 2 + sigma * 4
else:
fmax = max_phonon_freq * 2
fmax *= 1.005
fmin = 0
frequency_points_at_sigma = get_frequency_points(
fmin,
fmax,
frequency_step=frequency_step,
num_frequency_points=num_frequency_points)
fp_sigmas.append(frequency_points_at_sigma)
gamma = np.zeros(
(len(temperatures), len(frequency_points_at_sigma),
len(interaction.get_band_indices())), dtype='double')
if write_detail:
num_band0 = len(interaction.get_band_indices())
num_band = frequencies.shape[1]
detailed_gamma = np.zeros(
(len(temperatures), len(frequency_points_at_sigma),
len(weights), num_band0, num_band, num_band),
dtype='double')
for k, freq_point in enumerate(frequency_points_at_sigma):
ise.set_frequency_points([freq_point])
ise.set_integration_weights(
scattering_event_class=scattering_event_class)
for l, t in enumerate(temperatures):
ise.set_temperature(t)
ise.run()
gamma[l, k] = ise.get_imag_self_energy()[0]
if write_detail:
detailed_gamma[l, k] = (
ise.get_detailed_imag_self_energy()[0])
gamma_sigmas.append(gamma)
if write_detail:
full_filename = write_gamma_detail_to_hdf5(
temperatures,
mesh,
gamma_detail=detailed_gamma,
grid_point=gp,
triplet=triplets,
weight=weights,
triplet_map=map_triplets,
sigma=sigma,
frequency_points=frequency_points_at_sigma,
filename=output_filename)
if log_level:
print("Contribution of each triplet to imaginary part of "
"self energy is written in\n\"%s\"." % full_filename)
imag_self_energy.append(gamma_sigmas)
frequency_points.append(fp_sigmas)
return imag_self_energy, frequency_points | Imaginary part of self energy at frequency points
Band indices to be calculated at are kept in Interaction instance.
Args:
interaction: Ph-ph interaction
grid_points: Grid-point indices to be caclculated on
sigmas:
A set of sigmas. simga=None means to use tetrahedron method,
otherwise smearing method with real positive value of sigma.
frequency_step: Pitch of frequency to be sampled.
num_frequency_points: Number of sampling sampling points to be used
instead of frequency_step.
temperatures: Temperatures to be calculated at.
scattering_event_class:
Extract scattering event class 1 or 2.
log_level: Log level. 0 or non 0 in this method.
Returns:
Tuple: (Imaginary part of self energy, sampling frequency points) | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/imag_self_energy.py#L11-L155 | [
"def get_frequency_points(f_min,\n f_max,\n frequency_step=None,\n num_frequency_points=None):\n if num_frequency_points is None:\n if frequency_step is not None:\n frequency_points = np.arange(\n f_min, f_max, frequency_step, dtype='double')\n else:\n frequency_points = np.array(np.linspace(\n f_min, f_max, 201), dtype='double')\n else:\n frequency_points = np.array(np.linspace(\n f_min, f_max, num_frequency_points), dtype='double')\n\n return frequency_points\n",
"def write_gamma_detail_to_hdf5(temperature,\n mesh,\n gamma_detail=None,\n grid_point=None,\n triplet=None,\n weight=None,\n triplet_map=None,\n triplet_all=None,\n frequency_points=None,\n band_index=None,\n sigma=None,\n sigma_cutoff=None,\n compression=None,\n filename=None,\n verbose=True):\n if band_index is None:\n band_indices = None\n else:\n band_indices = [band_index]\n suffix = _get_filename_suffix(mesh,\n grid_point=grid_point,\n band_indices=band_indices,\n sigma=sigma,\n sigma_cutoff=sigma_cutoff,\n filename=filename)\n full_filename = \"gamma_detail\" + suffix + \".hdf5\"\n\n with h5py.File(full_filename, 'w') as w:\n w.create_dataset('temperature', data=temperature)\n w.create_dataset('mesh', data=mesh)\n if gamma_detail is not None:\n w.create_dataset('gamma_detail', data=gamma_detail,\n compression=compression)\n if triplet is not None:\n w.create_dataset('triplet', data=triplet,\n compression=compression)\n if weight is not None:\n w.create_dataset('weight', data=weight,\n compression=compression)\n if triplet_map is not None:\n w.create_dataset('triplet_map', data=triplet_map,\n compression=compression)\n if triplet_all is not None:\n w.create_dataset('triplet_all', data=triplet_all,\n compression=compression)\n if grid_point is not None:\n w.create_dataset('grid_point', data=grid_point)\n if band_index is not None:\n w.create_dataset('band_index', data=(band_index + 1))\n if sigma is not None:\n w.create_dataset('sigma', data=sigma)\n if sigma_cutoff is not None:\n w.create_dataset('sigma_cutoff_width', data=sigma_cutoff)\n if frequency_points is not None:\n w.create_dataset('frequency_point', data=frequency_points)\n\n if verbose:\n text = \"\"\n text += \"Phonon triplets contributions to Gamma \"\n if grid_point is not None:\n text += \"at gp-%d \" % grid_point\n if band_index is not None:\n text += \"and band_index-%d\\n\" % (band_index + 1)\n if sigma is not None:\n if grid_point is not None:\n text += \"and \"\n else:\n text += \"at \"\n 
text += \"sigma %s\" % sigma\n if sigma_cutoff is None:\n text += \"\\n\"\n else:\n text += \"(%4.2f SD)\\n\" % sigma_cutoff\n text += \"were written into \"\n else:\n text += \"were written into \"\n if band_index is None:\n text += \"\\n\"\n text += \"\\\"%s\\\".\" % full_filename\n print(text)\n\n return full_filename\n\n return None\n",
"def run(self):\n if self._pp_strength is None:\n self.run_interaction()\n\n num_band0 = self._pp_strength.shape[1]\n if self._frequency_points is None:\n self._imag_self_energy = np.zeros(num_band0, dtype='double')\n if self._with_detail:\n self._detailed_imag_self_energy = np.empty_like(\n self._pp_strength)\n self._detailed_imag_self_energy[:] = 0\n self._ise_N = np.zeros_like(self._imag_self_energy)\n self._ise_U = np.zeros_like(self._imag_self_energy)\n self._run_with_band_indices()\n else:\n self._imag_self_energy = np.zeros(\n (len(self._frequency_points), num_band0),\n order='C', dtype='double')\n if self._with_detail:\n self._detailed_imag_self_energy = np.zeros(\n (len(self._frequency_points),) + self._pp_strength.shape,\n order='C', dtype='double')\n self._ise_N = np.zeros_like(self._imag_self_energy)\n self._ise_U = np.zeros_like(self._imag_self_energy)\n self._run_with_frequency_points()\n",
"def run_interaction(self, is_full_pp=True):\n if is_full_pp or self._frequency_points is not None:\n self._pp.run(lang=self._lang)\n else:\n self._pp.run(lang=self._lang, g_zero=self._g_zero)\n self._pp_strength = self._pp.get_interaction_strength()\n",
"def set_integration_weights(self, scattering_event_class=None):\n if self._frequency_points is None:\n bi = self._pp.get_band_indices()\n f_points = self._frequencies[self._grid_point][bi]\n else:\n f_points = self._frequency_points\n\n self._g, _g_zero = get_triplets_integration_weights(\n self._pp,\n np.array(f_points, dtype='double'),\n self._sigma,\n self._sigma_cutoff,\n is_collision_matrix=self._is_collision_matrix)\n if self._frequency_points is None:\n self._g_zero = _g_zero\n\n if scattering_event_class == 1 or scattering_event_class == 2:\n self._g[scattering_event_class - 1] = 0\n",
"def get_imag_self_energy(self):\n if self._cutoff_frequency is None:\n return self._imag_self_energy\n else:\n return self._average_by_degeneracy(self._imag_self_energy)\n",
"def get_detailed_imag_self_energy(self):\n return self._detailed_imag_self_energy\n",
"def set_grid_point(self, grid_point=None, stores_triplets_map=False):\n if grid_point is None:\n self._grid_point = None\n else:\n self._pp.set_grid_point(grid_point,\n stores_triplets_map=stores_triplets_map)\n self._pp_strength = None\n (self._triplets_at_q,\n self._weights_at_q) = self._pp.get_triplets_at_q()[:2]\n self._grid_point = grid_point\n self._frequencies, self._eigenvectors, _ = self._pp.get_phonons()\n",
"def set_sigma(self, sigma, sigma_cutoff=None):\n if sigma is None:\n self._sigma = None\n else:\n self._sigma = float(sigma)\n\n if sigma_cutoff is None:\n self._sigma_cutoff = None\n else:\n self._sigma_cutoff = float(sigma_cutoff)\n\n self.delete_integration_weights()\n",
"def set_frequency_points(self, frequency_points):\n if frequency_points is None:\n self._frequency_points = None\n else:\n self._frequency_points = np.array(frequency_points, dtype='double')\n",
"def set_temperature(self, temperature):\n if temperature is None:\n self._temperature = None\n else:\n self._temperature = float(temperature)\n",
"def get_mesh_numbers(self):\n return self._mesh\n",
"def get_phonons(self):\n return self._frequencies, self._eigenvectors, self._phonon_done\n",
"def get_triplets_at_q(self):\n return (self._triplets_at_q,\n self._weights_at_q,\n self._triplets_map_at_q,\n self._ir_map_at_q)\n",
"def get_grid_address(self):\n return self._grid_address\n",
"def get_band_indices(self):\n return self._band_indices\n"
] | import sys
import numpy as np
from phonopy.units import Hbar, EV, THz
from phonopy.phonon.degeneracy import degenerate_sets
from phono3py.phonon3.triplets import (get_triplets_integration_weights,
occupation)
from phono3py.file_IO import (write_gamma_detail_to_hdf5,
write_imag_self_energy_at_grid_point)
def get_frequency_points(f_min,
                         f_max,
                         frequency_step=None,
                         num_frequency_points=None):
    """Return sampling frequency points between f_min and f_max.

    ``num_frequency_points`` takes precedence over ``frequency_step``.
    When neither is given, 201 evenly spaced points are returned.
    """
    if num_frequency_points is not None:
        return np.array(np.linspace(f_min, f_max, num_frequency_points),
                        dtype='double')
    if frequency_step is not None:
        return np.arange(f_min, f_max, frequency_step, dtype='double')
    return np.array(np.linspace(f_min, f_max, 201), dtype='double')
def write_imag_self_energy(imag_self_energy,
                           mesh,
                           grid_points,
                           band_indices,
                           frequency_points,
                           temperatures,
                           sigmas,
                           scattering_event_class=None,
                           filename=None,
                           is_mesh_symmetry=True):
    """Write imaginary self-energies to files, one file per
    (grid point, sigma, temperature, band-index group).

    For each group of band indices, the self-energy is averaged over the
    bands of the group before writing.  The actual file output is done by
    write_imag_self_energy_at_grid_point (phono3py.file_IO).

    The nesting of imag_self_energy is assumed to be
    [grid_point][sigma][temperature] -> 2D array
    (frequency points x bands); frequency_points is nested as
    [grid_point][sigma].
    """
    for gp, ise_sigmas, fp_sigmas in zip(grid_points,
                                         imag_self_energy,
                                         frequency_points):
        for sigma, ise_temps, fp in zip(sigmas, ise_sigmas, fp_sigmas):
            for t, ise in zip(temperatures, ise_temps):
                for i, bi in enumerate(band_indices):
                    # pos: column offset of this band-index group in ise.
                    pos = 0
                    for j in range(i):
                        pos += len(band_indices[j])
                    write_imag_self_energy_at_grid_point(
                        gp,
                        bi,
                        mesh,
                        fp,
                        ise[:, pos:(pos + len(bi))].sum(axis=1) / len(bi),
                        sigma=sigma,
                        temperature=t,
                        scattering_event_class=scattering_event_class,
                        filename=filename,
                        is_mesh_symmetry=is_mesh_symmetry)
def average_by_degeneracy(imag_self_energy, band_indices, freqs_at_gp):
    """Average imaginary self-energies over degenerate bands.

    Bands whose frequencies at the grid point are degenerate (as judged
    by phonopy's degenerate_sets) all receive the mean of their values.
    imag_self_energy may be 1D (bands,) or 2D (frequency points, bands).
    """
    averaged = np.zeros_like(imag_self_energy)
    for deg_band_set in degenerate_sets(freqs_at_gp):
        # Positions in band_indices belonging to this degenerate set.
        positions = [i for i, bi in enumerate(band_indices)
                     if bi in deg_band_set]
        if not positions:
            continue
        if imag_self_energy.ndim == 1:
            mean_val = imag_self_energy[positions].sum() / len(positions)
            for i in positions:
                averaged[i] = mean_val
        else:
            mean_val = (imag_self_energy[:, positions].sum(axis=1) /
                        len(positions))
            for i in positions:
                averaged[:, i] = mean_val
    return averaged
class ImagSelfEnergy(object):
    """Imaginary part of the phonon self-energy for one grid point.

    Gamma is computed either at the harmonic phonon frequencies of the
    selected band indices, or on a set of sampling frequency points.
    The ph-ph interaction strength comes from the ``interaction`` object
    (phono3py Interaction); triplet integration weights are stored in
    ``self._g``.  Fast paths call the phono3py C extension; slow pure
    Python fallbacks exist for testing.
    """
    def __init__(self,
                 interaction,
                 frequency_points=None,
                 temperature=None,
                 sigma=None,
                 sigma_cutoff=None,
                 with_detail=False,
                 unit_conversion=None,
                 lang='C'):
        self._pp = interaction
        self._sigma = None
        self.set_sigma(sigma, sigma_cutoff=sigma_cutoff)
        self._temperature = None
        self.set_temperature(temperature)
        self._frequency_points = None
        self.set_frequency_points(frequency_points)
        self._grid_point = None
        self._lang = lang
        self._imag_self_energy = None
        self._detailed_imag_self_energy = None
        self._pp_strength = None
        self._frequencies = None
        self._triplets_at_q = None
        self._weights_at_q = None
        self._with_detail = with_detail
        self._unit_conversion = None
        self._cutoff_frequency = interaction.get_cutoff_frequency()
        self._g = None  # integration weights
        self._g_zero = None
        self._mesh = self._pp.get_mesh_numbers()
        self._is_collision_matrix = False
        # Unit to THz of Gamma
        if unit_conversion is None:
            self._unit_conversion = (18 * np.pi / (Hbar * EV) ** 2
                                     / (2 * np.pi * THz) ** 2
                                     * EV ** 2)
        else:
            self._unit_conversion = unit_conversion
    def run(self):
        """Compute the imaginary self-energy.

        Allocates the result arrays and dispatches to band-indices mode
        (no frequency points set) or frequency-points mode.
        """
        if self._pp_strength is None:
            self.run_interaction()
        num_band0 = self._pp_strength.shape[1]
        if self._frequency_points is None:
            self._imag_self_energy = np.zeros(num_band0, dtype='double')
            if self._with_detail:
                self._detailed_imag_self_energy = np.empty_like(
                    self._pp_strength)
                self._detailed_imag_self_energy[:] = 0
                self._ise_N = np.zeros_like(self._imag_self_energy)
                self._ise_U = np.zeros_like(self._imag_self_energy)
            self._run_with_band_indices()
        else:
            self._imag_self_energy = np.zeros(
                (len(self._frequency_points), num_band0),
                order='C', dtype='double')
            if self._with_detail:
                self._detailed_imag_self_energy = np.zeros(
                    (len(self._frequency_points),) + self._pp_strength.shape,
                    order='C', dtype='double')
                self._ise_N = np.zeros_like(self._imag_self_energy)
                self._ise_U = np.zeros_like(self._imag_self_energy)
            self._run_with_frequency_points()
    def run_interaction(self, is_full_pp=True):
        """Compute ph-ph interaction strengths via the interaction object.

        When is_full_pp is False and band-indices mode is used, elements
        flagged zero by the integration weights are skipped.
        """
        if is_full_pp or self._frequency_points is not None:
            self._pp.run(lang=self._lang)
        else:
            self._pp.run(lang=self._lang, g_zero=self._g_zero)
        self._pp_strength = self._pp.get_interaction_strength()
    def set_integration_weights(self, scattering_event_class=None):
        """Compute triplet integration weights self._g (and g_zero).

        scattering_event_class 1 or 2 zeroes the other class's channel so
        only one scattering class contributes.
        """
        if self._frequency_points is None:
            bi = self._pp.get_band_indices()
            f_points = self._frequencies[self._grid_point][bi]
        else:
            f_points = self._frequency_points
        self._g, _g_zero = get_triplets_integration_weights(
            self._pp,
            np.array(f_points, dtype='double'),
            self._sigma,
            self._sigma_cutoff,
            is_collision_matrix=self._is_collision_matrix)
        if self._frequency_points is None:
            self._g_zero = _g_zero
        if scattering_event_class == 1 or scattering_event_class == 2:
            # g[0] corresponds to class-1 and g[1] to class-2 events;
            # zeroing one selects the other.
            self._g[scattering_event_class - 1] = 0
    def get_imag_self_energy(self):
        if self._cutoff_frequency is None:
            return self._imag_self_energy
        else:
            # Symmetrize over degenerate bands.
            return self._average_by_degeneracy(self._imag_self_energy)
    def get_imag_self_energy_N_and_U(self):
        # Normal and Umklapp contributions (requires with_detail=True).
        if self._cutoff_frequency is None:
            return self._ise_N, self._ise_U
        else:
            return (self._average_by_degeneracy(self._ise_N),
                    self._average_by_degeneracy(self._ise_U))
    def get_detailed_imag_self_energy(self):
        return self._detailed_imag_self_energy
    def get_integration_weights(self):
        return self._g, self._g_zero
    def get_unit_conversion_factor(self):
        return self._unit_conversion
    def set_grid_point(self, grid_point=None, stores_triplets_map=False):
        """Select the grid point; updates triplets and phonon data."""
        if grid_point is None:
            self._grid_point = None
        else:
            self._pp.set_grid_point(grid_point,
                                    stores_triplets_map=stores_triplets_map)
            self._pp_strength = None
            (self._triplets_at_q,
             self._weights_at_q) = self._pp.get_triplets_at_q()[:2]
            self._grid_point = grid_point
            self._frequencies, self._eigenvectors, _ = self._pp.get_phonons()
    def set_sigma(self, sigma, sigma_cutoff=None):
        """Set smearing width (None means tetrahedron method downstream);
        invalidates previously computed integration weights."""
        if sigma is None:
            self._sigma = None
        else:
            self._sigma = float(sigma)
        if sigma_cutoff is None:
            self._sigma_cutoff = None
        else:
            self._sigma_cutoff = float(sigma_cutoff)
        self.delete_integration_weights()
    def set_frequency_points(self, frequency_points):
        if frequency_points is None:
            self._frequency_points = None
        else:
            self._frequency_points = np.array(frequency_points, dtype='double')
    def set_temperature(self, temperature):
        if temperature is None:
            self._temperature = None
        else:
            self._temperature = float(temperature)
    def set_averaged_pp_interaction(self, ave_pp):
        """Fill pp strength with band-averaged values (one value per band)."""
        # set_phonons is unnecessary now because all phonons are calculated in
        # self._pp.set_dynamical_matrix, though Gamma-point is an exception,
        # which is treatd at self._pp.set_grid_point.
        # self._pp.set_phonons(self._triplets_at_q.ravel())
        # (self._frequencies,
        #  self._eigenvectors) = self._pp.get_phonons()[:2]
        num_triplets = len(self._triplets_at_q)
        num_band = self._pp.get_primitive().get_number_of_atoms() * 3
        num_grid = np.prod(self._mesh)
        bi = self._pp.get_band_indices()
        self._pp_strength = np.zeros(
            (num_triplets, len(bi), num_band, num_band), dtype='double')
        for i, v_ave in enumerate(ave_pp):
            self._pp_strength[:, i, :, :] = v_ave / num_grid
    def set_interaction_strength(self, pp_strength):
        self._pp_strength = pp_strength
        self._pp.set_interaction_strength(pp_strength, g_zero=self._g_zero)
    def delete_integration_weights(self):
        self._g = None
        self._g_zero = None
        self._pp_strength = None
    def _run_with_band_indices(self):
        # Dispatch on language; requires integration weights to be set.
        if self._g is not None:
            if self._lang == 'C':
                if self._with_detail:
                    # self._detailed_imag_self_energy.shape =
                    #    (num_triplets, num_band0, num_band, num_band)
                    # self._imag_self_energy is also set.
                    self._run_c_detailed_with_band_indices_with_g()
                else:
                    # self._imag_self_energy.shape = (num_band0,)
                    self._run_c_with_band_indices_with_g()
            else:
                print("Running into _run_py_with_band_indices_with_g()")
                print("This routine is super slow and only for the test.")
                self._run_py_with_band_indices_with_g()
        else:
            print("get_triplets_integration_weights must be executed "
                  "before calling this method.")
            import sys
            sys.exit(1)
    def _run_with_frequency_points(self):
        if self._g is not None:
            if self._lang == 'C':
                if self._with_detail:
                    self._run_c_detailed_with_frequency_points_with_g()
                else:
                    self._run_c_with_frequency_points_with_g()
            else:
                print("Running into _run_py_with_frequency_points_with_g()")
                print("This routine is super slow and only for the test.")
                self._run_py_with_frequency_points_with_g()
        else:
            print("get_triplets_integration_weights must be executed "
                  "before calling this method.")
            import sys
            sys.exit(1)
    def _run_c_with_band_indices_with_g(self):
        import phono3py._phono3py as phono3c
        if self._g_zero is None:
            _g_zero = np.zeros(self._pp_strength.shape,
                               dtype='byte', order='C')
        else:
            _g_zero = self._g_zero
        phono3c.imag_self_energy_with_g(self._imag_self_energy,
                                        self._pp_strength,
                                        self._triplets_at_q,
                                        self._weights_at_q,
                                        self._frequencies,
                                        self._temperature,
                                        self._g,
                                        _g_zero,
                                        self._cutoff_frequency)
        self._imag_self_energy *= self._unit_conversion
    def _run_c_detailed_with_band_indices_with_g(self):
        import phono3py._phono3py as phono3c
        if self._g_zero is None:
            _g_zero = np.zeros(self._pp_strength.shape,
                               dtype='byte', order='C')
        else:
            _g_zero = self._g_zero
        phono3c.detailed_imag_self_energy_with_g(
            self._detailed_imag_self_energy,
            self._ise_N,  # Normal
            self._ise_U,  # Umklapp
            self._pp_strength,
            self._triplets_at_q,
            self._weights_at_q,
            self._pp.get_grid_address(),
            self._frequencies,
            self._temperature,
            self._g,
            _g_zero,
            self._cutoff_frequency)
        self._detailed_imag_self_energy *= self._unit_conversion
        self._ise_N *= self._unit_conversion
        self._ise_U *= self._unit_conversion
        self._imag_self_energy = self._ise_N + self._ise_U
    def _run_c_with_frequency_points_with_g(self):
        import phono3py._phono3py as phono3c
        num_band0 = self._pp_strength.shape[1]
        g_shape = list(self._g.shape)
        g_shape[2] = num_band0
        g = np.zeros(tuple(g_shape), dtype='double', order='C')
        ise_at_f = np.zeros(num_band0, dtype='double')
        _g_zero = np.zeros(g_shape, dtype='byte', order='C')
        # Loop over frequency points, reusing the band-indices kernel by
        # broadcasting each frequency point's weights over the bands.
        for i in range(len(self._frequency_points)):
            for j in range(num_band0):
                g[:, :, j, :, :] = self._g[:, :, i, :, :]
            phono3c.imag_self_energy_with_g(ise_at_f,
                                            self._pp_strength,
                                            self._triplets_at_q,
                                            self._weights_at_q,
                                            self._frequencies,
                                            self._temperature,
                                            g,
                                            _g_zero,  # don't use g_zero
                                            self._cutoff_frequency)
            self._imag_self_energy[i] = ise_at_f
        self._imag_self_energy *= self._unit_conversion
    def _run_c_detailed_with_frequency_points_with_g(self):
        import phono3py._phono3py as phono3c
        num_band0 = self._pp_strength.shape[1]
        g_shape = list(self._g.shape)
        g_shape[2] = num_band0
        g = np.zeros((2,) + self._pp_strength.shape, order='C', dtype='double')
        detailed_ise_at_f = np.zeros(
            self._detailed_imag_self_energy.shape[1:5],
            order='C', dtype='double')
        ise_at_f_N = np.zeros(num_band0, dtype='double')
        ise_at_f_U = np.zeros(num_band0, dtype='double')
        _g_zero = np.zeros(g_shape, dtype='byte', order='C')
        for i in range(len(self._frequency_points)):
            for j in range(g.shape[2]):
                g[:, :, j, :, :] = self._g[:, :, i, :, :]
            phono3c.detailed_imag_self_energy_with_g(
                detailed_ise_at_f,
                ise_at_f_N,
                ise_at_f_U,
                self._pp_strength,
                self._triplets_at_q,
                self._weights_at_q,
                self._pp.get_grid_address(),
                self._frequencies,
                self._temperature,
                g,
                _g_zero,
                self._cutoff_frequency)
            self._detailed_imag_self_energy[i] = (detailed_ise_at_f *
                                                  self._unit_conversion)
            self._ise_N[i] = ise_at_f_N * self._unit_conversion
            self._ise_U[i] = ise_at_f_U * self._unit_conversion
            self._imag_self_energy[i] = self._ise_N[i] + self._ise_U[i]
    def _run_py_with_band_indices_with_g(self):
        if self._temperature > 0:
            self._ise_thm_with_band_indices()
        else:
            self._ise_thm_with_band_indices_0K()
    def _ise_thm_with_band_indices(self):
        # Pure Python reference implementation at finite temperature.
        freqs = self._frequencies[self._triplets_at_q[:, [1, 2]]]
        freqs = np.where(freqs > self._cutoff_frequency, freqs, 1)
        n = occupation(freqs, self._temperature)
        for i, (tp, w, interaction) in enumerate(zip(self._triplets_at_q,
                                                     self._weights_at_q,
                                                     self._pp_strength)):
            for j, k in list(np.ndindex(interaction.shape[1:])):
                f1 = self._frequencies[tp[1]][j]
                f2 = self._frequencies[tp[2]][k]
                if (f1 > self._cutoff_frequency and
                    f2 > self._cutoff_frequency):
                    n2 = n[i, 0, j]
                    n3 = n[i, 1, k]
                    g1 = self._g[0, i, :, j, k]
                    g2_g3 = self._g[1, i, :, j, k]  # g2 - g3
                    self._imag_self_energy[:] += (
                        (n2 + n3 + 1) * g1 +
                        (n2 - n3) * (g2_g3)) * interaction[:, j, k] * w
        self._imag_self_energy *= self._unit_conversion
    def _ise_thm_with_band_indices_0K(self):
        # At 0 K only the spontaneous-decay (class-2) channel survives.
        for i, (w, interaction) in enumerate(zip(self._weights_at_q,
                                                 self._pp_strength)):
            for j, k in list(np.ndindex(interaction.shape[1:])):
                g1 = self._g[0, i, :, j, k]
                self._imag_self_energy[:] += g1 * interaction[:, j, k] * w
        self._imag_self_energy *= self._unit_conversion
    def _run_py_with_frequency_points_with_g(self):
        if self._temperature > 0:
            self._ise_thm_with_frequency_points()
        else:
            self._ise_thm_with_frequency_points_0K()
    def _ise_thm_with_frequency_points(self):
        for i, (tp, w, interaction) in enumerate(zip(self._triplets_at_q,
                                                     self._weights_at_q,
                                                     self._pp_strength)):
            for j, k in list(np.ndindex(interaction.shape[1:])):
                f1 = self._frequencies[tp[1]][j]
                f2 = self._frequencies[tp[2]][k]
                if (f1 > self._cutoff_frequency and
                    f2 > self._cutoff_frequency):
                    n2 = occupation(f1, self._temperature)
                    n3 = occupation(f2, self._temperature)
                    g1 = self._g[0, i, :, j, k]
                    g2_g3 = self._g[1, i, :, j, k]  # g2 - g3
                    for l in range(len(interaction)):
                        self._imag_self_energy[:, l] += (
                            (n2 + n3 + 1) * g1 +
                            (n2 - n3) * (g2_g3)) * interaction[l, j, k] * w
        self._imag_self_energy *= self._unit_conversion
    def _ise_thm_with_frequency_points_0K(self):
        for i, (w, interaction) in enumerate(zip(self._weights_at_q,
                                                 self._pp_strength)):
            for j, k in list(np.ndindex(interaction.shape[1:])):
                g1 = self._g[0, i, :, j, k]
                for l in range(len(interaction)):
                    self._imag_self_energy[:, l] += g1 * interaction[l, j, k] * w
        self._imag_self_energy *= self._unit_conversion
    def _average_by_degeneracy(self, imag_self_energy):
        return average_by_degeneracy(imag_self_energy,
                                     self._pp.get_band_indices(),
                                     self._frequencies[self._grid_point])
|
atztogo/phono3py | phono3py/phonon3/fc3.py | distribute_fc3 | python | def distribute_fc3(fc3,
first_disp_atoms,
target_atoms,
lattice,
rotations,
permutations,
s2compact,
verbose=False):
n_satom = fc3.shape[1]
for i_target in target_atoms:
for i_done in first_disp_atoms:
rot_indices = np.where(permutations[:, i_target] == i_done)[0]
if len(rot_indices) > 0:
atom_mapping = np.array(permutations[rot_indices[0]],
dtype='intc')
rot = rotations[rot_indices[0]]
rot_cart_inv = np.array(
similarity_transformation(lattice, rot).T,
dtype='double', order='C')
break
if len(rot_indices) == 0:
print("Position or symmetry may be wrong.")
raise RuntimeError
if verbose > 2:
print(" [ %d, x, x ] to [ %d, x, x ]" %
(i_done + 1, i_target + 1))
sys.stdout.flush()
try:
import phono3py._phono3py as phono3c
phono3c.distribute_fc3(fc3,
int(s2compact[i_target]),
int(s2compact[i_done]),
atom_mapping,
rot_cart_inv)
except ImportError:
print("Phono3py C-routine is not compiled correctly.")
for j in range(n_satom):
j_rot = atom_mapping[j]
for k in range(n_satom):
k_rot = atom_mapping[k]
fc3[i_target, j, k] = third_rank_tensor_rotation(
rot_cart_inv, fc3[i_done, j_rot, k_rot]) | Distribute fc3
fc3[i, :, :, 0:3, 0:3, 0:3] where i=indices done are distributed to
symmetrically equivalent fc3 elements by tensor rotations.
Search symmetry operation (R, t) that performs
i_target -> i_done
and
atom_mapping[i_target] = i_done
fc3[i_target, j_target, k_target] = R_inv[i_done, j, k]
Parameters
----------
target_atoms: list or ndarray
Supercell atom indices to which fc3 are distributed.
s2compact: ndarray
Maps supercell index to compact index. For full-fc3,
s2compact=np.arange(n_satom).
shape=(n_satom,)
dtype=intc | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/fc3.py#L85-L153 | [
"def third_rank_tensor_rotation(rot_cart, tensor):\n rot_tensor = np.zeros((3, 3, 3), dtype='double')\n for i in (0, 1, 2):\n for j in (0, 1, 2):\n for k in (0, 1, 2):\n rot_tensor[i, j, k] = _third_rank_tensor_rotation_elem(\n rot_cart, tensor, i, j, k)\n return rot_tensor\n"
] | import sys
import logging
import numpy as np
from phonopy.harmonic.force_constants import (get_fc2,
similarity_transformation,
distribute_force_constants,
solve_force_constants,
get_rotated_displacement,
get_positions_sent_by_rot_inv,
get_nsym_list_and_s2pp)
from phono3py.phonon3.displacement_fc3 import (get_reduced_site_symmetry,
get_bond_symmetry,
get_equivalent_smallest_vectors)
from phonopy.structure.cells import compute_all_sg_permutations
logger = logging.getLogger(__name__)
def get_fc3(supercell,
            primitive,
            disp_dataset,
            symmetry,
            is_compact_fc=False,
            verbose=False):
    """Compute second- and third-order force constants from displacements.

    fc3 for the symmetry-independent displaced atoms is solved first,
    then expanded to the remaining atoms by the crystal symmetry
    operations (distribute_fc3).  When 'cutoff_distance' is present in
    disp_dataset, distant fc3 elements are averaged/cut by cutoff_fc3
    (not supported together with compact fc3).

    Returns
    -------
    (fc2, fc3) : tuple of ndarray
    """
    # fc2 has to be full matrix to compute delta-fc2
    # p2s_map elements are extracted if is_compact_fc=True at the last part.
    fc2 = get_fc2(supercell, symmetry, disp_dataset)
    fc3 = _get_fc3_least_atoms(supercell,
                               primitive,
                               disp_dataset,
                               fc2,
                               symmetry,
                               is_compact_fc=is_compact_fc,
                               verbose=verbose)
    if verbose:
        print("Expanding fc3")
    first_disp_atoms = np.unique(
        [x['number'] for x in disp_dataset['first_atoms']])
    rotations = symmetry.get_symmetry_operations()['rotations']
    lattice = supercell.get_cell().T
    permutations = symmetry.get_atomic_permutations()
    if is_compact_fc:
        # Compact fc3 keeps only primitive-cell atoms in the first axis.
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2s_map = primitive.get_primitive_to_supercell_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        s2compact = np.array([p2p_map[i] for i in s2p_map], dtype='intc')
        for i in first_disp_atoms:
            assert i in p2s_map
        target_atoms = [i for i in p2s_map if i not in first_disp_atoms]
    else:
        s2compact = np.arange(supercell.get_number_of_atoms(), dtype='intc')
        target_atoms = [i for i in s2compact if i not in first_disp_atoms]
    distribute_fc3(fc3,
                   first_disp_atoms,
                   target_atoms,
                   lattice,
                   rotations,
                   permutations,
                   s2compact,
                   verbose=verbose)
    if 'cutoff_distance' in disp_dataset:
        if verbose:
            print("Cutting-off fc3 (cut-off distance: %f)" %
                  disp_dataset['cutoff_distance'])
        if is_compact_fc:
            print("cutoff_fc3 doesn't support compact-fc3 yet.")
            raise ValueError
        cutoff_fc3(fc3,
                   supercell,
                   disp_dataset,
                   symmetry,
                   verbose=verbose)
    if is_compact_fc:
        # Contract fc2 to the compact shape only at the very end.
        p2s_map = primitive.get_primitive_to_supercell_map()
        fc2 = np.array(fc2[p2s_map], dtype='double', order='C')
    return fc2, fc3
def set_permutation_symmetry_fc3(fc3):
    """Symmetrize full fc3 over index permutations, in place.

    Uses the phono3py C extension when available; otherwise falls back
    to a slow pure-Python loop over the upper triangle i <= j <= k.
    """
    try:
        import phono3py._phono3py as phono3c
        phono3c.permutation_symmetry_fc3(fc3)
    except ImportError:
        print("Phono3py C-routine is not compiled correctly.")
        num_atom = fc3.shape[0]
        for i in range(num_atom):
            for j in range(i, num_atom):
                for k in range(j, num_atom):
                    fc3_elem = set_permutation_symmetry_fc3_elem(fc3, i, j, k)
                    copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, i, j, k)
def set_permutation_symmetry_compact_fc3(fc3, primitive):
    """Symmetrize compact fc3 over index permutations, in place.

    Only the C extension implements this; a RuntimeError is raised when
    phono3py._phono3py cannot be imported.
    """
    try:
        import phono3py._phono3py as phono3c
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2s_map = primitive.get_primitive_to_supercell_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        permutations = primitive.get_atomic_permutations()
        s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
                                                     p2p_map,
                                                     permutations)
        phono3c.permutation_symmetry_compact_fc3(fc3,
                                                 permutations,
                                                 s2pp_map,
                                                 p2s_map,
                                                 nsym_list)
    except ImportError:
        text = ("Import error at phono3c.permutation_symmetry_compact_fc3. "
                "Corresponding python code is not implemented.")
        raise RuntimeError(text)
def copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, a, b, c):
    """Write fc3_elem into fc3 at all six permutations of (a, b, c).

    The Cartesian indices are permuted together with the atom indices,
    i.e. the permutation symmetry of third-order force constants.
    """
    for i in range(3):
        for j in range(3):
            for k in range(3):
                val = fc3_elem[i, j, k]
                fc3[a, b, c, i, j, k] = val
                fc3[c, a, b, k, i, j] = val
                fc3[b, c, a, j, k, i] = val
                fc3[a, c, b, i, k, j] = val
                fc3[b, a, c, j, i, k] = val
                fc3[c, b, a, k, j, i] = val
def set_permutation_symmetry_fc3_elem(fc3, a, b, c, divisor=6):
    """Return the (3, 3, 3) tensor at (a, b, c) averaged over the six
    simultaneous permutations of atom and Cartesian indices."""
    tensor3 = np.zeros((3, 3, 3), dtype='double')
    for i in range(3):
        for j in range(3):
            for k in range(3):
                acc = fc3[a, b, c, i, j, k]
                acc = acc + fc3[c, a, b, k, i, j]
                acc = acc + fc3[b, c, a, j, k, i]
                acc = acc + fc3[a, c, b, i, k, j]
                acc = acc + fc3[b, a, c, j, i, k]
                acc = acc + fc3[c, b, a, k, j, i]
                tensor3[i, j, k] = acc / divisor
    return tensor3
def set_translational_invariance_fc3(fc3):
    """Impose the translational-invariance sum rule on fc3 in place,
    along each of the three atom axes in turn."""
    for axis in (0, 1, 2):
        set_translational_invariance_fc3_per_index(fc3, index=axis)
def set_translational_invariance_compact_fc3(fc3, primitive):
    """Impose the translational-invariance sum rule on compact fc3.

    Compact fc3 lacks the full first-atom axis, so the C extension
    transposes axes 0 and 1 in the compact representation to allow the
    sum rule to be applied along what is physically axis 0; axes 1 and 2
    are handled directly.  Raises RuntimeError without the C extension.
    """
    try:
        import phono3py._phono3py as phono3c
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2s_map = primitive.get_primitive_to_supercell_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        permutations = primitive.get_atomic_permutations()
        s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
                                                     p2p_map,
                                                     permutations)
        phono3c.transpose_compact_fc3(fc3,
                                      permutations,
                                      s2pp_map,
                                      p2s_map,
                                      nsym_list,
                                      0)  # dim[0] <--> dim[1]
        set_translational_invariance_fc3_per_index(fc3, index=1)
        phono3c.transpose_compact_fc3(fc3,
                                      permutations,
                                      s2pp_map,
                                      p2s_map,
                                      nsym_list,
                                      0)  # dim[0] <--> dim[1]
        set_translational_invariance_fc3_per_index(fc3, index=1)
        set_translational_invariance_fc3_per_index(fc3, index=2)
    except ImportError:
        text = ("Import error at phono3c.tranpose_compact_fc3. "
                "Corresponding python code is not implemented.")
        raise RuntimeError(text)
def set_translational_invariance_fc3_per_index(fc3, index=0):
    """Impose the acoustic sum rule on fc3 along one atom axis, in place.

    The mean over atom axis ``index`` (0, 1 or 2) is subtracted from
    every element so that ``fc3.sum(axis=index)`` becomes zero.  Any
    other ``index`` value is silently ignored (matching the historical
    behavior of the element-wise implementation).

    This is the vectorized equivalent of subtracting
    ``np.sum(fc3[..slice..]) / fc3.shape[index]`` from every element of
    each 1D slice along that axis; the slices are disjoint, so the
    update order of the old loop did not matter.
    """
    if index not in (0, 1, 2):
        return
    fc3 -= fc3.mean(axis=index, keepdims=True)
def third_rank_tensor_rotation(rot_cart, tensor):
    """Rotate a third-rank (3, 3, 3) tensor by a Cartesian rotation.

    Returns a new C-ordered double array with elements
    ``R[l, i] R[m, j] R[n, k] T[i, j, k]`` summed over i, j, k.

    Implemented with a single einsum instead of the former 81-term
    Python loop per element (which also needed a per-element helper);
    the result values are identical.
    """
    rot = np.asarray(rot_cart)
    return np.array(np.einsum('li,mj,nk,ijk->lmn', rot, rot, rot, tensor),
                    dtype='double', order='C')
def get_delta_fc2(dataset_second_atoms,
                  atom1,
                  fc2,
                  supercell,
                  reduced_site_sym,
                  symprec):
    """Return delta-fc2: fc2 of the supercell with atom1 displaced,
    fitted from the second-atom displacement data, minus the perfect
    supercell fc2."""
    logger.debug("get_delta_fc2")
    disp_fc2 = get_constrained_fc2(supercell,
                                   dataset_second_atoms,
                                   atom1,
                                   reduced_site_sym,
                                   symprec)
    return disp_fc2 - fc2
def get_constrained_fc2(supercell,
                        dataset_second_atoms,
                        atom1,
                        reduced_site_sym,
                        symprec):
    """Fit fc2 of the supercell in which atom1 is displaced.

    Parameters
    ----------
    dataset_second_atoms : list of dict
        [{'number': 7, 'displacement': [], 'delta_forces': []}, ...]
    atom1 : int
        Index of the displaced first atom; the site symmetry reduced by
        its displacement (reduced_site_sym) constrains the fit.

    Force constants are solved for the displaced second atoms using the
    bond symmetry of each (atom1, atom2) pair, then distributed to the
    remaining atoms by the reduced site symmetry around atom1.
    """
    lattice = supercell.get_cell().T
    positions = supercell.get_scaled_positions()
    num_atom = supercell.get_number_of_atoms()
    fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
    atom_list = np.unique([x['number'] for x in dataset_second_atoms])
    for atom2 in atom_list:
        disps2 = []
        sets_of_forces = []
        for disps_second in dataset_second_atoms:
            if atom2 != disps_second['number']:
                continue
            bond_sym = get_bond_symmetry(
                reduced_site_sym,
                lattice,
                positions,
                atom1,
                atom2,
                symprec)
            disps2.append(disps_second['displacement'])
            sets_of_forces.append(disps_second['delta_forces'])
        solve_force_constants(fc2,
                              atom2,
                              disps2,
                              sets_of_forces,
                              supercell,
                              bond_sym,
                              symprec)
    # Shift positions according to set atom1 is at origin
    pos_center = positions[atom1].copy()
    positions -= pos_center
    rotations = np.array(reduced_site_sym, dtype='intc', order='C')
    translations = np.zeros((len(reduced_site_sym), 3),
                            dtype='double', order='C')
    permutations = compute_all_sg_permutations(positions,
                                               rotations,
                                               translations,
                                               lattice,
                                               symprec)
    distribute_force_constants(fc2,
                               atom_list,
                               lattice,
                               rotations,
                               permutations)
    return fc2
def solve_fc3(first_atom_num,
              supercell,
              site_symmetry,
              displacements_first,
              delta_fc2s,
              symprec,
              pinv_solver="numpy",
              verbose=False):
    """Solve fc3[first_atom_num, :, :] from delta-fc2 matrices.

    The displacements of the first atom are expanded by the site
    symmetry of that atom, the resulting displacement matrix is
    pseudo-inverted (numpy or, when available and requested, the
    LAPACKE dgesvd-based C routine), and the delta-fc2s are rotated and
    contracted with the pseudo-inverse to give the fc3 slab.
    """
    logger.debug("solve_fc3")
    if pinv_solver == "numpy":
        solver = "numpy.linalg.pinv"
    else:
        try:
            import phono3py._lapackepy as lapackepy
            solver = "lapacke-dgesvd"
        except ImportError:
            print("Phono3py C-routine is not compiled correctly.")
            solver = "numpy.linalg.pinv"
    if verbose:
        text = ("Computing fc3[ %d, x, x ] using %s with " %
                (first_atom_num + 1, solver))
        if len(displacements_first) > 1:
            text += "displacements:"
        else:
            text += "a displacement:"
        print(text)
        for i, v in enumerate(displacements_first):
            print(" [%7.4f %7.4f %7.4f]" % tuple(v))
        sys.stdout.flush()
    if verbose > 2:
        print(" Site symmetry:")
        for i, v in enumerate(site_symmetry):
            print(" [%2d %2d %2d] #%2d" % tuple(list(v[0])+[i + 1]))
            print(" [%2d %2d %2d]" % tuple(v[1]))
            print(" [%2d %2d %2d]\n" % tuple(v[2]))
        sys.stdout.flush()
    lattice = supercell.get_cell().T
    site_sym_cart = np.array([similarity_transformation(lattice, sym)
                              for sym in site_symmetry],
                             dtype='double', order='C')
    num_atom = supercell.get_number_of_atoms()
    positions = supercell.get_scaled_positions()
    # Shift so that the displaced atom sits at the origin for the
    # rotation mapping below.
    pos_center = positions[first_atom_num].copy()
    positions -= pos_center
    logger.debug("get_positions_sent_by_rot_inv")
    rot_map_syms = get_positions_sent_by_rot_inv(lattice,
                                                 positions,
                                                 site_symmetry,
                                                 symprec)
    rot_disps = get_rotated_displacement(displacements_first, site_sym_cart)
    logger.debug("pinv")
    if "numpy" in solver:
        inv_U = np.array(np.linalg.pinv(rot_disps), dtype='double', order='C')
    else:
        inv_U = np.zeros((rot_disps.shape[1], rot_disps.shape[0]),
                         dtype='double', order='C')
        lapackepy.pinv(inv_U, rot_disps, 1e-13)
    fc3 = np.zeros((num_atom, num_atom, 3, 3, 3), dtype='double', order='C')
    logger.debug("rotate_delta_fc2s")
    try:
        import phono3py._phono3py as phono3c
        phono3c.rotate_delta_fc2s(fc3,
                                  delta_fc2s,
                                  inv_U,
                                  site_sym_cart,
                                  rot_map_syms)
    except ImportError:
        # Slow pure-Python fallback of phono3c.rotate_delta_fc2s.
        for i, j in np.ndindex(num_atom, num_atom):
            fc3[i, j] = np.dot(inv_U, _get_rotated_fc2s(
                i, j, delta_fc2s, rot_map_syms, site_sym_cart)
            ).reshape(3, 3, 3)
    return fc3
def cutoff_fc3(fc3,
               supercell,
               disp_dataset,
               symmetry,
               verbose=False):
    """Average fc3 over permutations weighted by which elements were
    actually computed within the cutoff distance, in place.

    _get_fc3_done marks (i, j, k) triplets covered by the displacement
    dataset; elements of undone permutations are replaced by the average
    of the done ones.
    """
    if verbose:
        print("Building atom mapping table...")
    fc3_done = _get_fc3_done(supercell, disp_dataset, symmetry, fc3.shape[:3])
    if verbose:
        print("Creating contracted fc3...")
    num_atom = supercell.get_number_of_atoms()
    for i in range(num_atom):
        for j in range(i, num_atom):
            for k in range(j, num_atom):
                ave_fc3 = _set_permutation_symmetry_fc3_elem_with_cutoff(
                    fc3, fc3_done, i, j, k)
                copy_permutation_symmetry_fc3_elem(fc3, ave_fc3, i, j, k)
def cutoff_fc3_by_zero(fc3, supercell, cutoff_distance, symprec=1e-5):
    """Zero fc3 elements whose atom triplet contains a pair farther
    apart than cutoff_distance (smallest-image distance), in place."""
    num_atom = supercell.get_number_of_atoms()
    lattice = supercell.get_cell().T
    min_distances = np.zeros((num_atom, num_atom), dtype='double')
    for i in range(num_atom): # run in supercell
        for j in range(num_atom): # run in primitive
            min_distances[i, j] = np.linalg.norm(
                np.dot(lattice,
                       get_equivalent_smallest_vectors(
                           i, j, supercell, symprec)[0]))
    for i, j, k in np.ndindex(num_atom, num_atom, num_atom):
        for pair in ((i, j), (j, k), (k, i)):
            if min_distances[pair] > cutoff_distance:
                fc3[i, j, k] = 0
                break
def show_drift_fc3(fc3,
                   primitive=None,
                   name="fc3"):
    """Print the largest violation of the translational sum rule of fc3
    along each atom axis.

    Works for both full fc3 (first two axes equal length) and compact
    fc3 (requires ``primitive`` and the C extension to transpose the
    compact axes).
    """
    if fc3.shape[0] == fc3.shape[1]:
        # Full fc3: sum over each atom axis directly.
        num_atom = fc3.shape[0]
        maxval1 = 0
        maxval2 = 0
        maxval3 = 0
        klm1 = [0, 0, 0]
        klm2 = [0, 0, 0]
        klm3 = [0, 0, 0]
        for i, j, k, l, m in list(np.ndindex((num_atom, num_atom, 3, 3, 3))):
            val1 = fc3[:, i, j, k, l, m].sum()
            val2 = fc3[i, :, j, k, l, m].sum()
            val3 = fc3[i, j, :, k, l, m].sum()
            if abs(val1) > abs(maxval1):
                maxval1 = val1
                klm1 = [k, l, m]
            if abs(val2) > abs(maxval2):
                maxval2 = val2
                klm2 = [k, l, m]
            if abs(val3) > abs(maxval3):
                maxval3 = val3
                klm3 = [k, l, m]
    else:
        # Compact fc3: the sum over the (missing) first axis is obtained
        # by transposing compact axes 0 and 1 in the C extension.
        try:
            import phono3py._phono3py as phono3c
            s2p_map = primitive.get_supercell_to_primitive_map()
            p2s_map = primitive.get_primitive_to_supercell_map()
            p2p_map = primitive.get_primitive_to_primitive_map()
            permutations = primitive.get_atomic_permutations()
            s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
                                                         p2p_map,
                                                         permutations)
            num_patom = fc3.shape[0]
            num_satom = fc3.shape[1]
            maxval1 = 0
            maxval2 = 0
            maxval3 = 0
            klm1 = [0, 0, 0]
            klm2 = [0, 0, 0]
            klm3 = [0, 0, 0]
            phono3c.transpose_compact_fc3(fc3,
                                          permutations,
                                          s2pp_map,
                                          p2s_map,
                                          nsym_list,
                                          0)  # dim[0] <--> dim[1]
            for i, j, k, l, m in np.ndindex((num_patom, num_satom, 3, 3, 3)):
                val1 = fc3[i, :, j, k, l, m].sum()
                if abs(val1) > abs(maxval1):
                    maxval1 = val1
                    klm1 = [k, l, m]
            # Transpose back before checking the remaining two axes.
            phono3c.transpose_compact_fc3(fc3,
                                          permutations,
                                          s2pp_map,
                                          p2s_map,
                                          nsym_list,
                                          0)  # dim[0] <--> dim[1]
            for i, j, k, l, m in np.ndindex((num_patom, num_satom, 3, 3, 3)):
                val2 = fc3[i, :, j, k, l, m].sum()
                val3 = fc3[i, j, :, k, l, m].sum()
                if abs(val2) > abs(maxval2):
                    maxval2 = val2
                    klm2 = [k, l, m]
                if abs(val3) > abs(maxval3):
                    maxval3 = val3
                    klm3 = [k, l, m]
        except ImportError:
            text = ("Import error at phono3c.tranpose_compact_fc3. "
                    "Corresponding python code is not implemented.")
            raise RuntimeError(text)
    text = "Max drift of %s: " % name
    text += "%f (%s%s%s) " % (maxval1,
                              "xyz"[klm1[0]], "xyz"[klm1[1]], "xyz"[klm1[2]])
    text += "%f (%s%s%s) " % (maxval2,
                              "xyz"[klm2[0]], "xyz"[klm2[1]], "xyz"[klm2[2]])
    text += "%f (%s%s%s)" % (maxval3,
                             "xyz"[klm3[0]], "xyz"[klm3[1]], "xyz"[klm3[2]])
    print(text)
def _set_permutation_symmetry_fc3_elem_with_cutoff(fc3, fc3_done, a, b, c):
sum_done = (fc3_done[a, b, c] +
fc3_done[c, a, b] +
fc3_done[b, c, a] +
fc3_done[b, a, c] +
fc3_done[c, b, a] +
fc3_done[a, c, b])
tensor3 = np.zeros((3, 3, 3), dtype='double')
if sum_done > 0:
for (i, j, k) in list(np.ndindex(3, 3, 3)):
tensor3[i, j, k] = (fc3[a, b, c, i, j, k] * fc3_done[a, b, c] +
fc3[c, a, b, k, i, j] * fc3_done[c, a, b] +
fc3[b, c, a, j, k, i] * fc3_done[b, c, a] +
fc3[a, c, b, i, k, j] * fc3_done[a, c, b] +
fc3[b, a, c, j, i, k] * fc3_done[b, a, c] +
fc3[c, b, a, k, j, i] * fc3_done[c, b, a])
tensor3[i, j, k] /= sum_done
return tensor3
def _get_fc3_least_atoms(supercell,
                         primitive,
                         disp_dataset,
                         fc2,
                         symmetry,
                         is_compact_fc=False,
                         verbose=True):
    """Solve fc3 slabs for the symmetry-independent displaced atoms only.

    For each unique first atom in the dataset, delta-fc2s are collected
    (either precomputed in the dataset or fitted from second-atom
    forces) and fc3[first_atom] is solved by solve_fc3.  Returns a full
    or compact-shaped fc3 with all other first-atom slabs still zero.
    """
    symprec = symmetry.get_symmetry_tolerance()
    num_satom = supercell.get_number_of_atoms()
    unique_first_atom_nums = np.unique(
        [x['number'] for x in disp_dataset['first_atoms']])
    if is_compact_fc:
        num_patom = primitive.get_number_of_atoms()
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        first_atom_nums = []
        for i in unique_first_atom_nums:
            # Compact fc3 requires displaced atoms to be primitive-cell
            # representatives in the supercell.
            if i != s2p_map[i]:
                print("Something wrong in disp_fc3.yaml")
                raise RuntimeError
            else:
                first_atom_nums.append(i)
        fc3 = np.zeros((num_patom, num_satom, num_satom, 3, 3, 3),
                       dtype='double', order='C')
    else:
        first_atom_nums = unique_first_atom_nums
        fc3 = np.zeros((num_satom, num_satom, num_satom, 3, 3, 3),
                       dtype='double', order='C')
    for first_atom_num in first_atom_nums:
        site_symmetry = symmetry.get_site_symmetry(first_atom_num)
        displacements_first = []
        delta_fc2s = []
        for dataset_first_atom in disp_dataset['first_atoms']:
            if first_atom_num != dataset_first_atom['number']:
                continue
            displacements_first.append(dataset_first_atom['displacement'])
            if 'delta_fc2' in dataset_first_atom:
                delta_fc2s.append(dataset_first_atom['delta_fc2'])
            else:
                # Site symmetry is reduced by the first displacement.
                direction = np.dot(dataset_first_atom['displacement'],
                                   np.linalg.inv(supercell.get_cell()))
                reduced_site_sym = get_reduced_site_symmetry(
                    site_symmetry, direction, symprec)
                delta_fc2s.append(get_delta_fc2(
                    dataset_first_atom['second_atoms'],
                    dataset_first_atom['number'],
                    fc2,
                    supercell,
                    reduced_site_sym,
                    symprec))
        fc3_first = solve_fc3(first_atom_num,
                              supercell,
                              site_symmetry,
                              displacements_first,
                              np.array(delta_fc2s, dtype='double', order='C'),
                              symprec,
                              verbose=verbose)
        if is_compact_fc:
            fc3[p2p_map[s2p_map[first_atom_num]]] = fc3_first
        else:
            fc3[first_atom_num] = fc3_first
    return fc3
def _get_rotated_fc2s(i, j, fc2s, rot_map_syms, site_sym_cart):
    """Collect fc2[map(i), map(j)] rotated by every site-symmetry
    operation for every delta-fc2, flattened to rows of 9 elements."""
    rotated_fc2s = []
    for fc2 in fc2s:
        for sym, map_sym in zip(site_sym_cart, rot_map_syms):
            fc2_rot = fc2[map_sym[i], map_sym[j]]
            rotated_fc2s.append(similarity_transformation(sym, fc2_rot))
    return np.reshape(rotated_fc2s, (-1, 9))
def _third_rank_tensor_rotation_elem(rot, tensor, l, m, n):
sum_elems = 0.
for i in (0, 1, 2):
for j in (0, 1, 2):
for k in (0, 1, 2):
sum_elems += (rot[l, i] * rot[m, j] * rot[n, k]
* tensor[i, j, k])
return sum_elems
def _get_fc3_done(supercell, disp_dataset, symmetry, array_shape):
    """Return a byte table marking which fc3[atom1, atom2] blocks are computed.

    When a cutoff distance is used, only some displacement pairs are
    measured.  This table records, for every symmetry image of each
    measured (atom1, atom2) pair, that its fc3 elements are available
    (1) or missing (0).

    Parameters
    ----------
    supercell : Supercell-like object (provides cell, positions, natom).
    disp_dataset : dict
        Displacement dataset with 'first_atoms' entries.
    symmetry : Symmetry object of the supercell.
    array_shape : tuple
        Leading shape of fc3 used to allocate the table.
    """
    # NOTE(review): original indentation was lost; loop nesting below is
    # reconstructed from the data flow — verify against upstream phono3py.
    num_atom = supercell.get_number_of_atoms()
    fc3_done = np.zeros(array_shape, dtype='byte')
    symprec = symmetry.get_symmetry_tolerance()
    lattice = supercell.get_cell().T
    positions = supercell.get_scaled_positions()
    rotations = symmetry.get_symmetry_operations()['rotations']
    translations = symmetry.get_symmetry_operations()['translations']

    # atom_mapping[sym_index][atom] = image of atom under that operation.
    atom_mapping = []
    for rot, trans in zip(rotations, translations):
        atom_indices = [
            _get_atom_by_symmetry(lattice,
                                  positions,
                                  rot,
                                  trans,
                                  i,
                                  symprec) for i in range(num_atom)]
        atom_mapping.append(atom_indices)

    for dataset_first_atom in disp_dataset['first_atoms']:
        first_atom_num = dataset_first_atom['number']
        site_symmetry = symmetry.get_site_symmetry(first_atom_num)
        direction = np.dot(dataset_first_atom['displacement'],
                           np.linalg.inv(supercell.get_cell()))
        # Site symmetry operations that also preserve the displacement.
        reduced_site_sym = get_reduced_site_symmetry(
            site_symmetry, direction, symprec)
        least_second_atom_nums = []
        for second_atoms in dataset_first_atom['second_atoms']:
            if 'included' in second_atoms:
                if second_atoms['included']:
                    least_second_atom_nums.append(second_atoms['number'])
            elif 'cutoff_distance' in disp_dataset:
                # Keep the pair only when its shortest periodic-image
                # distance is inside the cutoff.
                min_vec = get_equivalent_smallest_vectors(
                    first_atom_num,
                    second_atoms['number'],
                    supercell,
                    symprec)[0]
                min_distance = np.linalg.norm(np.dot(lattice, min_vec))
                if 'pair_distance' in second_atoms:
                    assert (abs(min_distance - second_atoms['pair_distance'])
                            < 1e-4)
                if min_distance < disp_dataset['cutoff_distance']:
                    least_second_atom_nums.append(second_atoms['number'])
        positions_shifted = positions - positions[first_atom_num]
        least_second_atom_nums = np.unique(least_second_atom_nums)
        for red_rot in reduced_site_sym:
            # Expand the measured second atoms by the reduced site symmetry.
            second_atom_nums = [
                _get_atom_by_symmetry(lattice,
                                      positions_shifted,
                                      red_rot,
                                      np.zeros(3, dtype='double'),
                                      i,
                                      symprec) for i in least_second_atom_nums]
            second_atom_nums = np.unique(second_atom_nums)
            for i in range(len(rotations)):
                rotated_atom1 = atom_mapping[i][first_atom_num]
                for j in second_atom_nums:
                    fc3_done[rotated_atom1, atom_mapping[i][j]] = 1
    return fc3_done
def _get_atom_by_symmetry(lattice,
positions,
rotation,
trans,
atom_number,
symprec):
rot_pos = np.dot(positions[atom_number], rotation.T) + trans
diffs = positions - rot_pos
diffs -= np.rint(diffs)
dists = np.sqrt((np.dot(diffs, lattice.T) ** 2).sum(axis=1))
rot_atoms = np.where(dists < symprec)[0] # only one should be found
if len(rot_atoms) > 0:
return rot_atoms[0]
else:
print("Position or symmetry is wrong.")
raise ValueError
|
atztogo/phono3py | phono3py/phonon3/fc3.py | get_constrained_fc2 | python | def get_constrained_fc2(supercell,
dataset_second_atoms,
atom1,
reduced_site_sym,
symprec):
lattice = supercell.get_cell().T
positions = supercell.get_scaled_positions()
num_atom = supercell.get_number_of_atoms()
fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
atom_list = np.unique([x['number'] for x in dataset_second_atoms])
for atom2 in atom_list:
disps2 = []
sets_of_forces = []
for disps_second in dataset_second_atoms:
if atom2 != disps_second['number']:
continue
bond_sym = get_bond_symmetry(
reduced_site_sym,
lattice,
positions,
atom1,
atom2,
symprec)
disps2.append(disps_second['displacement'])
sets_of_forces.append(disps_second['delta_forces'])
solve_force_constants(fc2,
atom2,
disps2,
sets_of_forces,
supercell,
bond_sym,
symprec)
# Shift positions according to set atom1 is at origin
pos_center = positions[atom1].copy()
positions -= pos_center
rotations = np.array(reduced_site_sym, dtype='intc', order='C')
translations = np.zeros((len(reduced_site_sym), 3),
dtype='double', order='C')
permutations = compute_all_sg_permutations(positions,
rotations,
translations,
lattice,
symprec)
distribute_force_constants(fc2,
atom_list,
lattice,
rotations,
permutations)
return fc2 | dataset_second_atoms: [{'number': 7,
'displacement': [],
'delta_forces': []}, ...] | train | https://github.com/atztogo/phono3py/blob/edfcf36cdc7c5392906a9df57d3ee0f3141404df/phono3py/phonon3/fc3.py#L290-L347 | [
"def get_bond_symmetry(site_symmetry,\n lattice,\n positions,\n atom_center,\n atom_disp,\n symprec=1e-5):\n \"\"\"\n Bond symmetry is the symmetry operations that keep the symmetry\n of the cell containing two fixed atoms.\n \"\"\"\n bond_sym = []\n pos = positions\n for rot in site_symmetry:\n rot_pos = (np.dot(pos[atom_disp] - pos[atom_center], rot.T) +\n pos[atom_center])\n diff = pos[atom_disp] - rot_pos\n diff -= np.rint(diff)\n dist = np.linalg.norm(np.dot(lattice, diff))\n if dist < symprec:\n bond_sym.append(rot)\n\n return np.array(bond_sym)\n"
] | import sys
import logging
import numpy as np
from phonopy.harmonic.force_constants import (get_fc2,
similarity_transformation,
distribute_force_constants,
solve_force_constants,
get_rotated_displacement,
get_positions_sent_by_rot_inv,
get_nsym_list_and_s2pp)
from phono3py.phonon3.displacement_fc3 import (get_reduced_site_symmetry,
get_bond_symmetry,
get_equivalent_smallest_vectors)
from phonopy.structure.cells import compute_all_sg_permutations
logger = logging.getLogger(__name__)
def get_fc3(supercell,
            primitive,
            disp_dataset,
            symmetry,
            is_compact_fc=False,
            verbose=False):
    """Compute second- and third-order force constants from displacements.

    Returns
    -------
    (fc2, fc3) : tuple of ndarray
        fc2 is reduced to the primitive-atom rows when
        ``is_compact_fc=True``; fc3 is built compact from the start.
    """
    # fc2 has to be full matrix to compute delta-fc2
    # p2s_map elements are extracted if is_compact_fc=True at the last part.
    fc2 = get_fc2(supercell, symmetry, disp_dataset)
    # fc3 for the symmetry-irreducible first atoms only.
    fc3 = _get_fc3_least_atoms(supercell,
                               primitive,
                               disp_dataset,
                               fc2,
                               symmetry,
                               is_compact_fc=is_compact_fc,
                               verbose=verbose)
    if verbose:
        print("Expanding fc3")

    first_disp_atoms = np.unique(
        [x['number'] for x in disp_dataset['first_atoms']])
    rotations = symmetry.get_symmetry_operations()['rotations']
    lattice = supercell.get_cell().T
    permutations = symmetry.get_atomic_permutations()

    if is_compact_fc:
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2s_map = primitive.get_primitive_to_supercell_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        s2compact = np.array([p2p_map[i] for i in s2p_map], dtype='intc')
        # Displaced atoms must belong to the primitive cell in compact mode.
        for i in first_disp_atoms:
            assert i in p2s_map
        target_atoms = [i for i in p2s_map if i not in first_disp_atoms]
    else:
        s2compact = np.arange(supercell.get_number_of_atoms(), dtype='intc')
        target_atoms = [i for i in s2compact if i not in first_disp_atoms]

    # Fill the remaining first-index entries by symmetry.
    distribute_fc3(fc3,
                   first_disp_atoms,
                   target_atoms,
                   lattice,
                   rotations,
                   permutations,
                   s2compact,
                   verbose=verbose)

    if 'cutoff_distance' in disp_dataset:
        if verbose:
            print("Cutting-off fc3 (cut-off distance: %f)" %
                  disp_dataset['cutoff_distance'])
        if is_compact_fc:
            print("cutoff_fc3 doesn't support compact-fc3 yet.")
            raise ValueError
        cutoff_fc3(fc3,
                   supercell,
                   disp_dataset,
                   symmetry,
                   verbose=verbose)
    if is_compact_fc:
        # Reduce the full fc2 to primitive-atom rows.
        p2s_map = primitive.get_primitive_to_supercell_map()
        fc2 = np.array(fc2[p2s_map], dtype='double', order='C')
    return fc2, fc3
def distribute_fc3(fc3,
                   first_disp_atoms,
                   target_atoms,
                   lattice,
                   rotations,
                   permutations,
                   s2compact,
                   verbose=False):
    """Distribute fc3

    fc3[i, :, :, 0:3, 0:3, 0:3] where i=indices done are distributed to
    symmetrically equivalent fc3 elements by tensor rotations.

    Search symmetry operation (R, t) that performs
        i_target -> i_done
    and
        atom_mapping[i_target] = i_done
        fc3[i_target, j_target, k_target] = R_inv[i_done, j, k]

    Parameters
    ----------
    target_atoms: list or ndarray
        Supercell atom indices to which fc3 are distributed.
    s2compact: ndarray
        Maps supercell index to compact index. For full-fc3,
        s2compact=np.arange(n_satom).
        shape=(n_satom,)
        dtype=intc

    """
    n_satom = fc3.shape[1]
    for i_target in target_atoms:
        # Find an operation whose atomic permutation maps i_target onto an
        # atom whose fc3 has already been computed.
        for i_done in first_disp_atoms:
            rot_indices = np.where(permutations[:, i_target] == i_done)[0]
            if len(rot_indices) > 0:
                atom_mapping = np.array(permutations[rot_indices[0]],
                                        dtype='intc')
                rot = rotations[rot_indices[0]]
                # Inverse Cartesian rotation (transpose of an orthogonal
                # matrix) used to rotate the tensor back.
                rot_cart_inv = np.array(
                    similarity_transformation(lattice, rot).T,
                    dtype='double', order='C')
                break

        if len(rot_indices) == 0:
            print("Position or symmetry may be wrong.")
            raise RuntimeError

        if verbose > 2:
            print(" [ %d, x, x ] to [ %d, x, x ]" %
                  (i_done + 1, i_target + 1))
            sys.stdout.flush()

        try:
            import phono3py._phono3py as phono3c
            phono3c.distribute_fc3(fc3,
                                   int(s2compact[i_target]),
                                   int(s2compact[i_done]),
                                   atom_mapping,
                                   rot_cart_inv)
        except ImportError:
            # Pure-Python fallback: rotate every (j, k) block individually.
            print("Phono3py C-routine is not compiled correctly.")
            for j in range(n_satom):
                j_rot = atom_mapping[j]
                for k in range(n_satom):
                    k_rot = atom_mapping[k]
                    fc3[i_target, j, k] = third_rank_tensor_rotation(
                        rot_cart_inv, fc3[i_done, j_rot, k_rot])
def set_permutation_symmetry_fc3(fc3):
    """Symmetrize fc3 under simultaneous atomic/Cartesian index permutation.

    Uses the phono3py C routine when available; otherwise falls back to a
    pure-Python loop over the irreducible triplets i <= j <= k.  ``fc3``
    is modified in place.
    """
    # NOTE(review): indentation was lost in this copy; the fallback loop is
    # placed inside the except block as in upstream phono3py — confirm.
    try:
        import phono3py._phono3py as phono3c
        phono3c.permutation_symmetry_fc3(fc3)
    except ImportError:
        print("Phono3py C-routine is not compiled correctly.")
        num_atom = fc3.shape[0]
        for i in range(num_atom):
            for j in range(i, num_atom):
                for k in range(j, num_atom):
                    fc3_elem = set_permutation_symmetry_fc3_elem(fc3, i, j, k)
                    copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, i, j, k)
def set_permutation_symmetry_compact_fc3(fc3, primitive):
    """Symmetrize a compact fc3 (primitive x supercell x supercell) in place.

    Requires the phono3py C extension; there is no pure-Python fallback
    for the compact storage layout.

    Raises
    ------
    RuntimeError
        If the C extension cannot be imported.
    """
    try:
        import phono3py._phono3py as phono3c
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2s_map = primitive.get_primitive_to_supercell_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        permutations = primitive.get_atomic_permutations()
        s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
                                                     p2p_map,
                                                     permutations)
        phono3c.permutation_symmetry_compact_fc3(fc3,
                                                 permutations,
                                                 s2pp_map,
                                                 p2s_map,
                                                 nsym_list)
    except ImportError:
        text = ("Import error at phono3c.permutation_symmetry_compact_fc3. "
                "Corresponding python code is not implemented.")
        raise RuntimeError(text)
def copy_permutation_symmetry_fc3_elem(fc3, fc3_elem, a, b, c):
    """Write ``fc3_elem`` into fc3 at all six permutations of (a, b, c).

    The Cartesian axes are permuted together with the atomic indices,
    i.e. fc3[perm(a, b, c)][perm(i, j, k)] = fc3_elem[i, j, k].
    ``fc3`` is modified in place.
    """
    for i in range(3):
        for j in range(3):
            for k in range(3):
                value = fc3_elem[i, j, k]
                fc3[a, b, c, i, j, k] = value
                fc3[c, a, b, k, i, j] = value
                fc3[b, c, a, j, k, i] = value
                fc3[a, c, b, i, k, j] = value
                fc3[b, a, c, j, i, k] = value
                fc3[c, b, a, k, j, i] = value
def set_permutation_symmetry_fc3_elem(fc3, a, b, c, divisor=6):
    """Return the 3x3x3 block averaged over the six permutations of (a, b, c).

    Atomic indices and Cartesian axes are permuted simultaneously and the
    six contributions are summed, then divided by ``divisor`` (6 gives the
    plain average).

    Returns
    -------
    ndarray
        Averaged tensor, shape (3, 3, 3), dtype 'double'.
    """
    # Vectorized over all 27 Cartesian components: each permuted atomic
    # triple contributes its correspondingly transposed 3x3x3 block.
    # Term order matches the original scalar loop.
    tensor3 = (fc3[a, b, c] +
               np.transpose(fc3[c, a, b], (1, 2, 0)) +
               np.transpose(fc3[b, c, a], (2, 0, 1)) +
               np.transpose(fc3[a, c, b], (0, 2, 1)) +
               np.transpose(fc3[b, a, c], (1, 0, 2)) +
               np.transpose(fc3[c, b, a], (2, 1, 0))) / divisor
    return np.array(tensor3, dtype='double')
def set_translational_invariance_fc3(fc3):
    """Enforce translational invariance on all three atomic indices of fc3.

    ``fc3`` is modified in place.
    """
    for axis in (0, 1, 2):
        set_translational_invariance_fc3_per_index(fc3, index=axis)
def set_translational_invariance_compact_fc3(fc3, primitive):
    """Enforce translational invariance on a compact fc3 in place.

    Axis 0 of a compact fc3 runs over primitive atoms only, so the sum
    rule for the first atomic index is applied by swapping dims 0 and 1
    with the C routine, applying the rule on axis 1, and swapping back.

    Raises
    ------
    RuntimeError
        If the phono3py C extension cannot be imported.
    """
    try:
        import phono3py._phono3py as phono3c
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2s_map = primitive.get_primitive_to_supercell_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        permutations = primitive.get_atomic_permutations()
        s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
                                                     p2p_map,
                                                     permutations)
        phono3c.transpose_compact_fc3(fc3,
                                      permutations,
                                      s2pp_map,
                                      p2s_map,
                                      nsym_list,
                                      0)  # dim[0] <--> dim[1]
        # With dims swapped, axis 1 stands for the original first index.
        set_translational_invariance_fc3_per_index(fc3, index=1)
        phono3c.transpose_compact_fc3(fc3,
                                      permutations,
                                      s2pp_map,
                                      p2s_map,
                                      nsym_list,
                                      0)  # dim[0] <--> dim[1]
        set_translational_invariance_fc3_per_index(fc3, index=1)
        set_translational_invariance_fc3_per_index(fc3, index=2)
    except ImportError:
        text = ("Import error at phono3c.tranpose_compact_fc3. "
                "Corresponding python code is not implemented.")
        raise RuntimeError(text)
def set_translational_invariance_fc3_per_index(fc3, index=0):
    """Enforce translational invariance of fc3 along one atomic index.

    For every combination of the other two atomic indices and the three
    Cartesian indices, the mean over axis ``index`` is subtracted so the
    sum over that axis becomes zero.  ``fc3`` is modified in place.

    Parameters
    ----------
    fc3 : ndarray
        Third-order force constants, shape (natom, natom, natom, 3, 3, 3)
        or a compact variant; modified in place.
    index : int
        Atomic axis (0, 1 or 2) along which the sum rule is enforced.
    """
    # Single vectorized operation replacing the original per-element
    # Python loop (each slice was visited exactly once, so subtracting
    # sum/shape along the axis is equivalent).
    fc3 -= fc3.sum(axis=index, keepdims=True) / fc3.shape[index]
def third_rank_tensor_rotation(rot_cart, tensor):
    """Rotate a third-rank tensor by a Cartesian rotation matrix.

    Parameters
    ----------
    rot_cart : ndarray
        Cartesian rotation matrix R, shape (3, 3).
    tensor : ndarray
        Third-rank tensor T, shape (3, 3, 3).

    Returns
    -------
    ndarray
        R_li R_mj R_nk T_ijk, shape (3, 3, 3), dtype 'double'.
    """
    # One einsum contraction replaces the 27-element Python loop over
    # individually rotated components.
    return np.array(np.einsum('li,mj,nk,ijk->lmn',
                              rot_cart, rot_cart, rot_cart, tensor),
                    dtype='double')
def get_delta_fc2(dataset_second_atoms,
                  atom1,
                  fc2,
                  supercell,
                  reduced_site_sym,
                  symprec):
    """Return the change of fc2 induced by displacing ``atom1``.

    The fc2 fitted from the second displacements (with atom1 held
    displaced) minus the reference ``fc2``.
    """
    logger.debug("get_delta_fc2")
    return get_constrained_fc2(supercell,
                               dataset_second_atoms,
                               atom1,
                               reduced_site_sym,
                               symprec) - fc2
def solve_fc3(first_atom_num,
              supercell,
              site_symmetry,
              displacements_first,
              delta_fc2s,
              symprec,
              pinv_solver="numpy",
              verbose=False):
    """Fit fc3[first_atom_num, :, :] from displacement/delta-fc2 pairs.

    The rotated displacement matrix is pseudo-inverted and applied to the
    correspondingly rotated delta-fc2 matrices.

    Returns
    -------
    ndarray
        fc3 block, shape (natom, natom, 3, 3, 3), dtype 'double'.
    """
    logger.debug("solve_fc3")
    # Choose the pseudo-inverse implementation; fall back to numpy when
    # the lapacke extension is unavailable.
    if pinv_solver == "numpy":
        solver = "numpy.linalg.pinv"
    else:
        try:
            import phono3py._lapackepy as lapackepy
            solver = "lapacke-dgesvd"
        except ImportError:
            print("Phono3py C-routine is not compiled correctly.")
            solver = "numpy.linalg.pinv"

    if verbose:
        text = ("Computing fc3[ %d, x, x ] using %s with " %
                (first_atom_num + 1, solver))
        if len(displacements_first) > 1:
            text += "displacements:"
        else:
            text += "a displacement:"
        print(text)
        for i, v in enumerate(displacements_first):
            print(" [%7.4f %7.4f %7.4f]" % tuple(v))
        sys.stdout.flush()
        if verbose > 2:
            print(" Site symmetry:")
            for i, v in enumerate(site_symmetry):
                print(" [%2d %2d %2d] #%2d" % tuple(list(v[0])+[i + 1]))
                print(" [%2d %2d %2d]" % tuple(v[1]))
                print(" [%2d %2d %2d]\n" % tuple(v[2]))
            sys.stdout.flush()

    lattice = supercell.get_cell().T
    # Site symmetry operations converted to Cartesian coordinates.
    site_sym_cart = np.array([similarity_transformation(lattice, sym)
                              for sym in site_symmetry],
                             dtype='double', order='C')
    num_atom = supercell.get_number_of_atoms()
    positions = supercell.get_scaled_positions()
    # Shift origin to the displaced atom.
    pos_center = positions[first_atom_num].copy()
    positions -= pos_center

    logger.debug("get_positions_sent_by_rot_inv")
    rot_map_syms = get_positions_sent_by_rot_inv(lattice,
                                                 positions,
                                                 site_symmetry,
                                                 symprec)
    rot_disps = get_rotated_displacement(displacements_first, site_sym_cart)

    logger.debug("pinv")
    if "numpy" in solver:
        inv_U = np.array(np.linalg.pinv(rot_disps), dtype='double', order='C')
    else:
        inv_U = np.zeros((rot_disps.shape[1], rot_disps.shape[0]),
                         dtype='double', order='C')
        lapackepy.pinv(inv_U, rot_disps, 1e-13)

    fc3 = np.zeros((num_atom, num_atom, 3, 3, 3), dtype='double', order='C')

    logger.debug("rotate_delta_fc2s")
    try:
        import phono3py._phono3py as phono3c
        phono3c.rotate_delta_fc2s(fc3,
                                  delta_fc2s,
                                  inv_U,
                                  site_sym_cart,
                                  rot_map_syms)
    except ImportError:
        # Pure-Python fallback: least-squares solve per atom pair.
        for i, j in np.ndindex(num_atom, num_atom):
            fc3[i, j] = np.dot(inv_U, _get_rotated_fc2s(
                i, j, delta_fc2s, rot_map_syms, site_sym_cart)
            ).reshape(3, 3, 3)
    return fc3
def cutoff_fc3(fc3,
               supercell,
               disp_dataset,
               symmetry,
               verbose=False):
    """Average fc3 over index permutations, weighting by computed pairs.

    Elements whose displacement pairs were excluded by the cutoff
    distance contribute nothing; the remaining symmetry-equivalent
    elements are averaged and copied to all six permutations.  ``fc3``
    is modified in place.
    """
    if verbose:
        print("Building atom mapping table...")
    fc3_done = _get_fc3_done(supercell, disp_dataset, symmetry, fc3.shape[:3])
    if verbose:
        print("Creating contracted fc3...")
    num_atom = supercell.get_number_of_atoms()
    # Loop over irreducible triplets i <= j <= k; the copy step fills the
    # other five permutations.
    for i in range(num_atom):
        for j in range(i, num_atom):
            for k in range(j, num_atom):
                ave_fc3 = _set_permutation_symmetry_fc3_elem_with_cutoff(
                    fc3, fc3_done, i, j, k)
                copy_permutation_symmetry_fc3_elem(fc3, ave_fc3, i, j, k)
def cutoff_fc3_by_zero(fc3, supercell, cutoff_distance, symprec=1e-5):
    """Zero fc3 blocks of atom triplets beyond the cutoff distance.

    A triplet (i, j, k) is zeroed when any of its three pair distances
    (shortest periodic image) exceeds ``cutoff_distance``.  ``fc3`` is
    modified in place.
    """
    num_atom = supercell.get_number_of_atoms()
    lattice = supercell.get_cell().T
    # Shortest periodic-image distance for every atom pair.
    min_distances = np.zeros((num_atom, num_atom), dtype='double')
    for i in range(num_atom):  # run in supercell
        for j in range(num_atom):  # run in primitive
            vec = get_equivalent_smallest_vectors(i, j, supercell, symprec)[0]
            min_distances[i, j] = np.linalg.norm(np.dot(lattice, vec))

    for i, j, k in np.ndindex(num_atom, num_atom, num_atom):
        if (min_distances[i, j] > cutoff_distance or
                min_distances[j, k] > cutoff_distance or
                min_distances[k, i] > cutoff_distance):
            fc3[i, j, k] = 0
def show_drift_fc3(fc3,
                   primitive=None,
                   name="fc3"):
    """Print the largest translational-invariance drift of fc3.

    For each atomic index, the maximum (by absolute value) of the sums
    over that index is reported together with the Cartesian component at
    which it occurs.  A square first/second dimension indicates a full
    fc3; otherwise the compact layout is handled via the C extension,
    and ``primitive`` must be given.
    """
    if fc3.shape[0] == fc3.shape[1]:
        # Full fc3: sums over each atomic axis are directly available.
        num_atom = fc3.shape[0]
        maxval1 = 0
        maxval2 = 0
        maxval3 = 0
        klm1 = [0, 0, 0]
        klm2 = [0, 0, 0]
        klm3 = [0, 0, 0]
        for i, j, k, l, m in list(np.ndindex((num_atom, num_atom, 3, 3, 3))):
            val1 = fc3[:, i, j, k, l, m].sum()
            val2 = fc3[i, :, j, k, l, m].sum()
            val3 = fc3[i, j, :, k, l, m].sum()
            if abs(val1) > abs(maxval1):
                maxval1 = val1
                klm1 = [k, l, m]
            if abs(val2) > abs(maxval2):
                maxval2 = val2
                klm2 = [k, l, m]
            if abs(val3) > abs(maxval3):
                maxval3 = val3
                klm3 = [k, l, m]
    else:
        try:
            import phono3py._phono3py as phono3c
            s2p_map = primitive.get_supercell_to_primitive_map()
            p2s_map = primitive.get_primitive_to_supercell_map()
            p2p_map = primitive.get_primitive_to_primitive_map()
            permutations = primitive.get_atomic_permutations()
            s2pp_map, nsym_list = get_nsym_list_and_s2pp(s2p_map,
                                                         p2p_map,
                                                         permutations)
            num_patom = fc3.shape[0]
            num_satom = fc3.shape[1]
            maxval1 = 0
            maxval2 = 0
            maxval3 = 0
            klm1 = [0, 0, 0]
            klm2 = [0, 0, 0]
            klm3 = [0, 0, 0]
            # Swap dims 0 and 1 so the sum over the first atomic index can
            # be taken along the supercell axis.
            phono3c.transpose_compact_fc3(fc3,
                                          permutations,
                                          s2pp_map,
                                          p2s_map,
                                          nsym_list,
                                          0)  # dim[0] <--> dim[1]
            for i, j, k, l, m in np.ndindex((num_patom, num_satom, 3, 3, 3)):
                val1 = fc3[i, :, j, k, l, m].sum()
                if abs(val1) > abs(maxval1):
                    maxval1 = val1
                    klm1 = [k, l, m]
            # Swap back before evaluating the other two axes.
            phono3c.transpose_compact_fc3(fc3,
                                          permutations,
                                          s2pp_map,
                                          p2s_map,
                                          nsym_list,
                                          0)  # dim[0] <--> dim[1]
            for i, j, k, l, m in np.ndindex((num_patom, num_satom, 3, 3, 3)):
                val2 = fc3[i, :, j, k, l, m].sum()
                val3 = fc3[i, j, :, k, l, m].sum()
                if abs(val2) > abs(maxval2):
                    maxval2 = val2
                    klm2 = [k, l, m]
                if abs(val3) > abs(maxval3):
                    maxval3 = val3
                    klm3 = [k, l, m]
        except ImportError:
            text = ("Import error at phono3c.tranpose_compact_fc3. "
                    "Corresponding python code is not implemented.")
            raise RuntimeError(text)

    text = "Max drift of %s: " % name
    text += "%f (%s%s%s) " % (maxval1,
                              "xyz"[klm1[0]], "xyz"[klm1[1]], "xyz"[klm1[2]])
    text += "%f (%s%s%s) " % (maxval2,
                              "xyz"[klm2[0]], "xyz"[klm2[1]], "xyz"[klm2[2]])
    text += "%f (%s%s%s)" % (maxval3,
                             "xyz"[klm3[0]], "xyz"[klm3[1]], "xyz"[klm3[2]])
    print(text)
def _set_permutation_symmetry_fc3_elem_with_cutoff(fc3, fc3_done, a, b, c):
sum_done = (fc3_done[a, b, c] +
fc3_done[c, a, b] +
fc3_done[b, c, a] +
fc3_done[b, a, c] +
fc3_done[c, b, a] +
fc3_done[a, c, b])
tensor3 = np.zeros((3, 3, 3), dtype='double')
if sum_done > 0:
for (i, j, k) in list(np.ndindex(3, 3, 3)):
tensor3[i, j, k] = (fc3[a, b, c, i, j, k] * fc3_done[a, b, c] +
fc3[c, a, b, k, i, j] * fc3_done[c, a, b] +
fc3[b, c, a, j, k, i] * fc3_done[b, c, a] +
fc3[a, c, b, i, k, j] * fc3_done[a, c, b] +
fc3[b, a, c, j, i, k] * fc3_done[b, a, c] +
fc3[c, b, a, k, j, i] * fc3_done[c, b, a])
tensor3[i, j, k] /= sum_done
return tensor3
def _get_fc3_least_atoms(supercell,
                         primitive,
                         disp_dataset,
                         fc2,
                         symmetry,
                         is_compact_fc=False,
                         verbose=True):
    """Compute fc3 rows for the symmetry-irreducible displaced atoms only.

    For each unique first displaced atom, the delta-fc2 matrices are
    gathered (or computed) and fc3[first_atom, :, :] is solved; the
    remaining rows are filled later by distribute_fc3().

    Returns
    -------
    ndarray
        fc3, shape (num_patom or num_satom, num_satom, num_satom, 3, 3, 3),
        with only the solved first-index rows populated.
    """
    symprec = symmetry.get_symmetry_tolerance()
    num_satom = supercell.get_number_of_atoms()
    unique_first_atom_nums = np.unique(
        [x['number'] for x in disp_dataset['first_atoms']])

    if is_compact_fc:
        num_patom = primitive.get_number_of_atoms()
        s2p_map = primitive.get_supercell_to_primitive_map()
        p2p_map = primitive.get_primitive_to_primitive_map()
        first_atom_nums = []
        # Displaced atoms must be primitive-cell representatives in
        # compact mode.
        for i in unique_first_atom_nums:
            if i != s2p_map[i]:
                print("Something wrong in disp_fc3.yaml")
                raise RuntimeError
            else:
                first_atom_nums.append(i)
        fc3 = np.zeros((num_patom, num_satom, num_satom, 3, 3, 3),
                       dtype='double', order='C')
    else:
        first_atom_nums = unique_first_atom_nums
        fc3 = np.zeros((num_satom, num_satom, num_satom, 3, 3, 3),
                       dtype='double', order='C')

    for first_atom_num in first_atom_nums:
        site_symmetry = symmetry.get_site_symmetry(first_atom_num)
        displacements_first = []
        delta_fc2s = []
        for dataset_first_atom in disp_dataset['first_atoms']:
            if first_atom_num != dataset_first_atom['number']:
                continue
            displacements_first.append(dataset_first_atom['displacement'])
            if 'delta_fc2' in dataset_first_atom:
                # Pre-computed delta-fc2 stored in the dataset.
                delta_fc2s.append(dataset_first_atom['delta_fc2'])
            else:
                # Fit delta-fc2 from the second-displacement forces.
                direction = np.dot(dataset_first_atom['displacement'],
                                   np.linalg.inv(supercell.get_cell()))
                reduced_site_sym = get_reduced_site_symmetry(
                    site_symmetry, direction, symprec)
                delta_fc2s.append(get_delta_fc2(
                    dataset_first_atom['second_atoms'],
                    dataset_first_atom['number'],
                    fc2,
                    supercell,
                    reduced_site_sym,
                    symprec))
        fc3_first = solve_fc3(first_atom_num,
                              supercell,
                              site_symmetry,
                              displacements_first,
                              np.array(delta_fc2s, dtype='double', order='C'),
                              symprec,
                              verbose=verbose)
        if is_compact_fc:
            fc3[p2p_map[s2p_map[first_atom_num]]] = fc3_first
        else:
            fc3[first_atom_num] = fc3_first
    return fc3
def _get_rotated_fc2s(i, j, fc2s, rot_map_syms, site_sym_cart):
    """Return rotated 3x3 fc2 blocks of atom pair (i, j), flattened to rows.

    For each fc2 in ``fc2s`` and each (Cartesian rotation, atom mapping)
    pair, the block of the mapped atom pair is rotated by a similarity
    transformation.  Result shape: (len(fc2s) * len(site_sym_cart), 9).
    """
    rotated_fc2s = []
    for fc2 in fc2s:
        for sym, map_sym in zip(site_sym_cart, rot_map_syms):
            fc2_rot = fc2[map_sym[i], map_sym[j]]
            rotated_fc2s.append(similarity_transformation(sym, fc2_rot))
    return np.reshape(rotated_fc2s, (-1, 9))
def _third_rank_tensor_rotation_elem(rot, tensor, l, m, n):
    """Return element (l, m, n) of ``tensor`` rotated by ``rot``.

    Computes sum_ijk rot[l, i] * rot[m, j] * rot[n, k] * tensor[i, j, k].
    """
    sum_elems = 0.
    for i in (0, 1, 2):
        for j in (0, 1, 2):
            for k in (0, 1, 2):
                sum_elems += (rot[l, i] * rot[m, j] * rot[n, k]
                              * tensor[i, j, k])
    return sum_elems
def _get_fc3_done(supercell, disp_dataset, symmetry, array_shape):
    """Return a byte table marking which fc3[atom1, atom2] blocks are computed.

    See cutoff_fc3(): with a cutoff distance, only some displacement
    pairs are measured; each entry records whether the block for a
    symmetry image of a measured pair is available (1) or missing (0).
    """
    # NOTE(review): original indentation was lost; loop nesting below is
    # reconstructed from the data flow — verify against upstream phono3py.
    num_atom = supercell.get_number_of_atoms()
    fc3_done = np.zeros(array_shape, dtype='byte')
    symprec = symmetry.get_symmetry_tolerance()
    lattice = supercell.get_cell().T
    positions = supercell.get_scaled_positions()
    rotations = symmetry.get_symmetry_operations()['rotations']
    translations = symmetry.get_symmetry_operations()['translations']

    # atom_mapping[sym_index][atom] = image of atom under that operation.
    atom_mapping = []
    for rot, trans in zip(rotations, translations):
        atom_indices = [
            _get_atom_by_symmetry(lattice,
                                  positions,
                                  rot,
                                  trans,
                                  i,
                                  symprec) for i in range(num_atom)]
        atom_mapping.append(atom_indices)

    for dataset_first_atom in disp_dataset['first_atoms']:
        first_atom_num = dataset_first_atom['number']
        site_symmetry = symmetry.get_site_symmetry(first_atom_num)
        direction = np.dot(dataset_first_atom['displacement'],
                           np.linalg.inv(supercell.get_cell()))
        reduced_site_sym = get_reduced_site_symmetry(
            site_symmetry, direction, symprec)
        least_second_atom_nums = []
        for second_atoms in dataset_first_atom['second_atoms']:
            if 'included' in second_atoms:
                if second_atoms['included']:
                    least_second_atom_nums.append(second_atoms['number'])
            elif 'cutoff_distance' in disp_dataset:
                min_vec = get_equivalent_smallest_vectors(
                    first_atom_num,
                    second_atoms['number'],
                    supercell,
                    symprec)[0]
                min_distance = np.linalg.norm(np.dot(lattice, min_vec))
                if 'pair_distance' in second_atoms:
                    assert (abs(min_distance - second_atoms['pair_distance'])
                            < 1e-4)
                if min_distance < disp_dataset['cutoff_distance']:
                    least_second_atom_nums.append(second_atoms['number'])
        positions_shifted = positions - positions[first_atom_num]
        least_second_atom_nums = np.unique(least_second_atom_nums)
        for red_rot in reduced_site_sym:
            second_atom_nums = [
                _get_atom_by_symmetry(lattice,
                                      positions_shifted,
                                      red_rot,
                                      np.zeros(3, dtype='double'),
                                      i,
                                      symprec) for i in least_second_atom_nums]
            second_atom_nums = np.unique(second_atom_nums)
            for i in range(len(rotations)):
                rotated_atom1 = atom_mapping[i][first_atom_num]
                for j in second_atom_nums:
                    fc3_done[rotated_atom1, atom_mapping[i][j]] = 1
    return fc3_done
def _get_atom_by_symmetry(lattice,
                          positions,
                          rotation,
                          trans,
                          atom_number,
                          symprec):
    """Return the atom index onto which (rotation, trans) maps atom_number.

    Positions are fractional; distances are measured in Cartesian
    coordinates after wrapping into the unit cell, and the first atom
    closer than ``symprec`` is returned.

    Raises
    ------
    ValueError
        When no atom matches within ``symprec``.
    """
    rot_pos = np.dot(positions[atom_number], rotation.T) + trans
    diffs = positions - rot_pos
    # Wrap fractional differences into [-0.5, 0.5) to respect periodicity.
    diffs -= np.rint(diffs)
    dists = np.sqrt((np.dot(diffs, lattice.T) ** 2).sum(axis=1))
    rot_atoms = np.where(dists < symprec)[0]  # only one should be found
    if len(rot_atoms) > 0:
        return rot_atoms[0]
    else:
        print("Position or symmetry is wrong.")
        raise ValueError
|
skoczen/will | will/plugins/friendly/talk_back.py | TalkBackPlugin.talk_back | python | def talk_back(self, message):
quote = self.get_quote()
if quote:
self.reply("Actually, she said things like this: \n%s" % quote) | that's what she said: Tells you some things she actually said. :) | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/friendly/talk_back.py#L35-L39 | [
"def reply(self, event, content=None, message=None, package_for_scheduling=False, **kwargs):\n message = self.get_message(message)\n\n if \"channel\" in kwargs:\n logging.error(\n \"I was just asked to talk to %(channel)s, but I can't use channel using .reply() - \"\n \"it's just for replying to the person who talked to me. Please use .say() instead.\" % kwargs\n )\n return\n if \"service\" in kwargs:\n logging.error(\n \"I was just asked to talk to %(service)s, but I can't use a service using .reply() - \"\n \"it's just for replying to the person who talked to me. Please use .say() instead.\" % kwargs\n )\n return\n if \"room\" in kwargs:\n logging.error(\n \"I was just asked to talk to %(room)s, but I can't use room using .reply() - \"\n \"it's just for replying to the person who talked to me. Please use .say() instead.\" % kwargs\n )\n return\n\n # Be really smart about what we're getting back.\n if (\n (\n (event and hasattr(event, \"will_internal_type\") and event.will_internal_type == \"Message\")\n or (event and hasattr(event, \"will_internal_type\") and event.will_internal_type == \"Event\")\n ) and type(content) == type(\"words\")\n ):\n # \"1.x world - user passed a message and a string. 
Keep rolling.\"\n pass\n elif (\n (\n (content and hasattr(content, \"will_internal_type\") and content.will_internal_type == \"Message\")\n or (content and hasattr(content, \"will_internal_type\") and content.will_internal_type == \"Event\")\n ) and type(event) == type(\"words\")\n ):\n # \"User passed the string and message object backwards, and we're in a 1.x world\"\n temp_content = content\n content = event\n event = temp_content\n del temp_content\n elif (\n type(event) == type(\"words\")\n and not content\n ):\n # \"We're in the Will 2.0 automagic event finding.\"\n content = event\n event = self.message\n\n else:\n # \"No magic needed.\"\n pass\n\n # Be smart about backend.\n if hasattr(event, \"data\"):\n message = event.data\n elif hasattr(self, \"message\") and hasattr(self.message, \"data\"):\n message = self.message.data\n\n backend = self.get_backend(message)\n if backend:\n e = Event(\n type=\"reply\",\n content=content,\n topic=\"message.outgoing.%s\" % backend,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return e\n else:\n self.publish(\"message.outgoing.%s\" % backend, e)\n",
"def get_quote(self):\n quote = None\n response = requests.get(self.QUOTES_URL)\n if response.status_code == 200:\n try:\n quote_obj = response.json()['results'][0]\n quote = u'%s ~ %s' % (quote_obj['text'], quote_obj['author'])\n except ValueError:\n raise Exception(\n \"Response from '%s' could not be decoded as JSON:\\n%s\"\n % (self.QUOTES_URL, response)\n )\n except KeyError as e:\n raise Exception(\n \"Response from '%s' did not contain field: %s\\n%s\"\n % (self.QUOTES_URL, e, response)\n )\n\n else:\n raise Exception(\n \"Got an error from '%s': %s\\n%s\"\n % (self.QUOTES_URL, response.status_code, response)\n )\n return quote\n"
] | class TalkBackPlugin(WillPlugin):
QUOTES_URL = "https://underquoted.herokuapp.com/api/v2/quotations/?random=true&limit=1"
def get_quote(self):
quote = None
response = requests.get(self.QUOTES_URL)
if response.status_code == 200:
try:
quote_obj = response.json()['results'][0]
quote = u'%s ~ %s' % (quote_obj['text'], quote_obj['author'])
except ValueError:
raise Exception(
"Response from '%s' could not be decoded as JSON:\n%s"
% (self.QUOTES_URL, response)
)
except KeyError as e:
raise Exception(
"Response from '%s' did not contain field: %s\n%s"
% (self.QUOTES_URL, e, response)
)
else:
raise Exception(
"Got an error from '%s': %s\n%s"
% (self.QUOTES_URL, response.status_code, response)
)
return quote
@hear("that'?s what she said")
|
skoczen/will | will/settings.py | auto_key | python | def auto_key():
import uuid
import time
import random
import hashlib
node = uuid.getnode()
h = hashlib.md5()
h.update(str("%s" % node).encode('utf-8'))
key1 = h.hexdigest()
time.sleep(random.uniform(0, 0.5))
node = uuid.getnode()
h = hashlib.md5()
h.update(str("%s" % node).encode('utf-8'))
key2 = h.hexdigest()
time.sleep(random.uniform(0, 0.5))
node = uuid.getnode()
h = hashlib.md5()
h.update(str("%s" % node).encode('utf-8'))
key3 = h.hexdigest()
if key1 == key2 and key2 == key3:
return key1
return False | This method attempts to auto-generate a unique cryptographic key based on the hardware ID.
It should *NOT* be used in production, or to replace a proper key, but it can help get will
running in local and test environments more easily. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/settings.py#L9-L41 | null | import os
import sys
from will.utils import show_valid, warn, note, error
from clint.textui import puts, indent
from six.moves.urllib import parse
from six.moves import input
def import_settings(quiet=True):
"""This method takes care of importing settings from the environment, and config.py file.
Order of operations:
1. Imports all WILL_ settings from the environment, and strips off the WILL_
2. Imports settings from config.py
3. Sets defaults for any missing, required settings.
This method takes a quiet kwarg, that when False, prints helpful output. Called that way during bootstrapping.
"""
settings = {}
# Import from environment, handle environment-specific parsing.
for k, v in os.environ.items():
if k[:5] == "WILL_":
k = k[5:]
settings[k] = v
if "HIPCHAT_ROOMS" in settings and type(settings["HIPCHAT_ROOMS"]) is type("tes"):
settings["HIPCHAT_ROOMS"] = settings["HIPCHAT_ROOMS"].split(";")
if "ROOMS" in settings:
settings["ROOMS"] = settings["ROOMS"].split(";")
if "PLUGINS" in settings:
settings["PLUGINS"] = settings["PLUGINS"].split(";")
if 'PLUGIN_BLACKLIST' in settings:
settings["PLUGIN_BLACKLIST"] = (settings["PLUGIN_BLACKLIST"].split(";")
if settings["PLUGIN_BLACKLIST"] else [])
# If HIPCHAT_SERVER is set, we need to change the USERNAME slightly
# for XMPP to work.
if "HIPCHAT_SERVER" in settings:
settings["USERNAME"] = "{user}@{host}".\
format(user=settings["USERNAME"].split("@")[0],
host=settings["HIPCHAT_SERVER"])
else:
settings["HIPCHAT_SERVER"] = "api.hipchat.com"
# Import from config
if not quiet:
puts("Importing config.py... ")
with indent(2):
try:
had_warning = False
try:
import config
except ImportError:
# Missing config.py. Check for config.py.dist
if os.path.isfile("config.py.dist"):
confirm = input(
"Hi, looks like you're just starting up!\nI didn't find a config.py, but I do see config.py.dist here. Want me to use that? (y/n) "
).lower()
if confirm in ["y", "yes"]:
print("Great! One moment.\n\n")
os.rename("config.py.dist", "config.py")
import config
else:
print("Ok. I can't start without one though. Quitting now!")
sys.exit(1)
else:
error("I'm missing my config.py file. Usually one comes with the installation - maybe it got lost?")
sys.exit(1)
for k, v in config.__dict__.items():
# Ignore private variables
if "__" not in k:
if k in os.environ and v != os.environ[k] and not quiet:
warn("%s is set in the environment as '%s', but overridden in"
" config.py as '%s'." % (k, os.environ[k], v))
had_warning = True
settings[k] = v
if not had_warning and not quiet:
show_valid("Valid.")
except:
# TODO: Check to see if there's a config.py.dist
if not quiet:
warn("no config.py found. This might be ok, but more likely, "
"you haven't copied config.py.dist over to config.py")
if not quiet:
puts("Verifying settings... ")
with indent(2):
# Deprecation and backwards-compatibility for Will 1.x-> 2.x
DEPRECATED_BUT_MAPPED_SETTINGS = {
"USERNAME": "HIPCHAT_USERNAME",
"PASSWORD": "HIPCHAT_PASSWORD",
"V1_TOKEN": "HIPCHAT_V1_TOKEN",
"V2_TOKEN": "HIPCHAT_V2_TOKEN",
"TOKEN": "HIPCHAT_V1_TOKEN",
"ROOMS": "HIPCHAT_ROOMS",
"NAME": "HIPCHAT_NAME",
"HANDLE": "HIPCHAT_HANDLE",
"DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
"SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
}
deprecation_warn_shown = False
for k, v in DEPRECATED_BUT_MAPPED_SETTINGS.items():
if not v in settings and k in settings:
if not deprecation_warn_shown and not quiet:
error("Deprecated settings. The following settings will stop working in Will 2.2:")
deprecation_warn_shown = True
if not quiet:
warn("Please update %s to %s. " % (k, v))
settings[v] = settings[k]
del settings[k]
# Migrate from 1.x
if "CHAT_BACKENDS" in settings and "IO_BACKENDS" not in settings:
IO_BACKENDS = []
for c in settings["CHAT_BACKENDS"]:
IO_BACKENDS.append("will.backends.io_adapters.%s" % c)
settings["IO_BACKENDS"] = IO_BACKENDS
if not quiet:
warn(
"Deprecated settings. Please update your config.py from:"
"\n CHAT_BACKENDS = %s\n to\n IO_BACKENDS = %s" %
(settings["CHAT_BACKENDS"], IO_BACKENDS)
)
if "CHAT_BACKENDS" not in settings and "IO_BACKENDS" not in settings:
if not quiet:
warn("""Deprecated settings. No backend found, so we're defaulting to hipchat and shell only.
Please add this to your config.py:
IO_BACKENDS = "
"will.backends.io_adapters.hipchat",
"will.backends.io_adapters.shell",
# "will.backends.io_adapters.slack",
# "will.backends.io_adapters.rocketchat",
]
""")
settings["IO_BACKENDS"] = [
"will.backends.io_adapters.hipchat",
"will.backends.io_adapters.shell",
]
if "ANALYZE_BACKENDS" not in settings:
if not quiet:
note("No ANALYZE_BACKENDS specified. Defaulting to history only.")
settings["ANALYZE_BACKENDS"] = [
"will.backends.analysis.nothing",
"will.backends.analysis.history",
]
if "GENERATION_BACKENDS" not in settings:
if not quiet:
note("No GENERATION_BACKENDS specified. Defaulting to fuzzy_all_matches and strict_regex.")
settings["GENERATION_BACKENDS"] = [
"will.backends.generation.fuzzy_all_matches",
"will.backends.generation.strict_regex",
]
if "EXECUTION_BACKENDS" not in settings:
if not quiet:
note("No EXECUTION_BACKENDS specified. Defaulting to best_score.")
settings["EXECUTION_BACKENDS"] = [
"will.backends.execution.best_score",
]
# Set for hipchat
for b in settings["IO_BACKENDS"]:
if "hipchat" in b:
if "ALLOW_INSECURE_HIPCHAT_SERVER" in settings \
and (
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] is True
or settings["ALLOW_INSECURE_HIPCHAT_SERVER"].lower() == "true"
):
warn("You are choosing to run will with SSL disabled. "
"This is INSECURE and should NEVER be deployed outside a development environment.")
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = True
settings["REQUESTS_OPTIONS"] = {
"verify": False,
}
else:
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = False
if "HIPCHAT_ROOMS" not in settings:
if not quiet:
warn("no HIPCHAT_ROOMS list found in the environment or config. "
"This is ok - Will will just join all available HIPCHAT_rooms.")
settings["HIPCHAT_ROOMS"] = None
if (
"HIPCHAT_DEFAULT_ROOM" not in settings and "HIPCHAT_ROOMS" in settings
and settings["HIPCHAT_ROOMS"] and len(settings["HIPCHAT_ROOMS"]) > 0
):
if not quiet:
warn("no HIPCHAT_DEFAULT_ROOM found in the environment or config. "
"Defaulting to '%s', the first one." % settings["HIPCHAT_ROOMS"][0])
settings["HIPCHAT_DEFAULT_ROOM"] = settings["HIPCHAT_ROOMS"][0]
if "HIPCHAT_HANDLE" in settings and "HIPCHAT_HANDLE_NOTED" not in settings:
if not quiet:
note(
"""HIPCHAT_HANDLE is no longer required (or used), as Will knows how to get\n
his current handle from the HipChat servers."""
)
settings["HIPCHAT_HANDLE_NOTED"] = True
if "HIPCHAT_NAME" in settings and "HIPCHAT_NAME_NOTED" not in settings:
if not quiet:
note(
"""HIPCHAT_NAME is no longer required (or used), as Will knows how to get\n
his current name from the HipChat servers."""
)
settings["HIPCHAT_NAME_NOTED"] = True
# Rocket.chat
for b in settings["IO_BACKENDS"]:
if "rocketchat" in b:
if "ROCKETCHAT_USERNAME" in settings and "ROCKETCHAT_EMAIL" not in settings:
settings["ROCKETCHAT_EMAIL"] = settings["ROCKETCHAT_USERNAME"]
if "ROCKETCHAT_URL" in settings:
if settings["ROCKETCHAT_URL"].endswith("/"):
settings["ROCKETCHAT_URL"] = settings["ROCKETCHAT_URL"][:-1]
if (
"DEFAULT_BACKEND" not in settings and "IO_BACKENDS" in settings
and settings["IO_BACKENDS"] and len(settings["IO_BACKENDS"]) > 0
):
if not quiet:
note("no DEFAULT_BACKEND found in the environment or config.\n "
" Defaulting to '%s', the first one." % settings["IO_BACKENDS"][0])
settings["DEFAULT_BACKEND"] = settings["IO_BACKENDS"][0]
for b in settings["IO_BACKENDS"]:
if "slack" in b and "SLACK_DEFAULT_CHANNEL" not in settings and not quiet:
warn(
"No SLACK_DEFAULT_CHANNEL set - any messages sent without an explicit channel will go "
"to a non-deterministic channel that will has access to "
"- this is almost certainly not what you want."
)
if "HTTPSERVER_PORT" not in settings:
# For heroku
if "PORT" in os.environ:
settings["HTTPSERVER_PORT"] = os.environ["PORT"]
else:
if not quiet:
warn("no HTTPSERVER_PORT found in the environment or config. Defaulting to ':80'.")
settings["HTTPSERVER_PORT"] = "80"
if "STORAGE_BACKEND" not in settings:
if not quiet:
warn("No STORAGE_BACKEND specified. Defaulting to redis.")
settings["STORAGE_BACKEND"] = "redis"
if "PUBSUB_BACKEND" not in settings:
if not quiet:
warn("No PUBSUB_BACKEND specified. Defaulting to redis.")
settings["PUBSUB_BACKEND"] = "redis"
if settings["STORAGE_BACKEND"] == "redis" or settings["PUBSUB_BACKEND"] == "redis":
if "REDIS_URL" not in settings:
# For heroku
if "REDIS_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDIS_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using Heroku Redis or another standard REDIS_URL. If so, all good.")
if "REDISCLOUD_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDISCLOUD_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using RedisCloud. If so, all good.")
elif "REDISTOGO_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDISTOGO_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using RedisToGo. If so, all good.")
elif "OPENREDIS_URL" in os.environ:
settings["REDIS_URL"] = os.environ["OPENREDIS_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using OpenRedis. If so, all good.")
else:
settings["REDIS_URL"] = "redis://localhost:6379/7"
if not quiet:
note("WILL_REDIS_URL not set. Defaulting to redis://localhost:6379/7.")
if not settings["REDIS_URL"].startswith("redis://"):
settings["REDIS_URL"] = "redis://%s" % settings["REDIS_URL"]
if "REDIS_MAX_CONNECTIONS" not in settings or not settings["REDIS_MAX_CONNECTIONS"]:
settings["REDIS_MAX_CONNECTIONS"] = 4
if not quiet:
note("REDIS_MAX_CONNECTIONS not set. Defaulting to 4.")
if settings["STORAGE_BACKEND"] == "file":
if "FILE_DIR" not in settings:
settings["FILE_DIR"] = "~/.will/"
if not quiet:
note("FILE_DIR not set. Defaulting to ~/.will/")
if settings["STORAGE_BACKEND"] == "couchbase":
if "COUCHBASE_URL" not in settings:
settings["COUCHBASE_URL"] = "couchbase:///will"
if not quiet:
note("COUCHBASE_URL not set. Defaulting to couchbase:///will")
if "PUBLIC_URL" not in settings:
default_public = "http://localhost:%s" % settings["HTTPSERVER_PORT"]
settings["PUBLIC_URL"] = default_public
if not quiet:
note("no PUBLIC_URL found in the environment or config.\n Defaulting to '%s'." % default_public)
if not "REQUESTS_OPTIONS" in settings:
settings["REQUESTS_OPTIONS"] = {}
if "TEMPLATE_DIRS" not in settings:
if "WILL_TEMPLATE_DIRS_PICKLED" in os.environ:
# All good
pass
else:
settings["TEMPLATE_DIRS"] = []
if "WILL_HANDLE" not in settings:
if "HANDLE" in settings:
settings["WILL_HANDLE"] = settings["HANDLE"]
elif "SLACK_HANDLE" in settings:
settings["WILL_HANDLE"] = settings["SLACK_HANDLE"]
elif "HIPCHAT_HANDLE" in settings:
settings["WILL_HANDLE"] = settings["HIPCHAT_HANDLE"]
elif "ROCKETCHAT_HANDLE" in settings:
settings["WILL_HANDLE"] = settings["ROCKETCHAT_HANDLE"]
else:
settings["WILL_HANDLE"] = "will"
if "ADMINS" not in settings:
settings["ADMINS"] = "*"
else:
if "WILL_ADMINS" in os.environ:
settings["ADMINS"] = [a.strip().lower() for a in settings.get('ADMINS', '').split(';') if a.strip()]
if "ADMINS" in settings and settings["ADMINS"] != "*":
warn("ADMINS is now deprecated, and will be removed at the end of 2017. Please use ACL instead. See below for details")
note("Change your config.py to:\n ACL = {\n 'admins': %s\n }" % settings["ADMINS"])
if "DISABLE_ACL" not in settings:
settings["DISABLE_ACL"] = False
if "PROXY_URL" in settings:
parsed_proxy_url = parse.urlparse(settings["PROXY_URL"])
settings["USE_PROXY"] = True
settings["PROXY_HOSTNAME"] = parsed_proxy_url.hostname
settings["PROXY_USERNAME"] = parsed_proxy_url.username
settings["PROXY_PASSWORD"] = parsed_proxy_url.password
settings["PROXY_PORT"] = parsed_proxy_url.port
else:
settings["USE_PROXY"] = False
if "EVENT_LOOP_INTERVAL" not in settings:
settings["EVENT_LOOP_INTERVAL"] = 0.025
if "LOGLEVEL" not in settings:
settings["LOGLEVEL"] = "ERROR"
if "ENABLE_INTERNAL_ENCRYPTION" not in settings:
settings["ENABLE_INTERNAL_ENCRYPTION"] = True
if "SECRET_KEY" not in settings:
if not quiet:
if "ENABLE_INTERNAL_ENCRYPTION" in settings and settings["ENABLE_INTERNAL_ENCRYPTION"]:
key = auto_key()
if key:
warn(
"""No SECRET_KEY specified and ENABLE_INTERNAL_ENCRYPTION is on.\n
Temporarily auto-generating a key specific to this computer:\n {}\n
Please set WILL_SECRET_KEY in the environment as soon as possible to ensure \n
Will is able to access information from previous runs.""".format(key)
)
else:
error(
"""ENABLE_INTERNAL_ENCRYPTION is turned on, but a SECRET_KEY has not been given.\n
We tried to automatically generate temporary SECRET_KEY, but this appears to be a \n"
shared or virtualized environment.\n Please set a unique secret key in the
environment as WILL_SECRET_KEY to run will."""
)
print(" Unable to start will without a SECRET_KEY while encryption is turned on. Shutting down.")
sys.exit(1)
settings["SECRET_KEY"] = key
os.environ["WILL_SECRET_KEY"] = settings["SECRET_KEY"]
os.environ["WILL_EPHEMERAL_SECRET_KEY"] = "True"
if "FUZZY_MINIMUM_MATCH_CONFIDENCE" not in settings:
settings["FUZZY_MINIMUM_MATCH_CONFIDENCE"] = 91
if "FUZZY_REGEX_ALLOWABLE_ERRORS" not in settings:
settings["FUZZY_REGEX_ALLOWABLE_ERRORS"] = 3
# Set them in the module namespace
for k in sorted(settings, key=lambda x: x[0]):
if not quiet:
show_valid(k)
globals()[k] = settings[k]
import_settings()
|
skoczen/will | will/settings.py | import_settings | python | def import_settings(quiet=True):
settings = {}
# Import from environment, handle environment-specific parsing.
for k, v in os.environ.items():
if k[:5] == "WILL_":
k = k[5:]
settings[k] = v
if "HIPCHAT_ROOMS" in settings and type(settings["HIPCHAT_ROOMS"]) is type("tes"):
settings["HIPCHAT_ROOMS"] = settings["HIPCHAT_ROOMS"].split(";")
if "ROOMS" in settings:
settings["ROOMS"] = settings["ROOMS"].split(";")
if "PLUGINS" in settings:
settings["PLUGINS"] = settings["PLUGINS"].split(";")
if 'PLUGIN_BLACKLIST' in settings:
settings["PLUGIN_BLACKLIST"] = (settings["PLUGIN_BLACKLIST"].split(";")
if settings["PLUGIN_BLACKLIST"] else [])
# If HIPCHAT_SERVER is set, we need to change the USERNAME slightly
# for XMPP to work.
if "HIPCHAT_SERVER" in settings:
settings["USERNAME"] = "{user}@{host}".\
format(user=settings["USERNAME"].split("@")[0],
host=settings["HIPCHAT_SERVER"])
else:
settings["HIPCHAT_SERVER"] = "api.hipchat.com"
# Import from config
if not quiet:
puts("Importing config.py... ")
with indent(2):
try:
had_warning = False
try:
import config
except ImportError:
# Missing config.py. Check for config.py.dist
if os.path.isfile("config.py.dist"):
confirm = input(
"Hi, looks like you're just starting up!\nI didn't find a config.py, but I do see config.py.dist here. Want me to use that? (y/n) "
).lower()
if confirm in ["y", "yes"]:
print("Great! One moment.\n\n")
os.rename("config.py.dist", "config.py")
import config
else:
print("Ok. I can't start without one though. Quitting now!")
sys.exit(1)
else:
error("I'm missing my config.py file. Usually one comes with the installation - maybe it got lost?")
sys.exit(1)
for k, v in config.__dict__.items():
# Ignore private variables
if "__" not in k:
if k in os.environ and v != os.environ[k] and not quiet:
warn("%s is set in the environment as '%s', but overridden in"
" config.py as '%s'." % (k, os.environ[k], v))
had_warning = True
settings[k] = v
if not had_warning and not quiet:
show_valid("Valid.")
except:
# TODO: Check to see if there's a config.py.dist
if not quiet:
warn("no config.py found. This might be ok, but more likely, "
"you haven't copied config.py.dist over to config.py")
if not quiet:
puts("Verifying settings... ")
with indent(2):
# Deprecation and backwards-compatibility for Will 1.x-> 2.x
DEPRECATED_BUT_MAPPED_SETTINGS = {
"USERNAME": "HIPCHAT_USERNAME",
"PASSWORD": "HIPCHAT_PASSWORD",
"V1_TOKEN": "HIPCHAT_V1_TOKEN",
"V2_TOKEN": "HIPCHAT_V2_TOKEN",
"TOKEN": "HIPCHAT_V1_TOKEN",
"ROOMS": "HIPCHAT_ROOMS",
"NAME": "HIPCHAT_NAME",
"HANDLE": "HIPCHAT_HANDLE",
"DEFAULT_ROOM": "HIPCHAT_DEFAULT_ROOM",
"SLACK_DEFAULT_ROOM": "SLACK_DEFAULT_CHANNEL",
}
deprecation_warn_shown = False
for k, v in DEPRECATED_BUT_MAPPED_SETTINGS.items():
if not v in settings and k in settings:
if not deprecation_warn_shown and not quiet:
error("Deprecated settings. The following settings will stop working in Will 2.2:")
deprecation_warn_shown = True
if not quiet:
warn("Please update %s to %s. " % (k, v))
settings[v] = settings[k]
del settings[k]
# Migrate from 1.x
if "CHAT_BACKENDS" in settings and "IO_BACKENDS" not in settings:
IO_BACKENDS = []
for c in settings["CHAT_BACKENDS"]:
IO_BACKENDS.append("will.backends.io_adapters.%s" % c)
settings["IO_BACKENDS"] = IO_BACKENDS
if not quiet:
warn(
"Deprecated settings. Please update your config.py from:"
"\n CHAT_BACKENDS = %s\n to\n IO_BACKENDS = %s" %
(settings["CHAT_BACKENDS"], IO_BACKENDS)
)
if "CHAT_BACKENDS" not in settings and "IO_BACKENDS" not in settings:
if not quiet:
warn("""Deprecated settings. No backend found, so we're defaulting to hipchat and shell only.
Please add this to your config.py:
IO_BACKENDS = "
"will.backends.io_adapters.hipchat",
"will.backends.io_adapters.shell",
# "will.backends.io_adapters.slack",
# "will.backends.io_adapters.rocketchat",
]
""")
settings["IO_BACKENDS"] = [
"will.backends.io_adapters.hipchat",
"will.backends.io_adapters.shell",
]
if "ANALYZE_BACKENDS" not in settings:
if not quiet:
note("No ANALYZE_BACKENDS specified. Defaulting to history only.")
settings["ANALYZE_BACKENDS"] = [
"will.backends.analysis.nothing",
"will.backends.analysis.history",
]
if "GENERATION_BACKENDS" not in settings:
if not quiet:
note("No GENERATION_BACKENDS specified. Defaulting to fuzzy_all_matches and strict_regex.")
settings["GENERATION_BACKENDS"] = [
"will.backends.generation.fuzzy_all_matches",
"will.backends.generation.strict_regex",
]
if "EXECUTION_BACKENDS" not in settings:
if not quiet:
note("No EXECUTION_BACKENDS specified. Defaulting to best_score.")
settings["EXECUTION_BACKENDS"] = [
"will.backends.execution.best_score",
]
# Set for hipchat
for b in settings["IO_BACKENDS"]:
if "hipchat" in b:
if "ALLOW_INSECURE_HIPCHAT_SERVER" in settings \
and (
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] is True
or settings["ALLOW_INSECURE_HIPCHAT_SERVER"].lower() == "true"
):
warn("You are choosing to run will with SSL disabled. "
"This is INSECURE and should NEVER be deployed outside a development environment.")
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = True
settings["REQUESTS_OPTIONS"] = {
"verify": False,
}
else:
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = False
if "HIPCHAT_ROOMS" not in settings:
if not quiet:
warn("no HIPCHAT_ROOMS list found in the environment or config. "
"This is ok - Will will just join all available HIPCHAT_rooms.")
settings["HIPCHAT_ROOMS"] = None
if (
"HIPCHAT_DEFAULT_ROOM" not in settings and "HIPCHAT_ROOMS" in settings
and settings["HIPCHAT_ROOMS"] and len(settings["HIPCHAT_ROOMS"]) > 0
):
if not quiet:
warn("no HIPCHAT_DEFAULT_ROOM found in the environment or config. "
"Defaulting to '%s', the first one." % settings["HIPCHAT_ROOMS"][0])
settings["HIPCHAT_DEFAULT_ROOM"] = settings["HIPCHAT_ROOMS"][0]
if "HIPCHAT_HANDLE" in settings and "HIPCHAT_HANDLE_NOTED" not in settings:
if not quiet:
note(
"""HIPCHAT_HANDLE is no longer required (or used), as Will knows how to get\n
his current handle from the HipChat servers."""
)
settings["HIPCHAT_HANDLE_NOTED"] = True
if "HIPCHAT_NAME" in settings and "HIPCHAT_NAME_NOTED" not in settings:
if not quiet:
note(
"""HIPCHAT_NAME is no longer required (or used), as Will knows how to get\n
his current name from the HipChat servers."""
)
settings["HIPCHAT_NAME_NOTED"] = True
# Rocket.chat
for b in settings["IO_BACKENDS"]:
if "rocketchat" in b:
if "ROCKETCHAT_USERNAME" in settings and "ROCKETCHAT_EMAIL" not in settings:
settings["ROCKETCHAT_EMAIL"] = settings["ROCKETCHAT_USERNAME"]
if "ROCKETCHAT_URL" in settings:
if settings["ROCKETCHAT_URL"].endswith("/"):
settings["ROCKETCHAT_URL"] = settings["ROCKETCHAT_URL"][:-1]
if (
"DEFAULT_BACKEND" not in settings and "IO_BACKENDS" in settings
and settings["IO_BACKENDS"] and len(settings["IO_BACKENDS"]) > 0
):
if not quiet:
note("no DEFAULT_BACKEND found in the environment or config.\n "
" Defaulting to '%s', the first one." % settings["IO_BACKENDS"][0])
settings["DEFAULT_BACKEND"] = settings["IO_BACKENDS"][0]
for b in settings["IO_BACKENDS"]:
if "slack" in b and "SLACK_DEFAULT_CHANNEL" not in settings and not quiet:
warn(
"No SLACK_DEFAULT_CHANNEL set - any messages sent without an explicit channel will go "
"to a non-deterministic channel that will has access to "
"- this is almost certainly not what you want."
)
if "HTTPSERVER_PORT" not in settings:
# For heroku
if "PORT" in os.environ:
settings["HTTPSERVER_PORT"] = os.environ["PORT"]
else:
if not quiet:
warn("no HTTPSERVER_PORT found in the environment or config. Defaulting to ':80'.")
settings["HTTPSERVER_PORT"] = "80"
if "STORAGE_BACKEND" not in settings:
if not quiet:
warn("No STORAGE_BACKEND specified. Defaulting to redis.")
settings["STORAGE_BACKEND"] = "redis"
if "PUBSUB_BACKEND" not in settings:
if not quiet:
warn("No PUBSUB_BACKEND specified. Defaulting to redis.")
settings["PUBSUB_BACKEND"] = "redis"
if settings["STORAGE_BACKEND"] == "redis" or settings["PUBSUB_BACKEND"] == "redis":
if "REDIS_URL" not in settings:
# For heroku
if "REDIS_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDIS_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using Heroku Redis or another standard REDIS_URL. If so, all good.")
if "REDISCLOUD_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDISCLOUD_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using RedisCloud. If so, all good.")
elif "REDISTOGO_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDISTOGO_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using RedisToGo. If so, all good.")
elif "OPENREDIS_URL" in os.environ:
settings["REDIS_URL"] = os.environ["OPENREDIS_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using OpenRedis. If so, all good.")
else:
settings["REDIS_URL"] = "redis://localhost:6379/7"
if not quiet:
note("WILL_REDIS_URL not set. Defaulting to redis://localhost:6379/7.")
if not settings["REDIS_URL"].startswith("redis://"):
settings["REDIS_URL"] = "redis://%s" % settings["REDIS_URL"]
if "REDIS_MAX_CONNECTIONS" not in settings or not settings["REDIS_MAX_CONNECTIONS"]:
settings["REDIS_MAX_CONNECTIONS"] = 4
if not quiet:
note("REDIS_MAX_CONNECTIONS not set. Defaulting to 4.")
if settings["STORAGE_BACKEND"] == "file":
if "FILE_DIR" not in settings:
settings["FILE_DIR"] = "~/.will/"
if not quiet:
note("FILE_DIR not set. Defaulting to ~/.will/")
if settings["STORAGE_BACKEND"] == "couchbase":
if "COUCHBASE_URL" not in settings:
settings["COUCHBASE_URL"] = "couchbase:///will"
if not quiet:
note("COUCHBASE_URL not set. Defaulting to couchbase:///will")
if "PUBLIC_URL" not in settings:
default_public = "http://localhost:%s" % settings["HTTPSERVER_PORT"]
settings["PUBLIC_URL"] = default_public
if not quiet:
note("no PUBLIC_URL found in the environment or config.\n Defaulting to '%s'." % default_public)
if not "REQUESTS_OPTIONS" in settings:
settings["REQUESTS_OPTIONS"] = {}
if "TEMPLATE_DIRS" not in settings:
if "WILL_TEMPLATE_DIRS_PICKLED" in os.environ:
# All good
pass
else:
settings["TEMPLATE_DIRS"] = []
if "WILL_HANDLE" not in settings:
if "HANDLE" in settings:
settings["WILL_HANDLE"] = settings["HANDLE"]
elif "SLACK_HANDLE" in settings:
settings["WILL_HANDLE"] = settings["SLACK_HANDLE"]
elif "HIPCHAT_HANDLE" in settings:
settings["WILL_HANDLE"] = settings["HIPCHAT_HANDLE"]
elif "ROCKETCHAT_HANDLE" in settings:
settings["WILL_HANDLE"] = settings["ROCKETCHAT_HANDLE"]
else:
settings["WILL_HANDLE"] = "will"
if "ADMINS" not in settings:
settings["ADMINS"] = "*"
else:
if "WILL_ADMINS" in os.environ:
settings["ADMINS"] = [a.strip().lower() for a in settings.get('ADMINS', '').split(';') if a.strip()]
if "ADMINS" in settings and settings["ADMINS"] != "*":
warn("ADMINS is now deprecated, and will be removed at the end of 2017. Please use ACL instead. See below for details")
note("Change your config.py to:\n ACL = {\n 'admins': %s\n }" % settings["ADMINS"])
if "DISABLE_ACL" not in settings:
settings["DISABLE_ACL"] = False
if "PROXY_URL" in settings:
parsed_proxy_url = parse.urlparse(settings["PROXY_URL"])
settings["USE_PROXY"] = True
settings["PROXY_HOSTNAME"] = parsed_proxy_url.hostname
settings["PROXY_USERNAME"] = parsed_proxy_url.username
settings["PROXY_PASSWORD"] = parsed_proxy_url.password
settings["PROXY_PORT"] = parsed_proxy_url.port
else:
settings["USE_PROXY"] = False
if "EVENT_LOOP_INTERVAL" not in settings:
settings["EVENT_LOOP_INTERVAL"] = 0.025
if "LOGLEVEL" not in settings:
settings["LOGLEVEL"] = "ERROR"
if "ENABLE_INTERNAL_ENCRYPTION" not in settings:
settings["ENABLE_INTERNAL_ENCRYPTION"] = True
if "SECRET_KEY" not in settings:
if not quiet:
if "ENABLE_INTERNAL_ENCRYPTION" in settings and settings["ENABLE_INTERNAL_ENCRYPTION"]:
key = auto_key()
if key:
warn(
"""No SECRET_KEY specified and ENABLE_INTERNAL_ENCRYPTION is on.\n
Temporarily auto-generating a key specific to this computer:\n {}\n
Please set WILL_SECRET_KEY in the environment as soon as possible to ensure \n
Will is able to access information from previous runs.""".format(key)
)
else:
error(
"""ENABLE_INTERNAL_ENCRYPTION is turned on, but a SECRET_KEY has not been given.\n
We tried to automatically generate temporary SECRET_KEY, but this appears to be a \n"
shared or virtualized environment.\n Please set a unique secret key in the
environment as WILL_SECRET_KEY to run will."""
)
print(" Unable to start will without a SECRET_KEY while encryption is turned on. Shutting down.")
sys.exit(1)
settings["SECRET_KEY"] = key
os.environ["WILL_SECRET_KEY"] = settings["SECRET_KEY"]
os.environ["WILL_EPHEMERAL_SECRET_KEY"] = "True"
if "FUZZY_MINIMUM_MATCH_CONFIDENCE" not in settings:
settings["FUZZY_MINIMUM_MATCH_CONFIDENCE"] = 91
if "FUZZY_REGEX_ALLOWABLE_ERRORS" not in settings:
settings["FUZZY_REGEX_ALLOWABLE_ERRORS"] = 3
# Set them in the module namespace
for k in sorted(settings, key=lambda x: x[0]):
if not quiet:
show_valid(k)
globals()[k] = settings[k] | This method takes care of importing settings from the environment, and config.py file.
Order of operations:
1. Imports all WILL_ settings from the environment, and strips off the WILL_
2. Imports settings from config.py
3. Sets defaults for any missing, required settings.
This method takes a quiet kwarg, that when False, prints helpful output. Called that way during bootstrapping. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/settings.py#L44-L436 | [
"def warn(warn_string):\n puts(colored.yellow(\"! Warning: %s\" % warn_string))\n",
"def error(err_string):\n puts(colored.red(\"ERROR: %s\" % err_string))\n",
"def show_valid(valid_str):\n puts(colored.green(u\"✓ %s\" % valid_str))\n",
"def note(warn_string):\n puts(colored.cyan(\"- Note: %s\" % warn_string))\n",
"def auto_key():\n \"\"\"This method attempts to auto-generate a unique cryptographic key based on the hardware ID.\n It should *NOT* be used in production, or to replace a proper key, but it can help get will\n running in local and test environments more easily.\"\"\"\n import uuid\n import time\n import random\n import hashlib\n\n node = uuid.getnode()\n\n h = hashlib.md5()\n h.update(str(\"%s\" % node).encode('utf-8'))\n key1 = h.hexdigest()\n\n time.sleep(random.uniform(0, 0.5))\n node = uuid.getnode()\n\n h = hashlib.md5()\n h.update(str(\"%s\" % node).encode('utf-8'))\n key2 = h.hexdigest()\n\n time.sleep(random.uniform(0, 0.5))\n node = uuid.getnode()\n\n h = hashlib.md5()\n h.update(str(\"%s\" % node).encode('utf-8'))\n key3 = h.hexdigest()\n\n if key1 == key2 and key2 == key3:\n return key1\n\n return False\n"
] | import os
import sys
from will.utils import show_valid, warn, note, error
from clint.textui import puts, indent
from six.moves.urllib import parse
from six.moves import input
def auto_key():
"""This method attempts to auto-generate a unique cryptographic key based on the hardware ID.
It should *NOT* be used in production, or to replace a proper key, but it can help get will
running in local and test environments more easily."""
import uuid
import time
import random
import hashlib
node = uuid.getnode()
h = hashlib.md5()
h.update(str("%s" % node).encode('utf-8'))
key1 = h.hexdigest()
time.sleep(random.uniform(0, 0.5))
node = uuid.getnode()
h = hashlib.md5()
h.update(str("%s" % node).encode('utf-8'))
key2 = h.hexdigest()
time.sleep(random.uniform(0, 0.5))
node = uuid.getnode()
h = hashlib.md5()
h.update(str("%s" % node).encode('utf-8'))
key3 = h.hexdigest()
if key1 == key2 and key2 == key3:
return key1
return False
import_settings()
|
skoczen/will | will/scripts/generate_will_project.py | main | python | def main():
print_head()
puts("Welcome to the will project generator.")
puts("")
if args.config_dist_only:
print("Generating config.py.dist...")
else:
print("\nGenerating will scaffold...")
current_dir = os.getcwd()
plugins_dir = os.path.join(current_dir, "plugins")
templates_dir = os.path.join(current_dir, "templates")
if not args.config_dist_only:
print(" /plugins")
# Set up the directories
if not os.path.exists(plugins_dir):
os.makedirs(plugins_dir)
print(" __init__.py")
# Create the plugins __init__.py
with open(os.path.join(plugins_dir, "__init__.py"), 'w+') as f:
pass
print(" morning.py")
# Create the morning plugin
morning_file_path = os.path.join(plugins_dir, "morning.py")
if not os.path.exists(morning_file_path):
with open(morning_file_path, 'w+') as f:
f.write("""from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class MorningPlugin(WillPlugin):
@respond_to("^good morning")
def good_morning(self, message):
self.reply("oh, g'morning!")
""")
print(" /templates")
if not os.path.exists(templates_dir):
os.makedirs(templates_dir)
print(" blank.html")
# Create the plugins __init__.py
with open(os.path.join(templates_dir, "blank.html"), 'w+') as f:
pass
print(" .gitignore")
# Create .gitignore, or at least add shelf.db
gitignore_path = os.path.join(current_dir, ".gitignore")
if not os.path.exists(gitignore_path):
with open(gitignore_path, 'w+') as f:
f.write("""*.py[cod]
pip-log.txt
shelf.db
""")
else:
append_ignore = False
with open(gitignore_path, "r+") as f:
if "shelf.db" not in f.read():
append_ignore = True
if append_ignore:
with open(gitignore_path, "a") as f:
f.write("\nshelf.db\n")
# Create run_will.py
print(" run_will.py")
run_will_path = os.path.join(current_dir, "run_will.py")
if not os.path.exists(run_will_path):
with open(run_will_path, 'w+') as f:
f.write("""#!/usr/bin/env python
from will.main import WillBot
if __name__ == '__main__':
bot = WillBot()
bot.bootstrap()
""")
# And make it executable
st = os.stat('run_will.py')
os.chmod("run_will.py", st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
# Create config.py
print(" config.py.dist")
config_path = os.path.join(current_dir, "config.py.dist")
if not os.path.exists(config_path) or ask_user("! config.py.dist exists. Overwrite it?"):
with open(os.path.join(PROJECT_ROOT, "config.py.dist"), "r") as source_f:
source = source_f.read()
if args.backends:
for backend in SERVICE_BACKENDS:
if backend in args.backends:
_enable_service(backend, source)
else:
__disable_service(backend, source)
else:
# Ask user thru cmd line what backends to enable
print("\nWill supports a few different service backends. Let's set up the ones you want:\n")
source = enable_disable_service("Slack", source)
source = enable_disable_service("HipChat", source)
source = enable_disable_service("Rocket.Chat", source)
source = enable_disable_service("Shell", source)
with open(config_path, "w+") as f:
config = source
f.write(config)
if not args.config_dist_only:
print(" requirements.txt")
# Create requirements.txt
requirements_path = os.path.join(current_dir, "requirements.txt")
if not os.path.exists(requirements_path) or ask_user("! requirements.txt exists. Overwrite it?"):
with open(requirements_path, 'w+') as f:
f.write(requirements_txt)
print(" Procfile")
# Create Procfile
requirements_path = os.path.join(current_dir, "Procfile")
if not os.path.exists(requirements_path):
with open(requirements_path, 'w+') as f:
f.write("web: python run_will.py")
print(" README.md")
# Create the readme
readme_path = os.path.join(current_dir, "README.md")
if not os.path.exists(readme_path):
with open(readme_path, 'w+') as f:
f.write("""
This is our bot, a [will](https://github.com/skoczen/will) bot.
""")
print("\nDone.")
print("\n Your will is now ready to go. Run ./run_will.py to get started!")
else:
print("\nCreated a config.py.dist. Open it up to see what's new!\n") | Creates the following structure:
/plugins
__init__.py
hello.py
/templates
blank.html
.gitignore
run_will.py
requirements.txt
Procfile
README.md | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/scripts/generate_will_project.py#L70-L222 | [
"def print_head():\n puts(r\"\"\"\n ___/-\\___\n ___|_________|___\n | |\n |--O---O--|\n | |\n | |\n | \\___/ |\n |_________|\n\n Will: Hi!\n\"\"\")\n",
"def ask_user(question):\n response = \"?\"\n while response not in [\"y\", \"n\"]:\n response = input(\"%s [y/n] \" % question)\n if response not in [\"y\", \"n\"]:\n print(\"Please enter 'y' or 'n'.\")\n return response.startswith(\"y\")\n",
"def _enable_service(service_name, source):\n global requirements_txt\n source = source.replace('# \"will.backends.io_adapters.%s\"' % cleaned(service_name),\n '\"will.backends.io_adapters.%s\"' % cleaned(service_name))\n req_path = os.path.join(os.path.join(PROJECT_ROOT, \"..\", \"requirements\"), \"%s.txt\" % cleaned(service_name))\n print(req_path)\n if os.path.exists(req_path):\n with open(req_path, 'r') as f:\n requirements_txt = \"%s\\n# %s\\n%s\" % (requirements_txt, service_name, f.read())\n return source\n",
"def __disable_service(service_name, source):\n return source.replace('\"will.backends.io_adapters.%s\"' % cleaned(service_name),\n '\"# will.backends.io_adapters.%s\"' % cleaned(service_name))\n",
"def enable_disable_service(service_name, source):\n if ask_user(\" Do you want to enable %s support?\" % (service_name)):\n return _enable_service(service_name, source)\n else:\n return __disable_service(service_name, source)\n"
] | #!/usr/bin/env python
import argparse
import os
import stat
import sys
from six.moves import input
from clint.textui import puts
from will.utils import print_head
SERVICE_BACKENDS = ('Slack', 'HipChat', 'Rocket.chat', 'Shell')
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__)))
sys.path.append(PROJECT_ROOT)
sys.path.append(os.getcwd())
parser = argparse.ArgumentParser()
parser.add_argument(
'--config-dist-only',
action='store_true',
help='Only output a config.py.dist.'
)
parser.add_argument('--backends', nargs='+',
choices=SERVICE_BACKENDS,
help='Choose service backends to support.')
args = parser.parse_args()
requirements_txt = "will\n"
class EmptyObj(object):
pass
def cleaned(service_name):
return service_name.lower().replace(".", ''),
def ask_user(question):
response = "?"
while response not in ["y", "n"]:
response = input("%s [y/n] " % question)
if response not in ["y", "n"]:
print("Please enter 'y' or 'n'.")
return response.startswith("y")
def _enable_service(service_name, source):
global requirements_txt
source = source.replace('# "will.backends.io_adapters.%s"' % cleaned(service_name),
'"will.backends.io_adapters.%s"' % cleaned(service_name))
req_path = os.path.join(os.path.join(PROJECT_ROOT, "..", "requirements"), "%s.txt" % cleaned(service_name))
print(req_path)
if os.path.exists(req_path):
with open(req_path, 'r') as f:
requirements_txt = "%s\n# %s\n%s" % (requirements_txt, service_name, f.read())
return source
def __disable_service(service_name, source):
return source.replace('"will.backends.io_adapters.%s"' % cleaned(service_name),
'"# will.backends.io_adapters.%s"' % cleaned(service_name))
def enable_disable_service(service_name, source):
if ask_user(" Do you want to enable %s support?" % (service_name)):
return _enable_service(service_name, source)
else:
return __disable_service(service_name, source)
if __name__ == '__main__':
main()
|
skoczen/will | will/plugins/productivity/images.py | ImagesPlugin.image_me | python | def image_me(self, message, search_query):
if (
getattr(settings, "GOOGLE_API_KEY", False)
and getattr(settings, "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", False)
):
self.say(
"Sorry, I'm missing my GOOGLE_API_KEY and GOOGLE_CUSTOM_SEARCH_ENGINE_ID."
" Can someone give them to me?", color="red"
)
# https://developers.google.com/custom-search/json-api/v1/reference/cse/list?hl=en
data = {
"q": search_query,
"key": settings.GOOGLE_API_KEY,
"cx": settings.GOOGLE_CUSTOM_SEARCH_ENGINE_ID,
"safe": "medium",
"num": 8,
"searchType": "image",
}
r = requests.get("https://www.googleapis.com/customsearch/v1", params=data)
r.raise_for_status()
try:
response = r.json()
results = [result["link"] for result in response["items"] if "items" in r.json()]
except TypeError:
results = []
else:
# Fall back to a really ugly hack.
logging.warn(
"Hey, I'm using a pretty ugly hack to get those images, and it might break. "
"Please set my GOOGLE_API_KEY and GOOGLE_CUSTOM_SEARCH_ENGINE_ID when you have a chance."
)
r = requests.get("https://www.google.com/search?tbm=isch&safe=active&q=%s" % search_query)
results = []
content = r.content.decode("utf-8")
index = content.find("<img")
while index != -1:
src_start = content.find('src=', index)
src_end = content.find(" ", src_start)
match = content[src_start+5: src_end-1]
index = content.find("<img", src_end)
results.append(match)
if results:
url = random.choice(results)
self.say("%s" % url, message=message)
else:
self.say("Couldn't find anything!", message=message) | image me ___ : Search google images for ___, and post a random one. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/productivity/images.py#L12-L60 | [
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class ImagesPlugin(WillPlugin):
@respond_to("image me (?P<search_query>.*)$")
@respond_to("gif me (?P<search_query>.*$)")
def gif_me(self, message, search_query):
if (
getattr(settings, "GOOGLE_API_KEY", False)
and getattr(settings, "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", False)
):
self.say(
"Sorry, I'm missing my GOOGLE_API_KEY and GOOGLE_CUSTOM_SEARCH_ENGINE_ID."
" Can someone give them to me?", color="red"
)
# https://developers.google.com/custom-search/json-api/v1/reference/cse/list?hl=en
data = {
"q": search_query,
"key": settings.GOOGLE_API_KEY,
"cx": settings.GOOGLE_CUSTOM_SEARCH_ENGINE_ID,
"safe": "medium",
"num": 8,
"searchType": "image",
"imgType": "animated",
}
r = requests.get("https://www.googleapis.com/customsearch/v1", params=data)
r.raise_for_status()
try:
response = r.json()
results = [result["link"] for result in response["items"] if "items" in r.json()]
except TypeError:
results = []
else:
# Fall back to a really ugly hack.
logging.warn(
"Hey, I'm using a pretty ugly hack to get those images, and it might break. "
"Please set my GOOGLE_API_KEY and GOOGLE_CUSTOM_SEARCH_ENGINE_ID when you have a chance."
)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}
r = requests.get("https://www.google.com/search?tbm=isch&tbs=itp:animated&safe=active&q=%s" % search_query, headers=headers)
results = []
content = r.content.decode("utf-8")
index = content.find('"ou":')
while index != -1:
src_start = content.find('"ou":', index)
src_end = content.find('","', src_start)
match = content[src_start+6: src_end]
index = content.find('"ou":', src_end)
results.append(match)
if results:
url = random.choice(results)
self.say("%s" % url, message=message)
else:
self.say("Couldn't find anything!", message=message)
|
skoczen/will | will/plugins/productivity/bitly.py | BitlyPlugin.say_bitly_short_url | python | def say_bitly_short_url(self, message, long_url=None):
try:
import bitly_api # pip install bitly_api
except ImportError:
raise ImportError(
"Can't load BitlyPlugin, since the bitly_api python module isn't installed.\n"
"To install it, run:\n"
" pip install bitly_api"
)
# use oauth2 endpoints
c = bitly_api.Connection(access_token=settings.BITLY_ACCESS_TOKEN)
response = c.shorten(uri=long_url)
short_url = response['url']
self.say("Shorten URL: %s" % short_url, message=message) | bitly ___: Shorten long_url using bitly service. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/productivity/bitly.py#L12-L28 | [
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class BitlyPlugin(WillPlugin):
@require_settings("BITLY_ACCESS_TOKEN",)
@respond_to("^bitly (?P<long_url>.*)$")
|
skoczen/will | will/plugins/productivity/remind.py | RemindPlugin.remind_me_at | python | def remind_me_at(self, message, reminder_text=None, remind_time=None, to_string=""):
parsed_time = self.parse_natural_time(remind_time)
natural_datetime = self.to_natural_day_and_time(parsed_time)
if to_string:
formatted_to_string = to_string
else:
formatted_to_string = ""
formatted_reminder_text = "%(mention_handle)s, you asked me to remind you%(to_string)s %(reminder_text)s" % {
"mention_handle": message.sender.mention_handle,
"from_handle": message.sender.handle,
"reminder_text": reminder_text,
"to_string": formatted_to_string,
}
self.schedule_say(formatted_reminder_text, parsed_time, message=message, notify=True)
self.say("%(reminder_text)s %(natural_datetime)s. Got it." % locals(), message=message) | remind me to ___ at ___: Set a reminder for a thing, at a time. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/productivity/remind.py#L8-L23 | [
"def parse_natural_time(self, time_str):\n cal = pdt.Calendar()\n time_tuple = cal.parse(time_str)[0][:-2]\n\n return datetime.datetime(*time_tuple)\n",
"def to_natural_day_and_time(self, dt, with_timezone=False):\n if dt.minute == 0:\n if with_timezone:\n time_str = \"%s %s\" % (dt.strftime(\"%I%p\").lower(), time.tzname[0])\n else:\n time_str = dt.strftime(\"%I%p\").lower()\n else:\n if with_timezone:\n time_str = \"%s %s\" % (dt.strftime(\"%I:%M%p\").lower(), time.tzname[0])\n else:\n time_str = dt.strftime(\"%I:%M%p\").lower()\n\n full_str = \"%s at %s\" % (self.to_natural_day(dt), time_str)\n return self.strip_leading_zeros(full_str)\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n",
"def schedule_say(self, content, when, message=None, room=None, channel=None, service=None, *args, **kwargs):\n if channel:\n room = channel\n elif room:\n channel = room\n\n if \"content\" in kwargs:\n if content:\n del kwargs[\"content\"]\n else:\n content = kwargs[\"content\"]\n\n topic, packaged_event = self.say(\n content, message=message, channel=channel,\n service=service, package_for_scheduling=True, *args, **kwargs\n )\n self.add_outgoing_event_to_schedule(when, {\n \"type\": \"message\",\n \"topic\": topic,\n \"event\": packaged_event,\n })\n"
] | class RemindPlugin(WillPlugin):
@respond_to(r"(?:can |will you )?remind me(?P<to_string> to)? (?P<reminder_text>.*?) (at|on|in) (?P<remind_time>.*)?\??")
@respond_to(r"(?:can|will you )?remind (?P<reminder_recipient>(?!me).*?)(?P<to_string> to>) ?(?P<reminder_text>.*?) (at|on|in) (?P<remind_time>.*)?\??")
def remind_somebody_at(self, message, reminder_recipient=None, reminder_text=None, remind_time=None, to_string=""):
"""remind ___ to ___ at ___: Set a reminder for a thing, at a time for somebody else."""
parsed_time = self.parse_natural_time(remind_time)
natural_datetime = self.to_natural_day_and_time(parsed_time)
if to_string:
formatted_to_string = to_string
else:
formatted_to_string = ""
formatted_reminder_text = \
"%(reminder_recipient)s, %(from_handle)s asked me to remind you%(to_string)s %(reminder_text)s" % {
"reminder_recipient": reminder_recipient,
"from_handle": message.sender.mention_handle,
"reminder_text": reminder_text,
"to_string": formatted_to_string,
}
self.schedule_say(formatted_reminder_text, parsed_time, message=message, notify=True)
self.say("%(reminder_text)s %(natural_datetime)s. Got it." % locals(), message=message)
|
skoczen/will | will/backends/pubsub/base.py | PubSubPrivateBase.publish | python | def publish(self, topic, obj, reference_message=None):
logging.debug("Publishing topic (%s): \n%s" % (topic, obj))
e = Event(
data=obj,
type=topic,
)
if hasattr(obj, "sender"):
e.sender = obj.sender
if reference_message:
original_incoming_event_hash = None
if hasattr(reference_message, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.original_incoming_event_hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "hash"):
original_incoming_event_hash = reference_message.source.hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.source.original_incoming_event_hash
elif hasattr(reference_message, "hash"):
original_incoming_event_hash = reference_message.hash
if original_incoming_event_hash:
e.original_incoming_event_hash = original_incoming_event_hash
return self.publish_to_backend(
self._localize_topic(topic),
self.encrypt(e)
) | Sends an object out over the pubsub connection, properly formatted,
and conforming to the protocol. Handles pickling for the wire, etc.
This method should *not* be subclassed. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/backends/pubsub/base.py#L21-L51 | [
"def _localize_topic(self, topic):\n cleaned_topic = topic\n if type(topic) == type([]):\n cleaned_topic = []\n for t in topic:\n if not t.startswith(settings.SECRET_KEY):\n cleaned_topic.append(\"%s.%s\" % (settings.SECRET_KEY, t))\n\n elif not topic.startswith(settings.SECRET_KEY):\n cleaned_topic = \"%s.%s\" % (settings.SECRET_KEY, topic)\n return cleaned_topic\n",
"def encrypt(self, raw):\n return self.encryption_backend.encrypt_to_b64(raw)\n"
] | class PubSubPrivateBase(SettingsMixin, EncryptionMixin):
"""
The private bits of the base pubsub backend.
"""
def __init__(self, *args, **kwargs):
self.recent_hashes = []
def unsubscribe(self, topic):
# This is mostly here for semantic consistency.
self.do_unsubscribe(topic)
def _localize_topic(self, topic):
cleaned_topic = topic
if type(topic) == type([]):
cleaned_topic = []
for t in topic:
if not t.startswith(settings.SECRET_KEY):
cleaned_topic.append("%s.%s" % (settings.SECRET_KEY, t))
elif not topic.startswith(settings.SECRET_KEY):
cleaned_topic = "%s.%s" % (settings.SECRET_KEY, topic)
return cleaned_topic
def subscribe(self, topic):
return self.do_subscribe(self._localize_topic(topic))
def get_message(self):
"""
Gets the latest object from the backend, and handles unpickling
and validation.
"""
try:
m = self.get_from_backend()
if m and m["type"] not in SKIP_TYPES:
return self.decrypt(m["data"])
except AttributeError:
raise Exception("Tried to call get message without having subscribed first!")
except (KeyboardInterrupt, SystemExit):
pass
except:
logging.critical("Error in watching pubsub get message: \n%s" % traceback.format_exc())
return None
|
skoczen/will | will/backends/pubsub/base.py | PubSubPrivateBase.get_message | python | def get_message(self):
try:
m = self.get_from_backend()
if m and m["type"] not in SKIP_TYPES:
return self.decrypt(m["data"])
except AttributeError:
raise Exception("Tried to call get message without having subscribed first!")
except (KeyboardInterrupt, SystemExit):
pass
except:
logging.critical("Error in watching pubsub get message: \n%s" % traceback.format_exc())
return None | Gets the latest object from the backend, and handles unpickling
and validation. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/backends/pubsub/base.py#L72-L88 | [
"def decrypt(self, enc):\n if enc:\n return self.encryption_backend.decrypt_from_b64(enc)\n return None\n"
] | class PubSubPrivateBase(SettingsMixin, EncryptionMixin):
"""
The private bits of the base pubsub backend.
"""
def __init__(self, *args, **kwargs):
self.recent_hashes = []
def publish(self, topic, obj, reference_message=None):
"""
Sends an object out over the pubsub connection, properly formatted,
and conforming to the protocol. Handles pickling for the wire, etc.
This method should *not* be subclassed.
"""
logging.debug("Publishing topic (%s): \n%s" % (topic, obj))
e = Event(
data=obj,
type=topic,
)
if hasattr(obj, "sender"):
e.sender = obj.sender
if reference_message:
original_incoming_event_hash = None
if hasattr(reference_message, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.original_incoming_event_hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "hash"):
original_incoming_event_hash = reference_message.source.hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.source.original_incoming_event_hash
elif hasattr(reference_message, "hash"):
original_incoming_event_hash = reference_message.hash
if original_incoming_event_hash:
e.original_incoming_event_hash = original_incoming_event_hash
return self.publish_to_backend(
self._localize_topic(topic),
self.encrypt(e)
)
def unsubscribe(self, topic):
# This is mostly here for semantic consistency.
self.do_unsubscribe(topic)
def _localize_topic(self, topic):
cleaned_topic = topic
if type(topic) == type([]):
cleaned_topic = []
for t in topic:
if not t.startswith(settings.SECRET_KEY):
cleaned_topic.append("%s.%s" % (settings.SECRET_KEY, t))
elif not topic.startswith(settings.SECRET_KEY):
cleaned_topic = "%s.%s" % (settings.SECRET_KEY, topic)
return cleaned_topic
def subscribe(self, topic):
return self.do_subscribe(self._localize_topic(topic))
|
skoczen/will | will/plugins/fun/wordgame.py | WordGamePlugin.word_game_round | python | def word_game_round(self, message):
"play a word game: Play a game where you think of words that start with a letter and fit a topic."
letter = random.choice(string.ascii_uppercase)
topics = []
while len(topics) < 10:
new_topic = random.choice(WORD_GAME_TOPICS)
if new_topic not in topics:
topics.append({
"index": len(topics) + 1,
"topic": new_topic
})
context = {
"letter": letter,
"topics": topics
}
self.say(rendered_template("word_game.html", context), message=message) | play a word game: Play a game where you think of words that start with a letter and fit a topic. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/fun/wordgame.py#L208-L226 | [
"def rendered_template(template_name, context=None, custom_filters=[]):\n import os\n from jinja2 import Environment, FileSystemLoader\n\n template_dirs = os.environ[\"WILL_TEMPLATE_DIRS_PICKLED\"].split(\";;\")\n loader = FileSystemLoader(template_dirs)\n env = Environment(loader=loader)\n\n if isinstance(custom_filters, list):\n for custom_filter in custom_filters:\n env.filters[custom_filter.__name__] = custom_filter\n\n if context is not None:\n this_template = env.get_template(template_name)\n return this_template.render(**context)\n else:\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n context = f(*args, **kwargs)\n if isinstance(context, dict):\n template = env.get_template(template_name)\n return template.render(**context)\n else:\n return context\n wrapped_f.will_fn_metadata = getattr(f, \"will_fn_metadata\", {})\n return wrapped_f\n return wrap\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class WordGamePlugin(WillPlugin):
@respond_to(r"^(play a word game|scattegories)(\!\.)?$")
|
skoczen/will | will/plugins/devops/emergency_contacts.py | EmergencyContactsPlugin.set_my_info | python | def set_my_info(self, message, contact_info=""):
contacts = self.load("contact_info", {})
contacts[message.sender.handle] = {
"info": contact_info,
"name": message.sender.name,
}
self.save("contact_info", contacts)
self.say("Got it.", message=message) | set my contact info to ____: Set your emergency contact info. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/devops/emergency_contacts.py#L8-L16 | [
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n",
"def save(self, key, value, expire=None):\n self.bootstrap_storage()\n try:\n return self.storage.save(key, pickle.dumps(value), expire=expire)\n except:\n logging.exception(\"Unable to save %s\", key)\n",
"def load(self, key, default=None):\n self.bootstrap_storage()\n try:\n val = self.storage.load(key)\n if val is not None:\n return pickle.loads(val)\n return default\n except:\n # logging.exception(\"Failed to load %s\", key)\n return default\n"
] | class EmergencyContactsPlugin(WillPlugin):
@respond_to("^set my contact info to (?P<contact_info>.*)", multiline=True)
@respond_to("^contact info$")
def respond_to_contact_info(self, message):
"""contact info: Show everyone's emergency contact info."""
contacts = self.load("contact_info", {})
context = {
"contacts": contacts,
}
contact_html = rendered_template("contact_info.html", context)
self.say(contact_html, message=message)
|
skoczen/will | will/plugins/devops/emergency_contacts.py | EmergencyContactsPlugin.respond_to_contact_info | python | def respond_to_contact_info(self, message):
contacts = self.load("contact_info", {})
context = {
"contacts": contacts,
}
contact_html = rendered_template("contact_info.html", context)
self.say(contact_html, message=message) | contact info: Show everyone's emergency contact info. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/devops/emergency_contacts.py#L19-L26 | [
"def rendered_template(template_name, context=None, custom_filters=[]):\n import os\n from jinja2 import Environment, FileSystemLoader\n\n template_dirs = os.environ[\"WILL_TEMPLATE_DIRS_PICKLED\"].split(\";;\")\n loader = FileSystemLoader(template_dirs)\n env = Environment(loader=loader)\n\n if isinstance(custom_filters, list):\n for custom_filter in custom_filters:\n env.filters[custom_filter.__name__] = custom_filter\n\n if context is not None:\n this_template = env.get_template(template_name)\n return this_template.render(**context)\n else:\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n context = f(*args, **kwargs)\n if isinstance(context, dict):\n template = env.get_template(template_name)\n return template.render(**context)\n else:\n return context\n wrapped_f.will_fn_metadata = getattr(f, \"will_fn_metadata\", {})\n return wrapped_f\n return wrap\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n",
"def load(self, key, default=None):\n self.bootstrap_storage()\n try:\n val = self.storage.load(key)\n if val is not None:\n return pickle.loads(val)\n return default\n except:\n # logging.exception(\"Failed to load %s\", key)\n return default\n"
] | class EmergencyContactsPlugin(WillPlugin):
@respond_to("^set my contact info to (?P<contact_info>.*)", multiline=True)
def set_my_info(self, message, contact_info=""):
"""set my contact info to ____: Set your emergency contact info."""
contacts = self.load("contact_info", {})
contacts[message.sender.handle] = {
"info": contact_info,
"name": message.sender.name,
}
self.save("contact_info", contacts)
self.say("Got it.", message=message)
@respond_to("^contact info$")
|
skoczen/will | will/plugins/friendly/random_topic.py | RandomTopicPlugin.give_us_somethin_to_talk_about | python | def give_us_somethin_to_talk_about(self, message):
r = requests.get("http://www.chatoms.com/chatom.json?Normal=1&Fun=2&Philosophy=3&Out+There=4")
data = r.json()
self.set_topic(data["text"], message=message) | new topic: set the room topic to a random conversation starter. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/friendly/random_topic.py#L10-L14 | [
"def set_topic(self, topic, message=None, room=None, channel=None, service=None, **kwargs):\n if channel:\n room = channel\n elif room:\n channel = room\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n e = Event(\n type=\"topic_change\",\n content=topic,\n topic=\"message.outgoing.%s\" % backend,\n source_message=message,\n kwargs=kwargs,\n )\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class RandomTopicPlugin(WillPlugin):
@respond_to("new topic")
|
skoczen/will | will/plugins/chat_room/rooms.py | RoomsPlugin.list_rooms | python | def list_rooms(self, message):
context = {"rooms": self.available_rooms.values(), }
self.say(rendered_template("rooms.html", context), message=message, html=True) | what are the rooms?: List all the rooms I know about. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/chat_room/rooms.py#L8-L11 | [
"def rendered_template(template_name, context=None, custom_filters=[]):\n import os\n from jinja2 import Environment, FileSystemLoader\n\n template_dirs = os.environ[\"WILL_TEMPLATE_DIRS_PICKLED\"].split(\";;\")\n loader = FileSystemLoader(template_dirs)\n env = Environment(loader=loader)\n\n if isinstance(custom_filters, list):\n for custom_filter in custom_filters:\n env.filters[custom_filter.__name__] = custom_filter\n\n if context is not None:\n this_template = env.get_template(template_name)\n return this_template.render(**context)\n else:\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n context = f(*args, **kwargs)\n if isinstance(context, dict):\n template = env.get_template(template_name)\n return template.render(**context)\n else:\n return context\n wrapped_f.will_fn_metadata = getattr(f, \"will_fn_metadata\", {})\n return wrapped_f\n return wrap\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class RoomsPlugin(WillPlugin):
@respond_to(r"what are the rooms\?")
@respond_to("^update the room list")
def update_rooms(self, message):
self.update_available_rooms()
self.say("Done!", message=message)
@respond_to(r"who is in this room\?")
def participants_in_room(self, message):
"""who is in this room?: List all the participants of this room."""
room = self.get_room_from_message(message)
context = {"participants": room.participants, }
self.say(rendered_template("participants.html", context), message=message, html=True)
|
skoczen/will | will/plugins/chat_room/rooms.py | RoomsPlugin.participants_in_room | python | def participants_in_room(self, message):
room = self.get_room_from_message(message)
context = {"participants": room.participants, }
self.say(rendered_template("participants.html", context), message=message, html=True) | who is in this room?: List all the participants of this room. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/chat_room/rooms.py#L19-L23 | [
"def rendered_template(template_name, context=None, custom_filters=[]):\n import os\n from jinja2 import Environment, FileSystemLoader\n\n template_dirs = os.environ[\"WILL_TEMPLATE_DIRS_PICKLED\"].split(\";;\")\n loader = FileSystemLoader(template_dirs)\n env = Environment(loader=loader)\n\n if isinstance(custom_filters, list):\n for custom_filter in custom_filters:\n env.filters[custom_filter.__name__] = custom_filter\n\n if context is not None:\n this_template = env.get_template(template_name)\n return this_template.render(**context)\n else:\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n context = f(*args, **kwargs)\n if isinstance(context, dict):\n template = env.get_template(template_name)\n return template.render(**context)\n else:\n return context\n wrapped_f.will_fn_metadata = getattr(f, \"will_fn_metadata\", {})\n return wrapped_f\n return wrap\n",
"def get_room_from_message(self, message):\n return self.get_room_from_name_or_id(message.data.channel.name)\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class RoomsPlugin(WillPlugin):
@respond_to(r"what are the rooms\?")
def list_rooms(self, message):
"""what are the rooms?: List all the rooms I know about."""
context = {"rooms": self.available_rooms.values(), }
self.say(rendered_template("rooms.html", context), message=message, html=True)
@respond_to("^update the room list")
def update_rooms(self, message):
self.update_available_rooms()
self.say("Done!", message=message)
@respond_to(r"who is in this room\?")
|
skoczen/will | will/plugins/help/help.py | HelpPlugin.help | python | def help(self, message, plugin=None):
# help_data = self.load("help_files")
selected_modules = help_modules = self.load("help_modules")
self.say("Sure thing, %s." % message.sender.handle)
help_text = "Here's what I know how to do:"
if plugin and plugin in help_modules:
help_text = "Here's what I know how to do about %s:" % plugin
selected_modules = dict()
selected_modules[plugin] = help_modules[plugin]
for k in sorted(selected_modules, key=lambda x: x[0]):
help_data = selected_modules[k]
if help_data:
help_text += "<br/><br/><b>%s</b>:" % k
for line in help_data:
if line:
if ":" in line:
line = " <b>%s</b>%s" % (line[:line.find(":")], line[line.find(":"):])
help_text += "<br/> %s" % line
self.say(help_text, html=True) | help: the normal help you're reading. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/help/help.py#L8-L31 | [
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n",
"def load(self, key, default=None):\n self.bootstrap_storage()\n try:\n val = self.storage.load(key)\n if val is not None:\n return pickle.loads(val)\n return default\n except:\n # logging.exception(\"Failed to load %s\", key)\n return default\n"
] | class HelpPlugin(WillPlugin):
@respond_to("^help(?: (?P<plugin>.*))?$")
|
skoczen/will | will/plugins/help/programmer_help.py | ProgrammerHelpPlugin.help | python | def help(self, message):
all_regexes = self.load("all_listener_regexes")
help_text = "Here's everything I know how to listen to:"
for r in all_regexes:
help_text += "\n%s" % r
self.say(help_text, message=message) | programmer help: Advanced programmer-y help. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/help/programmer_help.py#L8-L15 | [
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n",
"def load(self, key, default=None):\n self.bootstrap_storage()\n try:\n val = self.storage.load(key)\n if val is not None:\n return pickle.loads(val)\n return default\n except:\n # logging.exception(\"Failed to load %s\", key)\n return default\n"
] | class ProgrammerHelpPlugin(WillPlugin):
@respond_to("^programmer help$")
|
skoczen/will | will/plugins/productivity/world_time.py | TimePlugin.what_time_is_it_in | python | def what_time_is_it_in(self, message, place):
location = get_location(place)
if location is not None:
tz = get_timezone(location.lat, location.long)
if tz is not None:
ct = datetime.datetime.now(tz=pytz.timezone(tz))
self.say("It's %(time)s in %(place)s." % {'time': self.to_natural_day_and_time(ct),
'place': location.name}, message=message)
else:
self.say("I couldn't find timezone for %(place)s." % {'place': location.name}, message=message)
else:
self.say("I couldn't find anywhere named %(place)s." % {'place': location.name}, message=message) | what time is it in ___: Say the time in almost any city on earth. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/productivity/world_time.py#L60-L72 | [
"def get_location(place):\n try:\n payload = {'address': place, 'sensor': False}\n r = requests.get('http://maps.googleapis.com/maps/api/geocode/json', params=payload)\n resp = r.json()\n if resp[\"status\"] != \"OK\":\n return None\n else:\n location = GoogleLocation(resp)\n\n return location\n except Exception as e:\n logger.error(\"Failed to fetch geocode for %(place)s. Error %(error)s\" % {'place': place, 'error': e})\n return None\n",
"def get_timezone(lat, lng):\n try:\n payload = {'location': \"%(latitude)s,%(longitude)s\" % {'latitude': lat,\n 'longitude': lng},\n 'timestamp': int(time.time()),\n 'sensor': False}\n r = requests.get('https://maps.googleapis.com/maps/api/timezone/json', params=payload)\n resp = r.json()\n if resp[\"status\"] == \"OK\":\n tz = resp['timeZoneId']\n return tz\n else:\n return None\n except Exception as e:\n logger.error(\"Failed to fetch timezone for %(lat)s,%(lng)s. Error %(error)s\" % {'lat': lat,\n 'lng': lng,\n 'error': e})\n return None\n",
"def to_natural_day_and_time(self, dt, with_timezone=False):\n if dt.minute == 0:\n if with_timezone:\n time_str = \"%s %s\" % (dt.strftime(\"%I%p\").lower(), time.tzname[0])\n else:\n time_str = dt.strftime(\"%I%p\").lower()\n else:\n if with_timezone:\n time_str = \"%s %s\" % (dt.strftime(\"%I:%M%p\").lower(), time.tzname[0])\n else:\n time_str = dt.strftime(\"%I:%M%p\").lower()\n\n full_str = \"%s at %s\" % (self.to_natural_day(dt), time_str)\n return self.strip_leading_zeros(full_str)\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class TimePlugin(WillPlugin):
@respond_to(r"what time is it in (?P<place>.*)?\?+")
@respond_to(r"what time is it(\?)?$", multiline=False)
def what_time_is_it(self, message):
"""what time is it: Say the time where I am."""
now = datetime.datetime.now()
self.say("It's %s." % self.to_natural_day_and_time(now, with_timezone=True), message=message)
|
skoczen/will | will/plugins/productivity/world_time.py | TimePlugin.what_time_is_it | python | def what_time_is_it(self, message):
now = datetime.datetime.now()
self.say("It's %s." % self.to_natural_day_and_time(now, with_timezone=True), message=message) | what time is it: Say the time where I am. | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/productivity/world_time.py#L75-L78 | [
"def to_natural_day_and_time(self, dt, with_timezone=False):\n if dt.minute == 0:\n if with_timezone:\n time_str = \"%s %s\" % (dt.strftime(\"%I%p\").lower(), time.tzname[0])\n else:\n time_str = dt.strftime(\"%I%p\").lower()\n else:\n if with_timezone:\n time_str = \"%s %s\" % (dt.strftime(\"%I:%M%p\").lower(), time.tzname[0])\n else:\n time_str = dt.strftime(\"%I:%M%p\").lower()\n\n full_str = \"%s at %s\" % (self.to_natural_day(dt), time_str)\n return self.strip_leading_zeros(full_str)\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class TimePlugin(WillPlugin):
@respond_to(r"what time is it in (?P<place>.*)?\?+")
def what_time_is_it_in(self, message, place):
"""what time is it in ___: Say the time in almost any city on earth."""
location = get_location(place)
if location is not None:
tz = get_timezone(location.lat, location.long)
if tz is not None:
ct = datetime.datetime.now(tz=pytz.timezone(tz))
self.say("It's %(time)s in %(place)s." % {'time': self.to_natural_day_and_time(ct),
'place': location.name}, message=message)
else:
self.say("I couldn't find timezone for %(place)s." % {'place': location.name}, message=message)
else:
self.say("I couldn't find anywhere named %(place)s." % {'place': location.name}, message=message)
@respond_to(r"what time is it(\?)?$", multiline=False)
|
skoczen/will | will/plugins/fun/googlepoem.py | GooglePoemPlugin.google_poem | python | def google_poem(self, message, topic):
r = requests.get("http://www.google.com/complete/search?output=toolbar&q=" + topic + "%20")
xmldoc = minidom.parseString(r.text)
item_list = xmldoc.getElementsByTagName("suggestion")
context = {"topic": topic, "lines": [x.attributes["data"].value for x in item_list[:4]]}
self.say(rendered_template("gpoem.html", context), message, html=True) | make a poem about __: show a google poem about __ | train | https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/fun/googlepoem.py#L9-L15 | [
"def rendered_template(template_name, context=None, custom_filters=[]):\n import os\n from jinja2 import Environment, FileSystemLoader\n\n template_dirs = os.environ[\"WILL_TEMPLATE_DIRS_PICKLED\"].split(\";;\")\n loader = FileSystemLoader(template_dirs)\n env = Environment(loader=loader)\n\n if isinstance(custom_filters, list):\n for custom_filter in custom_filters:\n env.filters[custom_filter.__name__] = custom_filter\n\n if context is not None:\n this_template = env.get_template(template_name)\n return this_template.render(**context)\n else:\n def wrap(f):\n def wrapped_f(*args, **kwargs):\n context = f(*args, **kwargs)\n if isinstance(context, dict):\n template = env.get_template(template_name)\n return template.render(**context)\n else:\n return context\n wrapped_f.will_fn_metadata = getattr(f, \"will_fn_metadata\", {})\n return wrapped_f\n return wrap\n",
"def say(self, content, message=None, room=None, channel=None, service=None, package_for_scheduling=False, **kwargs):\n logging.info(\"self.say\")\n logging.info(content)\n if channel:\n room = channel\n elif room:\n channel = room\n\n if not \"channel\" in kwargs and channel:\n kwargs[\"channel\"] = channel\n\n message = self.get_message(message)\n message = self._trim_for_execution(message)\n backend = self.get_backend(message, service=service)\n\n if backend:\n e = Event(\n type=\"say\",\n content=content,\n source_message=message,\n kwargs=kwargs,\n )\n if package_for_scheduling:\n return \"message.outgoing.%s\" % backend, e\n else:\n logging.info(\"putting in queue: %s\" % content)\n self.publish(\"message.outgoing.%s\" % backend, e)\n"
] | class GooglePoemPlugin(WillPlugin):
@respond_to("^(gpoem|make a poem about) (?P<topic>.*)$")
|
seomoz/shovel | shovel/runner.py | run | python | def run(*args):
'''Run the normal shovel functionality'''
import os
import sys
import argparse
import pkg_resources
# First off, read the arguments
parser = argparse.ArgumentParser(prog='shovel', description='Rake, for Python')
parser.add_argument('method', help='The task to run')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='Be extra talkative')
parser.add_argument('--dry-run', dest='dryRun', action='store_true',
help='Show the args that would be used')
ver = pkg_resources.require('shovel')[0].version
parser.add_argument('--version', action='version',
version='Shovel v %s' % ver, help='print the version of Shovel.')
# Parse our arguments
if args:
clargs, remaining = parser.parse_known_args(args=args)
else: # pragma: no cover
clargs, remaining = parser.parse_known_args()
if clargs.verbose:
logger.setLevel(logging.DEBUG)
args, kwargs = parse(remaining)
# Import all of the files we want
shovel = Shovel()
# Read in any tasks that have already been defined
shovel.extend(Task.clear())
for path in [
os.path.expanduser('~/.shovel.py'),
os.path.expanduser('~/.shovel')]:
if os.path.exists(path): # pragma: no cover
shovel.read(path, os.path.expanduser('~/'))
shovel_home = os.environ.get('SHOVEL_HOME')
if shovel_home and os.path.exists(shovel_home):
shovel.read(shovel_home, shovel_home)
for path in ['shovel.py', 'shovel']:
if os.path.exists(path):
shovel.read(path)
# If it's help we're looking for, look no further
if clargs.method == 'help':
print(help.shovel_help(shovel, *args, **kwargs))
elif clargs.method == 'tasks':
tasks = list(v for _, v in shovel.items())
if not tasks:
print('No tasks found!')
else:
names = list(t.fullname for t in tasks)
docs = list(t.doc for t in tasks)
# The width of the screen
width = 80
import shutil
try:
width, _ = shutil.get_terminal_size(fallback=(0, width))
except AttributeError:
pass
# Create the format with padding for the longest name, and to
# accomodate the screen width
format = '%%-%is # %%-%is' % (
max(len(name) for name in names), width)
for name, doc in zip(names, docs):
print(format % (name, doc))
elif clargs.method:
# Try to get the first command provided
try:
tasks = shovel.tasks(clargs.method)
except KeyError:
print('Could not find task "%s"' % clargs.method, file=sys.stderr)
exit(1)
if len(tasks) > 1:
print('Specifier "%s" matches multiple tasks:' % clargs.method, file=sys.stderr)
for task in tasks:
print('\t%s' % task.fullname, file=sys.stderr)
exit(2)
task = tasks[0]
if clargs.dryRun:
print(task.dry(*args, **kwargs))
else:
task(*args, **kwargs) | Run the normal shovel functionality | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/runner.py#L30-L123 | [
"def parse(tokens):\n '''Parse the provided string to produce *args and **kwargs'''\n args = []\n kwargs = {}\n last = None\n for token in tokens:\n if token.startswith('--'):\n # If this is a keyword flag, but we've already got one that we've\n # parsed, then we're going to interpret it as a bool\n if last:\n kwargs[last] = True\n # See if it is the --foo=5 style\n last, _, value = token.strip('-').partition('=')\n if value:\n kwargs[last] = value\n last = None\n elif last != None:\n kwargs[last] = token\n last = None\n else:\n args.append(token)\n\n # If there's a dangling last, set that bool\n if last:\n kwargs[last] = True\n\n return args, kwargs\n",
"def shovel_help(shovel, *names):\n '''Return a string about help with the tasks, or lists tasks available'''\n # If names are provided, and the name refers to a group of tasks, print out\n # the tasks and a brief docstring. Otherwise, just enumerate all the tasks\n # available\n if not len(names):\n return heirarchical_help(shovel, '')\n else:\n for name in names:\n task = shovel[name]\n if isinstance(task, Shovel):\n return heirarchical_help(task, name)\n else:\n return task.help()\n",
"def extend(self, tasks):\n '''Add tasks to this particular shovel'''\n self._tasks.extend(tasks)\n for task in tasks:\n # We'll now go through all of our tasks and group them into\n # sub-shovels\n current = self.map\n modules = task.fullname.split('.')\n for module in modules[:-1]:\n if not isinstance(current[module], Shovel):\n logger.warn('Overriding task %s with a module' %\n current[module].file)\n shovel = Shovel()\n shovel.overrides = current[module]\n current[module] = shovel\n current = current[module].map\n\n # Now we'll put the task in this particular sub-shovel\n name = modules[-1]\n if name in current:\n logger.warn('Overriding %s with %s' % (\n '.'.join(modules), task.file))\n task.overrides = current[name]\n current[name] = task\n",
"def read(self, path, base=None):\n '''Import some tasks'''\n if base == None:\n base = os.getcwd()\n absolute = os.path.abspath(path)\n if os.path.isfile(absolute):\n # Load that particular file\n logger.info('Loading %s' % absolute)\n self.extend(Task.load(path, base))\n elif os.path.isdir(absolute):\n # Walk this directory looking for tasks\n tasks = []\n for root, _, files in os.walk(absolute):\n files = [f for f in files if f.endswith('.py')]\n for child in files:\n absolute = os.path.join(root, child)\n logger.info('Loading %s' % absolute)\n tasks.extend(Task.load(absolute, base))\n self.extend(tasks)\n",
"def items(self):\n '''Return a list of tuples of all the keys and tasks'''\n pairs = []\n for key, value in self.map.items():\n if isinstance(value, Shovel):\n pairs.extend([(key + '.' + k, v) for k, v in value.items()])\n else:\n pairs.append((key, value))\n return sorted(pairs)\n",
"def tasks(self, name):\n '''Get all the tasks that match a name'''\n found = self[name]\n if isinstance(found, Shovel):\n return [v for _, v in found.items()]\n return [found]\n",
"def clear(cls):\n '''Clear and return the cache'''\n cached = cls._cache\n cls._cache = []\n return cached\n"
] | # Copyright (c) 2011-2014 Moz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import logging
from .tasks import Shovel, Task
from .parser import parse
from . import help, logger
|
seomoz/shovel | shovel/help.py | heirarchical_helper | python | def heirarchical_helper(shovel, prefix, level=0):
'''Return a list of tuples of (fullname, docstring, level) for all the
tasks in the provided shovel'''
result = []
for key, value in sorted(shovel.map.items()):
if prefix:
key = prefix + '.' + key
if isinstance(value, Shovel):
result.append((key, None, level))
result.extend(heirarchical_helper(value, key, level + 1))
else:
result.append((key, value.doc or '(No docstring)', level))
return result | Return a list of tuples of (fullname, docstring, level) for all the
tasks in the provided shovel | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/help.py#L29-L41 | [
"def heirarchical_helper(shovel, prefix, level=0):\n '''Return a list of tuples of (fullname, docstring, level) for all the\n tasks in the provided shovel'''\n result = []\n for key, value in sorted(shovel.map.items()):\n if prefix:\n key = prefix + '.' + key\n if isinstance(value, Shovel):\n result.append((key, None, level))\n result.extend(heirarchical_helper(value, key, level + 1))\n else:\n result.append((key, value.doc or '(No docstring)', level))\n return result\n"
] | # Copyright (c) 2011-2014 Moz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Helpers for displaying help'''
import re
from shovel.tasks import Shovel
def heirarchical_help(shovel, prefix):
'''Given a shovel of tasks, display a heirarchical list of the tasks'''
result = []
tuples = heirarchical_helper(shovel, prefix)
if not tuples:
return ''
# We need to figure out the longest fullname length
longest = max(len(name + ' ' * level) for name, _, level in tuples)
fmt = '%%%is => %%-50s' % longest
for name, docstring, level in tuples:
if docstring == None:
result.append(' ' * level + name + '/')
else:
docstring = re.sub(r'\s+', ' ', docstring).strip()
if len(docstring) > 50:
docstring = docstring[:47] + '...'
result.append(fmt % (name, docstring))
return '\n'.join(result)
def shovel_help(shovel, *names):
'''Return a string about help with the tasks, or lists tasks available'''
# If names are provided, and the name refers to a group of tasks, print out
# the tasks and a brief docstring. Otherwise, just enumerate all the tasks
# available
if not len(names):
return heirarchical_help(shovel, '')
else:
for name in names:
task = shovel[name]
if isinstance(task, Shovel):
return heirarchical_help(task, name)
else:
return task.help()
|
seomoz/shovel | shovel/help.py | heirarchical_help | python | def heirarchical_help(shovel, prefix):
'''Given a shovel of tasks, display a heirarchical list of the tasks'''
result = []
tuples = heirarchical_helper(shovel, prefix)
if not tuples:
return ''
# We need to figure out the longest fullname length
longest = max(len(name + ' ' * level) for name, _, level in tuples)
fmt = '%%%is => %%-50s' % longest
for name, docstring, level in tuples:
if docstring == None:
result.append(' ' * level + name + '/')
else:
docstring = re.sub(r'\s+', ' ', docstring).strip()
if len(docstring) > 50:
docstring = docstring[:47] + '...'
result.append(fmt % (name, docstring))
return '\n'.join(result) | Given a shovel of tasks, display a heirarchical list of the tasks | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/help.py#L44-L62 | [
"def heirarchical_helper(shovel, prefix, level=0):\n '''Return a list of tuples of (fullname, docstring, level) for all the\n tasks in the provided shovel'''\n result = []\n for key, value in sorted(shovel.map.items()):\n if prefix:\n key = prefix + '.' + key\n if isinstance(value, Shovel):\n result.append((key, None, level))\n result.extend(heirarchical_helper(value, key, level + 1))\n else:\n result.append((key, value.doc or '(No docstring)', level))\n return result\n"
] | # Copyright (c) 2011-2014 Moz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Helpers for displaying help'''
import re
from shovel.tasks import Shovel
def heirarchical_helper(shovel, prefix, level=0):
'''Return a list of tuples of (fullname, docstring, level) for all the
tasks in the provided shovel'''
result = []
for key, value in sorted(shovel.map.items()):
if prefix:
key = prefix + '.' + key
if isinstance(value, Shovel):
result.append((key, None, level))
result.extend(heirarchical_helper(value, key, level + 1))
else:
result.append((key, value.doc or '(No docstring)', level))
return result
def shovel_help(shovel, *names):
'''Return a string about help with the tasks, or lists tasks available'''
# If names are provided, and the name refers to a group of tasks, print out
# the tasks and a brief docstring. Otherwise, just enumerate all the tasks
# available
if not len(names):
return heirarchical_help(shovel, '')
else:
for name in names:
task = shovel[name]
if isinstance(task, Shovel):
return heirarchical_help(task, name)
else:
return task.help()
|
seomoz/shovel | shovel/help.py | shovel_help | python | def shovel_help(shovel, *names):
'''Return a string about help with the tasks, or lists tasks available'''
# If names are provided, and the name refers to a group of tasks, print out
# the tasks and a brief docstring. Otherwise, just enumerate all the tasks
# available
if not len(names):
return heirarchical_help(shovel, '')
else:
for name in names:
task = shovel[name]
if isinstance(task, Shovel):
return heirarchical_help(task, name)
else:
return task.help() | Return a string about help with the tasks, or lists tasks available | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/help.py#L65-L78 | [
"def heirarchical_help(shovel, prefix):\n '''Given a shovel of tasks, display a heirarchical list of the tasks'''\n result = []\n tuples = heirarchical_helper(shovel, prefix)\n if not tuples:\n return ''\n\n # We need to figure out the longest fullname length\n longest = max(len(name + ' ' * level) for name, _, level in tuples)\n fmt = '%%%is => %%-50s' % longest\n for name, docstring, level in tuples:\n if docstring == None:\n result.append(' ' * level + name + '/')\n else:\n docstring = re.sub(r'\\s+', ' ', docstring).strip()\n if len(docstring) > 50:\n docstring = docstring[:47] + '...'\n result.append(fmt % (name, docstring))\n return '\\n'.join(result)\n"
] | # Copyright (c) 2011-2014 Moz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Helpers for displaying help'''
import re
from shovel.tasks import Shovel
def heirarchical_helper(shovel, prefix, level=0):
'''Return a list of tuples of (fullname, docstring, level) for all the
tasks in the provided shovel'''
result = []
for key, value in sorted(shovel.map.items()):
if prefix:
key = prefix + '.' + key
if isinstance(value, Shovel):
result.append((key, None, level))
result.extend(heirarchical_helper(value, key, level + 1))
else:
result.append((key, value.doc or '(No docstring)', level))
return result
def heirarchical_help(shovel, prefix):
'''Given a shovel of tasks, display a heirarchical list of the tasks'''
result = []
tuples = heirarchical_helper(shovel, prefix)
if not tuples:
return ''
# We need to figure out the longest fullname length
longest = max(len(name + ' ' * level) for name, _, level in tuples)
fmt = '%%%is => %%-50s' % longest
for name, docstring, level in tuples:
if docstring == None:
result.append(' ' * level + name + '/')
else:
docstring = re.sub(r'\s+', ' ', docstring).strip()
if len(docstring) > 50:
docstring = docstring[:47] + '...'
result.append(fmt % (name, docstring))
return '\n'.join(result)
|
seomoz/shovel | shovel/tasks.py | Shovel.load | python | def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj | Either load a path and return a shovel object or return None | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L44-L48 | [
"def read(self, path, base=None):\n '''Import some tasks'''\n if base == None:\n base = os.getcwd()\n absolute = os.path.abspath(path)\n if os.path.isfile(absolute):\n # Load that particular file\n logger.info('Loading %s' % absolute)\n self.extend(Task.load(path, base))\n elif os.path.isdir(absolute):\n # Walk this directory looking for tasks\n tasks = []\n for root, _, files in os.walk(absolute):\n files = [f for f in files if f.endswith('.py')]\n for child in files:\n absolute = os.path.join(root, child)\n logger.info('Loading %s' % absolute)\n tasks.extend(Task.load(absolute, base))\n self.extend(tasks)\n"
] | class Shovel(object):
'''A collection of tasks contained in a file or folder'''
@classmethod
def __init__(self, tasks=None):
self.overrides = None
self._tasks = tasks or []
self.map = defaultdict(Shovel)
self.extend(tasks or [])
def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task
def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks)
def __getitem__(self, key):
'''Find a task with the provided name'''
current = self.map
split = key.split('.')
for module in split[:-1]:
if module not in current:
raise KeyError('Module not found')
current = current[module].map
if split[-1] not in current:
raise KeyError('Task not found')
return current[split[-1]]
def __contains__(self, key):
try:
return bool(self.__getitem__(key))
except KeyError:
return False
def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys)
def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs)
def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found]
|
seomoz/shovel | shovel/tasks.py | Shovel.extend | python | def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task | Add tasks to this particular shovel | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L56-L79 | null | class Shovel(object):
'''A collection of tasks contained in a file or folder'''
@classmethod
def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj
def __init__(self, tasks=None):
self.overrides = None
self._tasks = tasks or []
self.map = defaultdict(Shovel)
self.extend(tasks or [])
def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks)
def __getitem__(self, key):
'''Find a task with the provided name'''
current = self.map
split = key.split('.')
for module in split[:-1]:
if module not in current:
raise KeyError('Module not found')
current = current[module].map
if split[-1] not in current:
raise KeyError('Task not found')
return current[split[-1]]
def __contains__(self, key):
try:
return bool(self.__getitem__(key))
except KeyError:
return False
def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys)
def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs)
def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found]
|
seomoz/shovel | shovel/tasks.py | Shovel.read | python | def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks) | Import some tasks | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L81-L99 | [
"def extend(self, tasks):\n '''Add tasks to this particular shovel'''\n self._tasks.extend(tasks)\n for task in tasks:\n # We'll now go through all of our tasks and group them into\n # sub-shovels\n current = self.map\n modules = task.fullname.split('.')\n for module in modules[:-1]:\n if not isinstance(current[module], Shovel):\n logger.warn('Overriding task %s with a module' %\n current[module].file)\n shovel = Shovel()\n shovel.overrides = current[module]\n current[module] = shovel\n current = current[module].map\n\n # Now we'll put the task in this particular sub-shovel\n name = modules[-1]\n if name in current:\n logger.warn('Overriding %s with %s' % (\n '.'.join(modules), task.file))\n task.overrides = current[name]\n current[name] = task\n",
"def load(cls, path, base=None):\n '''Return a list of the tasks stored in a file'''\n base = base or os.getcwd()\n absolute = os.path.abspath(path)\n parent = os.path.dirname(absolute)\n name, _, _ = os.path.basename(absolute).rpartition('.py')\n fobj, path, description = imp.find_module(name, [parent])\n try:\n imp.load_module(name, fobj, path, description)\n finally:\n if fobj:\n fobj.close()\n # Manipulate the full names of the tasks to be relative to the provided\n # base\n relative, _, _ = os.path.relpath(path, base).rpartition('.py')\n for task in cls._cache:\n parts = relative.split(os.path.sep)\n parts.append(task.name)\n # If it's either in shovel.py, or folder/__init__.py, then we\n # should consider it as being at one level above that file\n parts = [part.strip('.') for part in parts if part not in\n ('shovel', '.shovel', '__init__', '.', '..', '')]\n task.fullname = '.'.join(parts)\n logger.debug('Found task %s in %s' % (task.fullname, task.module))\n return cls.clear()\n"
] | class Shovel(object):
'''A collection of tasks contained in a file or folder'''
@classmethod
def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj
def __init__(self, tasks=None):
self.overrides = None
self._tasks = tasks or []
self.map = defaultdict(Shovel)
self.extend(tasks or [])
def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task
def __getitem__(self, key):
'''Find a task with the provided name'''
current = self.map
split = key.split('.')
for module in split[:-1]:
if module not in current:
raise KeyError('Module not found')
current = current[module].map
if split[-1] not in current:
raise KeyError('Task not found')
return current[split[-1]]
def __contains__(self, key):
try:
return bool(self.__getitem__(key))
except KeyError:
return False
def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys)
def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs)
def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found]
|
seomoz/shovel | shovel/tasks.py | Shovel.keys | python | def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys) | Return all valid keys | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L119-L127 | null | class Shovel(object):
'''A collection of tasks contained in a file or folder'''
@classmethod
def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj
def __init__(self, tasks=None):
self.overrides = None
self._tasks = tasks or []
self.map = defaultdict(Shovel)
self.extend(tasks or [])
def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task
def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks)
def __getitem__(self, key):
'''Find a task with the provided name'''
current = self.map
split = key.split('.')
for module in split[:-1]:
if module not in current:
raise KeyError('Module not found')
current = current[module].map
if split[-1] not in current:
raise KeyError('Task not found')
return current[split[-1]]
def __contains__(self, key):
try:
return bool(self.__getitem__(key))
except KeyError:
return False
def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs)
def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found]
|
seomoz/shovel | shovel/tasks.py | Shovel.items | python | def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs) | Return a list of tuples of all the keys and tasks | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L129-L137 | null | class Shovel(object):
'''A collection of tasks contained in a file or folder'''
@classmethod
def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj
def __init__(self, tasks=None):
self.overrides = None
self._tasks = tasks or []
self.map = defaultdict(Shovel)
self.extend(tasks or [])
def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task
def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks)
def __getitem__(self, key):
'''Find a task with the provided name'''
current = self.map
split = key.split('.')
for module in split[:-1]:
if module not in current:
raise KeyError('Module not found')
current = current[module].map
if split[-1] not in current:
raise KeyError('Task not found')
return current[split[-1]]
def __contains__(self, key):
try:
return bool(self.__getitem__(key))
except KeyError:
return False
def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys)
def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found]
|
seomoz/shovel | shovel/tasks.py | Shovel.tasks | python | def tasks(self, name):
'''Get all the tasks that match a name'''
found = self[name]
if isinstance(found, Shovel):
return [v for _, v in found.items()]
return [found] | Get all the tasks that match a name | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L139-L144 | null | class Shovel(object):
'''A collection of tasks contained in a file or folder'''
@classmethod
def load(cls, path, base=None):
'''Either load a path and return a shovel object or return None'''
obj = cls()
obj.read(path, base)
return obj
def __init__(self, tasks=None):
self.overrides = None
self._tasks = tasks or []
self.map = defaultdict(Shovel)
self.extend(tasks or [])
def extend(self, tasks):
'''Add tasks to this particular shovel'''
self._tasks.extend(tasks)
for task in tasks:
# We'll now go through all of our tasks and group them into
# sub-shovels
current = self.map
modules = task.fullname.split('.')
for module in modules[:-1]:
if not isinstance(current[module], Shovel):
logger.warn('Overriding task %s with a module' %
current[module].file)
shovel = Shovel()
shovel.overrides = current[module]
current[module] = shovel
current = current[module].map
# Now we'll put the task in this particular sub-shovel
name = modules[-1]
if name in current:
logger.warn('Overriding %s with %s' % (
'.'.join(modules), task.file))
task.overrides = current[name]
current[name] = task
def read(self, path, base=None):
'''Import some tasks'''
if base == None:
base = os.getcwd()
absolute = os.path.abspath(path)
if os.path.isfile(absolute):
# Load that particular file
logger.info('Loading %s' % absolute)
self.extend(Task.load(path, base))
elif os.path.isdir(absolute):
# Walk this directory looking for tasks
tasks = []
for root, _, files in os.walk(absolute):
files = [f for f in files if f.endswith('.py')]
for child in files:
absolute = os.path.join(root, child)
logger.info('Loading %s' % absolute)
tasks.extend(Task.load(absolute, base))
self.extend(tasks)
def __getitem__(self, key):
'''Find a task with the provided name'''
current = self.map
split = key.split('.')
for module in split[:-1]:
if module not in current:
raise KeyError('Module not found')
current = current[module].map
if split[-1] not in current:
raise KeyError('Task not found')
return current[split[-1]]
def __contains__(self, key):
try:
return bool(self.__getitem__(key))
except KeyError:
return False
def keys(self):
'''Return all valid keys'''
keys = []
for key, value in self.map.items():
if isinstance(value, Shovel):
keys.extend([key + '.' + k for k in value.keys()])
else:
keys.append(key)
return sorted(keys)
def items(self):
'''Return a list of tuples of all the keys and tasks'''
pairs = []
for key, value in self.map.items():
if isinstance(value, Shovel):
pairs.extend([(key + '.' + k, v) for k, v in value.items()])
else:
pairs.append((key, value))
return sorted(pairs)
|
seomoz/shovel | shovel/tasks.py | Task.make | python | def make(cls, obj):
'''Given a callable object, return a new callable object'''
try:
cls._cache.append(Task(obj))
except Exception:
logger.exception('Unable to make task for %s' % repr(obj)) | Given a callable object, return a new callable object | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L165-L170 | null | class Task(object):
'''An object representative of a task'''
# There's an interesting problem associated with this process of loading
# tasks from a file. We invoke it with a 'load', but then we get access to
# the tasks through decorators. As such, the decorator just accumulates
# the tasks that it has seen as it creates them, puts them in a cache, and
# eventually that cache will be consumed as a usable object. This is that
# cache. Put another way:
#
# 1. Clear cache
# 2. Load module
# 3. Fill cache with tasks created with @task
# 4. Once loaded, organize the cached tasks
_cache = []
# This is to help find tasks given their path
_tasks = {}
@classmethod
@classmethod
def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear()
@classmethod
def clear(cls):
'''Clear and return the cache'''
cached = cls._cache
cls._cache = []
return cached
def __init__(self, obj):
if not callable(obj):
raise TypeError('Object not callable: %s' % obj)
# Save some attributes about the task
self.name = obj.__name__
self.doc = inspect.getdoc(obj) or ''
# If the provided object is a type (like a class), we'll treat
# it a little differently from if it's a pure function. The
# assumption is that the class will be instantiated wit no
# arguments, and then called with the provided arguments
if isinstance(obj, type):
try:
self._obj = obj()
except:
raise TypeError(
'%s => Task classes must take no arguments' % self.name)
self.spec = inspect.getargspec(self._obj.__call__)
self.doc = inspect.getdoc(self._obj.__call__) or self.doc
self.line = 'Unknown line'
self.file = 'Unknown file'
else:
self.spec = inspect.getargspec(obj)
self._obj = obj
self.line = obj.__code__.co_firstlineno
self.file = obj.__code__.co_filename
self.module = self._obj.__module__
self.fullname = self.name
# What module / etc. this overrides, if any
self.overrides = None
def __call__(self, *args, **kwargs):
'''Invoke the task itself'''
try:
return self._obj(*args, **kwargs)
except Exception as exc:
logger.exception('Failed to run task %s' % self.name)
raise(exc)
def capture(self, *args, **kwargs):
'''Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one'''
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = out = StringIO()
sys.stderr = err = StringIO()
result = {
'exception': None,
'stderr': None,
'stdout': None,
'return': None
}
try:
result['return'] = self.__call__(*args, **kwargs)
except Exception:
result['exception'] = traceback.format_exc()
sys.stdout, sys.stderr = stdout, stderr
result['stderr'] = err.getvalue()
result['stdout'] = out.getvalue()
return result
def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs))
def help(self):
'''Return the help string of the task'''
# This returns a help string for a given task of the form:
#
# ==================================================
# <name>
# ============================== (If supplied)
# <docstring>
# ============================== (If overrides other tasks)
# Overrides <other task file>
# ==============================
# From <file> on <line>
# ==============================
# <name>(Argspec)
result = [
'=' * 50,
self.name
]
# And the doc, if it exists
if self.doc:
result.extend([
'=' * 30,
self.doc
])
override = self.overrides
while override:
if isinstance(override, Shovel):
result.append('Overrides module')
else:
result.append('Overrides %s' % override.file)
override = override.overrides
# Print where we read this function in from
result.extend([
'=' * 30,
'From %s on line %i' % (self.file, self.line),
'=' * 30,
'%s%s' % (self.name, str(Args(self.spec)))
])
return os.linesep.join(result)
|
seomoz/shovel | shovel/tasks.py | Task.load | python | def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear() | Return a list of the tasks stored in a file | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L173-L197 | null | class Task(object):
'''An object representative of a task'''
# There's an interesting problem associated with this process of loading
# tasks from a file. We invoke it with a 'load', but then we get access to
# the tasks through decorators. As such, the decorator just accumulates
# the tasks that it has seen as it creates them, puts them in a cache, and
# eventually that cache will be consumed as a usable object. This is that
# cache. Put another way:
#
# 1. Clear cache
# 2. Load module
# 3. Fill cache with tasks created with @task
# 4. Once loaded, organize the cached tasks
_cache = []
# This is to help find tasks given their path
_tasks = {}
@classmethod
def make(cls, obj):
'''Given a callable object, return a new callable object'''
try:
cls._cache.append(Task(obj))
except Exception:
logger.exception('Unable to make task for %s' % repr(obj))
@classmethod
@classmethod
def clear(cls):
'''Clear and return the cache'''
cached = cls._cache
cls._cache = []
return cached
def __init__(self, obj):
if not callable(obj):
raise TypeError('Object not callable: %s' % obj)
# Save some attributes about the task
self.name = obj.__name__
self.doc = inspect.getdoc(obj) or ''
# If the provided object is a type (like a class), we'll treat
# it a little differently from if it's a pure function. The
# assumption is that the class will be instantiated wit no
# arguments, and then called with the provided arguments
if isinstance(obj, type):
try:
self._obj = obj()
except:
raise TypeError(
'%s => Task classes must take no arguments' % self.name)
self.spec = inspect.getargspec(self._obj.__call__)
self.doc = inspect.getdoc(self._obj.__call__) or self.doc
self.line = 'Unknown line'
self.file = 'Unknown file'
else:
self.spec = inspect.getargspec(obj)
self._obj = obj
self.line = obj.__code__.co_firstlineno
self.file = obj.__code__.co_filename
self.module = self._obj.__module__
self.fullname = self.name
# What module / etc. this overrides, if any
self.overrides = None
def __call__(self, *args, **kwargs):
'''Invoke the task itself'''
try:
return self._obj(*args, **kwargs)
except Exception as exc:
logger.exception('Failed to run task %s' % self.name)
raise(exc)
def capture(self, *args, **kwargs):
'''Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one'''
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = out = StringIO()
sys.stderr = err = StringIO()
result = {
'exception': None,
'stderr': None,
'stdout': None,
'return': None
}
try:
result['return'] = self.__call__(*args, **kwargs)
except Exception:
result['exception'] = traceback.format_exc()
sys.stdout, sys.stderr = stdout, stderr
result['stderr'] = err.getvalue()
result['stdout'] = out.getvalue()
return result
def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs))
def help(self):
'''Return the help string of the task'''
# This returns a help string for a given task of the form:
#
# ==================================================
# <name>
# ============================== (If supplied)
# <docstring>
# ============================== (If overrides other tasks)
# Overrides <other task file>
# ==============================
# From <file> on <line>
# ==============================
# <name>(Argspec)
result = [
'=' * 50,
self.name
]
# And the doc, if it exists
if self.doc:
result.extend([
'=' * 30,
self.doc
])
override = self.overrides
while override:
if isinstance(override, Shovel):
result.append('Overrides module')
else:
result.append('Overrides %s' % override.file)
override = override.overrides
# Print where we read this function in from
result.extend([
'=' * 30,
'From %s on line %i' % (self.file, self.line),
'=' * 30,
'%s%s' % (self.name, str(Args(self.spec)))
])
return os.linesep.join(result)
|
seomoz/shovel | shovel/tasks.py | Task.capture | python | def capture(self, *args, **kwargs):
'''Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one'''
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = out = StringIO()
sys.stderr = err = StringIO()
result = {
'exception': None,
'stderr': None,
'stdout': None,
'return': None
}
try:
result['return'] = self.__call__(*args, **kwargs)
except Exception:
result['exception'] = traceback.format_exc()
sys.stdout, sys.stderr = stdout, stderr
result['stderr'] = err.getvalue()
result['stdout'] = out.getvalue()
return result | Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one | train | https://github.com/seomoz/shovel/blob/fc29232b2b8be33972f8fb498a91a67e334f057f/shovel/tasks.py#L248-L273 | [
"def __call__(self, *args, **kwargs):\n '''Invoke the task itself'''\n try:\n return self._obj(*args, **kwargs)\n except Exception as exc:\n logger.exception('Failed to run task %s' % self.name)\n raise(exc)\n"
] | class Task(object):
'''An object representative of a task'''
# There's an interesting problem associated with this process of loading
# tasks from a file. We invoke it with a 'load', but then we get access to
# the tasks through decorators. As such, the decorator just accumulates
# the tasks that it has seen as it creates them, puts them in a cache, and
# eventually that cache will be consumed as a usable object. This is that
# cache. Put another way:
#
# 1. Clear cache
# 2. Load module
# 3. Fill cache with tasks created with @task
# 4. Once loaded, organize the cached tasks
_cache = []
# This is to help find tasks given their path
_tasks = {}
@classmethod
def make(cls, obj):
'''Given a callable object, return a new callable object'''
try:
cls._cache.append(Task(obj))
except Exception:
logger.exception('Unable to make task for %s' % repr(obj))
@classmethod
def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear()
@classmethod
def clear(cls):
'''Clear and return the cache'''
cached = cls._cache
cls._cache = []
return cached
def __init__(self, obj):
if not callable(obj):
raise TypeError('Object not callable: %s' % obj)
# Save some attributes about the task
self.name = obj.__name__
self.doc = inspect.getdoc(obj) or ''
# If the provided object is a type (like a class), we'll treat
# it a little differently from if it's a pure function. The
# assumption is that the class will be instantiated wit no
# arguments, and then called with the provided arguments
if isinstance(obj, type):
try:
self._obj = obj()
except:
raise TypeError(
'%s => Task classes must take no arguments' % self.name)
self.spec = inspect.getargspec(self._obj.__call__)
self.doc = inspect.getdoc(self._obj.__call__) or self.doc
self.line = 'Unknown line'
self.file = 'Unknown file'
else:
self.spec = inspect.getargspec(obj)
self._obj = obj
self.line = obj.__code__.co_firstlineno
self.file = obj.__code__.co_filename
self.module = self._obj.__module__
self.fullname = self.name
# What module / etc. this overrides, if any
self.overrides = None
def __call__(self, *args, **kwargs):
'''Invoke the task itself'''
try:
return self._obj(*args, **kwargs)
except Exception as exc:
logger.exception('Failed to run task %s' % self.name)
raise(exc)
def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs))
def help(self):
'''Return the help string of the task'''
# This returns a help string for a given task of the form:
#
# ==================================================
# <name>
# ============================== (If supplied)
# <docstring>
# ============================== (If overrides other tasks)
# Overrides <other task file>
# ==============================
# From <file> on <line>
# ==============================
# <name>(Argspec)
result = [
'=' * 50,
self.name
]
# And the doc, if it exists
if self.doc:
result.extend([
'=' * 30,
self.doc
])
override = self.overrides
while override:
if isinstance(override, Shovel):
result.append('Overrides module')
else:
result.append('Overrides %s' % override.file)
override = override.overrides
# Print where we read this function in from
result.extend([
'=' * 30,
'From %s on line %i' % (self.file, self.line),
'=' * 30,
'%s%s' % (self.name, str(Args(self.spec)))
])
return os.linesep.join(result)
|
def dry(self, *args, **kwargs):
    '''Describe, without executing, what invoking this task would do.

    Returns a human-readable string: the task name followed by an
    explanation of how the given arguments bind to its argspec.
    '''
    binding = Args(self.spec).explain(*args, **kwargs)
    return 'Would have executed:\n' + self.name + binding
"def explain(self, *args, **kwargs):\n '''Return a string that describes how these args are interpreted'''\n args = self.get(*args, **kwargs)\n results = ['%s = %s' % (name, value) for name, value in args.required]\n results.extend(['%s = %s (overridden)' % (\n name, value) for name, value in args.overridden])\n results.extend(['%s = %s (default)' % (\n name, value) for name, value in args.defaulted])\n if self._varargs:\n results.append('%s = %s' % (self._varargs, args.varargs))\n if self._kwargs:\n results.append('%s = %s' % (self._kwargs, args.kwargs))\n return '\\n\\t'.join(results)\n"
] | class Task(object):
'''An object representative of a task'''
# There's an interesting problem associated with this process of loading
# tasks from a file. We invoke it with a 'load', but then we get access to
# the tasks through decorators. As such, the decorator just accumulates
# the tasks that it has seen as it creates them, puts them in a cache, and
# eventually that cache will be consumed as a usable object. This is that
# cache. Put another way:
#
# 1. Clear cache
# 2. Load module
# 3. Fill cache with tasks created with @task
# 4. Once loaded, organize the cached tasks
_cache = []
# This is to help find tasks given their path
_tasks = {}
@classmethod
def make(cls, obj):
'''Given a callable object, return a new callable object'''
try:
cls._cache.append(Task(obj))
except Exception:
logger.exception('Unable to make task for %s' % repr(obj))
@classmethod
def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear()
@classmethod
def clear(cls):
'''Clear and return the cache'''
cached = cls._cache
cls._cache = []
return cached
def __init__(self, obj):
if not callable(obj):
raise TypeError('Object not callable: %s' % obj)
# Save some attributes about the task
self.name = obj.__name__
self.doc = inspect.getdoc(obj) or ''
# If the provided object is a type (like a class), we'll treat
# it a little differently from if it's a pure function. The
# assumption is that the class will be instantiated wit no
# arguments, and then called with the provided arguments
if isinstance(obj, type):
try:
self._obj = obj()
except:
raise TypeError(
'%s => Task classes must take no arguments' % self.name)
self.spec = inspect.getargspec(self._obj.__call__)
self.doc = inspect.getdoc(self._obj.__call__) or self.doc
self.line = 'Unknown line'
self.file = 'Unknown file'
else:
self.spec = inspect.getargspec(obj)
self._obj = obj
self.line = obj.__code__.co_firstlineno
self.file = obj.__code__.co_filename
self.module = self._obj.__module__
self.fullname = self.name
# What module / etc. this overrides, if any
self.overrides = None
def __call__(self, *args, **kwargs):
'''Invoke the task itself'''
try:
return self._obj(*args, **kwargs)
except Exception as exc:
logger.exception('Failed to run task %s' % self.name)
raise(exc)
def capture(self, *args, **kwargs):
'''Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one'''
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = out = StringIO()
sys.stderr = err = StringIO()
result = {
'exception': None,
'stderr': None,
'stdout': None,
'return': None
}
try:
result['return'] = self.__call__(*args, **kwargs)
except Exception:
result['exception'] = traceback.format_exc()
sys.stdout, sys.stderr = stdout, stderr
result['stderr'] = err.getvalue()
result['stdout'] = out.getvalue()
return result
def help(self):
'''Return the help string of the task'''
# This returns a help string for a given task of the form:
#
# ==================================================
# <name>
# ============================== (If supplied)
# <docstring>
# ============================== (If overrides other tasks)
# Overrides <other task file>
# ==============================
# From <file> on <line>
# ==============================
# <name>(Argspec)
result = [
'=' * 50,
self.name
]
# And the doc, if it exists
if self.doc:
result.extend([
'=' * 30,
self.doc
])
override = self.overrides
while override:
if isinstance(override, Shovel):
result.append('Overrides module')
else:
result.append('Overrides %s' % override.file)
override = override.overrides
# Print where we read this function in from
result.extend([
'=' * 30,
'From %s on line %i' % (self.file, self.line),
'=' * 30,
'%s%s' % (self.name, str(Args(self.spec)))
])
return os.linesep.join(result)
|
def help(self):
    '''Return the help string of the task'''
    # Sections assembled top to bottom and joined with os.linesep:
    #   banner / name / (docstring) / (override chain) / origin / signature
    lines = ['=' * 50, self.name]
    # Docstring section, only when a docstring exists
    if self.doc:
        lines.append('=' * 30)
        lines.append(self.doc)
    # Walk the chain of overridden tasks, innermost first
    override = self.overrides
    while override:
        if isinstance(override, Shovel):
            lines.append('Overrides module')
        else:
            lines.append('Overrides %s' % override.file)
        override = override.overrides
    # Where this task was loaded from, and its call signature
    lines.append('=' * 30)
    lines.append('From %s on line %i' % (self.file, self.line))
    lines.append('=' * 30)
    lines.append('%s%s' % (self.name, str(Args(self.spec))))
    return os.linesep.join(lines)
'''An object representative of a task'''
# There's an interesting problem associated with this process of loading
# tasks from a file. We invoke it with a 'load', but then we get access to
# the tasks through decorators. As such, the decorator just accumulates
# the tasks that it has seen as it creates them, puts them in a cache, and
# eventually that cache will be consumed as a usable object. This is that
# cache. Put another way:
#
# 1. Clear cache
# 2. Load module
# 3. Fill cache with tasks created with @task
# 4. Once loaded, organize the cached tasks
_cache = []
# This is to help find tasks given their path
_tasks = {}
@classmethod
def make(cls, obj):
'''Given a callable object, return a new callable object'''
try:
cls._cache.append(Task(obj))
except Exception:
logger.exception('Unable to make task for %s' % repr(obj))
@classmethod
def load(cls, path, base=None):
'''Return a list of the tasks stored in a file'''
base = base or os.getcwd()
absolute = os.path.abspath(path)
parent = os.path.dirname(absolute)
name, _, _ = os.path.basename(absolute).rpartition('.py')
fobj, path, description = imp.find_module(name, [parent])
try:
imp.load_module(name, fobj, path, description)
finally:
if fobj:
fobj.close()
# Manipulate the full names of the tasks to be relative to the provided
# base
relative, _, _ = os.path.relpath(path, base).rpartition('.py')
for task in cls._cache:
parts = relative.split(os.path.sep)
parts.append(task.name)
# If it's either in shovel.py, or folder/__init__.py, then we
# should consider it as being at one level above that file
parts = [part.strip('.') for part in parts if part not in
('shovel', '.shovel', '__init__', '.', '..', '')]
task.fullname = '.'.join(parts)
logger.debug('Found task %s in %s' % (task.fullname, task.module))
return cls.clear()
@classmethod
def clear(cls):
'''Clear and return the cache'''
cached = cls._cache
cls._cache = []
return cached
def __init__(self, obj):
if not callable(obj):
raise TypeError('Object not callable: %s' % obj)
# Save some attributes about the task
self.name = obj.__name__
self.doc = inspect.getdoc(obj) or ''
# If the provided object is a type (like a class), we'll treat
# it a little differently from if it's a pure function. The
# assumption is that the class will be instantiated wit no
# arguments, and then called with the provided arguments
if isinstance(obj, type):
try:
self._obj = obj()
except:
raise TypeError(
'%s => Task classes must take no arguments' % self.name)
self.spec = inspect.getargspec(self._obj.__call__)
self.doc = inspect.getdoc(self._obj.__call__) or self.doc
self.line = 'Unknown line'
self.file = 'Unknown file'
else:
self.spec = inspect.getargspec(obj)
self._obj = obj
self.line = obj.__code__.co_firstlineno
self.file = obj.__code__.co_filename
self.module = self._obj.__module__
self.fullname = self.name
# What module / etc. this overrides, if any
self.overrides = None
def __call__(self, *args, **kwargs):
'''Invoke the task itself'''
try:
return self._obj(*args, **kwargs)
except Exception as exc:
logger.exception('Failed to run task %s' % self.name)
raise(exc)
def capture(self, *args, **kwargs):
'''Run a task and return a dictionary with stderr, stdout and the
return value. Also, the traceback from the exception if there was
one'''
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = out = StringIO()
sys.stderr = err = StringIO()
result = {
'exception': None,
'stderr': None,
'stdout': None,
'return': None
}
try:
result['return'] = self.__call__(*args, **kwargs)
except Exception:
result['exception'] = traceback.format_exc()
sys.stdout, sys.stderr = stdout, stderr
result['stderr'] = err.getvalue()
result['stdout'] = out.getvalue()
return result
def dry(self, *args, **kwargs):
'''Perform a dry-run of the task'''
return 'Would have executed:\n%s%s' % (
self.name, Args(self.spec).explain(*args, **kwargs))
|
def explain(self, *args, **kwargs):
    '''Return a string that describes how these args are interpreted'''
    # Resolve the call into (required, overridden, defaulted, varargs, kwargs)
    bound = self.get(*args, **kwargs)
    pieces = []
    for name, value in bound.required:
        pieces.append('%s = %s' % (name, value))
    for name, value in bound.overridden:
        pieces.append('%s = %s (overridden)' % (name, value))
    for name, value in bound.defaulted:
        pieces.append('%s = %s (default)' % (name, value))
    # The *args / **kwargs catch-alls, only when the spec declares them
    if self._varargs:
        pieces.append('%s = %s' % (self._varargs, bound.varargs))
    if self._kwargs:
        pieces.append('%s = %s' % (self._kwargs, bound.kwargs))
    return '\n\t'.join(pieces)
"def get(self, *args, **kwargs):\n '''Evaluate this argspec with the provided arguments'''\n # We'll go through all of our required args and make sure they're\n # present\n required = [arg for arg in self._args if arg not in kwargs]\n if len(args) < len(required):\n raise TypeError('Missing arguments %s' % required[len(args):])\n required = list(zip(required, args))\n args = args[len(required):]\n\n # Now we'll look through our defaults, if there are any\n defaulted = [(name, default) for name, default in self._defaults\n if name not in kwargs]\n overridden = list(zip([d[0] for d in defaulted], args))\n args = args[len(overridden):]\n defaulted = defaulted[len(overridden):]\n\n # And anything left over is in varargs\n if args and not self._varargs:\n raise TypeError('Too many arguments provided')\n\n return ArgTuple(required, overridden, defaulted, args, kwargs)\n"
] | class Args(object):
'''Represents an argspec, and evaluates provided arguments to complete an
invocation. It wraps an `argspec`, and provides some utility functionality
around actually evaluating args and kwargs given that argspec.'''
@classmethod
def parse(cls, obj):
'''Get the Args object associated with the argspec'''
return cls(inspect.getargspec(obj))
def __init__(self, spec):
# We need to keep track of all our arguments and their defaults. Since
# defaults are provided from the tail end of the positional args, we'll
# reverse those and the defaults from the argspec and pair them. Then
# we'll add the required positional arguments and get a list of all
# args and whether or not they have defaults
self._defaults = list(reversed(
list(zip(reversed(spec.args or []), reversed(spec.defaults or [])))
))
# Now, take all the args that don't have a default
self._args = spec.args[:(len(spec.args) - len(self._defaults))]
# Now our internal args is a list of tuples of variable
# names and their corresponding default values
self._varargs = spec.varargs
self._kwargs = spec.keywords
def __str__(self):
results = []
results.extend(self._args)
results.extend('%s=%s' % (k, v) for k, v in self._defaults)
if self._varargs:
results.append('*%s' % self._varargs)
if self._kwargs:
results.append('**%s' % self._kwargs)
return '(' + ', '.join(results) + ')'
def get(self, *args, **kwargs):
'''Evaluate this argspec with the provided arguments'''
# We'll go through all of our required args and make sure they're
# present
required = [arg for arg in self._args if arg not in kwargs]
if len(args) < len(required):
raise TypeError('Missing arguments %s' % required[len(args):])
required = list(zip(required, args))
args = args[len(required):]
# Now we'll look through our defaults, if there are any
defaulted = [(name, default) for name, default in self._defaults
if name not in kwargs]
overridden = list(zip([d[0] for d in defaulted], args))
args = args[len(overridden):]
defaulted = defaulted[len(overridden):]
# And anything left over is in varargs
if args and not self._varargs:
raise TypeError('Too many arguments provided')
return ArgTuple(required, overridden, defaulted, args, kwargs)
|
def get(self, *args, **kwargs):
    '''Evaluate this argspec with the provided arguments'''
    # Positional names that still need a value (not supplied by keyword)
    needed = [name for name in self._args if name not in kwargs]
    if len(args) < len(needed):
        raise TypeError('Missing arguments %s' % needed[len(args):])
    # Pair each needed name with a positional value, consuming those values
    required = list(zip(needed, args))
    remaining = args[len(required):]
    # Defaulted parameters not supplied by keyword; any leading leftover
    # positionals override their defaults in declaration order
    defaulted = [(name, value) for name, value in self._defaults
                 if name not in kwargs]
    overridden = list(zip([pair[0] for pair in defaulted], remaining))
    remaining = remaining[len(overridden):]
    defaulted = defaulted[len(overridden):]
    # Leftover positionals are only legal when the spec declares *varargs
    if remaining and not self._varargs:
        raise TypeError('Too many arguments provided')
    return ArgTuple(required, overridden, defaulted, remaining, kwargs)
'''Represents an argspec, and evaluates provided arguments to complete an
invocation. It wraps an `argspec`, and provides some utility functionality
around actually evaluating args and kwargs given that argspec.'''
@classmethod
def parse(cls, obj):
'''Get the Args object associated with the argspec'''
return cls(inspect.getargspec(obj))
def __init__(self, spec):
# We need to keep track of all our arguments and their defaults. Since
# defaults are provided from the tail end of the positional args, we'll
# reverse those and the defaults from the argspec and pair them. Then
# we'll add the required positional arguments and get a list of all
# args and whether or not they have defaults
self._defaults = list(reversed(
list(zip(reversed(spec.args or []), reversed(spec.defaults or [])))
))
# Now, take all the args that don't have a default
self._args = spec.args[:(len(spec.args) - len(self._defaults))]
# Now our internal args is a list of tuples of variable
# names and their corresponding default values
self._varargs = spec.varargs
self._kwargs = spec.keywords
def __str__(self):
results = []
results.extend(self._args)
results.extend('%s=%s' % (k, v) for k, v in self._defaults)
if self._varargs:
results.append('*%s' % self._varargs)
if self._kwargs:
results.append('**%s' % self._kwargs)
return '(' + ', '.join(results) + ')'
def explain(self, *args, **kwargs):
'''Return a string that describes how these args are interpreted'''
args = self.get(*args, **kwargs)
results = ['%s = %s' % (name, value) for name, value in args.required]
results.extend(['%s = %s (overridden)' % (
name, value) for name, value in args.overridden])
results.extend(['%s = %s (default)' % (
name, value) for name, value in args.defaulted])
if self._varargs:
results.append('%s = %s' % (self._varargs, args.varargs))
if self._kwargs:
results.append('%s = %s' % (self._kwargs, args.kwargs))
return '\n\t'.join(results)
|
def parse(tokens):
    '''Parse the provided tokens to produce *args and **kwargs.

    Tokens of the form ``--key=value`` or ``--key value`` become keyword
    arguments; a ``--flag`` followed by another flag (or nothing) becomes
    ``{'flag': True}``; every other token is a positional argument.

    Parameters
    ----------
    tokens : iterable of str
        Command-line style tokens to interpret.

    Returns
    -------
    (list, dict)
        Positional arguments and keyword arguments.
    '''
    args = []
    kwargs = {}
    pending = None  # keyword name waiting for its value
    for token in tokens:
        if token.startswith('--'):
            # A new flag while one is pending means the pending flag had
            # no value: interpret it as a boolean switch
            if pending:
                kwargs[pending] = True
            # Handle the --key=value form inline
            pending, _, value = token.strip('-').partition('=')
            if value:
                kwargs[pending] = value
                pending = None
        elif pending is not None:
            # Previous token was a bare flag: this token is its value
            kwargs[pending] = token
            pending = None
        else:
            args.append(token)
    # A flag still pending at the end is a boolean switch
    if pending:
        kwargs[pending] = True
    return args, kwargs
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''Helping functions for parsing CLI interface stuff'''
|
def sumnum(*args):
    '''Computes the sum of the provided numbers'''
    # Echo the expression and its value, e.g. "1 + 2 = 3.000000"
    expression = ' + '.join(args)
    total = sum(float(arg) for arg in args)
    print('%s = %f' % (expression, total))
@task
def hello(name):
'''Prints hello and the provided name'''
print('Hello, %s!' % name)
@task
@task
def attributes(name, **kwargs):
'''Prints a name, and all keyword attributes'''
print('%s has attributes:' % name)
for key, value in kwargs.items():
print('\t%s => %s' % (key, value))
|
def attributes(name, **kwargs):
    '''Prints a name, and all keyword attributes'''
    # Header line, then one tab-indented "key => value" line per attribute
    print('%s has attributes:' % name)
    for key in kwargs:
        print('\t%s => %s' % (key, kwargs[key]))
@task
def hello(name):
'''Prints hello and the provided name'''
print('Hello, %s!' % name)
@task
def sumnum(*args):
'''Computes the sum of the provided numbers'''
print('%s = %f' % (' + '.join(args), sum(float(arg) for arg in args)))
@task
|
def get_rejected_variables(self, threshold=0.9):
    """Return a list of variable names being rejected for high
    correlation with one of remaining variables.

    Parameters
    ----------
    threshold : float
        Correlation value above which a variable is rejected.

    Returns
    -------
    list
        The rejected variable names, or an empty list if the
        correlation has not been computed.
    """
    profile = self.description_set['variables']
    # No 'correlation' attribute means the check was never run
    if not hasattr(profile, 'correlation'):
        return []
    return profile.index[profile.correlation > threshold].tolist()
correlation with one of remaining variables.
Parameters:
----------
threshold : float
Correlation value above which a variable is rejected
Returns
-------
list
The list of rejected variables or an empty list if the correlation has not been computed. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/__init__.py#L86-L104 | null | class ProfileReport(object):
"""Generate a profile report from a Dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Attributes
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Methods
-------
get_description
Return the description (a raw statistical summary) of the dataset.
get_rejected_variables
Return the list of rejected variable or an empty list if there is no rejected variables.
to_file
Write the report to a file.
to_html
Return the report as an HTML string.
"""
html = ''
file = None
def __init__(self, df, **kwargs):
"""Constructor see class documentation
"""
sample = kwargs.get('sample', df.head())
description_set = describe_df(df, **kwargs)
self.html = to_html(sample,
description_set)
self.description_set = description_set
def get_description(self):
"""Return the description (a raw statistical summary) of the dataset.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
"""
return self.description_set
def get_rejected_variables(self, threshold=0.9):
"""Return a list of variable names being rejected for high
correlation with one of remaining variables.
Parameters:
----------
threshold : float
Correlation value which is above the threshold are rejected
Returns
-------
list
The list of rejected variables or an empty list if the correlation has not been computed.
"""
variable_profile = self.description_set['variables']
result = []
if hasattr(variable_profile, 'correlation'):
result = variable_profile.index[variable_profile.correlation > threshold].tolist()
return result
def to_file(self, outputfile=DEFAULT_OUTPUTFILE):
"""Write the report to a file.
By default a name is generated.
Parameters:
----------
outputfile : str
The name or the path of the file to generale including the extension (.html).
"""
if outputfile != NO_OUTPUTFILE:
if outputfile == DEFAULT_OUTPUTFILE:
outputfile = 'profile_' + str(hash(self)) + ".html"
# TODO: should be done in the template
with codecs.open(outputfile, 'w+b', encoding='utf8') as self.file:
self.file.write(templates.template('wrapper').render(content=self.html))
def to_html(self):
"""Generate and return complete template as lengthy string
for using with frameworks.
Returns
-------
str
The HTML output.
"""
return templates.template('wrapper').render(content=self.html)
def _repr_html_(self):
"""Used to output the HTML representation to a Jupyter notebook
Returns
-------
str
The HTML internal representation.
"""
return self.html
def __str__(self):
"""Overwrite of the str method.
Returns
-------
str
A string representation of the object.
"""
return "Output written to file " + str(self.file.name)
|
pandas-profiling/pandas-profiling | pandas_profiling/__init__.py | ProfileReport.to_file | python | def to_file(self, outputfile=DEFAULT_OUTPUTFILE):
# Write the rendered HTML report to `outputfile`. The sentinel
# NO_OUTPUTFILE suppresses writing entirely; DEFAULT_OUTPUTFILE triggers
# an auto-generated name based on the report's hash.
if outputfile != NO_OUTPUTFILE:
if outputfile == DEFAULT_OUTPUTFILE:
# Auto-generated name; hash(self) is only stable within one process
outputfile = 'profile_' + str(hash(self)) + ".html"
# TODO: should be done in the template
# NOTE(review): the handle is bound to self.file so other methods can
# report the path later; the `with` block closes it on exit.
with codecs.open(outputfile, 'w+b', encoding='utf8') as self.file:
self.file.write(templates.template('wrapper').render(content=self.html)) | Write the report to a file.
By default a name is generated.
Parameters:
----------
outputfile : str
The name or the path of the file to generate, including the extension (.html). | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/__init__.py#L106-L122 | [
"def template(template_name):\n \"\"\"Return a jinja template ready for rendering. If needed, global variables are initialized.\n\n Parameters\n ----------\n template_name: str, the name of the template as defined in the templates mapping\n\n Returns\n -------\n The Jinja template ready for rendering\n \"\"\"\n globals = None\n if template_name.startswith('row_'):\n # This is a row template setting global variable\n globals = dict()\n globals['vartype'] = var_type[template_name.split('_')[1].upper()]\n return jinja2_env.get_template(templates[template_name], globals=globals)\n"
] | class ProfileReport(object):
"""Generate a profile report from a Dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Attributes
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Methods
-------
get_description
Return the description (a raw statistical summary) of the dataset.
get_rejected_variables
Return the list of rejected variable or an empty list if there is no rejected variables.
to_file
Write the report to a file.
to_html
Return the report as an HTML string.
"""
html = ''
file = None
def __init__(self, df, **kwargs):
"""Constructor see class documentation
"""
sample = kwargs.get('sample', df.head())
description_set = describe_df(df, **kwargs)
self.html = to_html(sample,
description_set)
self.description_set = description_set
def get_description(self):
"""Return the description (a raw statistical summary) of the dataset.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
"""
return self.description_set
def get_rejected_variables(self, threshold=0.9):
"""Return a list of variable names being rejected for high
correlation with one of remaining variables.
Parameters:
----------
threshold : float
Correlation value which is above the threshold are rejected
Returns
-------
list
The list of rejected variables or an empty list if the correlation has not been computed.
"""
variable_profile = self.description_set['variables']
result = []
if hasattr(variable_profile, 'correlation'):
result = variable_profile.index[variable_profile.correlation > threshold].tolist()
return result
def to_file(self, outputfile=DEFAULT_OUTPUTFILE):
"""Write the report to a file.
By default a name is generated.
Parameters:
----------
outputfile : str
The name or the path of the file to generale including the extension (.html).
"""
if outputfile != NO_OUTPUTFILE:
if outputfile == DEFAULT_OUTPUTFILE:
outputfile = 'profile_' + str(hash(self)) + ".html"
# TODO: should be done in the template
with codecs.open(outputfile, 'w+b', encoding='utf8') as self.file:
self.file.write(templates.template('wrapper').render(content=self.html))
def to_html(self):
"""Generate and return complete template as lengthy string
for using with frameworks.
Returns
-------
str
The HTML output.
"""
return templates.template('wrapper').render(content=self.html)
def _repr_html_(self):
"""Used to output the HTML representation to a Jupyter notebook
Returns
-------
str
The HTML internal representation.
"""
return self.html
def __str__(self):
"""Overwrite of the str method.
Returns
-------
str
A string representation of the object.
"""
return "Output written to file " + str(self.file.name)
|
def template(template_name):
    """Return a jinja template ready for rendering. If needed, global
    variables are initialized.

    Parameters
    ----------
    template_name: str, the name of the template as defined in the
        templates mapping

    Returns
    -------
    The Jinja template ready for rendering
    """
    template_globals = None
    if template_name.startswith('row_'):
        # Row templates expose the human-readable variable type to Jinja
        # through the `vartype` global (e.g. 'row_num' -> var_type['NUM'])
        row_kind = template_name.split('_')[1].upper()
        template_globals = {'vartype': var_type[row_kind]}
    return jinja2_env.get_template(templates[template_name],
                                   globals=template_globals)
Parameters
----------
template_name: str, the name of the template as defined in the templates mapping
Returns
-------
The Jinja template ready for rendering | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/templates.py#L44-L60 | null | # coding=UTF-8
"""Contains all templates used for generating the HTML profile report"""
from jinja2 import Environment, PackageLoader
# Initializing Jinja
pl = PackageLoader('pandas_profiling', 'templates')
jinja2_env = Environment(lstrip_blocks=True, trim_blocks=True, loader=pl)
# Mapping between template name and file
templates = {'freq_table_row': 'freq_table_row.html',
'mini_freq_table_row': 'mini_freq_table_row.html',
'freq_table': 'freq_table.html',
'mini_freq_table': 'mini_freq_table.html',
'row_num': 'row_num.html',
'row_date': 'row_date.html',
'row_cat': 'row_cat.html',
'row_bool': 'row_bool.html',
'row_corr': 'row_corr.html',
'row_recoded': 'row_recoded.html',
'row_const': 'row_const.html',
'row_unique': 'row_unique.html',
'row_unsupported': 'row_unsupported.html',
'overview': 'overview.html',
'sample': 'sample.html',
'base': 'base.html',
'wrapper': 'wrapper.html',
'correlations' : 'correlations.html'
}
# Mapping between row type and var type
var_type = {'NUM': 'Numeric',
'DATE': 'Date',
'CAT': 'Categorical',
'UNIQUE': 'Categorical, Unique',
'BOOL': 'Boolean',
'CONST': 'Constant',
'CORR': 'Highly correlated',
'RECODED': 'Recoded',
'UNSUPPORTED': 'Unsupported'
}
# mapping between row type and template name
row_templates_dict = {'NUM': template('row_num'),
'DATE': template('row_date'),
'DISCRETE': template('row_num'),
'CAT': template('row_cat'),
'BOOL': template('row_bool'),
'UNIQUE': template('row_unique'),
'CONST': template('row_const'),
'CORR': template('row_corr'),
'RECODED': template('row_recoded'),
'UNSUPPORTED': template('row_unsupported')
}
# The number of column to use in the display of the frequency table according to the category
mini_freq_table_nb_col = {'CAT': 6, 'BOOL': 3}
messages = dict()
messages['CONST'] = u'<a href="#pp_var_{0[varname]}"><code>{0[varname]}</code></a> has constant value {0[mode]} <span class="label label-primary">Rejected</span>'
messages['CORR'] = u'<a href="#pp_var_{0[varname]}"><code>{0[varname]}</code></a> is highly correlated with <a href="#pp_var_{0[correlation_var]}"><code>{0[correlation_var]}</code></a> (ρ = {0[correlation]}) <span class="label label-primary">Rejected</span>'
messages['RECODED'] = u'<a href="#pp_var_{0[varname]}"><code>{0[varname]}</code></a> is a recoding of <a href="#pp_var_{0[correlation_var]}"><code>{0[correlation_var]}</code></a> <span class="label label-primary">Rejected</span>'
messages['HIGH_CARDINALITY'] = u'<a href="#pp_var_{0[varname]}"><code>{varname}</code></a> has a high cardinality: {0[distinct_count]} distinct values <span class="label label-warning">Warning</span>'
messages['UNSUPPORTED'] = u'<a href="#pp_var_{0[varname]}"><code>{0[varname]}</code></a> is an unsupported type, check if it needs cleaning or further analysis <span class="label label-warning">Warning</span>'
messages['n_duplicates'] = u'Dataset has {0[n_duplicates]} duplicate rows <span class="label label-warning">Warning</span>'
messages['skewness'] = u'<a href="#pp_var_{0[varname]}"><code>{varname}</code></a> is highly skewed (γ1 = {0[skewness]}) <span class="label label-info">Skewed</span>'
messages['p_missing'] = u'<a href="#pp_var_{0[varname]}"><code>{varname}</code></a> has {0[n_missing]} / {0[p_missing]} missing values <span class="label label-default">Missing</span>'
messages['p_infinite'] = u'<a href="#pp_var_{0[varname]}"><code>{varname}</code></a> has {0[n_infinite]} / {0[p_infinite]} infinite values <span class="label label-default">Infinite</span>'
messages['p_zeros'] = u'<a href="#pp_var_{0[varname]}"><code>{varname}</code></a> has {0[n_zeros]} / {0[p_zeros]} zeros <span class="label label-info">Zeros</span>'
message_row = u'<li>{message}</li>'
|
pandas-profiling/pandas-profiling | pandas_profiling/report.py | to_html | python | def to_html(sample, stats_object):
n_obs = stats_object['table']['n']
value_formatters = formatters.value_formatters
row_formatters = formatters.row_formatters
if not isinstance(sample, pd.DataFrame):
raise TypeError("sample must be of type pandas.DataFrame")
if not isinstance(stats_object, dict):
raise TypeError("stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?")
if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())):
raise TypeError(
"stats_object badly formatted. Did you generate this using the pandas_profiling.describe() function?")
def fmt(value, name):
if pd.isnull(value):
return ""
if name in value_formatters:
return value_formatters[name](value)
elif isinstance(value, float):
return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)
else:
try:
return unicode(value) # Python 2
except NameError:
return str(value) # Python 3
def _format_row(freq, label, max_freq, row_template, n, extra_class=''):
if max_freq != 0:
width = int(freq / max_freq * 99) + 1
else:
width = 1
if width > 20:
label_in_bar = freq
label_after_bar = ""
else:
label_in_bar = " "
label_after_bar = freq
return row_template.render(label=label,
width=width,
count=freq,
percentage='{:2.1f}'.format(freq / n * 100),
extra_class=extra_class,
label_in_bar=label_in_bar,
label_after_bar=label_after_bar)
def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):
freq_rows_html = u''
if max_number_to_print > n:
max_number_to_print=n
if max_number_to_print < len(freqtable):
freq_other = sum(freqtable.iloc[max_number_to_print:])
min_freq = freqtable.values[max_number_to_print]
else:
freq_other = 0
min_freq = 0
freq_missing = n - sum(freqtable)
max_freq = max(freqtable.values[0], freq_other, freq_missing)
# TODO: Correctly sort missing and other
for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):
freq_rows_html += _format_row(freq, label, max_freq, row_template, n)
if freq_other > min_freq:
freq_rows_html += _format_row(freq_other,
"Other values (%s)" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,
extra_class='other')
if freq_missing > min_freq:
freq_rows_html += _format_row(freq_missing, "(Missing)", max_freq, row_template, n, extra_class='missing')
return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)
def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True):
# If it's mixed between base types (str, int) convert to str. Pure "mixed" types are filtered during type discovery
if "mixed" in freqtable.index.inferred_type:
freqtable.index = freqtable.index.astype(str)
sorted_freqTable = freqtable.sort_index()
if ascending:
obs_to_print = sorted_freqTable.iloc[:number_to_print]
else:
obs_to_print = sorted_freqTable.iloc[-number_to_print:]
freq_rows_html = ''
max_freq = max(obs_to_print.values)
for label, freq in six.iteritems(obs_to_print):
freq_rows_html += _format_row(freq, label, max_freq, row_template, n)
return table_template.render(rows=freq_rows_html)
# Variables
rows_html = u""
messages = []
render_htmls = {}
for idx, row in stats_object['variables'].iterrows():
formatted_values = {'varname': idx, 'varid': hash(idx)}
row_classes = {}
for col, value in six.iteritems(row):
formatted_values[col] = fmt(value, col)
for col in set(row.index) & six.viewkeys(row_formatters):
row_classes[col] = row_formatters[col](row[col])
if row_classes[col] == "alert" and col in templates.messages:
messages.append(templates.messages[col].format(formatted_values, varname = idx))
if row['type'] in {'CAT', 'BOOL'}:
formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs,
templates.template('mini_freq_table'),
templates.template('mini_freq_table_row'),
3,
templates.mini_freq_table_nb_col[row['type']])
if row['distinct_count'] > 50:
messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = idx))
row_classes['distinct_count'] = "alert"
else:
row_classes['distinct_count'] = ""
if row['type'] == 'UNIQUE':
obs = stats_object['freq'][idx].index
formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=["First 3 values"]).to_html(classes="example_values", index=False)
formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=["Last 3 values"]).to_html(classes="example_values", index=False)
if row['type'] == 'UNSUPPORTED':
formatted_values['varname'] = idx
messages.append(templates.messages[row['type']].format(formatted_values))
elif row['type'] in {'CORR', 'CONST', 'RECODED'}:
formatted_values['varname'] = idx
messages.append(templates.messages[row['type']].format(formatted_values))
else:
formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs,
templates.template('freq_table'), templates.template('freq_table_row'), 10)
formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True)
formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)
rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)
render_htmls['rows_html'] = rows_html
# Overview
formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}
row_classes={}
for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):
row_classes[col] = row_formatters[col](stats_object['table'][col])
if row_classes[col] == "alert" and col in templates.messages:
messages.append(templates.messages[col].format(formatted_values, varname = idx))
messages_html = u''
for msg in messages:
messages_html += templates.message_row.format(message=msg)
overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html)
render_htmls['overview_html'] = overview_html
# Add plot of matrix correlation if the dataframe is not empty
if len(stats_object['correlations']['pearson']) > 0:
pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')
spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')
correlations_html = templates.template('correlations').render(
values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})
render_htmls['correlations_html'] = correlations_html
# Add sample
sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes="sample"))
render_htmls['sample_html'] = sample_html
# TODO: should be done in the template
return templates.template('base').render(render_htmls) | Generate a HTML report from summary statistics and a given sample.
Parameters
----------
sample : DataFrame
the sample you want to print
stats_object : dict
Summary statistics. Should be generated with an appropriate describe() function
Returns
-------
str
containing profile report in HTML format
Notes
-----
* This function has to be refactored since it is huge and contains inner functions
"def template(template_name):\n \"\"\"Return a jinja template ready for rendering. If needed, global variables are initialized.\n\n Parameters\n ----------\n template_name: str, the name of the template as defined in the templates mapping\n\n Returns\n -------\n The Jinja template ready for rendering\n \"\"\"\n globals = None\n if template_name.startswith('row_'):\n # This is a row template setting global variable\n globals = dict()\n globals['vartype'] = var_type[template_name.split('_')[1].upper()]\n return jinja2_env.get_template(templates[template_name], globals=globals)\n",
"def correlation_matrix(corrdf, title, **kwargs):\n \"\"\"Plot image of a matrix correlation.\n Parameters\n ----------\n corrdf: DataFrame\n The matrix correlation to plot.\n title: str\n The matrix title\n Returns\n -------\n str, The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n fig_cor, axes_cor = plt.subplots(1, 1)\n labels = corrdf.columns\n matrix_image = axes_cor.imshow(corrdf, vmin=-1, vmax=1, interpolation=\"nearest\", cmap='bwr')\n plt.title(title, size=18)\n plt.colorbar(matrix_image)\n axes_cor.set_xticks(np.arange(0, corrdf.shape[0], corrdf.shape[0] * 1.0 / len(labels)))\n axes_cor.set_yticks(np.arange(0, corrdf.shape[1], corrdf.shape[1] * 1.0 / len(labels)))\n axes_cor.set_xticklabels(labels, rotation=90)\n axes_cor.set_yticklabels(labels)\n\n matrix_image.figure.savefig(imgdata, bbox_inches='tight')\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n plt.close(matrix_image.figure)\n return result_string\n",
"def fmt(value, name):\n if pd.isnull(value):\n return \"\"\n if name in value_formatters:\n return value_formatters[name](value)\n elif isinstance(value, float):\n return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)\n else:\n try:\n return unicode(value) # Python 2\n except NameError:\n return str(value) # Python 3\n",
"def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):\n\n freq_rows_html = u''\n\n if max_number_to_print > n:\n max_number_to_print=n\n\n if max_number_to_print < len(freqtable):\n freq_other = sum(freqtable.iloc[max_number_to_print:])\n min_freq = freqtable.values[max_number_to_print]\n else:\n freq_other = 0\n min_freq = 0\n\n freq_missing = n - sum(freqtable)\n max_freq = max(freqtable.values[0], freq_other, freq_missing)\n\n # TODO: Correctly sort missing and other\n\n for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):\n freq_rows_html += _format_row(freq, label, max_freq, row_template, n)\n\n if freq_other > min_freq:\n freq_rows_html += _format_row(freq_other,\n \"Other values (%s)\" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,\n extra_class='other')\n\n if freq_missing > min_freq:\n freq_rows_html += _format_row(freq_missing, \"(Missing)\", max_freq, row_template, n, extra_class='missing')\n\n return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)\n"
] | # -*- coding: utf-8 -*-
"""Generate reports"""
import sys
import six
import pandas as pd
import pandas_profiling.formatters as formatters
import pandas_profiling.templates as templates
import pandas_profiling.plot as plot
def to_html(sample, stats_object):
    """Generate a HTML report from summary statistics and a given sample.

    Parameters
    ----------
    sample : DataFrame
        the sample you want to print
    stats_object : dict
        Summary statistics. Should be generated with an appropriate describe() function

    Returns
    -------
    str
        containing profile report in HTML format

    Notes
    -----
    * This function has to be refactored since it's huge and it contains inner functions
    """

    n_obs = stats_object['table']['n']

    value_formatters = formatters.value_formatters
    row_formatters = formatters.row_formatters

    # Validate the inputs before doing any rendering work.
    if not isinstance(sample, pd.DataFrame):
        raise TypeError("sample must be of type pandas.DataFrame")

    if not isinstance(stats_object, dict):
        raise TypeError("stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?")

    if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())):
        raise TypeError(
            "stats_object badly formatted. Did you generate this using the pandas_profiling.describe() function?")

    def fmt(value, name):
        # Format one stat value: use the formatter registered under `name`,
        # else the default float formatter for floats, else plain text.
        if pd.isnull(value):
            return ""
        if name in value_formatters:
            return value_formatters[name](value)
        elif isinstance(value, float):
            return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)
        else:
            try:
                return unicode(value)  # Python 2
            except NameError:
                return str(value)  # Python 3

    def _format_row(freq, label, max_freq, row_template, n, extra_class=''):
        # Render one bar of a frequency table; the bar width is proportional
        # to freq/max_freq (range 1..100) and the count label is placed after
        # the bar when the bar is too narrow to contain it.
        if max_freq != 0:
            width = int(freq / max_freq * 99) + 1
        else:
            width = 1

        if width > 20:
            label_in_bar = freq
            label_after_bar = ""
        else:
            label_in_bar = " "
            label_after_bar = freq

        return row_template.render(label=label,
                                   width=width,
                                   count=freq,
                                   percentage='{:2.1f}'.format(freq / n * 100),
                                   extra_class=extra_class,
                                   label_in_bar=label_in_bar,
                                   label_after_bar=label_after_bar)

    def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):
        # Render a full frequency table: the top values individually, then
        # aggregated "Other values" and "(Missing)" rows when large enough.
        # NOTE(review): reads `idx` from the enclosing loop rather than taking
        # it as a parameter — implicit coupling to the variables loop below.
        freq_rows_html = u''

        if max_number_to_print > n:
            max_number_to_print=n

        if max_number_to_print < len(freqtable):
            freq_other = sum(freqtable.iloc[max_number_to_print:])
            min_freq = freqtable.values[max_number_to_print]
        else:
            freq_other = 0
            min_freq = 0

        freq_missing = n - sum(freqtable)
        max_freq = max(freqtable.values[0], freq_other, freq_missing)

        # TODO: Correctly sort missing and other

        for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):
            freq_rows_html += _format_row(freq, label, max_freq, row_template, n)

        if freq_other > min_freq:
            freq_rows_html += _format_row(freq_other,
                                          "Other values (%s)" % (freqtable.count() - max_number_to_print), max_freq, row_template, n,
                                          extra_class='other')

        if freq_missing > min_freq:
            freq_rows_html += _format_row(freq_missing, "(Missing)", max_freq, row_template, n, extra_class='missing')

        return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)

    def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending = True):
        # Render the `number_to_print` smallest (ascending=True) or largest
        # observed values as a small frequency table.
        # If it's mixed between base types (str, int) convert to str. Pure "mixed" types are filtered during type discovery
        if "mixed" in freqtable.index.inferred_type:
            freqtable.index = freqtable.index.astype(str)

        sorted_freqTable = freqtable.sort_index()

        if ascending:
            obs_to_print = sorted_freqTable.iloc[:number_to_print]
        else:
            obs_to_print = sorted_freqTable.iloc[-number_to_print:]

        freq_rows_html = ''
        max_freq = max(obs_to_print.values)

        for label, freq in six.iteritems(obs_to_print):
            freq_rows_html += _format_row(freq, label, max_freq, row_template, n)

        return table_template.render(rows=freq_rows_html)

    # Variables: render one row (template chosen by type) per described column,
    # collecting alert messages along the way.
    rows_html = u""
    messages = []
    render_htmls = {}

    for idx, row in stats_object['variables'].iterrows():

        formatted_values = {'varname': idx, 'varid': hash(idx)}
        row_classes = {}

        for col, value in six.iteritems(row):
            formatted_values[col] = fmt(value, col)

        # Apply CSS-class formatters; an "alert" class also queues a message.
        for col in set(row.index) & six.viewkeys(row_formatters):
            row_classes[col] = row_formatters[col](row[col])
            if row_classes[col] == "alert" and col in templates.messages:
                messages.append(templates.messages[col].format(formatted_values, varname = idx))

        if row['type'] in {'CAT', 'BOOL'}:
            formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs,
                                                           templates.template('mini_freq_table'),
                                                           templates.template('mini_freq_table_row'),
                                                           3,
                                                           templates.mini_freq_table_nb_col[row['type']])

            if row['distinct_count'] > 50:
                messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname = idx))
                row_classes['distinct_count'] = "alert"
            else:
                row_classes['distinct_count'] = ""

        if row['type'] == 'UNIQUE':
            obs = stats_object['freq'][idx].index

            formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=["First 3 values"]).to_html(classes="example_values", index=False)
            formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=["Last 3 values"]).to_html(classes="example_values", index=False)

        if row['type'] == 'UNSUPPORTED':
            formatted_values['varname'] = idx
            messages.append(templates.messages[row['type']].format(formatted_values))
        elif row['type'] in {'CORR', 'CONST', 'RECODED'}:
            formatted_values['varname'] = idx
            messages.append(templates.messages[row['type']].format(formatted_values))
        else:
            formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs,
                                                       templates.template('freq_table'), templates.template('freq_table_row'), 10)
            formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = True)
            formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending = False)

        rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)

    render_htmls['rows_html'] = rows_html

    # Overview
    formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}

    row_classes={}
    for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):
        row_classes[col] = row_formatters[col](stats_object['table'][col])
        if row_classes[col] == "alert" and col in templates.messages:
            # NOTE(review): `idx` here is left over from the variables loop
            # above — the message varname refers to the last variable rendered.
            messages.append(templates.messages[col].format(formatted_values, varname = idx))

    messages_html = u''
    for msg in messages:
        messages_html += templates.message_row.format(message=msg)

    overview_html = templates.template('overview').render(values=formatted_values, row_classes = row_classes, messages=messages_html)
    render_htmls['overview_html'] = overview_html

    # Add plot of matrix correlation if the dataframe is not empty
    if len(stats_object['correlations']['pearson']) > 0:
        pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')
        spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')
        correlations_html = templates.template('correlations').render(
            values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})
        render_htmls['correlations_html'] = correlations_html

    # Add sample
    sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes="sample"))
    render_htmls['sample_html'] = sample_html
    # TODO: should be done in the template
    return templates.template('base').render(render_htmls)
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_numeric_1d | python | def describe_numeric_1d(series, **kwargs):
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name) | Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L15-L56 | [
"def histogram(series, **kwargs):\n \"\"\"Plot an histogram of the data.\n\n Parameters\n ----------\n series: Series\n The data to plot.\n\n Returns\n -------\n str\n The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, **kwargs)\n plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0)\n plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n # TODO Think about writing this to disk instead of caching them in strings\n plt.close(plot.figure)\n return result_string\n",
"def mini_histogram(series, **kwargs):\n \"\"\"Plot a small (mini) histogram of the data.\n\n Parameters\n ----------\n series: Series\n The data to plot.\n\n Returns\n -------\n str\n The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)\n plot.axes.get_yaxis().set_visible(False)\n\n if LooseVersion(matplotlib.__version__) <= '1.5.9':\n plot.set_axis_bgcolor(\"w\")\n else:\n plot.set_facecolor(\"w\")\n\n xticks = plot.xaxis.get_major_ticks()\n for tick in xticks[1:-1]:\n tick.set_visible(False)\n tick.label.set_visible(False)\n for tick in (xticks[0], xticks[-1]):\n tick.label.set_fontsize(8)\n plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)\n plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n plt.close(plot.figure)\n return result_string\n"
] | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_date_1d(series):
    """Summarize a date (`TYPE_DATE`) variable (a Series).

    Builds min/max/range statistics plus full and mini histograms of the
    distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by stat name.
    """
    minimum = series.min()
    maximum = series.max()
    stats = {
        'type': base.TYPE_DATE,
        'min': minimum,
        'max': maximum,
        'range': maximum - minimum,
        # Rendered distribution plots (full-size and thumbnail)
        'histogram': histogram(series),
        'mini_histogram': mini_histogram(series),
    }
    return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
    """Summarize a categorical (`TYPE_CAT`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe. Must contain at least one non-missing value.

    Returns
    -------
    Series
        The description of the variable, indexed by stat name (empty if the
        variable is not detected as categorical).
    """
    value_counts, distinct_count = base.get_groupby_statistic(series)
    most_common = value_counts.index[0]
    most_common_count = value_counts.iloc[0]

    names, values = [], []
    if base.get_vartype(series) == base.TYPE_CAT:
        names.extend(['top', 'freq', 'type'])
        values.extend([most_common, most_common_count, base.TYPE_CAT])
    return pd.Series(values, index=names, name=series.name)
def describe_boolean_1d(series):
    """Summarize a boolean (`TYPE_BOOL`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by stat name.
    """
    value_counts, distinct_count = base.get_groupby_statistic(series)
    most_common = value_counts.index[0]
    most_common_count = value_counts.iloc[0]
    # For booleans the mean (fraction of True values) is an interesting stat.
    fraction_true = series.mean()

    index = ['top', 'freq', 'type', 'mean']
    data = [most_common, most_common_count, base.TYPE_BOOL, fraction_true]
    return pd.Series(data, index=index, name=series.name)
def describe_constant_1d(series):
    """Summarize a constant (`S_TYPE_CONST`) variable (a Series).

    A constant column carries no information beyond its type flag.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        A one-entry description holding only the type flag.
    """
    return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
    """Summarize a unique (`S_TYPE_UNIQUE`) variable (a Series).

    A column of all-distinct values only needs its type flag reported.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        A one-entry description holding only the type flag.
    """
    return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
    """Compute summary statistics common to every supported variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys
        (count, missing/infinite ratios, distinct count, mode, memory size, ...).
    """
    leng = len(series)  # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series

    # NOTE(review): this expression is identically zero, so n_infinite and
    # p_infinite are always reported as 0 here. By the time this runs,
    # describe_1d() has already replaced infinities with NaN, so infinite
    # values show up as missing instead. Kept for output-schema compatibility.
    n_infinite = count - series.count()  # number of infinite observations in the Series

    value_counts, distinct_count = base.get_groupby_statistic(series)
    if count > distinct_count > 1:
        mode = series.mode().iloc[0]
    else:
        # BUG FIX: use positional access. `series[0]` is a label-based lookup
        # and raises KeyError for series whose index does not contain label 0
        # (e.g. after filtering or with a non-default index).
        mode = series.iloc[0]

    results_data = {'count': count,
                    'distinct_count': distinct_count,
                    'p_missing': 1 - count * 1.0 / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite * 1.0 / leng,
                    'n_infinite': n_infinite,
                    'is_unique': distinct_count == leng,
                    'mode': mode,
                    'p_unique': distinct_count * 1.0 / leng}
    try:
        # pandas 0.17 onwards
        results_data['memorysize'] = series.memory_usage()
    except Exception:
        # Older pandas has no Series.memory_usage(); report 0 as best effort.
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
    """Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).

    Only counts and missing/infinite ratios are reported, plus the type flag.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    leng = len(series)  # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # NOTE(review): `count - series.count()` is identically zero, so the
    # n_infinite / p_infinite stats below are always 0. describe_1d() replaces
    # infinities with NaN before calling this, so infinite values are counted
    # as missing instead — confirm whether this is the intended reporting.
    n_infinite = count - series.count()  # number of infinte observations in the Series

    results_data = {'count': count,
                    'p_missing': 1 - count * 1.0 / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite * 1.0 / leng,
                    'n_infinite': n_infinite,
                    'type': base.S_TYPE_UNSUPPORTED}

    try:
        # pandas 0.17 onwards
        results_data['memorysize'] = series.memory_usage()
    except:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
    """Summarize a single variable (a Series), dispatching on its detected type.

    A common block of stats is computed for every supported variable, then
    type-specific stats are appended.

    Parameters
    ----------
    data : Series
        The variable to describe. Modified in place: infinities are replaced
        by NaN.

    Returns
    -------
    Series
        The description of the variable, indexed by stat name.
    """
    # Replace infinite values with NaNs to avoid issues with histograms later.
    data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)

    result = pd.Series({}, name=data.name)
    vartype = base.get_vartype(data)

    if vartype == base.S_TYPE_UNSUPPORTED:
        # Unsupported variables only get the minimal stats block.
        return result.append(describe_unsupported(data))

    result = result.append(describe_supported(data))

    if vartype == base.S_TYPE_CONST:
        extra = describe_constant_1d(data)
    elif vartype == base.TYPE_BOOL:
        extra = describe_boolean_1d(data)
    elif vartype == base.TYPE_NUM:
        extra = describe_numeric_1d(data, **kwargs)
    elif vartype == base.TYPE_DATE:
        extra = describe_date_1d(data)
    elif vartype == base.S_TYPE_UNIQUE:
        extra = describe_unique_1d(data)
    else:
        # Anything left is treated as categorical (TYPE_CAT).
        extra = describe_categorical_1d(data)
    return result.append(extra)
def multiprocess_func(x, **kwargs):
    """Worker-pool helper: describe one (column_name, series) pair.

    Parameters
    ----------
    x : tuple
        Pair whose first element is the column name and second the Series.

    Returns
    -------
    tuple
        (column_name, description Series) as produced by describe_1d().
    """
    column_name = x[0]
    column_data = x[1]
    return column_name, describe_1d(column_data, **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
    """Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.

    Used as is, it will output its content as an HTML report in a Jupyter notebook.

    Parameters
    ----------
    df : DataFrame
        Data to be analyzed
    bins : int
        Number of bins in histogram.
        The default is 10.
    check_correlation : boolean
        Whether or not to check correlation.
        It's `True` by default.
    correlation_threshold: float
        Threshold to determine if the variable pair is correlated.
        The default is 0.9.
    correlation_overrides : list
        Variable names not to be rejected because they are correlated.
        There is no variable in the list (`None`) by default.
    check_recoded : boolean
        Whether or not to check recoded correlation (memory heavy feature).
        Since it's an expensive computation it can be activated for small datasets.
        `check_correlation` must also be true for this check to run.
        It's `False` by default.
    pool_size : int
        Number of workers in thread pool
        The default is equal to the number of CPU.

    Returns
    -------
    dict
        Containing the following keys:
            * table: general statistics on the dataset
            * variables: summary statistics for each variable
            * freq: frequency table

    Notes:
    ------
        * The section dedicated to check the correlation should be externalized
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError("df must be of type pandas.DataFrame")
    if df.empty:
        raise ValueError("df can not be empty")

    try:
        # reset matplotlib style before use
        # Fails in matplotlib 1.4.x so plot might look bad
        matplotlib.style.use("default")
    except:
        pass

    try:
        # Ignore FutureWarning
        from pandas.plotting import register_matplotlib_converters
        register_matplotlib_converters()
    except:
        pass

    matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))

    # Clearing the cache before computing stats
    base.clear_cache()

    if not pd.Index(np.arange(0, len(df))).equals(df.index):
        # Treat index as any other column
        df = df.reset_index()

    kwargs.update({'bins': bins})
    # Describe all variables in a univariate way
    if pool_size == 1:
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
    else:
        pool = multiprocessing.Pool(pool_size)
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        # pool.map blocks until all results are in, so close() afterwards is safe.
        ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
        pool.close()

    # Get correlations
    dfcorrPear = df.corr(method="pearson")
    dfcorrSpear = df.corr(method="spearman")

    # Check correlations between variable
    if check_correlation is True:
        ''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
        If x~y and y~z but not x~z, it would be better to delete only y
        Better way would be to find out which variable causes the highest increase in multicollinearity.
        '''
        corr = dfcorrPear.copy()
        for x, corr_x in corr.iterrows():
            if correlation_overrides and x in correlation_overrides:
                continue

            # NOTE(review): the loop variable below shadows the `corr` DataFrame
            # with a scalar. The outer iterator was created before the rebinding,
            # so iteration still works, but the naming is fragile.
            # `break` on the diagonal restricts the scan to the lower triangle.
            for y, corr in corr_x.iteritems():
                if x == y: break

                if corr > correlation_threshold:
                    ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])

        if check_recoded:
            categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
            for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
                if correlation_overrides and name1 in correlation_overrides:
                    continue

                # Two categorical columns are "recoded" copies of each other when
                # their crosstab diagonal accounts for every row.
                confusion_matrix=pd.crosstab(data1,data2)
                if confusion_matrix.values.diagonal().sum() == len(df):
                    ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])

    # Convert ldesc to a DataFrame
    names = []
    # Collect the union of stat-index names, shortest description first, so the
    # resulting row order is stable.
    ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
    for idxnames in ldesc_indexes:
        for name in idxnames:
            if name not in names:
                names.append(name)
    # NOTE(review): `join_axes` was removed in pandas 0.25 — confirm the pinned
    # pandas version before upgrading.
    variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
    variable_stats.columns.names = df.columns.names

    # General statistics
    table_stats = {}
    table_stats['n'] = len(df)
    table_stats['nvar'] = len(df.columns)
    table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
    # NOTE(review): despite the name, this list holds the columns whose type is
    # NOT unsupported (the `!=` filter keeps supported columns only).
    unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
    table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0

    memsize = df.memory_usage(index=True).sum()
    table_stats['memsize'] = formatters.fmt_bytesize(memsize)
    table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])

    # Zero-initialise every type counter, then overwrite with actual counts.
    table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
    table_stats.update(dict(variable_stats.loc['type'].value_counts()))
    table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']

    return {
        'table': table_stats,
        'variables': variable_stats.T,
        'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
        'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
    }
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_date_1d | python | def describe_date_1d(series):
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name) | Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L59-L82 | [
"def histogram(series, **kwargs):\n \"\"\"Plot an histogram of the data.\n\n Parameters\n ----------\n series: Series\n The data to plot.\n\n Returns\n -------\n str\n The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, **kwargs)\n plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0)\n plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n # TODO Think about writing this to disk instead of caching them in strings\n plt.close(plot.figure)\n return result_string\n",
"def mini_histogram(series, **kwargs):\n \"\"\"Plot a small (mini) histogram of the data.\n\n Parameters\n ----------\n series: Series\n The data to plot.\n\n Returns\n -------\n str\n The resulting image encoded as a string.\n \"\"\"\n imgdata = BytesIO()\n plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)\n plot.axes.get_yaxis().set_visible(False)\n\n if LooseVersion(matplotlib.__version__) <= '1.5.9':\n plot.set_axis_bgcolor(\"w\")\n else:\n plot.set_facecolor(\"w\")\n\n xticks = plot.xaxis.get_major_ticks()\n for tick in xticks[1:-1]:\n tick.set_visible(False)\n tick.label.set_visible(False)\n for tick in (xticks[0], xticks[-1]):\n tick.label.set_fontsize(8)\n plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)\n plot.figure.savefig(imgdata)\n imgdata.seek(0)\n result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))\n plt.close(plot.figure)\n return result_string\n"
] | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
    """Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).

    Also create histograms (mini and full) of its distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    stats = {
        'type': base.TYPE_NUM,
        'mean': series.mean(),
        'std': series.std(),
        'variance': series.var(),
        'min': series.min(),
        'max': series.max(),
    }
    stats['range'] = stats['max'] - stats['min']

    # Quantiles are taken on NaN-free data, computed once. The dropna() is a
    # workaround for https://github.com/pydata/pandas/issues/13098
    clean = series.dropna()
    for quantile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
        stats["{:.0%}".format(quantile)] = clean.quantile(quantile)
    stats['iqr'] = stats['75%'] - stats['25%']

    stats['kurtosis'] = series.kurt()
    stats['skewness'] = series.skew()
    stats['sum'] = series.sum()
    stats['mad'] = series.mad()
    stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
    stats['n_zeros'] = len(series) - np.count_nonzero(series)
    stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)

    # Histograms
    stats['histogram'] = histogram(series, **kwargs)
    stats['mini_histogram'] = mini_histogram(series, **kwargs)
    return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
    """Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # Only run if at least 1 non-missing value
    value_counts, distinct_count = base.get_groupby_statistic(series)
    top = value_counts.index[0]
    freq = value_counts.iloc[0]

    names = []
    values = []
    if base.get_vartype(series) == base.TYPE_CAT:
        names.extend(['top', 'freq', 'type'])
        values.extend([top, freq, base.TYPE_CAT])

    return pd.Series(values, index=names, name=series.name)
def describe_boolean_1d(series):
    """Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    value_counts, distinct_count = base.get_groupby_statistic(series)
    top = value_counts.index[0]
    freq = value_counts.iloc[0]

    # The mean of a boolean series is an interesting statistic in itself.
    names = ['top', 'freq', 'type', 'mean']
    values = [top, freq, base.TYPE_BOOL, series.mean()]
    return pd.Series(values, index=names, name=series.name)
def describe_constant_1d(series):
    """Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # A constant variable carries no information beyond its special type tag.
    return pd.Series({'type': base.S_TYPE_CONST}, name=series.name)
def describe_unique_1d(series):
    """Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # An all-distinct variable only gets its special type tag.
    return pd.Series({'type': base.S_TYPE_UNIQUE}, name=series.name)
def describe_supported(series, **kwargs):
    """Compute summary statistics of a supported variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    leng = len(series)  # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # NOTE(review): this is always 0 — it subtracts `series.count()` from itself.
    # Infinite values are replaced by NaN upstream in `describe_1d`, so the
    # reported infinity stats are effectively constant.
    n_infinite = count - series.count()  # number of infinite observations in the Series

    value_counts, distinct_count = base.get_groupby_statistic(series)
    if count > distinct_count > 1:
        mode = series.mode().iloc[0]
    else:
        # NOTE(review): label-based lookup — assumes label 0 exists in the index
        # (true after `describe()` resets the index); confirm for other callers.
        mode = series[0]

    results_data = {'count': count,
                    'distinct_count': distinct_count,
                    'p_missing': 1 - count * 1.0 / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite * 1.0 / leng,
                    'n_infinite': n_infinite,
                    'is_unique': distinct_count == leng,
                    'mode': mode,
                    'p_unique': distinct_count * 1.0 / leng}
    try:
        # pandas 0.17 onwards
        results_data['memorysize'] = series.memory_usage()
    except:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
    """Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    leng = len(series)  # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # NOTE(review): always 0 — subtracts `series.count()` from itself; see the
    # same pattern in `describe_supported`.
    n_infinite = count - series.count()  # number of infinite observations in the Series

    results_data = {'count': count,
                    'p_missing': 1 - count * 1.0 / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite * 1.0 / leng,
                    'n_infinite': n_infinite,
                    'type': base.S_TYPE_UNSUPPORTED}
    try:
        # pandas 0.17 onwards
        results_data['memorysize'] = series.memory_usage()
    except:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
    """Compute summary statistics of a variable (a Series).

    A set of common statistics is always produced; additional statistics are
    appended depending on the inferred type of the variable.

    Parameters
    ----------
    data : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # Replace infinite values with NaNs to avoid issues with histograms later.
    data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)

    pieces = [pd.Series({}, name=data.name)]
    vartype = base.get_vartype(data)

    if vartype == base.S_TYPE_UNSUPPORTED:
        pieces.append(describe_unsupported(data))
    else:
        pieces.append(describe_supported(data))
        if vartype == base.S_TYPE_CONST:
            pieces.append(describe_constant_1d(data))
        elif vartype == base.TYPE_BOOL:
            pieces.append(describe_boolean_1d(data))
        elif vartype == base.TYPE_NUM:
            pieces.append(describe_numeric_1d(data, **kwargs))
        elif vartype == base.TYPE_DATE:
            pieces.append(describe_date_1d(data))
        elif vartype == base.S_TYPE_UNIQUE:
            pieces.append(describe_unique_1d(data))
        else:
            # Fallback: TYPE_CAT
            pieces.append(describe_categorical_1d(data))

    result = pieces[0]
    for piece in pieces[1:]:
        result = result.append(piece)
    return result
def multiprocess_func(x, **kwargs):
    """Describe one (column name, series) pair; helper for the worker pool."""
    name, series = x
    return name, describe_1d(series, **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
    """Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.

    Used as is, it will output its content as an HTML report in a Jupyter notebook.

    Parameters
    ----------
    df : DataFrame
        Data to be analyzed
    bins : int
        Number of bins in histogram.
        The default is 10.
    check_correlation : boolean
        Whether or not to check correlation.
        It's `True` by default.
    correlation_threshold: float
        Threshold to determine if the variable pair is correlated.
        The default is 0.9.
    correlation_overrides : list
        Variable names not to be rejected because they are correlated.
        There is no variable in the list (`None`) by default.
    check_recoded : boolean
        Whether or not to check recoded correlation (memory heavy feature).
        Since it's an expensive computation it can be activated for small datasets.
        `check_correlation` must also be true for this check to run.
        It's `False` by default.
    pool_size : int
        Number of workers in thread pool
        The default is equal to the number of CPU.

    Returns
    -------
    dict
        Containing the following keys:
            * table: general statistics on the dataset
            * variables: summary statistics for each variable
            * freq: frequency table

    Notes:
    ------
        * The section dedicated to check the correlation should be externalized
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError("df must be of type pandas.DataFrame")
    if df.empty:
        raise ValueError("df can not be empty")

    try:
        # reset matplotlib style before use
        # Fails in matplotlib 1.4.x so plot might look bad
        matplotlib.style.use("default")
    except:
        pass

    try:
        # Ignore FutureWarning
        from pandas.plotting import register_matplotlib_converters
        register_matplotlib_converters()
    except:
        pass

    matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))

    # Clearing the cache before computing stats
    base.clear_cache()

    if not pd.Index(np.arange(0, len(df))).equals(df.index):
        # Treat index as any other column
        df = df.reset_index()

    kwargs.update({'bins': bins})
    # Describe all variables in a univariate way
    if pool_size == 1:
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
    else:
        pool = multiprocessing.Pool(pool_size)
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        # pool.map blocks until all results are in, so close() afterwards is safe.
        ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
        pool.close()

    # Get correlations
    dfcorrPear = df.corr(method="pearson")
    dfcorrSpear = df.corr(method="spearman")

    # Check correlations between variable
    if check_correlation is True:
        ''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
        If x~y and y~z but not x~z, it would be better to delete only y
        Better way would be to find out which variable causes the highest increase in multicollinearity.
        '''
        corr = dfcorrPear.copy()
        for x, corr_x in corr.iterrows():
            if correlation_overrides and x in correlation_overrides:
                continue

            # NOTE(review): the loop variable below shadows the `corr` DataFrame
            # with a scalar. The outer iterator was created before the rebinding,
            # so iteration still works, but the naming is fragile.
            # `break` on the diagonal restricts the scan to the lower triangle.
            for y, corr in corr_x.iteritems():
                if x == y: break

                if corr > correlation_threshold:
                    ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])

        if check_recoded:
            categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
            for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
                if correlation_overrides and name1 in correlation_overrides:
                    continue

                # Two categorical columns are "recoded" copies of each other when
                # their crosstab diagonal accounts for every row.
                confusion_matrix=pd.crosstab(data1,data2)
                if confusion_matrix.values.diagonal().sum() == len(df):
                    ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])

    # Convert ldesc to a DataFrame
    names = []
    # Collect the union of stat-index names, shortest description first, so the
    # resulting row order is stable.
    ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
    for idxnames in ldesc_indexes:
        for name in idxnames:
            if name not in names:
                names.append(name)
    # NOTE(review): `join_axes` was removed in pandas 0.25 — confirm the pinned
    # pandas version before upgrading.
    variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
    variable_stats.columns.names = df.columns.names

    # General statistics
    table_stats = {}
    table_stats['n'] = len(df)
    table_stats['nvar'] = len(df.columns)
    table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
    # NOTE(review): despite the name, this list holds the columns whose type is
    # NOT unsupported (the `!=` filter keeps supported columns only).
    unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
    table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0

    memsize = df.memory_usage(index=True).sum()
    table_stats['memsize'] = formatters.fmt_bytesize(memsize)
    table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])

    # Zero-initialise every type counter, then overwrite with actual counts.
    table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
    table_stats.update(dict(variable_stats.loc['type'].value_counts()))
    table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']

    return {
        'table': table_stats,
        'variables': variable_stats.T,
        'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
        'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
    }
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_categorical_1d | python | def describe_categorical_1d(series):
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name) | Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L84-L107 | [
"def get_groupby_statistic(data):\n \"\"\"Calculate value counts and distinct count of a variable (technically a Series).\n\n The result is cached by column name in a global variable to avoid recomputing.\n\n Parameters\n ----------\n data : Series\n The data type of the Series.\n\n Returns\n -------\n list\n value count and distinct count\n \"\"\"\n if data.name is not None and data.name in _VALUE_COUNTS_MEMO:\n return _VALUE_COUNTS_MEMO[data.name]\n\n value_counts_with_nan = data.value_counts(dropna=False)\n value_counts_without_nan = value_counts_with_nan.reset_index().dropna().set_index('index').iloc[:,0]\n distinct_count_with_nan = value_counts_with_nan.count()\n\n # When the inferred type of the index is just \"mixed\" probably the types within the series are tuple, dict, list and so on...\n if value_counts_without_nan.index.inferred_type == \"mixed\":\n raise TypeError('Not supported mixed type')\n\n result = [value_counts_without_nan, distinct_count_with_nan]\n\n if data.name is not None:\n _VALUE_COUNTS_MEMO[data.name] = result\n\n return result\n",
"def get_vartype(data):\n \"\"\"Infer the type of a variable (technically a Series).\n\n The types supported are split in standard types and special types.\n\n Standard types:\n * Categorical (`TYPE_CAT`): the default type if no other one can be determined\n * Numerical (`TYPE_NUM`): if it contains numbers\n * Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo\n * Date (`TYPE_DATE`): if it contains datetime\n\n Special types:\n * Constant (`S_TYPE_CONST`): if all values in the variable are equal\n * Unique (`S_TYPE_UNIQUE`): if all values in the variable are different\n * Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported\n\n The result is cached by column name in a global variable to avoid recomputing.\n\n Parameters\n ----------\n data : Series\n The data type of the Series.\n\n Returns\n -------\n str\n The data type of the Series.\n\n Notes\n ----\n * Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field\n or just a boolean with NaN values\n * #72: Numeric with low Distinct count should be treated as \"Categorical\"\n \"\"\"\n if data.name is not None and data.name in _MEMO:\n return _MEMO[data.name]\n\n vartype = None\n try:\n distinct_count = get_groupby_statistic(data)[1]\n leng = len(data)\n\n if distinct_count <= 1:\n vartype = S_TYPE_CONST\n elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):\n vartype = TYPE_BOOL\n elif pd.api.types.is_numeric_dtype(data):\n vartype = TYPE_NUM\n elif pd.api.types.is_datetime64_dtype(data):\n vartype = TYPE_DATE\n elif distinct_count == leng:\n vartype = S_TYPE_UNIQUE\n else:\n vartype = TYPE_CAT\n except:\n vartype = S_TYPE_UNSUPPORTED\n\n if data.name is not None:\n _MEMO[data.name] = vartype\n\n return vartype\n"
] | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
    """Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).

    Also create histograms (mini and full) of its distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    stats = {
        'type': base.TYPE_NUM,
        'mean': series.mean(),
        'std': series.std(),
        'variance': series.var(),
        'min': series.min(),
        'max': series.max(),
    }
    stats['range'] = stats['max'] - stats['min']

    # Quantiles are taken on NaN-free data, computed once. The dropna() is a
    # workaround for https://github.com/pydata/pandas/issues/13098
    clean = series.dropna()
    for quantile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
        stats["{:.0%}".format(quantile)] = clean.quantile(quantile)
    stats['iqr'] = stats['75%'] - stats['25%']

    stats['kurtosis'] = series.kurt()
    stats['skewness'] = series.skew()
    stats['sum'] = series.sum()
    stats['mad'] = series.mad()
    stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
    stats['n_zeros'] = len(series) - np.count_nonzero(series)
    stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)

    # Histograms
    stats['histogram'] = histogram(series, **kwargs)
    stats['mini_histogram'] = mini_histogram(series, **kwargs)
    return pd.Series(stats, name=series.name)
def describe_date_1d(series):
    """Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).

    Also create histograms (mini and full) of its distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    minimum = series.min()
    maximum = series.max()
    stats = {
        'type': base.TYPE_DATE,
        'min': minimum,
        'max': maximum,
        'range': maximum - minimum,
        # Histograms
        'histogram': histogram(series),
        'mini_histogram': mini_histogram(series),
    }
    return pd.Series(stats, name=series.name)
def describe_boolean_1d(series):
    """Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    value_counts, distinct_count = base.get_groupby_statistic(series)
    top = value_counts.index[0]
    freq = value_counts.iloc[0]

    # The mean of a boolean series is an interesting statistic in itself.
    names = ['top', 'freq', 'type', 'mean']
    values = [top, freq, base.TYPE_BOOL, series.mean()]
    return pd.Series(values, index=names, name=series.name)
def describe_constant_1d(series):
    """Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # A constant variable carries no information beyond its special type tag.
    return pd.Series({'type': base.S_TYPE_CONST}, name=series.name)
def describe_unique_1d(series):
    """Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # An all-distinct variable only gets its special type tag.
    return pd.Series({'type': base.S_TYPE_UNIQUE}, name=series.name)
def describe_supported(series, **kwargs):
    """Compute summary statistics of a supported variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    leng = len(series)  # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # NOTE(review): this is always 0 — it subtracts `series.count()` from itself.
    # Infinite values are replaced by NaN upstream in `describe_1d`, so the
    # reported infinity stats are effectively constant.
    n_infinite = count - series.count()  # number of infinite observations in the Series

    value_counts, distinct_count = base.get_groupby_statistic(series)
    if count > distinct_count > 1:
        mode = series.mode().iloc[0]
    else:
        # NOTE(review): label-based lookup — assumes label 0 exists in the index
        # (true after `describe()` resets the index); confirm for other callers.
        mode = series[0]

    results_data = {'count': count,
                    'distinct_count': distinct_count,
                    'p_missing': 1 - count * 1.0 / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite * 1.0 / leng,
                    'n_infinite': n_infinite,
                    'is_unique': distinct_count == leng,
                    'mode': mode,
                    'p_unique': distinct_count * 1.0 / leng}
    try:
        # pandas 0.17 onwards
        results_data['memorysize'] = series.memory_usage()
    except:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
    """Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    leng = len(series)  # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # NOTE(review): always 0 — subtracts `series.count()` from itself; see the
    # same pattern in `describe_supported`.
    n_infinite = count - series.count()  # number of infinite observations in the Series

    results_data = {'count': count,
                    'p_missing': 1 - count * 1.0 / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite * 1.0 / leng,
                    'n_infinite': n_infinite,
                    'type': base.S_TYPE_UNSUPPORTED}
    try:
        # pandas 0.17 onwards
        results_data['memorysize'] = series.memory_usage()
    except:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
    """Compute summary statistics of a variable (a Series).

    A set of common statistics is always produced; additional statistics are
    appended depending on the inferred type of the variable.

    Parameters
    ----------
    data : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # Replace infinite values with NaNs to avoid issues with histograms later.
    data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)

    pieces = [pd.Series({}, name=data.name)]
    vartype = base.get_vartype(data)

    if vartype == base.S_TYPE_UNSUPPORTED:
        pieces.append(describe_unsupported(data))
    else:
        pieces.append(describe_supported(data))
        if vartype == base.S_TYPE_CONST:
            pieces.append(describe_constant_1d(data))
        elif vartype == base.TYPE_BOOL:
            pieces.append(describe_boolean_1d(data))
        elif vartype == base.TYPE_NUM:
            pieces.append(describe_numeric_1d(data, **kwargs))
        elif vartype == base.TYPE_DATE:
            pieces.append(describe_date_1d(data))
        elif vartype == base.S_TYPE_UNIQUE:
            pieces.append(describe_unique_1d(data))
        else:
            # Fallback: TYPE_CAT
            pieces.append(describe_categorical_1d(data))

    result = pieces[0]
    for piece in pieces[1:]:
        result = result.append(piece)
    return result
def multiprocess_func(x, **kwargs):
    """Describe one (column name, series) pair; helper for the worker pool."""
    name, series = x
    return name, describe_1d(series, **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
try:
# Ignore FutureWarning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
}
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_boolean_1d | python | def describe_boolean_1d(series):
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name) | Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L109-L131 | [
"def get_groupby_statistic(data):\n \"\"\"Calculate value counts and distinct count of a variable (technically a Series).\n\n The result is cached by column name in a global variable to avoid recomputing.\n\n Parameters\n ----------\n data : Series\n The data type of the Series.\n\n Returns\n -------\n list\n value count and distinct count\n \"\"\"\n if data.name is not None and data.name in _VALUE_COUNTS_MEMO:\n return _VALUE_COUNTS_MEMO[data.name]\n\n value_counts_with_nan = data.value_counts(dropna=False)\n value_counts_without_nan = value_counts_with_nan.reset_index().dropna().set_index('index').iloc[:,0]\n distinct_count_with_nan = value_counts_with_nan.count()\n\n # When the inferred type of the index is just \"mixed\" probably the types within the series are tuple, dict, list and so on...\n if value_counts_without_nan.index.inferred_type == \"mixed\":\n raise TypeError('Not supported mixed type')\n\n result = [value_counts_without_nan, distinct_count_with_nan]\n\n if data.name is not None:\n _VALUE_COUNTS_MEMO[data.name] = result\n\n return result\n"
] | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
    """Describe a numerical (`TYPE_NUM`) variable (a Series).

    Builds the usual location/dispersion statistics plus a full and a
    mini histogram of the distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by statistic name.
    """
    stats = {'type': base.TYPE_NUM}

    stats['mean'] = series.mean()
    stats['std'] = series.std()
    stats['variance'] = series.var()
    stats['min'] = series.min()
    stats['max'] = series.max()
    stats['range'] = stats['max'] - stats['min']

    # Quantiles are computed once on the NaN-free data; the dropna() is a
    # workaround for https://github.com/pydata/pandas/issues/13098.
    non_missing = series.dropna()
    for q in (0.05, 0.25, 0.5, 0.75, 0.95):
        # Keys are rendered as percentages, e.g. 0.25 -> "25%".
        stats["{:.0%}".format(q)] = non_missing.quantile(q)
    stats['iqr'] = stats['75%'] - stats['25%']

    stats['kurtosis'] = series.kurt()
    stats['skewness'] = series.skew()
    stats['sum'] = series.sum()
    stats['mad'] = series.mad()
    # Coefficient of variation; undefined (NaN) for a zero mean.
    stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN

    n_zeros = len(series) - np.count_nonzero(series)
    stats['n_zeros'] = n_zeros
    stats['p_zeros'] = n_zeros * 1.0 / len(series)

    # Distribution plots (full size and sparkline).
    stats['histogram'] = histogram(series, **kwargs)
    stats['mini_histogram'] = mini_histogram(series, **kwargs)

    return pd.Series(stats, name=series.name)
def describe_date_1d(series):
    """Describe a date (`TYPE_DATE`) variable (a Series).

    Reports the observed time span and draws a full and a mini
    histogram of the distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by statistic name.
    """
    minimum = series.min()
    maximum = series.max()
    stats = {
        'type': base.TYPE_DATE,
        'min': minimum,
        'max': maximum,
        # Total observed time span of the variable.
        'range': maximum - minimum,
        # Distribution plots (full size and sparkline).
        'histogram': histogram(series),
        'mini_histogram': mini_histogram(series),
    }
    return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
    """Describe a categorical (`TYPE_CAT`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe; must have at least one non-missing value.

    Returns
    -------
    Series
        The description of the variable, indexed by statistic name.
        Empty when the inferred type is not `TYPE_CAT`.
    """
    value_counts, _ = base.get_groupby_statistic(series)
    # Most frequent value and its count.
    top = value_counts.index[0]
    freq = value_counts.iloc[0]

    stats = {}
    if base.get_vartype(series) == base.TYPE_CAT:
        stats['top'] = top
        stats['freq'] = freq
        stats['type'] = base.TYPE_CAT

    return pd.Series(list(stats.values()), index=list(stats.keys()),
                     name=series.name)
def describe_constant_1d(series):
    """Describe a constant (`S_TYPE_CONST`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        A one-entry description holding only the type tag, since a
        constant column carries no further information.
    """
    return pd.Series({'type': base.S_TYPE_CONST}, name=series.name)
def describe_unique_1d(series):
    """Describe a unique (`S_TYPE_UNIQUE`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        A one-entry description holding only the type tag; a column of
        all-distinct values has no other summary worth reporting here.
    """
    return pd.Series({'type': base.S_TYPE_UNIQUE}, name=series.name)
def describe_supported(series, **kwargs):
    """Describe a variable of a supported type (a Series).

    Computes the counting statistics shared by every supported type:
    missing and infinite counts, distinct count, uniqueness flag, mode
    and memory footprint.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by statistic name.
    """
    leng = len(series)      # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # BUG FIX: this used to be ``count - series.count()``, which is
    # identically zero.  Count the +/-inf values explicitly instead.
    # (In the describe_1d pipeline infinities are already replaced by NaN
    # upstream, so there this still evaluates to 0.)
    n_infinite = series.isin([np.inf, -np.inf]).sum()

    value_counts, distinct_count = base.get_groupby_statistic(series)
    if count > distinct_count > 1:
        mode = series.mode().iloc[0]
    else:
        # BUG FIX: ``series[0]`` is label-based and raises a KeyError when
        # the index does not contain the label 0 (e.g. a filtered frame);
        # use positional access instead.
        mode = series.iloc[0]

    results_data = {
        'count': count,
        'distinct_count': distinct_count,
        'p_missing': 1 - count * 1.0 / leng,
        'n_missing': leng - count,
        'p_infinite': n_infinite * 1.0 / leng,
        'n_infinite': n_infinite,
        'is_unique': distinct_count == leng,
        'mode': mode,
        'p_unique': distinct_count * 1.0 / leng,
    }
    try:
        # Series.memory_usage() exists from pandas 0.17 onwards.
        results_data['memorysize'] = series.memory_usage()
    except Exception:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
    """Describe a variable of an unsupported (`S_TYPE_UNSUPPORTED`) type.

    Only the counting statistics that make sense for arbitrary objects
    are computed.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by statistic name.
    """
    leng = len(series)      # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # BUG FIX: this used to be ``count - series.count()``, which is
    # identically zero.  Count the +/-inf values explicitly instead.
    try:
        n_infinite = series.isin([np.inf, -np.inf]).sum()
    except TypeError:
        # Unsupported columns may hold unhashable values (lists, dicts, ...)
        # that ``isin`` cannot hash; such values are not float infinities.
        n_infinite = 0

    results_data = {
        'count': count,
        'p_missing': 1 - count * 1.0 / leng,
        'n_missing': leng - count,
        'p_infinite': n_infinite * 1.0 / leng,
        'n_infinite': n_infinite,
        'type': base.S_TYPE_UNSUPPORTED,
    }
    try:
        # Series.memory_usage() exists from pandas 0.17 onwards.
        results_data['memorysize'] = series.memory_usage()
    except Exception:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
    """Describe a single variable (a Series).

    The common counting statistics are always computed; the remaining
    statistics depend on the inferred type of the variable.

    Parameters
    ----------
    data : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by statistic name.
    """
    # Replace infinite values with NaNs to avoid issues with
    # histograms later on.
    data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan,
                 inplace=True)

    result = pd.Series({}, name=data.name)
    vartype = base.get_vartype(data)

    # Statistics shared by all variables of the same support class.
    if vartype == base.S_TYPE_UNSUPPORTED:
        result = result.append(describe_unsupported(data))
    else:
        result = result.append(describe_supported(data))

    # Type-specific statistics; only the numeric path consumes **kwargs.
    if vartype == base.S_TYPE_CONST:
        type_stats = describe_constant_1d(data)
    elif vartype == base.TYPE_BOOL:
        type_stats = describe_boolean_1d(data)
    elif vartype == base.TYPE_NUM:
        type_stats = describe_numeric_1d(data, **kwargs)
    elif vartype == base.TYPE_DATE:
        type_stats = describe_date_1d(data)
    elif vartype == base.S_TYPE_UNIQUE:
        type_stats = describe_unique_1d(data)
    else:
        # Everything remaining is treated as categorical (TYPE_CAT).
        type_stats = describe_categorical_1d(data)

    return result.append(type_stats)
def multiprocess_func(x, **kwargs):
    """Worker for ``pool.map``: describe one ``(column, series)`` pair."""
    column, series = x
    return column, describe_1d(series, **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
    """Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.

    Used as is, it will output its content as an HTML report in a Jupyter notebook.

    Parameters
    ----------
    df : DataFrame
        Data to be analyzed
    bins : int
        Number of bins in histogram.
        The default is 10.
    check_correlation : boolean
        Whether or not to check correlation.
        It's `True` by default.
    correlation_threshold: float
        Threshold to determine if the variable pair is correlated.
        The default is 0.9.
    correlation_overrides : list
        Variable names not to be rejected because they are correlated.
        There is no variable in the list (`None`) by default.
    check_recoded : boolean
        Whether or not to check recoded correlation (memory heavy feature).
        Since it's an expensive computation it can be activated for small datasets.
        `check_correlation` must be true to enable this check.
        It's `False` by default.
    pool_size : int
        Number of workers in the process pool.
        The default is equal to the number of CPUs.
        NOTE(review): the default is evaluated once at import time via
        ``multiprocessing.cpu_count()`` — confirm this is intended.

    Returns
    -------
    dict
        Containing the following keys:
            * table: general statistics on the dataset
            * variables: summary statistics for each variable
            * freq: frequency table
            * correlations: Pearson and Spearman correlation matrices

    Notes:
    ------
        * The section dedicated to check the correlation should be externalized
    """
    # Validate the input up front: only non-empty DataFrames are profiled.
    if not isinstance(df, pd.DataFrame):
        raise TypeError("df must be of type pandas.DataFrame")
    if df.empty:
        raise ValueError("df can not be empty")

    try:
        # reset matplotlib style before use
        # Fails in matplotlib 1.4.x so plot might look bad
        matplotlib.style.use("default")
    except:
        pass

    try:
        # Ignore FutureWarning raised by pandas when plotting datetimes
        # (converters must be registered explicitly on newer pandas).
        from pandas.plotting import register_matplotlib_converters
        register_matplotlib_converters()
    except:
        pass

    # Apply the package's bundled matplotlib style for all plots.
    matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))

    # Clearing the per-column value-count cache before computing stats.
    base.clear_cache()

    if not pd.Index(np.arange(0, len(df))).equals(df.index):
        # Treat a non-default index as any other column.
        df = df.reset_index()

    kwargs.update({'bins': bins})
    # Describe all variables in a univariate way, optionally in parallel.
    # NOTE(review): DataFrame.iteritems() was removed in pandas 2.0 —
    # verify the supported pandas range or switch to ``items()``.
    if pool_size == 1:
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
    else:
        pool = multiprocessing.Pool(pool_size)
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
        pool.close()

    # Get correlations (both are returned in the result dict below).
    dfcorrPear = df.corr(method="pearson")
    dfcorrSpear = df.corr(method="spearman")

    # Check correlations between variables; highly correlated columns are
    # re-labelled 'CORR' in ldesc and later counted as REJECTED.
    if check_correlation is True:
        ''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
        If x~y and y~z but not x~z, it would be better to delete only y
        Better way would be to find out which variable causes the highest increase in multicollinearity.
        '''
        corr = dfcorrPear.copy()
        for x, corr_x in corr.iterrows():
            if correlation_overrides and x in correlation_overrides:
                continue

            # NOTE(review): the loop variable below rebinds ``corr`` (the
            # DataFrame copied above) to a scalar.  Iteration still works
            # because ``iterrows`` already holds its own reference, but the
            # shadowing is fragile — consider renaming.
            for y, corr in corr_x.iteritems():
                # Only scan the lower triangle: stop at the diagonal so each
                # pair is inspected once and a column never flags itself.
                if x == y: break

                if corr > correlation_threshold:
                    ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])

        if check_recoded:
            # A pair of categorical columns is "recoded" when their
            # crosstab diagonal accounts for every row, i.e. one is a
            # relabelling of the other.
            categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
            for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
                if correlation_overrides and name1 in correlation_overrides:
                    continue

                confusion_matrix=pd.crosstab(data1,data2)
                if confusion_matrix.values.diagonal().sum() == len(df):
                    ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])

    # Convert ldesc to a DataFrame: collect every statistic name that
    # appears in any per-column description, shortest index first so the
    # common stats come before the type-specific ones.
    names = []
    ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
    for idxnames in ldesc_indexes:
        for name in idxnames:
            if name not in names:
                names.append(name)
    # NOTE(review): ``join_axes`` was removed from pd.concat in pandas 0.25;
    # on newer pandas this needs a ``.reindex`` instead — verify the pinned
    # pandas version.
    variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
    variable_stats.columns.names = df.columns.names

    # General statistics for the whole table.
    table_stats = {}

    table_stats['n'] = len(df)
    table_stats['nvar'] = len(df.columns)
    table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
    # Duplicate detection only considers columns of a supported type
    # (unsupported columns may hold unhashable values).
    unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
    table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0

    memsize = df.memory_usage(index=True).sum()
    table_stats['memsize'] = formatters.fmt_bytesize(memsize)
    table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])

    # Per-type column counts; start every known type at zero so the report
    # template can rely on all keys being present.
    table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
    table_stats.update(dict(variable_stats.loc['type'].value_counts()))
    table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']

    return {
        'table': table_stats,
        'variables': variable_stats.T,
        'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
        'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
    }
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_constant_1d | python | def describe_constant_1d(series):
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name) | Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L133-L146 | null | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
    """Describe a boolean (`TYPE_BOOL`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable, indexed by statistic name.
    """
    value_counts, _ = base.get_groupby_statistic(series)
    stats = {
        # Most frequent value and its count.
        'top': value_counts.index[0],
        'freq': value_counts.iloc[0],
        'type': base.TYPE_BOOL,
        # The mean (share of True values) is the interesting number here.
        'mean': series.mean(),
    }
    return pd.Series(list(stats.values()), index=list(stats.keys()),
                     name=series.name)
def describe_unique_1d(series):
"""Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
"""Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
"""Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
try:
# Ignore FutureWarning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
}
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_unique_1d | python | def describe_unique_1d(series):
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name) | Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L148-L161 | null | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
    """Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).

    Also create histograms (mini and full) of its distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    stats = {
        'type': base.TYPE_NUM,
        'mean': series.mean(),
        'std': series.std(),
        'variance': series.var(),
        'min': series.min(),
        'max': series.max(),
    }
    stats['range'] = stats['max'] - stats['min']

    # Drop NaNs once instead of per-quantile call; this is also a workaround
    # for https://github.com/pydata/pandas/issues/13098.
    cleaned = series.dropna()
    for quantile in (0.05, 0.25, 0.5, 0.75, 0.95):
        # Keys are formatted as percentages, e.g. 0.25 -> "25%".
        stats["{:.0%}".format(quantile)] = cleaned.quantile(quantile)
    stats['iqr'] = stats['75%'] - stats['25%']

    stats['kurtosis'] = series.kurt()
    stats['skewness'] = series.skew()
    stats['sum'] = series.sum()
    stats['mad'] = series.mad()
    # Coefficient of variation; reported as NaN when the mean is zero (or NaN).
    stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
    stats['n_zeros'] = len(series) - np.count_nonzero(series)
    stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)

    # Histograms (full-size and miniature versions).
    stats['histogram'] = histogram(series, **kwargs)
    stats['mini_histogram'] = mini_histogram(series, **kwargs)

    return pd.Series(stats, name=series.name)
def describe_date_1d(series):
    """Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).

    Also create histograms (mini and full) of its distribution.

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    minimum = series.min()
    maximum = series.max()
    stats = {
        'type': base.TYPE_DATE,
        'min': minimum,
        'max': maximum,
        'range': maximum - minimum,
        # Histograms (full-size and miniature versions).
        'histogram': histogram(series),
        'mini_histogram': mini_histogram(series),
    }
    return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
    """Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
        Empty (no index) when the variable does not classify as `TYPE_CAT`.
    """
    # Only meaningful when there is at least 1 non-missing value.
    value_counts, distinct_count = base.get_groupby_statistic(series)
    top = value_counts.index[0]
    freq = value_counts.iloc[0]

    names = []
    result = []
    if base.get_vartype(series) == base.TYPE_CAT:
        names.extend(['top', 'freq', 'type'])
        result.extend([top, freq, base.TYPE_CAT])

    return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
    """Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    value_counts, distinct_count = base.get_groupby_statistic(series)
    top = value_counts.index[0]
    freq = value_counts.iloc[0]

    # The mean of a boolean series is the fraction of True values,
    # which is an interesting statistic in itself.
    return pd.Series(
        [top, freq, base.TYPE_BOOL, series.mean()],
        index=['top', 'freq', 'type', 'mean'],
        name=series.name)
def describe_constant_1d(series):
    """Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # A constant column carries no information beyond its type marker.
    return pd.Series({'type': base.S_TYPE_CONST}, name=series.name)
def describe_supported(series, **kwargs):
    """Compute summary statistics of a supported variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    leng = len(series)      # number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # NOTE(review): `count - series.count()` is identically zero, so
    # n_infinite / p_infinite are always reported as 0.  Infinite values are
    # replaced with NaN in describe_1d before this runs, so they are counted
    # as missing instead.  Kept as-is to preserve the reported output.
    n_infinite = count - series.count()

    value_counts, distinct_count = base.get_groupby_statistic(series)
    if count > distinct_count > 1:
        mode = series.mode().iloc[0]
    else:
        # BUG FIX: use positional indexing.  `series[0]` performs a *label*
        # lookup and raises KeyError when the index does not contain the
        # label 0 (e.g. a string index); `iloc[0]` always yields the first
        # element and is identical on the default RangeIndex.
        mode = series.iloc[0]

    results_data = {'count': count,
                    'distinct_count': distinct_count,
                    'p_missing': 1 - count * 1.0 / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite * 1.0 / leng,
                    'n_infinite': n_infinite,
                    'is_unique': distinct_count == leng,
                    'mode': mode,
                    'p_unique': distinct_count * 1.0 / leng}
    try:
        # Series.memory_usage is only available from pandas 0.17 onwards.
        results_data['memorysize'] = series.memory_usage()
    except:
        results_data['memorysize'] = 0

    return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
    """Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    leng = len(series)      # total number of observations in the Series
    count = series.count()  # number of non-NaN observations in the Series
    # Mirrors describe_supported: count minus itself, i.e. reported as 0.
    n_infinite = count - series.count()

    stats = {
        'count': count,
        'p_missing': 1 - count * 1.0 / leng,
        'n_missing': leng - count,
        'p_infinite': n_infinite * 1.0 / leng,
        'n_infinite': n_infinite,
        'type': base.S_TYPE_UNSUPPORTED,
    }
    try:
        # Series.memory_usage is only available from pandas 0.17 onwards.
        stats['memorysize'] = series.memory_usage()
    except:
        stats['memorysize'] = 0

    return pd.Series(stats, name=series.name)
def describe_1d(data, **kwargs):
    """Compute summary statistics of a variable (a Series).

    The description is different according to the type of the variable.
    However a set of common stats is also computed.

    Parameters
    ----------
    data : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # Replace infinite values with NaNs to avoid issues with
    # histograms later.  NOTE: this mutates the caller's Series in place.
    data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)

    result = pd.Series({}, name=data.name)

    vartype = base.get_vartype(data)
    if vartype == base.S_TYPE_UNSUPPORTED:
        result = result.append(describe_unsupported(data))
    else:
        # Common statistics shared by all supported types.
        result = result.append(describe_supported(data))

        # Dispatch to the type-specific describer.
        if vartype == base.S_TYPE_CONST:
            result = result.append(describe_constant_1d(data))
        elif vartype == base.TYPE_BOOL:
            result = result.append(describe_boolean_1d(data))
        elif vartype == base.TYPE_NUM:
            result = result.append(describe_numeric_1d(data, **kwargs))
        elif vartype == base.TYPE_DATE:
            result = result.append(describe_date_1d(data))
        elif vartype == base.S_TYPE_UNIQUE:
            result = result.append(describe_unique_1d(data))
        else:
            # Fallback: TYPE_CAT
            result = result.append(describe_categorical_1d(data))
    return result
def multiprocess_func(x, **kwargs):
    """Worker helper: take a ``(column_name, series)`` pair (as produced by
    ``DataFrame.iteritems``) and return the name with its univariate stats."""
    name = x[0]
    return name, describe_1d(x[1], **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
    """Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.

    Used as is, it will output its content as an HTML report in a Jupyter notebook.

    Parameters
    ----------
    df : DataFrame
        Data to be analyzed
    bins : int
        Number of bins in histogram.
        The default is 10.
    check_correlation : boolean
        Whether or not to check correlation.
        It's `True` by default.
    correlation_threshold: float
        Threshold to determine if the variable pair is correlated.
        The default is 0.9.
    correlation_overrides : list
        Variable names not to be rejected because they are correlated.
        There is no variable in the list (`None`) by default.
    check_recoded : boolean
        Whether or not to check recoded correlation (memory heavy feature).
        Since it's an expensive computation it can be activated for small datasets.
        `check_correlation` must be true for this check to run.
        It's `False` by default.
    pool_size : int
        Number of workers in thread pool
        The default is equal to the number of CPU.

    Returns
    -------
    dict
        Containing the following keys:
            * table: general statistics on the dataset
            * variables: summary statistics for each variable
            * freq: frequency table

    Notes:
    ------
        * The section dedicated to check the correlation should be externalized
    """
    if not isinstance(df, pd.DataFrame):
        raise TypeError("df must be of type pandas.DataFrame")
    if df.empty:
        raise ValueError("df can not be empty")

    try:
        # reset matplotlib style before use
        # Fails in matplotlib 1.4.x so plot might look bad
        matplotlib.style.use("default")
    except:
        pass

    try:
        # Ignore FutureWarning raised by pandas/matplotlib datetime plotting
        # (register_matplotlib_converters only exists in newer pandas).
        from pandas.plotting import register_matplotlib_converters
        register_matplotlib_converters()
    except:
        pass

    matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))

    # Clearing the cache before computing stats
    base.clear_cache()

    if not pd.Index(np.arange(0, len(df))).equals(df.index):
        # Treat index as any other column
        df = df.reset_index()

    kwargs.update({'bins': bins})
    # Describe all variables in a univariate way.
    # pool_size == 1 avoids the multiprocessing machinery entirely
    # (useful for debugging and for environments without fork support).
    if pool_size == 1:
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
    else:
        pool = multiprocessing.Pool(pool_size)
        local_multiprocess_func = partial(multiprocess_func, **kwargs)
        ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
        pool.close()

    # Get correlations
    dfcorrPear = df.corr(method="pearson")
    dfcorrSpear = df.corr(method="spearman")

    # Check correlations between variable
    if check_correlation is True:
        ''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
        If x~y and y~z but not x~z, it would be better to delete only y
        Better way would be to find out which variable causes the highest increase in multicollinearity.
        '''
        corr = dfcorrPear.copy()
        for x, corr_x in corr.iterrows():
            if correlation_overrides and x in correlation_overrides:
                continue

            # NOTE(review): the loop variable `corr` below shadows the
            # DataFrame copy bound above; iteration still works because
            # the iterrows() iterator was already created, but the name
            # refers to a float from here on.
            for y, corr in corr_x.iteritems():
                # Only scan the strict lower triangle (stop at the diagonal).
                if x == y: break

                if corr > correlation_threshold:
                    ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])

        if check_recoded:
            categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
            for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
                if correlation_overrides and name1 in correlation_overrides:
                    continue

                # Two categoricals are "recoded" copies of each other when the
                # diagonal of their confusion matrix accounts for every row.
                confusion_matrix=pd.crosstab(data1,data2)
                if confusion_matrix.values.diagonal().sum() == len(df):
                    ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])

    # Convert ldesc to a DataFrame: collect the union of all stat names,
    # ordered by first appearance in the shortest descriptions first.
    names = []
    ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
    for idxnames in ldesc_indexes:
        for name in idxnames:
            if name not in names:
                names.append(name)
    # NOTE(review): `join_axes` was removed from pandas.concat in pandas 1.0;
    # this call ties the module to older pandas — verify against the pinned
    # pandas version before upgrading.
    variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
    variable_stats.columns.names = df.columns.names

    # General statistics
    table_stats = {}

    table_stats['n'] = len(df)
    table_stats['nvar'] = len(df.columns)
    table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
    # NOTE(review): despite its name, this holds the columns whose type is
    # NOT unsupported (the `!=` filter keeps supported columns).
    unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
    table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0

    memsize = df.memory_usage(index=True).sum()
    table_stats['memsize'] = formatters.fmt_bytesize(memsize)
    table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])

    # Per-type column counts; pre-seed every type with 0 so missing types
    # still appear in the summary.
    table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
    table_stats.update(dict(variable_stats.loc['type'].value_counts()))
    table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']

    return {
        'table': table_stats,
        'variables': variable_stats.T,
        'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
        'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
    }
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_supported | python | def describe_supported(series, **kwargs):
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name) | Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L163-L201 | [
"def get_groupby_statistic(data):\n \"\"\"Calculate value counts and distinct count of a variable (technically a Series).\n\n The result is cached by column name in a global variable to avoid recomputing.\n\n Parameters\n ----------\n data : Series\n The data type of the Series.\n\n Returns\n -------\n list\n value count and distinct count\n \"\"\"\n if data.name is not None and data.name in _VALUE_COUNTS_MEMO:\n return _VALUE_COUNTS_MEMO[data.name]\n\n value_counts_with_nan = data.value_counts(dropna=False)\n value_counts_without_nan = value_counts_with_nan.reset_index().dropna().set_index('index').iloc[:,0]\n distinct_count_with_nan = value_counts_with_nan.count()\n\n # When the inferred type of the index is just \"mixed\" probably the types within the series are tuple, dict, list and so on...\n if value_counts_without_nan.index.inferred_type == \"mixed\":\n raise TypeError('Not supported mixed type')\n\n result = [value_counts_without_nan, distinct_count_with_nan]\n\n if data.name is not None:\n _VALUE_COUNTS_MEMO[data.name] = result\n\n return result\n"
] | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name)
def describe_constant_1d(series):
"""Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
    """Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).

    Parameters
    ----------
    series : Series
        The variable to describe.

    Returns
    -------
    Series
        The description of the variable as a Series with index being stats keys.
    """
    # Every value occurs exactly once, so the only statistic reported
    # is the type marker itself.
    return pd.Series({'type': base.S_TYPE_UNIQUE}, name=series.name)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
"""Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
try:
# Ignore FutureWarning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
}
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_unsupported | python | def describe_unsupported(series, **kwargs):
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name) | Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L203-L233 | null | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name)
def describe_constant_1d(series):
"""Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
"""Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
"""Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
"""Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
try:
# Ignore FutureWarning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
}
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe_1d | python | def describe_1d(data, **kwargs):
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result | Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L235-L279 | [
"def get_vartype(data):\n \"\"\"Infer the type of a variable (technically a Series).\n\n The types supported are split in standard types and special types.\n\n Standard types:\n * Categorical (`TYPE_CAT`): the default type if no other one can be determined\n * Numerical (`TYPE_NUM`): if it contains numbers\n * Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo\n * Date (`TYPE_DATE`): if it contains datetime\n\n Special types:\n * Constant (`S_TYPE_CONST`): if all values in the variable are equal\n * Unique (`S_TYPE_UNIQUE`): if all values in the variable are different\n * Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported\n\n The result is cached by column name in a global variable to avoid recomputing.\n\n Parameters\n ----------\n data : Series\n The data type of the Series.\n\n Returns\n -------\n str\n The data type of the Series.\n\n Notes\n ----\n * Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field\n or just a boolean with NaN values\n * #72: Numeric with low Distinct count should be treated as \"Categorical\"\n \"\"\"\n if data.name is not None and data.name in _MEMO:\n return _MEMO[data.name]\n\n vartype = None\n try:\n distinct_count = get_groupby_statistic(data)[1]\n leng = len(data)\n\n if distinct_count <= 1:\n vartype = S_TYPE_CONST\n elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):\n vartype = TYPE_BOOL\n elif pd.api.types.is_numeric_dtype(data):\n vartype = TYPE_NUM\n elif pd.api.types.is_datetime64_dtype(data):\n vartype = TYPE_DATE\n elif distinct_count == leng:\n vartype = S_TYPE_UNIQUE\n else:\n vartype = TYPE_CAT\n except:\n vartype = S_TYPE_UNSUPPORTED\n\n if data.name is not None:\n _MEMO[data.name] = vartype\n\n return vartype\n",
"def describe_numeric_1d(series, **kwargs):\n \"\"\"Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).\n\n Also create histograms (mini an full) of its distribution.\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n # Format a number as a percentage. For example 0.25 will be turned to 25%.\n _percentile_format = \"{:.0%}\"\n stats = dict()\n stats['type'] = base.TYPE_NUM\n stats['mean'] = series.mean()\n stats['std'] = series.std()\n stats['variance'] = series.var()\n stats['min'] = series.min()\n stats['max'] = series.max()\n stats['range'] = stats['max'] - stats['min']\n # To avoid to compute it several times\n _series_no_na = series.dropna()\n for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):\n # The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098\n stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)\n stats['iqr'] = stats['75%'] - stats['25%']\n stats['kurtosis'] = series.kurt()\n stats['skewness'] = series.skew()\n stats['sum'] = series.sum()\n stats['mad'] = series.mad()\n stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN\n stats['n_zeros'] = (len(series) - np.count_nonzero(series))\n stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)\n # Histograms\n stats['histogram'] = histogram(series, **kwargs)\n stats['mini_histogram'] = mini_histogram(series, **kwargs)\n return pd.Series(stats, name=series.name)\n",
"def describe_date_1d(series):\n \"\"\"Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).\n\n Also create histograms (mini an full) of its distribution.\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n stats = dict()\n stats['type'] = base.TYPE_DATE\n stats['min'] = series.min()\n stats['max'] = series.max()\n stats['range'] = stats['max'] - stats['min']\n # Histograms\n stats['histogram'] = histogram(series)\n stats['mini_histogram'] = mini_histogram(series)\n return pd.Series(stats, name=series.name)\n",
"def describe_categorical_1d(series):\n \"\"\"Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n # Only run if at least 1 non-missing value\n value_counts, distinct_count = base.get_groupby_statistic(series)\n top, freq = value_counts.index[0], value_counts.iloc[0]\n names = []\n result = []\n\n if base.get_vartype(series) == base.TYPE_CAT:\n names += ['top', 'freq', 'type']\n result += [top, freq, base.TYPE_CAT]\n\n return pd.Series(result, index=names, name=series.name)\n",
"def describe_boolean_1d(series):\n \"\"\"Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n value_counts, distinct_count = base.get_groupby_statistic(series)\n top, freq = value_counts.index[0], value_counts.iloc[0]\n # The mean of boolean is an interesting information\n mean = series.mean()\n names = []\n result = []\n names += ['top', 'freq', 'type', 'mean']\n result += [top, freq, base.TYPE_BOOL, mean]\n\n return pd.Series(result, index=names, name=series.name)\n",
"def describe_constant_1d(series):\n \"\"\"Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)\n",
"def describe_unique_1d(series):\n \"\"\"Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)\n",
"def describe_supported(series, **kwargs):\n \"\"\"Compute summary statistics of a supported variable (a Series).\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n leng = len(series) # number of observations in the Series\n count = series.count() # number of non-NaN observations in the Series\n n_infinite = count - series.count() # number of infinte observations in the Series\n\n value_counts, distinct_count = base.get_groupby_statistic(series)\n if count > distinct_count > 1:\n mode = series.mode().iloc[0]\n else:\n mode = series[0]\n\n results_data = {'count': count,\n 'distinct_count': distinct_count,\n 'p_missing': 1 - count * 1.0 / leng,\n 'n_missing': leng - count,\n 'p_infinite': n_infinite * 1.0 / leng,\n 'n_infinite': n_infinite,\n 'is_unique': distinct_count == leng,\n 'mode': mode,\n 'p_unique': distinct_count * 1.0 / leng}\n try:\n # pandas 0.17 onwards\n results_data['memorysize'] = series.memory_usage()\n except:\n results_data['memorysize'] = 0\n\n return pd.Series(results_data, name=series.name)\n",
"def describe_unsupported(series, **kwargs):\n \"\"\"Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).\n\n Parameters\n ----------\n series : Series\n The variable to describe.\n\n Returns\n -------\n Series\n The description of the variable as a Series with index being stats keys.\n \"\"\"\n leng = len(series) # number of observations in the Series\n count = series.count() # number of non-NaN observations in the Series\n n_infinite = count - series.count() # number of infinte observations in the Series\n\n results_data = {'count': count,\n 'p_missing': 1 - count * 1.0 / leng,\n 'n_missing': leng - count,\n 'p_infinite': n_infinite * 1.0 / leng,\n 'n_infinite': n_infinite,\n 'type': base.S_TYPE_UNSUPPORTED}\n\n try:\n # pandas 0.17 onwards\n results_data['memorysize'] = series.memory_usage()\n except:\n results_data['memorysize'] = 0\n\n return pd.Series(results_data, name=series.name)\n"
] | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name)
def describe_constant_1d(series):
"""Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
"""Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
"""Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
"""Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized
"""
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
try:
# Ignore FutureWarning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
}
|
pandas-profiling/pandas-profiling | pandas_profiling/describe.py | describe | python | def describe(df, bins=10, check_correlation=True, correlation_threshold=0.9, correlation_overrides=None, check_recoded=False, pool_size=multiprocessing.cpu_count(), **kwargs):
if not isinstance(df, pd.DataFrame):
raise TypeError("df must be of type pandas.DataFrame")
if df.empty:
raise ValueError("df can not be empty")
try:
# reset matplotlib style before use
# Fails in matplotlib 1.4.x so plot might look bad
matplotlib.style.use("default")
except:
pass
try:
# Ignore FutureWarning
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except:
pass
matplotlib.style.use(resource_filename(__name__, "pandas_profiling.mplstyle"))
# Clearing the cache before computing stats
base.clear_cache()
if not pd.Index(np.arange(0, len(df))).equals(df.index):
# Treat index as any other column
df = df.reset_index()
kwargs.update({'bins': bins})
# Describe all variables in a univariate way
if pool_size == 1:
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in map(local_multiprocess_func, df.iteritems())}
else:
pool = multiprocessing.Pool(pool_size)
local_multiprocess_func = partial(multiprocess_func, **kwargs)
ldesc = {col: s for col, s in pool.map(local_multiprocess_func, df.iteritems())}
pool.close()
# Get correlations
dfcorrPear = df.corr(method="pearson")
dfcorrSpear = df.corr(method="spearman")
# Check correlations between variable
if check_correlation is True:
''' TODO: corr(x,y) > 0.9 and corr(y,z) > 0.9 does not imply corr(x,z) > 0.9
If x~y and y~z but not x~z, it would be better to delete only y
Better way would be to find out which variable causes the highest increase in multicollinearity.
'''
corr = dfcorrPear.copy()
for x, corr_x in corr.iterrows():
if correlation_overrides and x in correlation_overrides:
continue
for y, corr in corr_x.iteritems():
if x == y: break
if corr > correlation_threshold:
ldesc[x] = pd.Series(['CORR', y, corr], index=['type', 'correlation_var', 'correlation'])
if check_recoded:
categorical_variables = [(name, data) for (name, data) in df.iteritems() if base.get_vartype(data)=='CAT']
for (name1, data1), (name2, data2) in itertools.combinations(categorical_variables, 2):
if correlation_overrides and name1 in correlation_overrides:
continue
confusion_matrix=pd.crosstab(data1,data2)
if confusion_matrix.values.diagonal().sum() == len(df):
ldesc[name1] = pd.Series(['RECODED', name2], index=['type', 'correlation_var'])
# Convert ldesc to a DataFrame
names = []
ldesc_indexes = sorted([x.index for x in ldesc.values()], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
variable_stats = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
variable_stats.columns.names = df.columns.names
# General statistics
table_stats = {}
table_stats['n'] = len(df)
table_stats['nvar'] = len(df.columns)
table_stats['total_missing'] = variable_stats.loc['n_missing'].sum() / (table_stats['n'] * table_stats['nvar'])
unsupported_columns = variable_stats.transpose()[variable_stats.transpose().type != base.S_TYPE_UNSUPPORTED].index.tolist()
table_stats['n_duplicates'] = sum(df.duplicated(subset=unsupported_columns)) if len(unsupported_columns) > 0 else 0
memsize = df.memory_usage(index=True).sum()
table_stats['memsize'] = formatters.fmt_bytesize(memsize)
table_stats['recordsize'] = formatters.fmt_bytesize(memsize / table_stats['n'])
table_stats.update({k: 0 for k in ("NUM", "DATE", "CONST", "CAT", "UNIQUE", "CORR", "RECODED", "BOOL", "UNSUPPORTED")})
table_stats.update(dict(variable_stats.loc['type'].value_counts()))
table_stats['REJECTED'] = table_stats['CONST'] + table_stats['CORR'] + table_stats['RECODED']
return {
'table': table_stats,
'variables': variable_stats.T,
'freq': {k: (base.get_groupby_statistic(df[k])[0] if variable_stats[k].type != base.S_TYPE_UNSUPPORTED else None) for k in df.columns},
'correlations': {'pearson': dfcorrPear, 'spearman': dfcorrSpear}
} | Generates a dict containing summary statistics for a given dataset stored as a pandas `DataFrame`.
Used has is it will output its content as an HTML report in a Jupyter notebook.
Parameters
----------
df : DataFrame
Data to be analyzed
bins : int
Number of bins in histogram.
The default is 10.
check_correlation : boolean
Whether or not to check correlation.
It's `True` by default.
correlation_threshold: float
Threshold to determine if the variable pair is correlated.
The default is 0.9.
correlation_overrides : list
Variable names not to be rejected because they are correlated.
There is no variable in the list (`None`) by default.
check_recoded : boolean
Whether or not to check recoded correlation (memory heavy feature).
Since it's an expensive computation it can be activated for small datasets.
`check_correlation` must be true to disable this check.
It's `False` by default.
pool_size : int
Number of workers in thread pool
The default is equal to the number of CPU.
Returns
-------
dict
Containing the following keys:
* table: general statistics on the dataset
* variables: summary statistics for each variable
* freq: frequency table
Notes:
------
* The section dedicated to check the correlation should be externalized | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/describe.py#L284-L429 | [
"def clear_cache():\n \"\"\"Clear the cache stored as global variables\"\"\"\n global _MEMO, _VALUE_COUNTS_MEMO\n _MEMO = {}\n _VALUE_COUNTS_MEMO = {}\n",
"def fmt_bytesize(num, suffix='B'):\n for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:\n if abs(num) < 1024.0:\n return \"%3.1f %s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f %s%s\" % (num, 'Yi', suffix)\n"
] | # -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return pd.Series(result, index=names, name=series.name)
def describe_constant_1d(series):
"""Compute summary statistics of a constant (`S_TYPE_CONST`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_CONST], index=['type'], name=series.name)
def describe_unique_1d(series):
"""Compute summary statistics of a unique (`S_TYPE_UNIQUE`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
return pd.Series([base.S_TYPE_UNIQUE], index=['type'], name=series.name)
def describe_supported(series, **kwargs):
"""Compute summary statistics of a supported variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
value_counts, distinct_count = base.get_groupby_statistic(series)
if count > distinct_count > 1:
mode = series.mode().iloc[0]
else:
mode = series[0]
results_data = {'count': count,
'distinct_count': distinct_count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'is_unique': distinct_count == leng,
'mode': mode,
'p_unique': distinct_count * 1.0 / leng}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_unsupported(series, **kwargs):
"""Compute summary statistics of a unsupported (`S_TYPE_UNSUPPORTED`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
leng = len(series) # number of observations in the Series
count = series.count() # number of non-NaN observations in the Series
n_infinite = count - series.count() # number of infinte observations in the Series
results_data = {'count': count,
'p_missing': 1 - count * 1.0 / leng,
'n_missing': leng - count,
'p_infinite': n_infinite * 1.0 / leng,
'n_infinite': n_infinite,
'type': base.S_TYPE_UNSUPPORTED}
try:
# pandas 0.17 onwards
results_data['memorysize'] = series.memory_usage()
except:
results_data['memorysize'] = 0
return pd.Series(results_data, name=series.name)
def describe_1d(data, **kwargs):
"""Compute summary statistics of a variable (a Series).
The description is different according to the type of the variable.
However a set of common stats is also computed.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Replace infinite values with NaNs to avoid issues with
# histograms later.
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
result = pd.Series({}, name=data.name)
vartype = base.get_vartype(data)
if vartype == base.S_TYPE_UNSUPPORTED:
result = result.append(describe_unsupported(data))
else:
result = result.append(describe_supported(data))
if vartype == base.S_TYPE_CONST:
result = result.append(describe_constant_1d(data))
elif vartype == base.TYPE_BOOL:
result = result.append(describe_boolean_1d(data))
elif vartype == base.TYPE_NUM:
result = result.append(describe_numeric_1d(data, **kwargs))
elif vartype == base.TYPE_DATE:
result = result.append(describe_date_1d(data))
elif vartype == base.S_TYPE_UNIQUE:
result = result.append(describe_unique_1d(data))
else:
# TYPE_CAT
result = result.append(describe_categorical_1d(data))
return result
def multiprocess_func(x, **kwargs):
return x[0], describe_1d(x[1], **kwargs)
|
pandas-profiling/pandas-profiling | pandas_profiling/plot.py | _plot_histogram | python | def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):
if base.get_vartype(series) == base.TYPE_DATE:
# TODO: These calls should be merged
fig = plt.figure(figsize=figsize)
plot = fig.add_subplot(111)
plot.set_ylabel('Frequency')
try:
plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)
except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead
pass
else:
plot = series.plot(kind='hist', figsize=figsize,
facecolor=facecolor,
bins=bins) # TODO when running on server, send this off to a different thread
return plot | Plot an histogram from the data and return the AxesSubplot object.
Parameters
----------
series : Series
The data to plot
figsize : tuple
The size of the figure (width, height) in inches, default (6,4)
facecolor : str
The color code.
Returns
-------
matplotlib.AxesSubplot
The plot. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/plot.py#L26-L56 | [
"def get_vartype(data):\n \"\"\"Infer the type of a variable (technically a Series).\n\n The types supported are split in standard types and special types.\n\n Standard types:\n * Categorical (`TYPE_CAT`): the default type if no other one can be determined\n * Numerical (`TYPE_NUM`): if it contains numbers\n * Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo\n * Date (`TYPE_DATE`): if it contains datetime\n\n Special types:\n * Constant (`S_TYPE_CONST`): if all values in the variable are equal\n * Unique (`S_TYPE_UNIQUE`): if all values in the variable are different\n * Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported\n\n The result is cached by column name in a global variable to avoid recomputing.\n\n Parameters\n ----------\n data : Series\n The data type of the Series.\n\n Returns\n -------\n str\n The data type of the Series.\n\n Notes\n ----\n * Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field\n or just a boolean with NaN values\n * #72: Numeric with low Distinct count should be treated as \"Categorical\"\n \"\"\"\n if data.name is not None and data.name in _MEMO:\n return _MEMO[data.name]\n\n vartype = None\n try:\n distinct_count = get_groupby_statistic(data)[1]\n leng = len(data)\n\n if distinct_count <= 1:\n vartype = S_TYPE_CONST\n elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):\n vartype = TYPE_BOOL\n elif pd.api.types.is_numeric_dtype(data):\n vartype = TYPE_NUM\n elif pd.api.types.is_datetime64_dtype(data):\n vartype = TYPE_DATE\n elif distinct_count == leng:\n vartype = S_TYPE_UNIQUE\n else:\n vartype = TYPE_CAT\n except:\n vartype = S_TYPE_UNSUPPORTED\n\n if data.name is not None:\n _MEMO[data.name] = vartype\n\n return vartype\n"
] | # -*- coding: utf-8 -*-
"""Plot distribution of datasets"""
import base64
from distutils.version import LooseVersion
import pandas_profiling.base as base
import matplotlib
import numpy as np
# Fix #68, this call is not needed and brings side effects in some use cases
# Backend name specifications are not case-sensitive; e.g., ‘GTKAgg’ and ‘gtkagg’ are equivalent.
# See https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
BACKEND = matplotlib.get_backend()
if matplotlib.get_backend().lower() != BACKEND.lower():
# If backend is not set properly a call to describe will hang
matplotlib.use(BACKEND)
from matplotlib import pyplot as plt
try:
from StringIO import BytesIO
except ImportError:
from io import BytesIO
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
def histogram(series, **kwargs):
"""Plot an histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string.
"""
imgdata = BytesIO()
plot = _plot_histogram(series, **kwargs)
plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
# TODO Think about writing this to disk instead of caching them in strings
plt.close(plot.figure)
return result_string
def mini_histogram(series, **kwargs):
"""Plot a small (mini) histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string.
"""
imgdata = BytesIO()
plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)
plot.axes.get_yaxis().set_visible(False)
if LooseVersion(matplotlib.__version__) <= '1.5.9':
plot.set_axis_bgcolor("w")
else:
plot.set_facecolor("w")
xticks = plot.xaxis.get_major_ticks()
for tick in xticks[1:-1]:
tick.set_visible(False)
tick.label.set_visible(False)
for tick in (xticks[0], xticks[-1]):
tick.label.set_fontsize(8)
plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(plot.figure)
return result_string
def correlation_matrix(corrdf, title, **kwargs):
"""Plot image of a matrix correlation.
Parameters
----------
corrdf: DataFrame
The matrix correlation to plot.
title: str
The matrix title
Returns
-------
str, The resulting image encoded as a string.
"""
imgdata = BytesIO()
fig_cor, axes_cor = plt.subplots(1, 1)
labels = corrdf.columns
matrix_image = axes_cor.imshow(corrdf, vmin=-1, vmax=1, interpolation="nearest", cmap='bwr')
plt.title(title, size=18)
plt.colorbar(matrix_image)
axes_cor.set_xticks(np.arange(0, corrdf.shape[0], corrdf.shape[0] * 1.0 / len(labels)))
axes_cor.set_yticks(np.arange(0, corrdf.shape[1], corrdf.shape[1] * 1.0 / len(labels)))
axes_cor.set_xticklabels(labels, rotation=90)
axes_cor.set_yticklabels(labels)
matrix_image.figure.savefig(imgdata, bbox_inches='tight')
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(matrix_image.figure)
return result_string
|
pandas-profiling/pandas-profiling | pandas_profiling/plot.py | histogram | python | def histogram(series, **kwargs):
imgdata = BytesIO()
plot = _plot_histogram(series, **kwargs)
plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
# TODO Think about writing this to disk instead of caching them in strings
plt.close(plot.figure)
return result_string | Plot an histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/plot.py#L59-L80 | [
"def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):\n \"\"\"Plot an histogram from the data and return the AxesSubplot object.\n\n Parameters\n ----------\n series : Series\n The data to plot\n figsize : tuple\n The size of the figure (width, height) in inches, default (6,4)\n facecolor : str\n The color code.\n\n Returns\n -------\n matplotlib.AxesSubplot\n The plot.\n \"\"\"\n if base.get_vartype(series) == base.TYPE_DATE:\n # TODO: These calls should be merged\n fig = plt.figure(figsize=figsize)\n plot = fig.add_subplot(111)\n plot.set_ylabel('Frequency')\n try:\n plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)\n except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead\n pass\n else:\n plot = series.plot(kind='hist', figsize=figsize,\n facecolor=facecolor,\n bins=bins) # TODO when running on server, send this off to a different thread\n return plot\n"
] | # -*- coding: utf-8 -*-
"""Plot distribution of datasets"""
import base64
from distutils.version import LooseVersion
import pandas_profiling.base as base
import matplotlib
import numpy as np
# Fix #68, this call is not needed and brings side effects in some use cases
# Backend name specifications are not case-sensitive; e.g., ‘GTKAgg’ and ‘gtkagg’ are equivalent.
# See https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
BACKEND = matplotlib.get_backend()
if matplotlib.get_backend().lower() != BACKEND.lower():
# If backend is not set properly a call to describe will hang
matplotlib.use(BACKEND)
from matplotlib import pyplot as plt
try:
from StringIO import BytesIO
except ImportError:
from io import BytesIO
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):
"""Plot an histogram from the data and return the AxesSubplot object.
Parameters
----------
series : Series
The data to plot
figsize : tuple
The size of the figure (width, height) in inches, default (6,4)
facecolor : str
The color code.
Returns
-------
matplotlib.AxesSubplot
The plot.
"""
if base.get_vartype(series) == base.TYPE_DATE:
# TODO: These calls should be merged
fig = plt.figure(figsize=figsize)
plot = fig.add_subplot(111)
plot.set_ylabel('Frequency')
try:
plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)
except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead
pass
else:
plot = series.plot(kind='hist', figsize=figsize,
facecolor=facecolor,
bins=bins) # TODO when running on server, send this off to a different thread
return plot
def mini_histogram(series, **kwargs):
"""Plot a small (mini) histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string.
"""
imgdata = BytesIO()
plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)
plot.axes.get_yaxis().set_visible(False)
if LooseVersion(matplotlib.__version__) <= '1.5.9':
plot.set_axis_bgcolor("w")
else:
plot.set_facecolor("w")
xticks = plot.xaxis.get_major_ticks()
for tick in xticks[1:-1]:
tick.set_visible(False)
tick.label.set_visible(False)
for tick in (xticks[0], xticks[-1]):
tick.label.set_fontsize(8)
plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(plot.figure)
return result_string
def correlation_matrix(corrdf, title, **kwargs):
"""Plot image of a matrix correlation.
Parameters
----------
corrdf: DataFrame
The matrix correlation to plot.
title: str
The matrix title
Returns
-------
str, The resulting image encoded as a string.
"""
imgdata = BytesIO()
fig_cor, axes_cor = plt.subplots(1, 1)
labels = corrdf.columns
matrix_image = axes_cor.imshow(corrdf, vmin=-1, vmax=1, interpolation="nearest", cmap='bwr')
plt.title(title, size=18)
plt.colorbar(matrix_image)
axes_cor.set_xticks(np.arange(0, corrdf.shape[0], corrdf.shape[0] * 1.0 / len(labels)))
axes_cor.set_yticks(np.arange(0, corrdf.shape[1], corrdf.shape[1] * 1.0 / len(labels)))
axes_cor.set_xticklabels(labels, rotation=90)
axes_cor.set_yticklabels(labels)
matrix_image.figure.savefig(imgdata, bbox_inches='tight')
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(matrix_image.figure)
return result_string
|
pandas-profiling/pandas-profiling | pandas_profiling/plot.py | mini_histogram | python | def mini_histogram(series, **kwargs):
imgdata = BytesIO()
plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)
plot.axes.get_yaxis().set_visible(False)
if LooseVersion(matplotlib.__version__) <= '1.5.9':
plot.set_axis_bgcolor("w")
else:
plot.set_facecolor("w")
xticks = plot.xaxis.get_major_ticks()
for tick in xticks[1:-1]:
tick.set_visible(False)
tick.label.set_visible(False)
for tick in (xticks[0], xticks[-1]):
tick.label.set_fontsize(8)
plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(plot.figure)
return result_string | Plot a small (mini) histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/plot.py#L83-L116 | [
"def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):\n \"\"\"Plot an histogram from the data and return the AxesSubplot object.\n\n Parameters\n ----------\n series : Series\n The data to plot\n figsize : tuple\n The size of the figure (width, height) in inches, default (6,4)\n facecolor : str\n The color code.\n\n Returns\n -------\n matplotlib.AxesSubplot\n The plot.\n \"\"\"\n if base.get_vartype(series) == base.TYPE_DATE:\n # TODO: These calls should be merged\n fig = plt.figure(figsize=figsize)\n plot = fig.add_subplot(111)\n plot.set_ylabel('Frequency')\n try:\n plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)\n except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead\n pass\n else:\n plot = series.plot(kind='hist', figsize=figsize,\n facecolor=facecolor,\n bins=bins) # TODO when running on server, send this off to a different thread\n return plot\n"
] | # -*- coding: utf-8 -*-
"""Plot distribution of datasets"""
import base64
from distutils.version import LooseVersion
import pandas_profiling.base as base
import matplotlib
import numpy as np
# Fix #68, this call is not needed and brings side effects in some use cases
# Backend name specifications are not case-sensitive; e.g., ‘GTKAgg’ and ‘gtkagg’ are equivalent.
# See https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
BACKEND = matplotlib.get_backend()
if matplotlib.get_backend().lower() != BACKEND.lower():
# If backend is not set properly a call to describe will hang
matplotlib.use(BACKEND)
from matplotlib import pyplot as plt
try:
from StringIO import BytesIO
except ImportError:
from io import BytesIO
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):
"""Plot an histogram from the data and return the AxesSubplot object.
Parameters
----------
series : Series
The data to plot
figsize : tuple
The size of the figure (width, height) in inches, default (6,4)
facecolor : str
The color code.
Returns
-------
matplotlib.AxesSubplot
The plot.
"""
if base.get_vartype(series) == base.TYPE_DATE:
# TODO: These calls should be merged
fig = plt.figure(figsize=figsize)
plot = fig.add_subplot(111)
plot.set_ylabel('Frequency')
try:
plot.hist(series.dropna().values, facecolor=facecolor, bins=bins)
except TypeError: # matplotlib 1.4 can't plot dates so will show empty plot instead
pass
else:
plot = series.plot(kind='hist', figsize=figsize,
facecolor=facecolor,
bins=bins) # TODO when running on server, send this off to a different thread
return plot
def histogram(series, **kwargs):
"""Plot an histogram of the data.
Parameters
----------
series: Series
The data to plot.
Returns
-------
str
The resulting image encoded as a string.
"""
imgdata = BytesIO()
plot = _plot_histogram(series, **kwargs)
plot.figure.subplots_adjust(left=0.15, right=0.95, top=0.9, bottom=0.1, wspace=0, hspace=0)
plot.figure.savefig(imgdata)
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
# TODO Think about writing this to disk instead of caching them in strings
plt.close(plot.figure)
return result_string
def correlation_matrix(corrdf, title, **kwargs):
"""Plot image of a matrix correlation.
Parameters
----------
corrdf: DataFrame
The matrix correlation to plot.
title: str
The matrix title
Returns
-------
str, The resulting image encoded as a string.
"""
imgdata = BytesIO()
fig_cor, axes_cor = plt.subplots(1, 1)
labels = corrdf.columns
matrix_image = axes_cor.imshow(corrdf, vmin=-1, vmax=1, interpolation="nearest", cmap='bwr')
plt.title(title, size=18)
plt.colorbar(matrix_image)
axes_cor.set_xticks(np.arange(0, corrdf.shape[0], corrdf.shape[0] * 1.0 / len(labels)))
axes_cor.set_yticks(np.arange(0, corrdf.shape[1], corrdf.shape[1] * 1.0 / len(labels)))
axes_cor.set_xticklabels(labels, rotation=90)
axes_cor.set_yticklabels(labels)
matrix_image.figure.savefig(imgdata, bbox_inches='tight')
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(matrix_image.figure)
return result_string
|
pandas-profiling/pandas-profiling | pandas_profiling/plot.py | correlation_matrix | python | def correlation_matrix(corrdf, title, **kwargs):
imgdata = BytesIO()
fig_cor, axes_cor = plt.subplots(1, 1)
labels = corrdf.columns
matrix_image = axes_cor.imshow(corrdf, vmin=-1, vmax=1, interpolation="nearest", cmap='bwr')
plt.title(title, size=18)
plt.colorbar(matrix_image)
axes_cor.set_xticks(np.arange(0, corrdf.shape[0], corrdf.shape[0] * 1.0 / len(labels)))
axes_cor.set_yticks(np.arange(0, corrdf.shape[1], corrdf.shape[1] * 1.0 / len(labels)))
axes_cor.set_xticklabels(labels, rotation=90)
axes_cor.set_yticklabels(labels)
matrix_image.figure.savefig(imgdata, bbox_inches='tight')
imgdata.seek(0)
result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
plt.close(matrix_image.figure)
return result_string | Plot image of a matrix correlation.
Parameters
----------
corrdf: DataFrame
The matrix correlation to plot.
title: str
The matrix title
Returns
-------
str, The resulting image encoded as a string. | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/plot.py#L118-L145 | null | # -*- coding: utf-8 -*-
"""Plot distribution of datasets"""
import base64
from distutils.version import LooseVersion
import pandas_profiling.base as base
import matplotlib
import numpy as np
# Fix #68, this call is not needed and brings side effects in some use cases
# Backend name specifications are not case-sensitive; e.g., ‘GTKAgg’ and ‘gtkagg’ are equivalent.
# See https://matplotlib.org/faq/usage_faq.html#what-is-a-backend
BACKEND = matplotlib.get_backend()
if matplotlib.get_backend().lower() != BACKEND.lower():
# If backend is not set properly a call to describe will hang
matplotlib.use(BACKEND)
from matplotlib import pyplot as plt
try:
from StringIO import BytesIO
except ImportError:
from io import BytesIO
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
def _plot_histogram(series, bins=10, figsize=(6, 4), facecolor='#337ab7'):
    """Plot a histogram from the data and return the AxesSubplot object.

    Parameters
    ----------
    series : Series
        The data to plot
    bins : int
        Number of histogram bins, default 10
    figsize : tuple
        The size of the figure (width, height) in inches, default (6,4)
    facecolor : str
        The color code.

    Returns
    -------
    matplotlib.AxesSubplot
        The plot.
    """
    if base.get_vartype(series) != base.TYPE_DATE:
        # Non-date variables: delegate directly to pandas' histogram plot.
        # TODO when running on server, send this off to a different thread
        return series.plot(kind='hist', figsize=figsize,
                           facecolor=facecolor, bins=bins)

    # Date variables: build the axes manually so the y label can be set
    # before plotting. TODO: These calls should be merged
    figure = plt.figure(figsize=figsize)
    axes = figure.add_subplot(111)
    axes.set_ylabel('Frequency')
    try:
        axes.hist(series.dropna().values, facecolor=facecolor, bins=bins)
    except TypeError:
        # matplotlib 1.4 can't plot dates, so show an empty plot instead
        pass
    return axes
def histogram(series, **kwargs):
    """Plot a histogram of the data.

    Parameters
    ----------
    series: Series
        The data to plot.

    Returns
    -------
    str
        The resulting image encoded as a base64 data-URI string.
    """
    buffer = BytesIO()
    axes = _plot_histogram(series, **kwargs)
    axes.figure.subplots_adjust(left=0.15, right=0.95, top=0.9,
                                bottom=0.1, wspace=0, hspace=0)
    # TODO Think about writing this to disk instead of caching them in strings
    axes.figure.savefig(buffer)
    buffer.seek(0)
    encoded = quote(base64.b64encode(buffer.getvalue()))
    plt.close(axes.figure)
    return 'data:image/png;base64,' + encoded
def mini_histogram(series, **kwargs):
    """Plot a small (mini) histogram of the data.

    Parameters
    ----------
    series: Series
        The data to plot.

    Returns
    -------
    str
        The resulting image encoded as a base64 data-URI string.
    """
    imgdata = BytesIO()
    # Small fixed figure size distinguishes the mini version from histogram().
    plot = _plot_histogram(series, figsize=(2, 0.75), **kwargs)
    # The mini plot shows only the shape of the distribution: no y axis.
    plot.axes.get_yaxis().set_visible(False)
    # matplotlib renamed set_axis_bgcolor to set_facecolor in 2.0;
    # pick the call that exists for the installed version.
    if LooseVersion(matplotlib.__version__) <= '1.5.9':
        plot.set_axis_bgcolor("w")
    else:
        plot.set_facecolor("w")
    # Hide every interior x tick; keep (and shrink) only the two extremes.
    xticks = plot.xaxis.get_major_ticks()
    for tick in xticks[1:-1]:
        tick.set_visible(False)
        tick.label.set_visible(False)
    for tick in (xticks[0], xticks[-1]):
        tick.label.set_fontsize(8)
    plot.figure.subplots_adjust(left=0.15, right=0.85, top=1, bottom=0.35, wspace=0, hspace=0)
    plot.figure.savefig(imgdata)
    imgdata.seek(0)
    result_string = 'data:image/png;base64,' + quote(base64.b64encode(imgdata.getvalue()))
    # Close the figure so repeated calls don't accumulate open figures.
    plt.close(plot.figure)
    return result_string
|
pandas-profiling/pandas-profiling | pandas_profiling/base.py | get_groupby_statistic | python | def get_groupby_statistic(data):
if data.name is not None and data.name in _VALUE_COUNTS_MEMO:
return _VALUE_COUNTS_MEMO[data.name]
value_counts_with_nan = data.value_counts(dropna=False)
value_counts_without_nan = value_counts_with_nan.reset_index().dropna().set_index('index').iloc[:,0]
distinct_count_with_nan = value_counts_with_nan.count()
# When the inferred type of the index is just "mixed" probably the types within the series are tuple, dict, list and so on...
if value_counts_without_nan.index.inferred_type == "mixed":
raise TypeError('Not supported mixed type')
result = [value_counts_without_nan, distinct_count_with_nan]
if data.name is not None:
_VALUE_COUNTS_MEMO[data.name] = result
return result | Calculate value counts and distinct count of a variable (technically a Series).
The result is cached by column name in a global variable to avoid recomputing.
Parameters
----------
data : Series
The data type of the Series.
Returns
-------
list
value count and distinct count | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/base.py#L29-L60 | null | # -*- coding: utf-8 -*-
"""Common parts to all other modules, mainly utility functions.
"""
import pandas as pd
TYPE_CAT = 'CAT'
"""String: A categorical variable"""
TYPE_BOOL = 'BOOL'
"""String: A boolean variable"""
TYPE_NUM = 'NUM'
"""String: A numerical variable"""
TYPE_DATE = 'DATE'
"""String: A numeric variable"""
S_TYPE_CONST = 'CONST'
"""String: A constant variable"""
S_TYPE_UNIQUE = 'UNIQUE'
"""String: A unique variable"""
S_TYPE_UNSUPPORTED = 'UNSUPPORTED'
"""String: An unsupported variable"""
_VALUE_COUNTS_MEMO = {}
_MEMO = {}
def get_vartype(data):
    """Infer the type of a variable (technically a Series).

    The types supported are split in standard types and special types.

    Standard types:
        * Categorical (`TYPE_CAT`): the default type if no other one can be determined
        * Numerical (`TYPE_NUM`): if it contains numbers
        * Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo
        * Date (`TYPE_DATE`): if it contains datetime

    Special types:
        * Constant (`S_TYPE_CONST`): if all values in the variable are equal
        * Unique (`S_TYPE_UNIQUE`): if all values in the variable are different
        * Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported

    The result is cached by column name in a global variable to avoid recomputing.

    Parameters
    ----------
    data : Series
        The data type of the Series.

    Returns
    -------
    str
        The data type of the Series.

    Notes
    ----
    * Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field
      or just a boolean with NaN values
    * #72: Numeric with low Distinct count should be treated as "Categorical"
    """
    if data.name is not None and data.name in _MEMO:
        return _MEMO[data.name]

    vartype = None
    try:
        distinct_count = get_groupby_statistic(data)[1]
        leng = len(data)
        if distinct_count <= 1:
            vartype = S_TYPE_CONST
        elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):
            vartype = TYPE_BOOL
        elif pd.api.types.is_numeric_dtype(data):
            vartype = TYPE_NUM
        elif pd.api.types.is_datetime64_dtype(data):
            vartype = TYPE_DATE
        elif distinct_count == leng:
            vartype = S_TYPE_UNIQUE
        else:
            vartype = TYPE_CAT
    except Exception:
        # get_groupby_statistic raises TypeError for mixed element types
        # (tuples, dicts, lists, ...); any failure means "unsupported".
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit — narrowed to Exception.
        vartype = S_TYPE_UNSUPPORTED

    if data.name is not None:
        _MEMO[data.name] = vartype
    return vartype
def clear_cache():
    """Reset the module-level memoization caches.

    Both the inferred-type cache (``_MEMO``) and the value-count cache
    (``_VALUE_COUNTS_MEMO``) are rebound to fresh empty dictionaries.
    """
    global _MEMO, _VALUE_COUNTS_MEMO
    _VALUE_COUNTS_MEMO = {}
    _MEMO = {}
|
pandas-profiling/pandas-profiling | pandas_profiling/base.py | get_vartype | python | def get_vartype(data):
if data.name is not None and data.name in _MEMO:
return _MEMO[data.name]
vartype = None
try:
distinct_count = get_groupby_statistic(data)[1]
leng = len(data)
if distinct_count <= 1:
vartype = S_TYPE_CONST
elif pd.api.types.is_bool_dtype(data) or (distinct_count == 2 and pd.api.types.is_numeric_dtype(data)):
vartype = TYPE_BOOL
elif pd.api.types.is_numeric_dtype(data):
vartype = TYPE_NUM
elif pd.api.types.is_datetime64_dtype(data):
vartype = TYPE_DATE
elif distinct_count == leng:
vartype = S_TYPE_UNIQUE
else:
vartype = TYPE_CAT
except:
vartype = S_TYPE_UNSUPPORTED
if data.name is not None:
_MEMO[data.name] = vartype
return vartype | Infer the type of a variable (technically a Series).
The types supported are split in standard types and special types.
Standard types:
* Categorical (`TYPE_CAT`): the default type if no other one can be determined
* Numerical (`TYPE_NUM`): if it contains numbers
* Boolean (`TYPE_BOOL`): at this time only detected if it contains boolean values, see todo
* Date (`TYPE_DATE`): if it contains datetime
Special types:
* Constant (`S_TYPE_CONST`): if all values in the variable are equal
* Unique (`S_TYPE_UNIQUE`): if all values in the variable are different
* Unsupported (`S_TYPE_UNSUPPORTED`): if the variable is unsupported
The result is cached by column name in a global variable to avoid recomputing.
Parameters
----------
data : Series
The data type of the Series.
Returns
-------
str
The data type of the Series.
Notes
----
* Should improve verification when a categorical or numeric field has 3 values, it could be a categorical field
or just a boolean with NaN values
* #72: Numeric with low Distinct count should be treated as "Categorical" | train | https://github.com/pandas-profiling/pandas-profiling/blob/003d236daee8b7aca39c62708b18d59bced0bc03/pandas_profiling/base.py#L63-L123 | [
"def get_groupby_statistic(data):\n \"\"\"Calculate value counts and distinct count of a variable (technically a Series).\n\n The result is cached by column name in a global variable to avoid recomputing.\n\n Parameters\n ----------\n data : Series\n The data type of the Series.\n\n Returns\n -------\n list\n value count and distinct count\n \"\"\"\n if data.name is not None and data.name in _VALUE_COUNTS_MEMO:\n return _VALUE_COUNTS_MEMO[data.name]\n\n value_counts_with_nan = data.value_counts(dropna=False)\n value_counts_without_nan = value_counts_with_nan.reset_index().dropna().set_index('index').iloc[:,0]\n distinct_count_with_nan = value_counts_with_nan.count()\n\n # When the inferred type of the index is just \"mixed\" probably the types within the series are tuple, dict, list and so on...\n if value_counts_without_nan.index.inferred_type == \"mixed\":\n raise TypeError('Not supported mixed type')\n\n result = [value_counts_without_nan, distinct_count_with_nan]\n\n if data.name is not None:\n _VALUE_COUNTS_MEMO[data.name] = result\n\n return result\n"
] | # -*- coding: utf-8 -*-
"""Common parts to all other modules, mainly utility functions.
"""
import pandas as pd
TYPE_CAT = 'CAT'
"""String: A categorical variable"""
TYPE_BOOL = 'BOOL'
"""String: A boolean variable"""
TYPE_NUM = 'NUM'
"""String: A numerical variable"""
TYPE_DATE = 'DATE'
"""String: A numeric variable"""
S_TYPE_CONST = 'CONST'
"""String: A constant variable"""
S_TYPE_UNIQUE = 'UNIQUE'
"""String: A unique variable"""
S_TYPE_UNSUPPORTED = 'UNSUPPORTED'
"""String: An unsupported variable"""
_VALUE_COUNTS_MEMO = {}
def get_groupby_statistic(data):
    """Calculate value counts and distinct count of a variable (technically a Series).

    The result is cached by column name in a global variable to avoid recomputing.

    Parameters
    ----------
    data : Series
        The data type of the Series.

    Returns
    -------
    list
        value count and distinct count
    """
    cache_key = data.name
    if cache_key is not None and cache_key in _VALUE_COUNTS_MEMO:
        return _VALUE_COUNTS_MEMO[cache_key]

    counts_incl_nan = data.value_counts(dropna=False)
    # Move the index into a column so the NaN bucket can be dropped,
    # then restore the index and take the counts column.
    counts_excl_nan = (
        counts_incl_nan.reset_index().dropna().set_index('index').iloc[:, 0]
    )
    n_distinct_incl_nan = counts_incl_nan.count()

    # When the inferred type of the index is just "mixed", the series
    # probably holds tuples, dicts, lists and so on...
    if counts_excl_nan.index.inferred_type == "mixed":
        raise TypeError('Not supported mixed type')

    result = [counts_excl_nan, n_distinct_incl_nan]
    if cache_key is not None:
        _VALUE_COUNTS_MEMO[cache_key] = result
    return result
_MEMO = {}
def clear_cache():
    """Drop all cached results by rebinding both module caches to empty dicts."""
    global _VALUE_COUNTS_MEMO, _MEMO
    _MEMO = dict()
    _VALUE_COUNTS_MEMO = dict()
|
filestack/filestack-python | filestack/models/filestack_audiovisual.py | AudioVisual.to_filelink | python | def to_filelink(self):
if self.status != 'completed':
return 'Audio/video conversion not complete!'
response = utils.make_call(self.url, 'get')
if response.ok:
response = response.json()
handle = re.match(
r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
response['data']['url']
).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
raise Exception(response.text) | Checks is the status of the conversion is complete and, if so, converts to a Filelink
*returns* [Filestack.Filelink]
```python
filelink = av_convert.to_filelink()
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_audiovisual.py#L34-L57 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n"
] | class AudioVisual:
def __init__(self, url, uuid, timestamp, apikey=None, security=None):
"""
AudioVisual instances provide a bridge between transform and filelinks, and allow
you to check the status of a conversion and convert to a Filelink once completed
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='path/to/file/doom.mp4')
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
filelink = av_convert.to_filelink()
print(filelink.url)
```
"""
self._url = url
self._apikey = apikey
self._security = security
self._uuid = uuid
self._timestamp = timestamp
@property
def status(self):
"""
Returns the status of the AV conversion (makes a GET request)
*returns* [String]
```python
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
```
"""
response = utils.make_call(self.url, 'get')
return response.json()['status']
@property
def url(self):
return self._url
@property
def apikey(self):
"""
Returns the handle associated with the instance (if any)
*returns* [String]
```python
av.handle
# YOUR_HANDLE
```
"""
return self._apikey
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
av.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def uuid(self):
return self._uuid
@property
def timestamp(self):
return self._timestamp
|
filestack/filestack-python | filestack/models/filestack_filelink.py | Filelink._return_tag_task | python | def _return_tag_task(self, task):
if self.security is None:
raise Exception('Tags require security')
tasks = [task]
transform_url = get_transform_url(
tasks, handle=self.handle, security=self.security,
apikey=self.apikey
)
response = make_call(
CDN_URL, 'get', handle=self.handle, security=self.security,
transform_url=transform_url
)
return response.json() | Runs both SFW and Tags tasks | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_filelink.py#L52-L67 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n",
"def get_transform_url(tasks, external_url=None, handle=None, security=None, apikey=None, video=False):\n url_components = [(PROCESS_URL if video else CDN_URL)]\n if external_url:\n url_components.append(apikey)\n\n if 'debug' in tasks:\n index = tasks.index('debug')\n tasks.pop(index)\n tasks.insert(0, 'debug')\n\n url_components.append('/'.join(tasks))\n\n if security:\n url_components.append('security=policy:{},signature:{}'.format(\n security['policy'].decode('utf-8'), security['signature']))\n\n url_components.append(handle or external_url)\n\n url_path = '/'.join(url_components)\n\n return url_path\n"
] | class Filelink(ImageTransformationMixin, CommonMixin):
"""
Filelinks are object representations of Filestack Filehandles. You can perform all actions that are allowed through our REST API,
including downloading, deleting, overwriting and retrieving metadata. You can also get image tags, SFW filters, and directly
call any of our available transformations.
"""
def __init__(self, handle, apikey=None, security=None):
self._apikey = apikey
self._handle = handle
self._security = security
def tags(self):
"""
Get Google Vision tags for the Filelink.
*returns* [Dict]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file/foo.jpg')
tags = filelink.tags()
```
"""
return self._return_tag_task('tags')
def sfw(self):
"""
Get SFW label for the given file.
*returns* [Boolean]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file/foo.jpg')
# returns true if SFW and false if not
sfw = filelink.sfw()
```
"""
return self._return_tag_task('sfw')
@property
def handle(self):
"""
Returns the handle associated with the instance (if any)
*returns* [String]
```python
filelink.handle
# YOUR_HANDLE
```
"""
return self._handle
@property
def url(self):
"""
Returns the URL for the instance, which can be used
to retrieve, delete, and overwrite the file. If security is enabled, signature and policy parameters will
be included,
*returns* [String]
```python
filelink = client.upload(filepath='/path/to/file')
filelink.url
# https://cdn.filestackcontent.com/FILE_HANDLE
```
"""
return get_url(CDN_URL, handle=self.handle, security=self.security)
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
filelink.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
filelink.apikey
# YOUR_API_KEY
```
"""
return self._apikey
@apikey.setter
def apikey(self, apikey):
self._apikey = apikey
|
filestack/filestack-python | filestack/models/filestack_filelink.py | Filelink.url | python | def url(self):
return get_url(CDN_URL, handle=self.handle, security=self.security) | Returns the URL for the instance, which can be used
to retrieve, delete, and overwrite the file. If security is enabled, signature and policy parameters will
be included,
*returns* [String]
```python
filelink = client.upload(filepath='/path/to/file')
filelink.url
# https://cdn.filestackcontent.com/FILE_HANDLE
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_filelink.py#L84-L98 | [
"def get_url(base, handle=None, path=None, security=None):\n url_components = [base]\n\n if path:\n url_components.append(path)\n\n if handle:\n url_components.append(handle)\n\n url_path = '/'.join(url_components)\n\n if security:\n return get_security_path(url_path, security)\n\n return url_path\n"
] | class Filelink(ImageTransformationMixin, CommonMixin):
"""
Filelinks are object representations of Filestack Filehandles. You can perform all actions that is allowed through our REST API,
including downloading, deleting, overwriting and retrieving metadata. You can also get image tags, SFW filters, and directly
call any of our available transformations.
"""
def __init__(self, handle, apikey=None, security=None):
self._apikey = apikey
self._handle = handle
self._security = security
def tags(self):
"""
Get Google Vision tags for the Filelink.
*returns* [Dict]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file/foo.jpg')
tags = filelink.tags()
```
"""
return self._return_tag_task('tags')
def sfw(self):
"""
Get SFW label for the given file.
*returns* [Boolean]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file/foo.jpg')
# returns true if SFW and false if not
sfw = filelink.sfw()
```
"""
return self._return_tag_task('sfw')
def _return_tag_task(self, task):
"""
Runs both SFW and Tags tasks
"""
if self.security is None:
raise Exception('Tags require security')
tasks = [task]
transform_url = get_transform_url(
tasks, handle=self.handle, security=self.security,
apikey=self.apikey
)
response = make_call(
CDN_URL, 'get', handle=self.handle, security=self.security,
transform_url=transform_url
)
return response.json()
@property
def handle(self):
"""
Returns the handle associated with the instance (if any)
*returns* [String]
```python
filelink.handle
# YOUR_HANDLE
```
"""
return self._handle
@property
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
filelink.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
filelink.apikey
# YOUR_API_KEY
```
"""
return self._apikey
@apikey.setter
def apikey(self, apikey):
self._apikey = apikey
|
filestack/filestack-python | filestack/mixins/filestack_imagetransform_mixin.py | ImageTransformationMixin.zip | python | def zip(self, store=False, store_params=None):
params = locals()
params.pop('store')
params.pop('store_params')
new_transform = self.add_transform_task('zip', params)
if store:
return new_transform.store(**store_params) if store_params else new_transform.store()
return utils.make_call(CDN_URL, 'get', transform_url=new_transform.url) | Returns a zip file of the current transformation. This is different from
the zip function that lives on the Filestack Client
*returns* [Filestack.Transform] | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_imagetransform_mixin.py#L119-L135 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n",
"def add_transform_task(self, transformation, params):\n \"\"\"\n Adds a transform task to the current instance and returns it\n\n *returns* Filestack.Transform\n \"\"\"\n if not isinstance(self, filestack.models.Transform):\n instance = filestack.models.Transform(apikey=self.apikey, security=self.security, handle=self.handle)\n else:\n instance = self\n\n params.pop('self')\n params = {k: v for k, v in params.items() if v is not None}\n\n transformation_url = utils.return_transform_task(transformation, params)\n instance._transformation_tasks.append(transformation_url)\n\n return instance\n"
] | class ImageTransformationMixin(object):
"""
All transformations and related/dependent tasks live here. They can
be directly called by Transform or Filelink objects.
"""
def resize(self, width=None, height=None, fit=None, align=None):
return self.add_transform_task('resize', locals())
def crop(self, dim=None):
return self.add_transform_task('crop', locals())
def rotate(self, deg=None, exif=None, background=None):
return self.add_transform_task('rotate', locals())
def flip(self):
return self.add_transform_task('flip', locals())
def flop(self):
return self.add_transform_task('flop', locals())
def watermark(self, file=None, size=None, position=None):
return self.add_transform_task('watermark', locals())
def detect_faces(self, minsize=None, maxsize=None, color=None, export=None):
return self.add_transform_task('detect_faces', locals())
def crop_faces(self, mode=None, width=None, height=None, faces=None, buffer=None):
return self.add_transform_task('crop_faces', locals())
def pixelate_faces(self, faces=None, minsize=None, maxsize=None, buffer=None, amount=None, blur=None, type=None):
return self.add_transform_task('pixelate_faces', locals())
def round_corners(self, radius=None, blur=None, background=None):
return self.add_transform_task('round_corners', locals())
def vignette(self, amount=None, blurmode=None, background=None):
return self.add_transform_task('vignette', locals())
def polaroid(self, color=None, rotate=None, background=None):
return self.add_transform_task('polaroid', locals())
def torn_edges(self, spread=None, background=None):
return self.add_transform_task('torn_edges', locals())
def shadow(self, blur=None, opacity=None, vector=None, color=None, background=None):
return self.add_transform_task('shadow', locals())
def circle(self, background=None):
return self.add_transform_task('circle', locals())
def border(self, width=None, color=None, background=None):
return self.add_transform_task('border', locals())
def sharpen(self, amount=None):
return self.add_transform_task('sharpen', locals())
def blur(self, amount=None):
return self.add_transform_task('blur', locals())
def monochrome(self):
return self.add_transform_task('monochrome', locals())
def blackwhite(self, threshold=None):
return self.add_transform_task('blackwhite', locals())
def sepia(self, tone=None):
return self.add_transform_task('sepia', locals())
def pixelate(self, amount=None):
return self.add_transform_task('pixelate', locals())
def oil_paint(self, amount=None):
return self.add_transform_task('oil_paint', locals())
def negative(self):
return self.add_transform_task('negative', locals())
def modulate(self, brightness=None, hue=None, saturation=None):
return self.add_transform_task('modulate', locals())
def partial_pixelate(self, amount=None, blur=None, type=None, objects=None):
return self.add_transform_task('partial_pixelate', locals())
def partial_blur(self, amount=None, blur=None, type=None, objects=None):
return self.add_transform_task('partial_blur', locals())
def collage(self, files=None, margin=None, width=None, height=None, color=None, fit=None, autorotate=None):
return self.add_transform_task('collage', locals())
def upscale(self, upscale=None, noise=None, style=None):
return self.add_transform_task('upscale', locals())
def enhance(self):
return self.add_transform_task('enhance', locals())
def redeye(self):
return self.add_transform_task('redeye', locals())
def ascii(self, background=None, foreground=None, colored=None, size=None, reverse=None):
return self.add_transform_task('ascii', locals())
def filetype_conversion(self, format=None, background=None, page=None, density=None, compress=None,
quality=None, strip=None, colorspace=None, secure=None,
docinfo=None, pageformat=None, pageorientation=None):
return self.add_transform_task('output', locals())
def no_metadata(self):
return self.add_transform_task('no_metadata', locals())
def quality(self, value=None):
return self.add_transform_task('quality', locals())
def av_convert(self, preset=None, force=None, title=None, extname=None, filename=None,
width=None, height=None, upscale=None, aspect_mode=None, two_pass=None,
video_bitrate=None, fps=None, keyframe_interval=None, location=None,
watermark_url=None, watermark_top=None, watermark_bottom=None,
watermark_right=None, watermark_left=None, watermark_width=None, watermark_height=None,
path=None, access=None, container=None, audio_bitrate=None, audio_sample_rate=None,
audio_channels=None, clip_length=None, clip_offset=None):
"""
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='path/to/file/doom.mp4')
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
filelink = av_convert.to_filelink()
print(filelink.url)
```
"""
new_transform = self.add_transform_task('video_convert', locals())
transform_url = utils.get_transform_url(
new_transform._transformation_tasks, external_url=new_transform.external_url,
handle=new_transform.handle, security=new_transform.security,
apikey=new_transform.apikey, video=True
)
response = utils.make_call(transform_url, 'get')
if not response.ok:
raise Exception(response.text)
uuid = response.json()['uuid']
timestamp = response.json()['timestamp']
return filestack.models.AudioVisual(
transform_url, uuid, timestamp, apikey=new_transform.apikey, security=new_transform.security
)
def add_transform_task(self, transformation, params):
"""
Adds a transform task to the current instance and returns it
*returns* Filestack.Transform
"""
if not isinstance(self, filestack.models.Transform):
instance = filestack.models.Transform(apikey=self.apikey, security=self.security, handle=self.handle)
else:
instance = self
params.pop('self')
params = {k: v for k, v in params.items() if v is not None}
transformation_url = utils.return_transform_task(transformation, params)
instance._transformation_tasks.append(transformation_url)
return instance
|
filestack/filestack-python | filestack/mixins/filestack_imagetransform_mixin.py | ImageTransformationMixin.av_convert | python | def av_convert(self, preset=None, force=None, title=None, extname=None, filename=None,
width=None, height=None, upscale=None, aspect_mode=None, two_pass=None,
video_bitrate=None, fps=None, keyframe_interval=None, location=None,
watermark_url=None, watermark_top=None, watermark_bottom=None,
watermark_right=None, watermark_left=None, watermark_width=None, watermark_height=None,
path=None, access=None, container=None, audio_bitrate=None, audio_sample_rate=None,
audio_channels=None, clip_length=None, clip_offset=None):
new_transform = self.add_transform_task('video_convert', locals())
transform_url = utils.get_transform_url(
new_transform._transformation_tasks, external_url=new_transform.external_url,
handle=new_transform.handle, security=new_transform.security,
apikey=new_transform.apikey, video=True
)
response = utils.make_call(transform_url, 'get')
if not response.ok:
raise Exception(response.text)
uuid = response.json()['uuid']
timestamp = response.json()['timestamp']
return filestack.models.AudioVisual(
transform_url, uuid, timestamp, apikey=new_transform.apikey, security=new_transform.security
) | ```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='path/to/file/doom.mp4')
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
filelink = av_convert.to_filelink()
print(filelink.url)
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_imagetransform_mixin.py#L137-L177 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n",
"def get_transform_url(tasks, external_url=None, handle=None, security=None, apikey=None, video=False):\n url_components = [(PROCESS_URL if video else CDN_URL)]\n if external_url:\n url_components.append(apikey)\n\n if 'debug' in tasks:\n index = tasks.index('debug')\n tasks.pop(index)\n tasks.insert(0, 'debug')\n\n url_components.append('/'.join(tasks))\n\n if security:\n url_components.append('security=policy:{},signature:{}'.format(\n security['policy'].decode('utf-8'), security['signature']))\n\n url_components.append(handle or external_url)\n\n url_path = '/'.join(url_components)\n\n return url_path\n",
"def add_transform_task(self, transformation, params):\n \"\"\"\n Adds a transform task to the current instance and returns it\n\n *returns* Filestack.Transform\n \"\"\"\n if not isinstance(self, filestack.models.Transform):\n instance = filestack.models.Transform(apikey=self.apikey, security=self.security, handle=self.handle)\n else:\n instance = self\n\n params.pop('self')\n params = {k: v for k, v in params.items() if v is not None}\n\n transformation_url = utils.return_transform_task(transformation, params)\n instance._transformation_tasks.append(transformation_url)\n\n return instance\n"
] | class ImageTransformationMixin(object):
"""
All transformations and related/dependent tasks live here. They can
be directly called by Transform or Filelink objects.
"""
def resize(self, width=None, height=None, fit=None, align=None):
return self.add_transform_task('resize', locals())
def crop(self, dim=None):
return self.add_transform_task('crop', locals())
def rotate(self, deg=None, exif=None, background=None):
return self.add_transform_task('rotate', locals())
def flip(self):
return self.add_transform_task('flip', locals())
def flop(self):
return self.add_transform_task('flop', locals())
def watermark(self, file=None, size=None, position=None):
return self.add_transform_task('watermark', locals())
def detect_faces(self, minsize=None, maxsize=None, color=None, export=None):
return self.add_transform_task('detect_faces', locals())
def crop_faces(self, mode=None, width=None, height=None, faces=None, buffer=None):
return self.add_transform_task('crop_faces', locals())
def pixelate_faces(self, faces=None, minsize=None, maxsize=None, buffer=None, amount=None, blur=None, type=None):
return self.add_transform_task('pixelate_faces', locals())
def round_corners(self, radius=None, blur=None, background=None):
return self.add_transform_task('round_corners', locals())
def vignette(self, amount=None, blurmode=None, background=None):
return self.add_transform_task('vignette', locals())
def polaroid(self, color=None, rotate=None, background=None):
return self.add_transform_task('polaroid', locals())
def torn_edges(self, spread=None, background=None):
return self.add_transform_task('torn_edges', locals())
def shadow(self, blur=None, opacity=None, vector=None, color=None, background=None):
return self.add_transform_task('shadow', locals())
def circle(self, background=None):
return self.add_transform_task('circle', locals())
def border(self, width=None, color=None, background=None):
return self.add_transform_task('border', locals())
def sharpen(self, amount=None):
return self.add_transform_task('sharpen', locals())
def blur(self, amount=None):
return self.add_transform_task('blur', locals())
def monochrome(self):
return self.add_transform_task('monochrome', locals())
def blackwhite(self, threshold=None):
return self.add_transform_task('blackwhite', locals())
def sepia(self, tone=None):
return self.add_transform_task('sepia', locals())
def pixelate(self, amount=None):
return self.add_transform_task('pixelate', locals())
def oil_paint(self, amount=None):
return self.add_transform_task('oil_paint', locals())
def negative(self):
return self.add_transform_task('negative', locals())
def modulate(self, brightness=None, hue=None, saturation=None):
return self.add_transform_task('modulate', locals())
def partial_pixelate(self, amount=None, blur=None, type=None, objects=None):
return self.add_transform_task('partial_pixelate', locals())
def partial_blur(self, amount=None, blur=None, type=None, objects=None):
return self.add_transform_task('partial_blur', locals())
def collage(self, files=None, margin=None, width=None, height=None, color=None, fit=None, autorotate=None):
return self.add_transform_task('collage', locals())
def upscale(self, upscale=None, noise=None, style=None):
return self.add_transform_task('upscale', locals())
def enhance(self):
return self.add_transform_task('enhance', locals())
def redeye(self):
return self.add_transform_task('redeye', locals())
def ascii(self, background=None, foreground=None, colored=None, size=None, reverse=None):
return self.add_transform_task('ascii', locals())
def filetype_conversion(self, format=None, background=None, page=None, density=None, compress=None,
quality=None, strip=None, colorspace=None, secure=None,
docinfo=None, pageformat=None, pageorientation=None):
return self.add_transform_task('output', locals())
def no_metadata(self):
return self.add_transform_task('no_metadata', locals())
def quality(self, value=None):
return self.add_transform_task('quality', locals())
def zip(self, store=False, store_params=None):
"""
Returns a zip file of the current transformation. This is different from
the zip function that lives on the Filestack Client
*returns* [Filestack.Transform]
"""
params = locals()
params.pop('store')
params.pop('store_params')
new_transform = self.add_transform_task('zip', params)
if store:
return new_transform.store(**store_params) if store_params else new_transform.store()
return utils.make_call(CDN_URL, 'get', transform_url=new_transform.url)
def av_convert(self, preset=None, force=None, title=None, extname=None, filename=None,
width=None, height=None, upscale=None, aspect_mode=None, two_pass=None,
video_bitrate=None, fps=None, keyframe_interval=None, location=None,
watermark_url=None, watermark_top=None, watermark_bottom=None,
watermark_right=None, watermark_left=None, watermark_width=None, watermark_height=None,
path=None, access=None, container=None, audio_bitrate=None, audio_sample_rate=None,
audio_channels=None, clip_length=None, clip_offset=None):
"""
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='path/to/file/doom.mp4')
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
filelink = av_convert.to_filelink()
print(filelink.url)
```
"""
new_transform = self.add_transform_task('video_convert', locals())
transform_url = utils.get_transform_url(
new_transform._transformation_tasks, external_url=new_transform.external_url,
handle=new_transform.handle, security=new_transform.security,
apikey=new_transform.apikey, video=True
)
response = utils.make_call(transform_url, 'get')
if not response.ok:
raise Exception(response.text)
uuid = response.json()['uuid']
timestamp = response.json()['timestamp']
return filestack.models.AudioVisual(
transform_url, uuid, timestamp, apikey=new_transform.apikey, security=new_transform.security
)
def add_transform_task(self, transformation, params):
"""
Adds a transform task to the current instance and returns it
*returns* Filestack.Transform
"""
if not isinstance(self, filestack.models.Transform):
instance = filestack.models.Transform(apikey=self.apikey, security=self.security, handle=self.handle)
else:
instance = self
params.pop('self')
params = {k: v for k, v in params.items() if v is not None}
transformation_url = utils.return_transform_task(transformation, params)
instance._transformation_tasks.append(transformation_url)
return instance
|
filestack/filestack-python | filestack/mixins/filestack_imagetransform_mixin.py | ImageTransformationMixin.add_transform_task | python | def add_transform_task(self, transformation, params):
if not isinstance(self, filestack.models.Transform):
instance = filestack.models.Transform(apikey=self.apikey, security=self.security, handle=self.handle)
else:
instance = self
params.pop('self')
params = {k: v for k, v in params.items() if v is not None}
transformation_url = utils.return_transform_task(transformation, params)
instance._transformation_tasks.append(transformation_url)
return instance | Adds a transform task to the current instance and returns it
*returns* Filestack.Transform | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_imagetransform_mixin.py#L180-L197 | [
"def return_transform_task(transformation, params):\n transform_tasks = []\n\n for key, value in params.items():\n\n if isinstance(value, list):\n value = str(value).replace(\"'\", \"\").replace('\"', '').replace(\" \", \"\")\n if isinstance(value, bool):\n value = str(value).lower()\n\n transform_tasks.append('{}:{}'.format(key, value))\n\n transform_tasks = sorted(transform_tasks)\n\n if len(transform_tasks) > 0:\n transformation_url = '{}={}'.format(transformation, ','.join(transform_tasks))\n else:\n transformation_url = transformation\n\n return transformation_url\n"
] | class ImageTransformationMixin(object):
"""
All transformations and related/dependent tasks live here. They can
be directly called by Transform or Filelink objects.
"""
def resize(self, width=None, height=None, fit=None, align=None):
return self.add_transform_task('resize', locals())
def crop(self, dim=None):
return self.add_transform_task('crop', locals())
def rotate(self, deg=None, exif=None, background=None):
return self.add_transform_task('rotate', locals())
def flip(self):
return self.add_transform_task('flip', locals())
def flop(self):
return self.add_transform_task('flop', locals())
def watermark(self, file=None, size=None, position=None):
return self.add_transform_task('watermark', locals())
def detect_faces(self, minsize=None, maxsize=None, color=None, export=None):
return self.add_transform_task('detect_faces', locals())
def crop_faces(self, mode=None, width=None, height=None, faces=None, buffer=None):
return self.add_transform_task('crop_faces', locals())
def pixelate_faces(self, faces=None, minsize=None, maxsize=None, buffer=None, amount=None, blur=None, type=None):
return self.add_transform_task('pixelate_faces', locals())
def round_corners(self, radius=None, blur=None, background=None):
return self.add_transform_task('round_corners', locals())
def vignette(self, amount=None, blurmode=None, background=None):
return self.add_transform_task('vignette', locals())
def polaroid(self, color=None, rotate=None, background=None):
return self.add_transform_task('polaroid', locals())
def torn_edges(self, spread=None, background=None):
return self.add_transform_task('torn_edges', locals())
def shadow(self, blur=None, opacity=None, vector=None, color=None, background=None):
return self.add_transform_task('shadow', locals())
def circle(self, background=None):
return self.add_transform_task('circle', locals())
def border(self, width=None, color=None, background=None):
return self.add_transform_task('border', locals())
def sharpen(self, amount=None):
return self.add_transform_task('sharpen', locals())
def blur(self, amount=None):
return self.add_transform_task('blur', locals())
def monochrome(self):
return self.add_transform_task('monochrome', locals())
def blackwhite(self, threshold=None):
return self.add_transform_task('blackwhite', locals())
def sepia(self, tone=None):
return self.add_transform_task('sepia', locals())
def pixelate(self, amount=None):
return self.add_transform_task('pixelate', locals())
def oil_paint(self, amount=None):
return self.add_transform_task('oil_paint', locals())
def negative(self):
return self.add_transform_task('negative', locals())
def modulate(self, brightness=None, hue=None, saturation=None):
return self.add_transform_task('modulate', locals())
def partial_pixelate(self, amount=None, blur=None, type=None, objects=None):
return self.add_transform_task('partial_pixelate', locals())
def partial_blur(self, amount=None, blur=None, type=None, objects=None):
return self.add_transform_task('partial_blur', locals())
def collage(self, files=None, margin=None, width=None, height=None, color=None, fit=None, autorotate=None):
return self.add_transform_task('collage', locals())
def upscale(self, upscale=None, noise=None, style=None):
return self.add_transform_task('upscale', locals())
def enhance(self):
return self.add_transform_task('enhance', locals())
def redeye(self):
return self.add_transform_task('redeye', locals())
def ascii(self, background=None, foreground=None, colored=None, size=None, reverse=None):
return self.add_transform_task('ascii', locals())
def filetype_conversion(self, format=None, background=None, page=None, density=None, compress=None,
quality=None, strip=None, colorspace=None, secure=None,
docinfo=None, pageformat=None, pageorientation=None):
return self.add_transform_task('output', locals())
def no_metadata(self):
return self.add_transform_task('no_metadata', locals())
def quality(self, value=None):
return self.add_transform_task('quality', locals())
def zip(self, store=False, store_params=None):
"""
Returns a zip file of the current transformation. This is different from
the zip function that lives on the Filestack Client
*returns* [Filestack.Transform]
"""
params = locals()
params.pop('store')
params.pop('store_params')
new_transform = self.add_transform_task('zip', params)
if store:
return new_transform.store(**store_params) if store_params else new_transform.store()
return utils.make_call(CDN_URL, 'get', transform_url=new_transform.url)
def av_convert(self, preset=None, force=None, title=None, extname=None, filename=None,
width=None, height=None, upscale=None, aspect_mode=None, two_pass=None,
video_bitrate=None, fps=None, keyframe_interval=None, location=None,
watermark_url=None, watermark_top=None, watermark_bottom=None,
watermark_right=None, watermark_left=None, watermark_width=None, watermark_height=None,
path=None, access=None, container=None, audio_bitrate=None, audio_sample_rate=None,
audio_channels=None, clip_length=None, clip_offset=None):
"""
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='path/to/file/doom.mp4')
av_convert= filelink.av_convert(width=100, height=100)
while av_convert.status != 'completed':
print(av_convert.status)
filelink = av_convert.to_filelink()
print(filelink.url)
```
"""
new_transform = self.add_transform_task('video_convert', locals())
transform_url = utils.get_transform_url(
new_transform._transformation_tasks, external_url=new_transform.external_url,
handle=new_transform.handle, security=new_transform.security,
apikey=new_transform.apikey, video=True
)
response = utils.make_call(transform_url, 'get')
if not response.ok:
raise Exception(response.text)
uuid = response.json()['uuid']
timestamp = response.json()['timestamp']
return filestack.models.AudioVisual(
transform_url, uuid, timestamp, apikey=new_transform.apikey, security=new_transform.security
)
|
filestack/filestack-python | filestack/mixins/filestack_common.py | CommonMixin.download | python | def download(self, destination_path, params=None):
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
with open(destination_path, 'wb') as new_file:
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response | Downloads a file to the given local path and returns the size of the downloaded file if successful
*returns* [Integer]
```python
from filestack import Client
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file')
# if successful, returns size of downloaded file in bytes
response = filelink.download('path/to/file')
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_common.py#L15-L45 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n"
] | class CommonMixin(object):
"""
Contains all functions related to the manipulation of Filelinks
"""
def get_content(self, params=None):
"""
Returns the raw byte content of a given Filelink
*returns* [Bytes]
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
byte_content = filelink.get_content()
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
return response.content
def get_metadata(self, params=None):
"""
Metadata provides certain information about a Filehandle, and you can specify which pieces
of information you will receive back by passing in optional parameters.
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
metadata = filelink.get_metadata()
# or define specific metadata to receive
metadata = filelink.get_metadata({'filename': true})
```
"""
metadata_url = "{CDN_URL}/{handle}/metadata".format(
CDN_URL=CDN_URL, handle=self.handle
)
response = utils.make_call(metadata_url, 'get',
params=params,
security=self.security)
return response.json()
def delete(self, params=None):
"""
You may delete any file you have uploaded, either through a Filelink returned from the client or one you have initialized yourself.
This returns a response of success or failure. This action requires security.abs
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file/foo.txt')
response = filelink.delete()
```
"""
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
return utils.make_call(API_URL, 'delete',
path=FILE_PATH,
handle=self.handle,
params=params,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
def overwrite(self, url=None, filepath=None, params=None):
"""
You may overwrite any Filelink by supplying a new file. The Filehandle will remain the same.
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
```
"""
if params:
OVERWRITE_SCHEMA.check(params)
data, files = None, None
if url:
data = {'url': url}
elif filepath:
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
else:
raise ValueError("You must include a url or filepath parameter")
return utils.make_call(API_URL, 'post',
path=FILE_PATH,
params=params,
handle=self.handle,
data=data,
files=files,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
|
filestack/filestack-python | filestack/mixins/filestack_common.py | CommonMixin.get_content | python | def get_content(self, params=None):
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
return response.content | Returns the raw byte content of a given Filelink
*returns* [Bytes]
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
byte_content = filelink.get_content()
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_common.py#L47-L68 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n"
] | class CommonMixin(object):
"""
Contains all functions related to the manipulation of Filelinks
"""
def download(self, destination_path, params=None):
"""
Downloads a file to the given local path and returns the size of the downloaded file if successful
*returns* [Integer]
```python
from filestack import Client
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file')
# if successful, returns size of downloaded file in bytes
response = filelink.download('path/to/file')
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
with open(destination_path, 'wb') as new_file:
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
def get_metadata(self, params=None):
"""
Metadata provides certain information about a Filehandle, and you can specify which pieces
of information you will receive back by passing in optional parameters.
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
metadata = filelink.get_metadata()
# or define specific metadata to receive
metadata = filelink.get_metadata({'filename': true})
```
"""
metadata_url = "{CDN_URL}/{handle}/metadata".format(
CDN_URL=CDN_URL, handle=self.handle
)
response = utils.make_call(metadata_url, 'get',
params=params,
security=self.security)
return response.json()
def delete(self, params=None):
"""
You may delete any file you have uploaded, either through a Filelink returned from the client or one you have initialized yourself.
This returns a response of success or failure. This action requires security.abs
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file/foo.txt')
response = filelink.delete()
```
"""
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
return utils.make_call(API_URL, 'delete',
path=FILE_PATH,
handle=self.handle,
params=params,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
def overwrite(self, url=None, filepath=None, params=None):
"""
You may overwrite any Filelink by supplying a new file. The Filehandle will remain the same.
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
```
"""
if params:
OVERWRITE_SCHEMA.check(params)
data, files = None, None
if url:
data = {'url': url}
elif filepath:
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
else:
raise ValueError("You must include a url or filepath parameter")
return utils.make_call(API_URL, 'post',
path=FILE_PATH,
params=params,
handle=self.handle,
data=data,
files=files,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
|
filestack/filestack-python | filestack/mixins/filestack_common.py | CommonMixin.get_metadata | python | def get_metadata(self, params=None):
metadata_url = "{CDN_URL}/{handle}/metadata".format(
CDN_URL=CDN_URL, handle=self.handle
)
response = utils.make_call(metadata_url, 'get',
params=params,
security=self.security)
return response.json() | Metadata provides certain information about a Filehandle, and you can specify which pieces
of information you will receive back by passing in optional parameters.
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
metadata = filelink.get_metadata()
# or define specific metadata to receive
metadata = filelink.get_metadata({'filename': true})
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_common.py#L70-L91 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n"
] | class CommonMixin(object):
"""
Contains all functions related to the manipulation of Filelinks
"""
def download(self, destination_path, params=None):
"""
Downloads a file to the given local path and returns the size of the downloaded file if successful
*returns* [Integer]
```python
from filestack import Client
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file')
# if successful, returns size of downloaded file in bytes
response = filelink.download('path/to/file')
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
with open(destination_path, 'wb') as new_file:
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
def get_content(self, params=None):
"""
Returns the raw byte content of a given Filelink
*returns* [Bytes]
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
byte_content = filelink.get_content()
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
return response.content
def delete(self, params=None):
"""
You may delete any file you have uploaded, either through a Filelink returned from the client or one you have initialized yourself.
This returns a response of success or failure. This action requires security.abs
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file/foo.txt')
response = filelink.delete()
```
"""
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
return utils.make_call(API_URL, 'delete',
path=FILE_PATH,
handle=self.handle,
params=params,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
def overwrite(self, url=None, filepath=None, params=None):
"""
You may overwrite any Filelink by supplying a new file. The Filehandle will remain the same.
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
```
"""
if params:
OVERWRITE_SCHEMA.check(params)
data, files = None, None
if url:
data = {'url': url}
elif filepath:
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
else:
raise ValueError("You must include a url or filepath parameter")
return utils.make_call(API_URL, 'post',
path=FILE_PATH,
params=params,
handle=self.handle,
data=data,
files=files,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
|
filestack/filestack-python | filestack/mixins/filestack_common.py | CommonMixin.delete | python | def delete(self, params=None):
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
return utils.make_call(API_URL, 'delete',
path=FILE_PATH,
handle=self.handle,
params=params,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None) | You may delete any file you have uploaded, either through a Filelink returned from the client or one you have initialized yourself.
This returns a response of success or failure. This action requires security.abs
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file/foo.txt')
response = filelink.delete()
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_common.py#L93-L121 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n"
] | class CommonMixin(object):
"""
Contains all functions related to the manipulation of Filelinks
"""
def download(self, destination_path, params=None):
"""
Downloads a file to the given local path and returns the size of the downloaded file if successful
*returns* [Integer]
```python
from filestack import Client
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file')
# if successful, returns size of downloaded file in bytes
response = filelink.download('path/to/file')
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
with open(destination_path, 'wb') as new_file:
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
def get_content(self, params=None):
"""
Returns the raw byte content of a given Filelink
*returns* [Bytes]
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
byte_content = filelink.get_content()
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
return response.content
def get_metadata(self, params=None):
"""
Metadata provides certain information about a Filehandle, and you can specify which pieces
of information you will receive back by passing in optional parameters.
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
metadata = filelink.get_metadata()
# or define specific metadata to receive
metadata = filelink.get_metadata({'filename': true})
```
"""
metadata_url = "{CDN_URL}/{handle}/metadata".format(
CDN_URL=CDN_URL, handle=self.handle
)
response = utils.make_call(metadata_url, 'get',
params=params,
security=self.security)
return response.json()
def overwrite(self, url=None, filepath=None, params=None):
"""
You may overwrite any Filelink by supplying a new file. The Filehandle will remain the same.
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
```
"""
if params:
OVERWRITE_SCHEMA.check(params)
data, files = None, None
if url:
data = {'url': url}
elif filepath:
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
else:
raise ValueError("You must include a url or filepath parameter")
return utils.make_call(API_URL, 'post',
path=FILE_PATH,
params=params,
handle=self.handle,
data=data,
files=files,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
|
filestack/filestack-python | filestack/mixins/filestack_common.py | CommonMixin.overwrite | python | def overwrite(self, url=None, filepath=None, params=None):
if params:
OVERWRITE_SCHEMA.check(params)
data, files = None, None
if url:
data = {'url': url}
elif filepath:
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
else:
raise ValueError("You must include a url or filepath parameter")
return utils.make_call(API_URL, 'post',
path=FILE_PATH,
params=params,
handle=self.handle,
data=data,
files=files,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None) | You may overwrite any Filelink by supplying a new file. The Filehandle will remain the same.
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/mixins/filestack_common.py#L123-L158 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n"
] | class CommonMixin(object):
"""
Contains all functions related to the manipulation of Filelinks
"""
def download(self, destination_path, params=None):
"""
Downloads a file to the given local path and returns the size of the downloaded file if successful
*returns* [Integer]
```python
from filestack import Client
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file')
# if successful, returns size of downloaded file in bytes
response = filelink.download('path/to/file')
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
with open(destination_path, 'wb') as new_file:
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
def get_content(self, params=None):
"""
Returns the raw byte content of a given Filelink
*returns* [Bytes]
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
byte_content = filelink.get_content()
```
"""
if params:
CONTENT_DOWNLOAD_SCHEMA.check(params)
response = utils.make_call(CDN_URL, 'get',
handle=self.handle,
params=params,
security=self.security,
transform_url=(self.url if isinstance(self, filestack.models.Transform) else None))
return response.content
def get_metadata(self, params=None):
"""
Metadata provides certain information about a Filehandle, and you can specify which pieces
of information you will receive back by passing in optional parameters.
```python
from filestack import Client
client = Client('API_KEY')
filelink = client.upload(filepath='/path/to/file/foo.jpg')
metadata = filelink.get_metadata()
# or define specific metadata to receive
metadata = filelink.get_metadata({'filename': true})
```
"""
metadata_url = "{CDN_URL}/{handle}/metadata".format(
CDN_URL=CDN_URL, handle=self.handle
)
response = utils.make_call(metadata_url, 'get',
params=params,
security=self.security)
return response.json()
def delete(self, params=None):
"""
You may delete any file you have uploaded, either through a Filelink returned from the client or one you have initialized yourself.
This returns a response of success or failure. This action requires security.
*returns* [requests.response]
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
filelink = client.upload(filepath='/path/to/file/foo.txt')
response = filelink.delete()
```
"""
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
return utils.make_call(API_URL, 'delete',
path=FILE_PATH,
handle=self.handle,
params=params,
security=self.security,
transform_url=self.url if isinstance(self, filestack.models.Transform) else None)
|
filestack/filestack-python | filestack/models/filestack_security.py | validate | python | def validate(policy):
for param, value in policy.items():
if param not in ACCEPTED_SECURITY_TYPES.keys():
raise SecurityError('Invalid Security Parameter: {}'.format(param))
if type(value) != ACCEPTED_SECURITY_TYPES[param]:
raise SecurityError('Invalid Parameter Data Type for {}, '
'Expecting: {} Received: {}'.format(
param, ACCEPTED_SECURITY_TYPES[param],
type(value))) | Validates a policy and its parameters and raises an error if invalid | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_security.py#L10-L23 | null | from filestack.config import ACCEPTED_SECURITY_TYPES
from filestack.exceptions import SecurityError
import base64
import hashlib
import hmac
import json
def security(policy, app_secret):
"""
Creates a valid signature and policy based on provided app secret and
parameters
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012, 'call': ['read', 'store', 'pick']}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
```
"""
validate(policy)
policy_enc = base64.urlsafe_b64encode(json.dumps(policy).encode('utf-8'))
signature = hmac.new(app_secret.encode('utf-8'),
policy_enc,
hashlib.sha256).hexdigest()
return {'policy': policy_enc, 'signature': signature}
|
filestack/filestack-python | filestack/models/filestack_security.py | security | python | def security(policy, app_secret):
validate(policy)
policy_enc = base64.urlsafe_b64encode(json.dumps(policy).encode('utf-8'))
signature = hmac.new(app_secret.encode('utf-8'),
policy_enc,
hashlib.sha256).hexdigest()
return {'policy': policy_enc, 'signature': signature} | Creates a valid signature and policy based on provided app secret and
parameters
```python
from filestack import Client, security
# a policy requires at least an expiry
policy = {'expiry': 56589012, 'call': ['read', 'store', 'pick']}
sec = security(policy, 'APP_SECRET')
client = Client('API_KEY', security=sec)
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_security.py#L26-L47 | [
"def validate(policy):\n \"\"\"\n Validates a policy and its parameters and raises an error if invalid\n \"\"\"\n for param, value in policy.items():\n\n if param not in ACCEPTED_SECURITY_TYPES.keys():\n raise SecurityError('Invalid Security Parameter: {}'.format(param))\n\n if type(value) != ACCEPTED_SECURITY_TYPES[param]:\n raise SecurityError('Invalid Parameter Data Type for {}, '\n 'Expecting: {} Received: {}'.format(\n param, ACCEPTED_SECURITY_TYPES[param],\n type(value)))\n"
] | from filestack.config import ACCEPTED_SECURITY_TYPES
from filestack.exceptions import SecurityError
import base64
import hashlib
import hmac
import json
def validate(policy):
"""
Validates a policy and its parameters and raises an error if invalid
"""
for param, value in policy.items():
if param not in ACCEPTED_SECURITY_TYPES.keys():
raise SecurityError('Invalid Security Parameter: {}'.format(param))
if type(value) != ACCEPTED_SECURITY_TYPES[param]:
raise SecurityError('Invalid Parameter Data Type for {}, '
'Expecting: {} Received: {}'.format(
param, ACCEPTED_SECURITY_TYPES[param],
type(value)))
|
filestack/filestack-python | filestack/models/filestack_client.py | Client.transform_external | python | def transform_external(self, external_url):
return filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url) | Turns an external URL into a Filestack Transform object
*returns* [Filestack.Transform]
```python
from filestack import Client, Filelink
client = Client("API_KEY")
transform = client.transform_external('http://www.example.com')
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_client.py#L26-L39 | null | class Client():
"""
The hub for all Filestack operations. Creates Filelinks, converts external to transform objects,
takes a URL screenshot and returns zipped files.
"""
def __init__(self, apikey, security=None, storage='S3'):
self._apikey = apikey
self._security = security
STORE_LOCATION_SCHEMA.check(storage)
self._storage = storage
def urlscreenshot(self, external_url, agent=None, mode=None, width=None, height=None, delay=None):
"""
Takes a 'screenshot' of the given URL
*returns* [Filestack.Transform]
```python
from filestack import Client
client = Client("API_KEY")
# returns a Transform object
screenshot = client.urlscreenshot('https://www.example.com', width=100, height=100, agent="desktop")
filelink = screenshot.store()
```
"""
params = locals()
params.pop('self')
params.pop('external_url')
params = {k: v for k, v in params.items() if v is not None}
url_task = utils.return_transform_task('urlscreenshot', params)
new_transform = filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url)
new_transform._transformation_tasks.append(url_task)
return new_transform
def zip(self, destination_path, files):
"""
Takes array of files and downloads a compressed ZIP archive
to provided path
*returns* [requests.response]
```python
from filestack import Client
client = Client("<API_KEY>")
client.zip('/path/to/file/destination', ['files'])
```
"""
zip_url = "{}/{}/zip/[{}]".format(CDN_URL, self.apikey, ','.join(files))
with open(destination_path, 'wb') as new_file:
response = utils.make_call(zip_url, 'get')
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
return response.text
def upload(self, url=None, filepath=None, multipart=True, params=None, upload_processes=None, intelligent=False):
"""
Uploads a file either through a local filepath or external_url.
Uses multipart by default and Intelligent Ingestion by default (if enabled).
You can specify the number of multipart processes and pass in parameters.
returns [Filestack.Filelink]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file')
# to use different storage:
client = Client('API_KEY', storage='dropbox')
filelink = client.upload(filepath='/path/to/file', params={'container': 'my-container'})
# to use an external URL:
filelink = client.upload(url='https://www.example.com')
# to disable intelligent ingestion:
filelink = client.upload(filepath='/path/to/file', intelligent=False)
```
"""
if params: # Check the structure of parameters
STORE_SCHEMA.check(params)
if filepath and url: # Raise an error for using both filepath and external url
raise ValueError("Cannot upload file and external url at the same time")
if filepath: # Uploading from local drive
if intelligent:
response = intelligent_ingestion.upload(
self.apikey, filepath, self.storage, params=params, security=self.security
)
elif multipart:
response = upload_utils.multipart_upload(
self.apikey, filepath, self.storage,
upload_processes=upload_processes, params=params, security=self.security
)
handle = response['handle']
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else: # Uploading with multipart=False
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
path = '{path}/{storage}'.format(path=STORE_PATH, storage=self.storage)
if self.security:
path = "{path}?policy={policy}&signature={signature}".format(
path=path, policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
response = utils.make_call(
API_URL, 'post', path=path, params=params, files=files
)
else: # Uploading from an external URL
tasks = []
request_url_list = []
if utils.store_params_checker(params):
store_task = utils.store_params_maker(params)
tasks.append(store_task)
if self.security:
tasks.append(
'security=p:{policy},s:{signature}'.format(
policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
)
tasks = '/'.join(tasks)
if tasks:
request_url_list.extend((CDN_URL, self.apikey, tasks, url))
else:
request_url_list.extend((CDN_URL, self.apikey, url))
request_url = '/'.join(request_url_list)
response = requests.post(request_url, headers=HEADERS)
if response.ok:
response = response.json()
handle = re.match(
r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
response['url']
).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception('Invalid API response')
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
client.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def storage(self):
"""
Returns the storage associated with the client (defaults to 'S3')
*returns* [Dict]
```python
client.storage
# S3
```
"""
return self._storage
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
client.apikey
# YOUR_API_KEY
```
"""
return self._apikey
|
filestack/filestack-python | filestack/models/filestack_client.py | Client.urlscreenshot | python | def urlscreenshot(self, external_url, agent=None, mode=None, width=None, height=None, delay=None):
params = locals()
params.pop('self')
params.pop('external_url')
params = {k: v for k, v in params.items() if v is not None}
url_task = utils.return_transform_task('urlscreenshot', params)
new_transform = filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url)
new_transform._transformation_tasks.append(url_task)
return new_transform | Takes a 'screenshot' of the given URL
*returns* [Filestack.Transform]
```python
from filestack import Client
client = Client("API_KEY")
# returns a Transform object
screenshot = client.urlscreenshot('https://www.example.com', width=100, height=100, agent="desktop")
filelink = screenshot.store()
```
"def return_transform_task(transformation, params):\n transform_tasks = []\n\n for key, value in params.items():\n\n if isinstance(value, list):\n value = str(value).replace(\"'\", \"\").replace('\"', '').replace(\" \", \"\")\n if isinstance(value, bool):\n value = str(value).lower()\n\n transform_tasks.append('{}:{}'.format(key, value))\n\n transform_tasks = sorted(transform_tasks)\n\n if len(transform_tasks) > 0:\n transformation_url = '{}={}'.format(transformation, ','.join(transform_tasks))\n else:\n transformation_url = transformation\n\n return transformation_url\n"
] | class Client():
"""
The hub for all Filestack operations. Creates Filelinks, converts external to transform objects,
takes a URL screenshot and returns zipped files.
"""
def __init__(self, apikey, security=None, storage='S3'):
self._apikey = apikey
self._security = security
STORE_LOCATION_SCHEMA.check(storage)
self._storage = storage
def transform_external(self, external_url):
"""
Turns an external URL into a Filestack Transform object
*returns* [Filestack.Transform]
```python
from filestack import Client, Filelink
client = Client("API_KEY")
transform = client.transform_external('http://www.example.com')
```
"""
return filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url)
def zip(self, destination_path, files):
"""
Takes array of files and downloads a compressed ZIP archive
to provided path
*returns* [requests.response]
```python
from filestack import Client
client = Client("<API_KEY>")
client.zip('/path/to/file/destination', ['files'])
```
"""
zip_url = "{}/{}/zip/[{}]".format(CDN_URL, self.apikey, ','.join(files))
with open(destination_path, 'wb') as new_file:
response = utils.make_call(zip_url, 'get')
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
return response.text
def upload(self, url=None, filepath=None, multipart=True, params=None, upload_processes=None, intelligent=False):
"""
Uploads a file either through a local filepath or external_url.
Uses multipart by default and Intelligent Ingestion by default (if enabled).
You can specify the number of multipart processes and pass in parameters.
returns [Filestack.Filelink]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file')
# to use different storage:
client = Client('API_KEY', storage='dropbox')
filelink = client.upload(filepath='/path/to/file', params={'container': 'my-container'})
# to use an external URL:
filelink = client.upload(url='https://www.example.com')
# to disable intelligent ingestion:
filelink = client.upload(filepath='/path/to/file', intelligent=False)
```
"""
if params: # Check the structure of parameters
STORE_SCHEMA.check(params)
if filepath and url: # Raise an error for using both filepath and external url
raise ValueError("Cannot upload file and external url at the same time")
if filepath: # Uploading from local drive
if intelligent:
response = intelligent_ingestion.upload(
self.apikey, filepath, self.storage, params=params, security=self.security
)
elif multipart:
response = upload_utils.multipart_upload(
self.apikey, filepath, self.storage,
upload_processes=upload_processes, params=params, security=self.security
)
handle = response['handle']
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else: # Uploading with multipart=False
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
path = '{path}/{storage}'.format(path=STORE_PATH, storage=self.storage)
if self.security:
path = "{path}?policy={policy}&signature={signature}".format(
path=path, policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
response = utils.make_call(
API_URL, 'post', path=path, params=params, files=files
)
else: # Uploading from an external URL
tasks = []
request_url_list = []
if utils.store_params_checker(params):
store_task = utils.store_params_maker(params)
tasks.append(store_task)
if self.security:
tasks.append(
'security=p:{policy},s:{signature}'.format(
policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
)
tasks = '/'.join(tasks)
if tasks:
request_url_list.extend((CDN_URL, self.apikey, tasks, url))
else:
request_url_list.extend((CDN_URL, self.apikey, url))
request_url = '/'.join(request_url_list)
response = requests.post(request_url, headers=HEADERS)
if response.ok:
response = response.json()
handle = re.match(
r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
response['url']
).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception('Invalid API response')
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
client.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def storage(self):
"""
Returns the storage associated with the client (defaults to 'S3')
*returns* [Dict]
```python
client.storage
# S3
```
"""
return self._storage
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
client.apikey
# YOUR_API_KEY
```
"""
return self._apikey
|
filestack/filestack-python | filestack/models/filestack_client.py | Client.zip | python | def zip(self, destination_path, files):
zip_url = "{}/{}/zip/[{}]".format(CDN_URL, self.apikey, ','.join(files))
with open(destination_path, 'wb') as new_file:
response = utils.make_call(zip_url, 'get')
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
return response.text | Takes array of files and downloads a compressed ZIP archive
to provided path
*returns* [requests.response]
```python
from filestack import Client
client = Client("<API_KEY>")
client.zip('/path/to/file/destination', ['files'])
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_client.py#L69-L94 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n"
] | class Client():
"""
The hub for all Filestack operations. Creates Filelinks, converts external to transform objects,
takes a URL screenshot and returns zipped files.
"""
def __init__(self, apikey, security=None, storage='S3'):
self._apikey = apikey
self._security = security
STORE_LOCATION_SCHEMA.check(storage)
self._storage = storage
def transform_external(self, external_url):
"""
Turns an external URL into a Filestack Transform object
*returns* [Filestack.Transform]
```python
from filestack import Client, Filelink
client = Client("API_KEY")
transform = client.transform_external('http://www.example.com')
```
"""
return filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url)
def urlscreenshot(self, external_url, agent=None, mode=None, width=None, height=None, delay=None):
"""
Takes a 'screenshot' of the given URL
*returns* [Filestack.Transform]
```python
from filestack import Client
client = Client("API_KEY")
# returns a Transform object
screenshot = client.urlscreenshot('https://www.example.com', width=100, height=100, agent="desktop")
filelink = screenshot.store()
```
"""
params = locals()
params.pop('self')
params.pop('external_url')
params = {k: v for k, v in params.items() if v is not None}
url_task = utils.return_transform_task('urlscreenshot', params)
new_transform = filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url)
new_transform._transformation_tasks.append(url_task)
return new_transform
def upload(self, url=None, filepath=None, multipart=True, params=None, upload_processes=None, intelligent=False):
"""
Uploads a file either through a local filepath or external_url.
Uses multipart by default and Intelligent Ingestion by default (if enabled).
You can specify the number of multipart processes and pass in parameters.
returns [Filestack.Filelink]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file')
# to use different storage:
client = Client('API_KEY', storage='dropbox')
filelink = client.upload(filepath='/path/to/file', params={'container': 'my-container'})
# to use an external URL:
filelink = client.upload(url='https://www.example.com')
# to disable intelligent ingestion:
filelink = client.upload(filepath='/path/to/file', intelligent=False)
```
"""
if params: # Check the structure of parameters
STORE_SCHEMA.check(params)
if filepath and url: # Raise an error for using both filepath and external url
raise ValueError("Cannot upload file and external url at the same time")
if filepath: # Uploading from local drive
if intelligent:
response = intelligent_ingestion.upload(
self.apikey, filepath, self.storage, params=params, security=self.security
)
elif multipart:
response = upload_utils.multipart_upload(
self.apikey, filepath, self.storage,
upload_processes=upload_processes, params=params, security=self.security
)
handle = response['handle']
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else: # Uploading with multipart=False
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
path = '{path}/{storage}'.format(path=STORE_PATH, storage=self.storage)
if self.security:
path = "{path}?policy={policy}&signature={signature}".format(
path=path, policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
response = utils.make_call(
API_URL, 'post', path=path, params=params, files=files
)
else: # Uploading from an external URL
tasks = []
request_url_list = []
if utils.store_params_checker(params):
store_task = utils.store_params_maker(params)
tasks.append(store_task)
if self.security:
tasks.append(
'security=p:{policy},s:{signature}'.format(
policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
)
tasks = '/'.join(tasks)
if tasks:
request_url_list.extend((CDN_URL, self.apikey, tasks, url))
else:
request_url_list.extend((CDN_URL, self.apikey, url))
request_url = '/'.join(request_url_list)
response = requests.post(request_url, headers=HEADERS)
if response.ok:
response = response.json()
handle = re.match(
r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
response['url']
).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception('Invalid API response')
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
client.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def storage(self):
"""
Returns the storage associated with the client (defaults to 'S3')
*returns* [Dict]
```python
client.storage
# S3
```
"""
return self._storage
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
client.apikey
# YOUR_API_KEY
```
"""
return self._apikey
|
filestack/filestack-python | filestack/models/filestack_client.py | Client.upload | python | def upload(self, url=None, filepath=None, multipart=True, params=None, upload_processes=None, intelligent=False):
if params: # Check the structure of parameters
STORE_SCHEMA.check(params)
if filepath and url: # Raise an error for using both filepath and external url
raise ValueError("Cannot upload file and external url at the same time")
if filepath: # Uploading from local drive
if intelligent:
response = intelligent_ingestion.upload(
self.apikey, filepath, self.storage, params=params, security=self.security
)
elif multipart:
response = upload_utils.multipart_upload(
self.apikey, filepath, self.storage,
upload_processes=upload_processes, params=params, security=self.security
)
handle = response['handle']
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else: # Uploading with multipart=False
filename = os.path.basename(filepath)
mimetype = mimetypes.guess_type(filepath)[0]
files = {'fileUpload': (filename, open(filepath, 'rb'), mimetype)}
if params:
params['key'] = self.apikey
else:
params = {'key': self.apikey}
path = '{path}/{storage}'.format(path=STORE_PATH, storage=self.storage)
if self.security:
path = "{path}?policy={policy}&signature={signature}".format(
path=path, policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
response = utils.make_call(
API_URL, 'post', path=path, params=params, files=files
)
else: # Uploading from an external URL
tasks = []
request_url_list = []
if utils.store_params_checker(params):
store_task = utils.store_params_maker(params)
tasks.append(store_task)
if self.security:
tasks.append(
'security=p:{policy},s:{signature}'.format(
policy=self.security['policy'].decode('utf-8'),
signature=self.security['signature']
)
)
tasks = '/'.join(tasks)
if tasks:
request_url_list.extend((CDN_URL, self.apikey, tasks, url))
else:
request_url_list.extend((CDN_URL, self.apikey, url))
request_url = '/'.join(request_url_list)
response = requests.post(request_url, headers=HEADERS)
if response.ok:
response = response.json()
handle = re.match(
r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)',
response['url']
).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception('Invalid API response') | Uploads a file either through a local filepath or external_url.
Uses multipart by default and Intelligent Ingestion by default (if enabled).
You can specify the number of multipart processes and pass in parameters.
returns [Filestack.Filelink]
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file')
# to use different storage:
client = Client('API_KEY', storage='dropbox')
filelink = client.upload(filepath='/path/to/file', params={'container': 'my-container'})
# to use an external URL:
filelink = client.upload(url='https://www.example.com')
# to disable intelligent ingestion:
filelink = client.upload(filepath='/path/to/file', intelligent=False)
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_client.py#L96-L198 | [
"def upload(apikey, filepath, storage, params=None, security=None):\n upload_q = Queue()\n commit_q = Queue()\n response_q = Queue()\n\n manager_proc = Process(\n target=manage_upload,\n name='manager',\n args=(apikey, filepath, storage, params, security, upload_q, commit_q, response_q)\n )\n\n side_processes = [\n Process(\n target=consume_upload_job,\n name='uploader',\n args=(upload_q, response_q)\n ) for _ in range(NUM_OF_UPLOADERS)\n ]\n\n for _ in range(NUM_OF_COMMITTERS):\n side_processes.append(\n Process(\n target=commit_part,\n name='committer',\n args=(commit_q, response_q)\n )\n )\n\n for proc in side_processes:\n proc.start()\n\n manager_proc.start()\n manager_proc.join()\n\n for proc in side_processes:\n proc.terminate()\n\n try:\n final_response = response_q.get(block=True, timeout=1)\n if not isinstance(final_response, requests.Response):\n raise Exception()\n return final_response\n except Exception:\n raise Exception('Upload aborted')\n",
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n",
"def multipart_upload(apikey, filepath, storage, upload_processes=None, params=None, security=None):\n params = params or {}\n\n if upload_processes is None:\n upload_processes = multiprocessing.cpu_count()\n\n filename = params.get('filename')\n mimetype = params.get('mimetype')\n\n filename, filesize, mimetype = get_file_info(filepath, filename=filename, mimetype=mimetype)\n\n request_data = {\n 'apikey': apikey,\n 'filename': filename,\n 'mimetype': mimetype,\n 'size': filesize,\n 'store_location': storage\n }\n\n start_response = multipart_request(MULTIPART_START_URL, request_data, params, security)\n jobs = create_upload_jobs(filesize)\n\n pooling_job = partial(upload_chunk, apikey, filename, filepath, storage, start_response)\n pool = ThreadPool(upload_processes)\n uploaded_parts = pool.map(pooling_job, jobs)\n pool.close()\n\n location_url = start_response.pop('location_url')\n request_data.update(start_response)\n request_data['parts'] = ';'.join(uploaded_parts)\n\n if params.get('workflows'):\n workflows = ','.join('\"{}\"'.format(item) for item in params.get('workflows'))\n workflows = '[{}]'.format(workflows)\n request_data['workflows'] = workflows\n\n complete_response = multipart_request(\n 'https://{}/multipart/complete'.format(location_url),\n request_data,\n params,\n security\n )\n\n return complete_response\n",
"def store_params_checker(params):\n store_params_list = ['filename', 'location', 'path', 'container',\n 'region', 'access', 'base64decode', 'workflows']\n\n if any(key in params for key in store_params_list):\n return True\n else:\n return False\n"
] | class Client():
"""
The hub for all Filestack operations. Creates Filelinks, converts external to transform objects,
takes a URL screenshot and returns zipped files.
"""
def __init__(self, apikey, security=None, storage='S3'):
self._apikey = apikey
self._security = security
STORE_LOCATION_SCHEMA.check(storage)
self._storage = storage
def transform_external(self, external_url):
"""
Turns an external URL into a Filestack Transform object
*returns* [Filestack.Transform]
```python
from filestack import Client, Filelink
client = Client("API_KEY")
transform = client.transform_external('http://www.example.com')
```
"""
return filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url)
def urlscreenshot(self, external_url, agent=None, mode=None, width=None, height=None, delay=None):
"""
Takes a 'screenshot' of the given URL
*returns* [Filestack.Transform]
```python
from filestack import Client
client = Client("API_KEY")
# returns a Transform object
screenshot = client.url_screenshot('https://www.example.com', width=100, height=100, agent="desktop")
filelink = screenshot.store()
````
"""
params = locals()
params.pop('self')
params.pop('external_url')
params = {k: v for k, v in params.items() if v is not None}
url_task = utils.return_transform_task('urlscreenshot', params)
new_transform = filestack.models.Transform(apikey=self.apikey, security=self.security, external_url=external_url)
new_transform._transformation_tasks.append(url_task)
return new_transform
def zip(self, destination_path, files):
"""
Takes array of files and downloads a compressed ZIP archive
to provided path
*returns* [requests.response]
```python
from filestack import Client
client = Client("<API_KEY>")
client.zip('/path/to/file/destination', ['files'])
```
"""
zip_url = "{}/{}/zip/[{}]".format(CDN_URL, self.apikey, ','.join(files))
with open(destination_path, 'wb') as new_file:
response = utils.make_call(zip_url, 'get')
if response.ok:
for chunk in response.iter_content(1024):
if not chunk:
break
new_file.write(chunk)
return response
return response.text
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
client.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def storage(self):
"""
Returns the storage associated with the client (defaults to 'S3')
*returns* [Dict]
```python
client.storage
# S3
```
"""
return self._storage
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
client.apikey
# YOUR_API_KEY
```
"""
return self._apikey
|
filestack/filestack-python | filestack/models/filestack_transform.py | Transform.url | python | def url(self):
return utils.get_transform_url(
self._transformation_tasks, external_url=self.external_url,
handle=self.handle, security=self.security, apikey=self.apikey
) | Returns the URL for the current transformation, which can be used
to retrieve the file. If security is enabled, signature and policy parameters will
be included
*returns* [String]
```python
transform = client.upload(filepath='/path/to/file')
transform.url()
# https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_transform.py#L96-L113 | [
"def get_transform_url(tasks, external_url=None, handle=None, security=None, apikey=None, video=False):\n url_components = [(PROCESS_URL if video else CDN_URL)]\n if external_url:\n url_components.append(apikey)\n\n if 'debug' in tasks:\n index = tasks.index('debug')\n tasks.pop(index)\n tasks.insert(0, 'debug')\n\n url_components.append('/'.join(tasks))\n\n if security:\n url_components.append('security=policy:{},signature:{}'.format(\n security['policy'].decode('utf-8'), security['signature']))\n\n url_components.append(handle or external_url)\n\n url_path = '/'.join(url_components)\n\n return url_path\n"
] | class Transform(ImageTransformationMixin, CommonMixin):
"""
Transform objects take either a handle or an external URL. They act similarly to
Filelinks, but have specific methods like store, debug, and also construct
URLs differently.
Transform objects can be chained to build up multi-task transform URLs, each one saved in
self._transformation_tasks
"""
def __init__(self, apikey=None, handle=None, external_url=None, security=None):
"""
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file/foo.jpg')
transform = filelink.resize(width=100, height=100).rotate(deg=90)
new_filelink = transform.store()
```
"""
self._apikey = apikey
self._handle = handle
self._security = security
self._external_url = external_url
self._transformation_tasks = []
@property
def handle(self):
"""
Returns the handle associated with the instance (if any)
*returns* [String]
```python
transform.handle
# YOUR_HANDLE
```
"""
return self._handle
@property
def external_url(self):
"""
Returns the external URL associated with the instance (if any)
*returns* [String]
```python
transform.external_url
# YOUR_EXTERNAL_URL
```
"""
return self._external_url
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
transform.apikey
# YOUR_API_KEY
```
"""
return self._apikey
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
transform.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def store(self, filename=None, location=None, path=None, container=None, region=None, access=None, base64decode=None):
"""
Uploads and stores the current transformation as a Fileink
*returns* [Filestack.Filelink]
```python
filelink = transform.store()
```
"""
if path:
path = '"{}"'.format(path)
filelink_obj = self.add_transform_task('store', locals())
response = utils.make_call(filelink_obj.url, 'get')
if response.ok:
data = json.loads(response.text)
handle = re.match(r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)', data['url']).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception(response.text)
def debug(self):
"""
Returns a JSON object with inforamtion regarding the current transformation
*returns* [Dict]
"""
debug_instance = self.add_transform_task('debug', locals())
response = utils.make_call(debug_instance.url, 'get')
return response.json()
|
filestack/filestack-python | filestack/models/filestack_transform.py | Transform.store | python | def store(self, filename=None, location=None, path=None, container=None, region=None, access=None, base64decode=None):
if path:
path = '"{}"'.format(path)
filelink_obj = self.add_transform_task('store', locals())
response = utils.make_call(filelink_obj.url, 'get')
if response.ok:
data = json.loads(response.text)
handle = re.match(r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)', data['url']).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception(response.text) | Uploads and stores the current transformation as a Fileink
*returns* [Filestack.Filelink]
```python
filelink = transform.store()
``` | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_transform.py#L115-L136 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n",
"def add_transform_task(self, transformation, params):\n \"\"\"\n Adds a transform task to the current instance and returns it\n\n *returns* Filestack.Transform\n \"\"\"\n if not isinstance(self, filestack.models.Transform):\n instance = filestack.models.Transform(apikey=self.apikey, security=self.security, handle=self.handle)\n else:\n instance = self\n\n params.pop('self')\n params = {k: v for k, v in params.items() if v is not None}\n\n transformation_url = utils.return_transform_task(transformation, params)\n instance._transformation_tasks.append(transformation_url)\n\n return instance\n"
] | class Transform(ImageTransformationMixin, CommonMixin):
"""
Transform objects take either a handle or an external URL. They act similarly to
Filelinks, but have specific methods like store, debug, and also construct
URLs differently.
Transform objects can be chained to build up multi-task transform URLs, each one saved in
self._transformation_tasks
"""
def __init__(self, apikey=None, handle=None, external_url=None, security=None):
"""
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file/foo.jpg')
transform = filelink.resize(width=100, height=100).rotate(deg=90)
new_filelink = transform.store()
```
"""
self._apikey = apikey
self._handle = handle
self._security = security
self._external_url = external_url
self._transformation_tasks = []
@property
def handle(self):
"""
Returns the handle associated with the instance (if any)
*returns* [String]
```python
transform.handle
# YOUR_HANDLE
```
"""
return self._handle
@property
def external_url(self):
"""
Returns the external URL associated with the instance (if any)
*returns* [String]
```python
transform.external_url
# YOUR_EXTERNAL_URL
```
"""
return self._external_url
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
transform.apikey
# YOUR_API_KEY
```
"""
return self._apikey
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
transform.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def url(self):
"""
Returns the URL for the current transformation, which can be used
to retrieve the file. If security is enabled, signature and policy parameters will
be included
*returns* [String]
```python
transform = client.upload(filepath='/path/to/file')
transform.url()
# https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE
```
"""
return utils.get_transform_url(
self._transformation_tasks, external_url=self.external_url,
handle=self.handle, security=self.security, apikey=self.apikey
)
def debug(self):
"""
Returns a JSON object with inforamtion regarding the current transformation
*returns* [Dict]
"""
debug_instance = self.add_transform_task('debug', locals())
response = utils.make_call(debug_instance.url, 'get')
return response.json()
|
filestack/filestack-python | filestack/models/filestack_transform.py | Transform.debug | python | def debug(self):
debug_instance = self.add_transform_task('debug', locals())
response = utils.make_call(debug_instance.url, 'get')
return response.json() | Returns a JSON object with inforamtion regarding the current transformation
*returns* [Dict] | train | https://github.com/filestack/filestack-python/blob/f4d54c48987f3eeaad02d31cc5f6037e914bba0d/filestack/models/filestack_transform.py#L138-L146 | [
"def make_call(base, action, handle=None, path=None, params=None, data=None, files=None, security=None, transform_url=None):\n request_func = getattr(requests, action)\n if transform_url:\n return request_func(transform_url, params=params, headers=HEADERS, data=data, files=files)\n\n url = get_url(base, path=path, handle=handle, security=security)\n response = request_func(url, params=params, headers=HEADERS, data=data, files=files)\n\n if not response.ok:\n raise Exception(response.text)\n\n return response\n",
"def add_transform_task(self, transformation, params):\n \"\"\"\n Adds a transform task to the current instance and returns it\n\n *returns* Filestack.Transform\n \"\"\"\n if not isinstance(self, filestack.models.Transform):\n instance = filestack.models.Transform(apikey=self.apikey, security=self.security, handle=self.handle)\n else:\n instance = self\n\n params.pop('self')\n params = {k: v for k, v in params.items() if v is not None}\n\n transformation_url = utils.return_transform_task(transformation, params)\n instance._transformation_tasks.append(transformation_url)\n\n return instance\n"
] | class Transform(ImageTransformationMixin, CommonMixin):
"""
Transform objects take either a handle or an external URL. They act similarly to
Filelinks, but have specific methods like store, debug, and also construct
URLs differently.
Transform objects can be chained to build up multi-task transform URLs, each one saved in
self._transformation_tasks
"""
def __init__(self, apikey=None, handle=None, external_url=None, security=None):
"""
```python
from filestack import Client
client = Client("<API_KEY>")
filelink = client.upload(filepath='/path/to/file/foo.jpg')
transform = filelink.resize(width=100, height=100).rotate(deg=90)
new_filelink = transform.store()
```
"""
self._apikey = apikey
self._handle = handle
self._security = security
self._external_url = external_url
self._transformation_tasks = []
@property
def handle(self):
"""
Returns the handle associated with the instance (if any)
*returns* [String]
```python
transform.handle
# YOUR_HANDLE
```
"""
return self._handle
@property
def external_url(self):
"""
Returns the external URL associated with the instance (if any)
*returns* [String]
```python
transform.external_url
# YOUR_EXTERNAL_URL
```
"""
return self._external_url
@property
def apikey(self):
"""
Returns the API key associated with the instance
*returns* [String]
```python
transform.apikey
# YOUR_API_KEY
```
"""
return self._apikey
@property
def security(self):
"""
Returns the security object associated with the instance (if any)
*returns* [Dict]
```python
transform.security
# {'policy': 'YOUR_ENCODED_POLICY', 'signature': 'YOUR_ENCODED_SIGNATURE'}
```
"""
return self._security
@property
def url(self):
"""
Returns the URL for the current transformation, which can be used
to retrieve the file. If security is enabled, signature and policy parameters will
be included
*returns* [String]
```python
transform = client.upload(filepath='/path/to/file')
transform.url()
# https://cdn.filestackcontent.com/TRANSFORMS/FILE_HANDLE
```
"""
return utils.get_transform_url(
self._transformation_tasks, external_url=self.external_url,
handle=self.handle, security=self.security, apikey=self.apikey
)
def store(self, filename=None, location=None, path=None, container=None, region=None, access=None, base64decode=None):
"""
Uploads and stores the current transformation as a Fileink
*returns* [Filestack.Filelink]
```python
filelink = transform.store()
```
"""
if path:
path = '"{}"'.format(path)
filelink_obj = self.add_transform_task('store', locals())
response = utils.make_call(filelink_obj.url, 'get')
if response.ok:
data = json.loads(response.text)
handle = re.match(r'(?:https:\/\/cdn\.filestackcontent\.com\/)(\w+)', data['url']).group(1)
return filestack.models.Filelink(handle, apikey=self.apikey, security=self.security)
else:
raise Exception(response.text)
|
spotify/docker_interface | docker_interface/util.py | abspath | python | def abspath(path, ref=None):
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path | Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L29-L54 | null | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def split_path(path, ref=None):
    """
    Split a path into its components.

    Parameters
    ----------
    path : str
        absolute or relative path with respect to `ref`
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    list : str
        components of the path

    Raises
    ------
    ValueError
        if an absolute path cannot be constructed
    """
    # Resolve `path` against `ref` (same contract as `abspath`, inlined here).
    if ref:
        path = os.path.join(ref, path)
    if not os.path.isabs(path):
        raise ValueError("expected an absolute path but got '%s'" % path)
    sep = os.path.sep
    return path.strip(sep).split(sep)
def get_value(instance, path, ref=None):
    """
    Get the value from `instance` at the given `path`.

    Parameters
    ----------
    instance : dict or list
        instance from which to retrieve a value
    path : str
        path to retrieve a value from
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    value :
        value at `path` in `instance`

    Raises
    ------
    KeyError
        if `path` is not valid
    TypeError
        if a value along the `path` is not a list or dictionary
    """
    for part in split_path(path, ref):
        if isinstance(instance, list):
            # List elements are addressed by integer index.
            part = int(part)
        elif not isinstance(instance, dict):
            raise TypeError("expected `list` or `dict` but got `%s`" % instance)
        try:
            instance = instance[part]
        except (KeyError, IndexError):
            # Normalise missing dict keys AND out-of-range list indices to the
            # documented KeyError carrying the absolute path (previously an
            # out-of-range index leaked a bare IndexError).
            raise KeyError(abspath(path, ref))
    return instance
def pop_value(instance, path, ref=None):
    """
    Pop the value from `instance` at the given `path`.

    Parameters
    ----------
    instance : dict or list
        instance from which to retrieve a value
    path : str
        path to retrieve a value from
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    value :
        value at `path` in `instance`
    """
    parent_path, key = os.path.split(abspath(path, ref))
    parent = get_value(instance, parent_path)
    # Lists are popped by integer index, dicts by the raw string key.
    if isinstance(parent, list):
        return parent.pop(int(key))
    return parent.pop(key)
def set_value(instance, path, value, ref=None):
    """
    Set `value` on `instance` at the given `path` and create missing intermediate objects.

    Parameters
    ----------
    instance : dict or list
        instance from which to retrieve a value
    path : str
        path to retrieve a value from
    value :
        value to set
    ref : str or None
        reference path if `path` is relative
    """
    parts = split_path(path, ref)
    # Walk down to the parent of the final component, creating empty
    # dictionaries for any missing intermediate levels along the way.
    for part in parts[:-1]:
        instance = instance.setdefault(part, {})
    instance[parts[-1]] = value
def set_default(instance, path, value, ref=None):
    """
    Set `value` on `instance` at the given `path` only if nothing is stored
    there yet, creating missing intermediate objects, and return the value
    stored at `path` afterwards (the pre-existing value if one was present,
    otherwise `value`).

    Parameters
    ----------
    instance : dict or list
        instance on which to set a default value
    path : str
        path at which to set the default
    value :
        default value to store if `path` is missing
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    value :
        the value stored at `path` after the call
    """
    *head, tail = split_path(path, ref)
    for part in head:
        instance = instance.setdefault(part, {})
    return instance.setdefault(tail, value)
def merge(x, y):
    """
    Merge two dictionaries recursively and raise an error for inconsistencies.

    `x` is updated in place and also returned: values present only in `y` are
    copied into `x`, and values present in both must either be equal or be
    dictionaries that can themselves be merged.

    Parameters
    ----------
    x : dict
        dictionary x (modified in place)
    y : dict
        dictionary y

    Returns
    -------
    x : dict
        merged dictionary

    Raises
    ------
    ValueError
        if `x` and `y` are inconsistent
    """
    keys_x = set(x)
    keys_y = set(y)
    # Copy over the keys that only exist in `y`.
    for key in keys_y - keys_x:
        x[key] = y[key]
    # Reconcile the keys present in both dictionaries.
    for key in keys_x & keys_y:
        value_x = x[key]
        value_y = y[key]
        if isinstance(value_x, dict) and isinstance(value_y, dict):
            x[key] = merge(value_x, value_y)
        elif value_x != value_y:
            # Previously raised a bare ValueError with no diagnostic.
            raise ValueError(
                "conflicting values for key '%s': %r != %r" % (key, value_x, value_y))
    return x
def set_default_from_schema(instance, schema):
    """
    Populate default values on an `instance` given a `schema`.

    Parameters
    ----------
    instance : dict
        instance to populate default values for
    schema : dict
        JSON schema with default values

    Returns
    -------
    instance : dict
        instance with populated default values
    """
    properties = schema.get('properties', {})
    for name, subschema in properties.items():
        # Apply any default declared at this level of the schema.
        if 'default' in subschema:
            instance.setdefault(name, subschema['default'])
        # Recurse into nested object properties.
        if 'properties' in subschema:
            set_default_from_schema(instance.setdefault(name, {}), subschema)
    return instance
def apply(instance, func, path=None):
    """
    Apply `func` to all fundamental types of `instance`.

    Parameters
    ----------
    instance : dict
        instance to apply functions to
    func : callable
        function with two arguments (instance, path) to apply to all
        fundamental types recursively
    path : str
        path in the document (defaults to '/')

    Returns
    -------
    instance : dict
        instance after applying `func` to fundamental types
    """
    if not path:
        path = os.path.sep
    if isinstance(instance, list):
        result = []
        for index, item in enumerate(instance):
            result.append(apply(item, func, os.path.join(path, str(index))))
        return result
    if isinstance(instance, dict):
        result = {}
        for key, value in instance.items():
            result[key] = apply(value, func, os.path.join(path, key))
        return result
    # Fundamental (leaf) value: hand it to `func` together with its path.
    return func(instance, path)
def get_free_port(ports=None):
    """
    Get a free port.

    Parameters
    ----------
    ports : iterable
        ports to check (obtain a random port by default)

    Returns
    -------
    port : int
        a free port

    Raises
    ------
    RuntimeError
        if none of the specified `ports` is free
    """
    if ports is None:
        # Let the OS pick an ephemeral port by binding to port 0.
        with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.bind(('', 0))
            return sock.getsockname()[1]

    # Probe the candidate ports in order and return the first one that binds.
    for candidate in ports:
        with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            try:
                sock.bind(('', candidate))
            except socket.error as ex:
                # 48 (macOS) / 98 (Linux) are EADDRINUSE; anything else is fatal.
                if ex.errno not in (48, 98):
                    raise
            else:
                return candidate
    raise RuntimeError("could not find a free port")
|
spotify/docker_interface | docker_interface/util.py | split_path | python | def split_path(path, ref=None):
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep) | Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L57-L74 | [
"def abspath(path, ref=None):\n \"\"\"\n Create an absolute path.\n\n Parameters\n ----------\n path : str\n absolute or relative path with respect to `ref`\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n path : str\n absolute path\n\n Raises\n ------\n ValueError\n if an absolute path cannot be constructed\n \"\"\"\n if ref:\n path = os.path.join(ref, path)\n if not os.path.isabs(path):\n raise ValueError(\"expected an absolute path but got '%s'\" % path)\n return path\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
    """
    Create an absolute path.

    Parameters
    ----------
    path : str
        absolute or relative path with respect to `ref`
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    path : str
        absolute path

    Raises
    ------
    ValueError
        if an absolute path cannot be constructed
    """
    resolved = os.path.join(ref, path) if ref else path
    if os.path.isabs(resolved):
        return resolved
    raise ValueError("expected an absolute path but got '%s'" % resolved)
def get_value(instance, path, ref=None):
"""
Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary
"""
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail)
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value
def set_default(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
return instance.setdefault(tail, value)
def merge(x, y):
"""
Merge two dictionaries and raise an error for inconsistencies.
Parameters
----------
x : dict
dictionary x
y : dict
dictionary y
Returns
-------
x : dict
merged dictionary
Raises
------
ValueError
if `x` and `y` are inconsistent
"""
keys_x = set(x)
keys_y = set(y)
for key in keys_y - keys_x:
x[key] = y[key]
for key in keys_x & keys_y:
value_x = x[key]
value_y = y[key]
if isinstance(value_x, dict) and isinstance(value_y, dict):
x[key] = merge(value_x, value_y)
else:
if value_x != value_y:
raise ValueError
return x
def set_default_from_schema(instance, schema):
"""
Populate default values on an `instance` given a `schema`.
Parameters
----------
instance : dict
instance to populate default values for
schema : dict
JSON schema with default values
Returns
-------
instance : dict
instance with populated default values
"""
for name, property_ in schema.get('properties', {}).items():
# Set the defaults at this level of the schema
if 'default' in property_:
instance.setdefault(name, property_['default'])
# Descend one level if the property is an object
if 'properties' in property_:
set_default_from_schema(instance.setdefault(name, {}), property_)
return instance
def apply(instance, func, path=None):
"""
Apply `func` to all fundamental types of `instance`.
Parameters
----------
instance : dict
instance to apply functions to
func : callable
function with two arguments (instance, path) to apply to all fundamental types recursively
path : str
path in the document (defaults to '/')
Returns
-------
instance : dict
instance after applying `func` to fundamental types
"""
path = path or os.path.sep
if isinstance(instance, list):
return [apply(item, func, os.path.join(path, str(i))) for i, item in enumerate(instance)]
elif isinstance(instance, dict):
return {key: apply(value, func, os.path.join(path, key)) for key, value in instance.items()}
return func(instance, path)
def get_free_port(ports=None):
"""
Get a free port.
Parameters
----------
ports : iterable
ports to check (obtain a random port by default)
Returns
-------
port : int
a free port
"""
if ports is None:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
_socket.bind(('', 0))
_, port = _socket.getsockname()
return port
# Get ports from the specified list
for port in ports:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
try:
_socket.bind(('', port))
return port
except socket.error as ex:
if ex.errno not in (48, 98):
raise
raise RuntimeError("could not find a free port")
|
spotify/docker_interface | docker_interface/util.py | get_value | python | def get_value(instance, path, ref=None):
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance | Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L77-L111 | [
"def abspath(path, ref=None):\n \"\"\"\n Create an absolute path.\n\n Parameters\n ----------\n path : str\n absolute or relative path with respect to `ref`\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n path : str\n absolute path\n\n Raises\n ------\n ValueError\n if an absolute path cannot be constructed\n \"\"\"\n if ref:\n path = os.path.join(ref, path)\n if not os.path.isabs(path):\n raise ValueError(\"expected an absolute path but got '%s'\" % path)\n return path\n",
"def split_path(path, ref=None):\n \"\"\"\n Split a path into its components.\n\n Parameters\n ----------\n path : str\n absolute or relative path with respect to `ref`\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n list : str\n components of the path\n \"\"\"\n path = abspath(path, ref)\n return path.strip(os.path.sep).split(os.path.sep)\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
"""
Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed
"""
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path
def split_path(path, ref=None):
"""
Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path
"""
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep)
def pop_value(instance, path, ref=None):
    """
    Pop the value from `instance` at the given `path`.

    Parameters
    ----------
    instance : dict or list
        instance from which to retrieve a value
    path : str
        path to retrieve a value from
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    value :
        value at `path` in `instance`
    """
    parent_path, leaf = os.path.split(abspath(path, ref))
    parent = get_value(instance, parent_path)
    # Lists are indexed by integer position rather than string key.
    key = int(leaf) if isinstance(parent, list) else leaf
    return parent.pop(key)
def set_value(instance, path, value, ref=None):
    """
    Set `value` on `instance` at the given `path` and create missing intermediate objects.

    Parameters
    ----------
    instance : dict or list
        instance from which to retrieve a value
    path : str
        path to retrieve a value from
    value :
        value to set
    ref : str or None
        reference path if `path` is relative
    """
    parts = split_path(path, ref)
    target = instance
    # Walk (and create) intermediate dictionaries down to the parent of the leaf.
    for part in parts[:-1]:
        target = target.setdefault(part, {})
    target[parts[-1]] = value
def set_default(instance, path, value, ref=None):
    """
    Set a default `value` on `instance` at the given `path` and create missing
    intermediate objects, returning the value already present if there is one.

    Parameters
    ----------
    instance : dict or list
        instance from which to retrieve a value
    path : str
        path to retrieve a value from
    value :
        value to set
    ref : str or None
        reference path if `path` is relative
    """
    parts = split_path(path, ref)
    target = instance
    # Create intermediate dictionaries as needed, then defer to dict.setdefault
    # for the leaf so an existing value is preserved and returned.
    for part in parts[:-1]:
        target = target.setdefault(part, {})
    return target.setdefault(parts[-1], value)
def merge(x, y):
    """
    Recursively merge dictionary `y` into `x` and raise an error for inconsistencies.

    Note that `x` is modified in place and also returned for convenience.

    Parameters
    ----------
    x : dict
        dictionary x (mutated in place)
    y : dict
        dictionary y

    Returns
    -------
    x : dict
        merged dictionary

    Raises
    ------
    ValueError
        if `x` and `y` assign different non-dict values to the same key
    """
    for key, value_y in y.items():
        if key not in x:
            # Key only present in `y`: adopt it wholesale.
            x[key] = value_y
            continue
        value_x = x[key]
        if isinstance(value_x, dict) and isinstance(value_y, dict):
            # Both sides are mappings: merge them recursively.
            x[key] = merge(value_x, value_y)
        elif value_x != value_y:
            # Name the offending key and values so the conflict is diagnosable
            # (the original raised a bare ValueError with no message).
            raise ValueError(
                "inconsistent values for key '%s': %r != %r" % (key, value_x, value_y))
    return x
def set_default_from_schema(instance, schema):
    """
    Populate default values on an `instance` given a `schema`.

    Parameters
    ----------
    instance : dict
        instance to populate default values for (mutated in place)
    schema : dict
        JSON schema with default values

    Returns
    -------
    instance : dict
        instance with populated default values
    """
    properties = schema.get('properties', {})
    for name, subschema in properties.items():
        # Apply the default declared at this level, if any.
        if 'default' in subschema:
            instance.setdefault(name, subschema['default'])
        # Recurse into nested object schemas, creating the container if missing.
        if 'properties' in subschema:
            child = instance.setdefault(name, {})
            set_default_from_schema(child, subschema)
    return instance
def apply(instance, func, path=None):
    """
    Apply `func` to all fundamental types of `instance`.

    Parameters
    ----------
    instance : dict
        instance to apply functions to
    func : callable
        function with two arguments (instance, path) to apply to all fundamental types recursively
    path : str
        path in the document (defaults to '/')

    Returns
    -------
    instance : dict
        instance after applying `func` to fundamental types
    """
    path = path if path else os.path.sep
    if isinstance(instance, dict):
        return {key: apply(value, func, os.path.join(path, key))
                for key, value in instance.items()}
    if isinstance(instance, list):
        return [apply(item, func, os.path.join(path, str(index)))
                for index, item in enumerate(instance)]
    # Fundamental type: hand it to the callback together with its path.
    return func(instance, path)
def get_free_port(ports=None):
    """
    Get a free port.

    Note that the port is released before this function returns, so another
    process may grab it before the caller binds it (inherent race of the
    "probe then use" pattern).

    Parameters
    ----------
    ports : iterable or None
        ports to check (obtain a random ephemeral port by default)

    Returns
    -------
    port : int
        a free port

    Raises
    ------
    RuntimeError
        if none of the given `ports` is free
    """
    if ports is None:
        # Binding to port 0 asks the OS to assign any free ephemeral port.
        with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
            _socket.bind(('', 0))
            _, port = _socket.getsockname()
        return port
    # Probe the ports in the specified list in order.
    for port in ports:
        with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
            try:
                _socket.bind(('', port))
                return port
            except socket.error as ex:
                # Skip ports that are already taken; `errno.EADDRINUSE` replaces
                # the original magic numbers (48 on macOS, 98 on Linux).
                if ex.errno != errno.EADDRINUSE:
                    raise
    raise RuntimeError("could not find a free port")
|
spotify/docker_interface | docker_interface/util.py | pop_value | python | def pop_value(instance, path, ref=None):
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail) | Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance` | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L114-L136 | [
"def abspath(path, ref=None):\n \"\"\"\n Create an absolute path.\n\n Parameters\n ----------\n path : str\n absolute or relative path with respect to `ref`\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n path : str\n absolute path\n\n Raises\n ------\n ValueError\n if an absolute path cannot be constructed\n \"\"\"\n if ref:\n path = os.path.join(ref, path)\n if not os.path.isabs(path):\n raise ValueError(\"expected an absolute path but got '%s'\" % path)\n return path\n",
"def get_value(instance, path, ref=None):\n \"\"\"\n Get the value from `instance` at the given `path`.\n\n Parameters\n ----------\n instance : dict or list\n instance from which to retrieve a value\n path : str\n path to retrieve a value from\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n value :\n value at `path` in `instance`\n\n Raises\n ------\n KeyError\n if `path` is not valid\n TypeError\n if a value along the `path` is not a list or dictionary\n \"\"\"\n for part in split_path(path, ref):\n if isinstance(instance, list):\n part = int(part)\n elif not isinstance(instance, dict):\n raise TypeError(\"expected `list` or `dict` but got `%s`\" % instance)\n try:\n instance = instance[part]\n except KeyError:\n raise KeyError(abspath(path, ref))\n return instance\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
"""
Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed
"""
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path
def split_path(path, ref=None):
"""
Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path
"""
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep)
def get_value(instance, path, ref=None):
    """
    Get the value from `instance` at the given `path`.

    Parameters
    ----------
    instance : dict or list
        instance from which to retrieve a value
    path : str
        path to retrieve a value from
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    value :
        value at `path` in `instance`

    Raises
    ------
    KeyError
        if `path` is not valid
    TypeError
        if a value along the `path` is not a list or dictionary
    """
    current = instance
    for part in split_path(path, ref):
        if isinstance(current, list):
            # Lists are indexed by integer position.
            key = int(part)
        elif isinstance(current, dict):
            key = part
        else:
            raise TypeError("expected `list` or `dict` but got `%s`" % current)
        try:
            current = current[key]
        except KeyError:
            # Re-raise with the full absolute path for easier diagnosis.
            raise KeyError(abspath(path, ref))
    return current
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value
def set_default(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
return instance.setdefault(tail, value)
def merge(x, y):
"""
Merge two dictionaries and raise an error for inconsistencies.
Parameters
----------
x : dict
dictionary x
y : dict
dictionary y
Returns
-------
x : dict
merged dictionary
Raises
------
ValueError
if `x` and `y` are inconsistent
"""
keys_x = set(x)
keys_y = set(y)
for key in keys_y - keys_x:
x[key] = y[key]
for key in keys_x & keys_y:
value_x = x[key]
value_y = y[key]
if isinstance(value_x, dict) and isinstance(value_y, dict):
x[key] = merge(value_x, value_y)
else:
if value_x != value_y:
raise ValueError
return x
def set_default_from_schema(instance, schema):
"""
Populate default values on an `instance` given a `schema`.
Parameters
----------
instance : dict
instance to populate default values for
schema : dict
JSON schema with default values
Returns
-------
instance : dict
instance with populated default values
"""
for name, property_ in schema.get('properties', {}).items():
# Set the defaults at this level of the schema
if 'default' in property_:
instance.setdefault(name, property_['default'])
# Descend one level if the property is an object
if 'properties' in property_:
set_default_from_schema(instance.setdefault(name, {}), property_)
return instance
def apply(instance, func, path=None):
"""
Apply `func` to all fundamental types of `instance`.
Parameters
----------
instance : dict
instance to apply functions to
func : callable
function with two arguments (instance, path) to apply to all fundamental types recursively
path : str
path in the document (defaults to '/')
Returns
-------
instance : dict
instance after applying `func` to fundamental types
"""
path = path or os.path.sep
if isinstance(instance, list):
return [apply(item, func, os.path.join(path, str(i))) for i, item in enumerate(instance)]
elif isinstance(instance, dict):
return {key: apply(value, func, os.path.join(path, key)) for key, value in instance.items()}
return func(instance, path)
def get_free_port(ports=None):
"""
Get a free port.
Parameters
----------
ports : iterable
ports to check (obtain a random port by default)
Returns
-------
port : int
a free port
"""
if ports is None:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
_socket.bind(('', 0))
_, port = _socket.getsockname()
return port
# Get ports from the specified list
for port in ports:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
try:
_socket.bind(('', port))
return port
except socket.error as ex:
if ex.errno not in (48, 98):
raise
raise RuntimeError("could not find a free port")
|
spotify/docker_interface | docker_interface/util.py | set_value | python | def set_value(instance, path, value, ref=None):
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value | Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative | train | https://github.com/spotify/docker_interface/blob/4df80e1fe072d958020080d32c16551ff7703d51/docker_interface/util.py#L139-L157 | [
"def split_path(path, ref=None):\n \"\"\"\n Split a path into its components.\n\n Parameters\n ----------\n path : str\n absolute or relative path with respect to `ref`\n ref : str or None\n reference path if `path` is relative\n\n Returns\n -------\n list : str\n components of the path\n \"\"\"\n path = abspath(path, ref)\n return path.strip(os.path.sep).split(os.path.sep)\n"
] | # Copyright 2018 Spotify AB
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
import socket
TYPES = {
'integer': int,
'string': str,
'number': float,
'boolean': bool,
'array': list,
}
def abspath(path, ref=None):
"""
Create an absolute path.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
path : str
absolute path
Raises
------
ValueError
if an absolute path cannot be constructed
"""
if ref:
path = os.path.join(ref, path)
if not os.path.isabs(path):
raise ValueError("expected an absolute path but got '%s'" % path)
return path
def split_path(path, ref=None):
"""
Split a path into its components.
Parameters
----------
path : str
absolute or relative path with respect to `ref`
ref : str or None
reference path if `path` is relative
Returns
-------
list : str
components of the path
"""
path = abspath(path, ref)
return path.strip(os.path.sep).split(os.path.sep)
def get_value(instance, path, ref=None):
"""
Get the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
Raises
------
KeyError
if `path` is not valid
TypeError
if a value along the `path` is not a list or dictionary
"""
for part in split_path(path, ref):
if isinstance(instance, list):
part = int(part)
elif not isinstance(instance, dict):
raise TypeError("expected `list` or `dict` but got `%s`" % instance)
try:
instance = instance[part]
except KeyError:
raise KeyError(abspath(path, ref))
return instance
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail)
def set_default(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
return instance.setdefault(tail, value)
def merge(x, y):
"""
Merge two dictionaries and raise an error for inconsistencies.
Parameters
----------
x : dict
dictionary x
y : dict
dictionary y
Returns
-------
x : dict
merged dictionary
Raises
------
ValueError
if `x` and `y` are inconsistent
"""
keys_x = set(x)
keys_y = set(y)
for key in keys_y - keys_x:
x[key] = y[key]
for key in keys_x & keys_y:
value_x = x[key]
value_y = y[key]
if isinstance(value_x, dict) and isinstance(value_y, dict):
x[key] = merge(value_x, value_y)
else:
if value_x != value_y:
raise ValueError
return x
def set_default_from_schema(instance, schema):
"""
Populate default values on an `instance` given a `schema`.
Parameters
----------
instance : dict
instance to populate default values for
schema : dict
JSON schema with default values
Returns
-------
instance : dict
instance with populated default values
"""
for name, property_ in schema.get('properties', {}).items():
# Set the defaults at this level of the schema
if 'default' in property_:
instance.setdefault(name, property_['default'])
# Descend one level if the property is an object
if 'properties' in property_:
set_default_from_schema(instance.setdefault(name, {}), property_)
return instance
def apply(instance, func, path=None):
"""
Apply `func` to all fundamental types of `instance`.
Parameters
----------
instance : dict
instance to apply functions to
func : callable
function with two arguments (instance, path) to apply to all fundamental types recursively
path : str
path in the document (defaults to '/')
Returns
-------
instance : dict
instance after applying `func` to fundamental types
"""
path = path or os.path.sep
if isinstance(instance, list):
return [apply(item, func, os.path.join(path, str(i))) for i, item in enumerate(instance)]
elif isinstance(instance, dict):
return {key: apply(value, func, os.path.join(path, key)) for key, value in instance.items()}
return func(instance, path)
def get_free_port(ports=None):
"""
Get a free port.
Parameters
----------
ports : iterable
ports to check (obtain a random port by default)
Returns
-------
port : int
a free port
"""
if ports is None:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
_socket.bind(('', 0))
_, port = _socket.getsockname()
return port
# Get ports from the specified list
for port in ports:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as _socket:
try:
_socket.bind(('', port))
return port
except socket.error as ex:
if ex.errno not in (48, 98):
raise
raise RuntimeError("could not find a free port")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.