repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
jtambasco/modesolverpy | modesolverpy/structure_base.py | _AbstractStructure.x | python | def x(self):
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x | np.array: The grid points in x. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L130-L139 | null | class _AbstractStructure(with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def n(self):
'''
np.array: A grid of refractive indices representing
the refractive index profile of the structure.
'''
pass
@property
def x_pts(self):
'''
int: The number of grid points in x.
'''
return int((self.x_max - self.x_min) / self.x_step + 1)
@property
def y_pts(self):
'''
int: The number of grid points in y.
'''
return int((self.y_max - self.y_min) / self.y_step)
@property
def x_ctr(self):
'''
float: The centre distance in x.
'''
return 0.5*(self.x_max + self.x_min)
@property
def y_ctr(self):
'''
float: The centre distance in y
'''
return 0.5*(self.y_max + self.y_min)
@property
def xc(self):
'''
np.array: The centre points of the x points.
'''
return 0.5*(self.x[1:] + self.x[:-1])
@property
def yc(self):
'''
np.array: The centre points of the y points.
'''
return 0.5*(self.y[1:] + self.y[:-1])
@property
def xc_pts(self):
'''
int: The number of points in `xc`.
'''
return self.x_pts - 1
@property
def yc_pts(self):
'''
int: The number of points in `yc`.
'''
return self.y_pts - 1
@property
def xc_min(self):
'''
float: The minimum value of `xc`.
'''
return self.xc[0]
@property
def xc_max(self):
'''
float: The maximum value of `xc`.
'''
return self.xc[-1]
@property
def yc_min(self):
'''
float: The minimum value of `yc`.
'''
return self.yc[0]
@property
def yc_max(self):
'''
float: The maximum value of `yc`.
'''
return self.yc[-1]
@property
@property
def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y
@property
def eps(self):
'''
np.array: A grid of permittivies representing
the permittivity profile of the structure.
'''
return self.n**2
@property
def eps_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary.
'''
interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)
interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)
interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)
return interp
@property
def n_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary.
'''
return interpolate.interp2d(self.x, self.y, self.n)
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material):
angle = np.radians(angle)
trap_len = (y_top_right - y_bot_left) / np.tan(angle)
num_x_iterations = trap_len / self.x_step
y_per_iteration = num_x_iterations / self.y_pts
lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)
running_removal_float = y_per_iteration
for i, _ in enumerate(xy_mask):
if running_removal_float >= 1:
removal_int = int(round(running_removal_float))
lhs_x_start_index -= removal_int
rhs_x_stop_index += removal_int
running_removal_float -= removal_int
running_removal_float += y_per_iteration
xy_mask[i][:lhs_x_start_index] = False
xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True
self.n[xy_mask] = n_material
return self.n
def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,
n_material, angle=0):
'''
A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls.
'''
x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)
y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)
xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))
self.n[xy_mask] = n_material
if angle:
self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material)
return self.n
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
with open(filename, 'w') as fs:
for n_row in np.abs(self.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(filename)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile',
'x_pts': self.x_pts,
'y_pts': self.y_pts,
'x_min': self.x_min,
'x_max': self.x_max,
'y_min': self.y_min,
'y_max': self.y_max,
'filename_data': filename,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args)
def __str__(self):
return self.n.__str__()
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | _AbstractStructure.y | python | def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y | np.array: The grid points in y. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L142-L151 | null | class _AbstractStructure(with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def n(self):
'''
np.array: A grid of refractive indices representing
the refractive index profile of the structure.
'''
pass
@property
def x_pts(self):
'''
int: The number of grid points in x.
'''
return int((self.x_max - self.x_min) / self.x_step + 1)
@property
def y_pts(self):
'''
int: The number of grid points in y.
'''
return int((self.y_max - self.y_min) / self.y_step)
@property
def x_ctr(self):
'''
float: The centre distance in x.
'''
return 0.5*(self.x_max + self.x_min)
@property
def y_ctr(self):
'''
float: The centre distance in y
'''
return 0.5*(self.y_max + self.y_min)
@property
def xc(self):
'''
np.array: The centre points of the x points.
'''
return 0.5*(self.x[1:] + self.x[:-1])
@property
def yc(self):
'''
np.array: The centre points of the y points.
'''
return 0.5*(self.y[1:] + self.y[:-1])
@property
def xc_pts(self):
'''
int: The number of points in `xc`.
'''
return self.x_pts - 1
@property
def yc_pts(self):
'''
int: The number of points in `yc`.
'''
return self.y_pts - 1
@property
def xc_min(self):
'''
float: The minimum value of `xc`.
'''
return self.xc[0]
@property
def xc_max(self):
'''
float: The maximum value of `xc`.
'''
return self.xc[-1]
@property
def yc_min(self):
'''
float: The minimum value of `yc`.
'''
return self.yc[0]
@property
def yc_max(self):
'''
float: The maximum value of `yc`.
'''
return self.yc[-1]
@property
def x(self):
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x
@property
@property
def eps(self):
'''
np.array: A grid of permittivies representing
the permittivity profile of the structure.
'''
return self.n**2
@property
def eps_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary.
'''
interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)
interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)
interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)
return interp
@property
def n_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary.
'''
return interpolate.interp2d(self.x, self.y, self.n)
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material):
angle = np.radians(angle)
trap_len = (y_top_right - y_bot_left) / np.tan(angle)
num_x_iterations = trap_len / self.x_step
y_per_iteration = num_x_iterations / self.y_pts
lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)
running_removal_float = y_per_iteration
for i, _ in enumerate(xy_mask):
if running_removal_float >= 1:
removal_int = int(round(running_removal_float))
lhs_x_start_index -= removal_int
rhs_x_stop_index += removal_int
running_removal_float -= removal_int
running_removal_float += y_per_iteration
xy_mask[i][:lhs_x_start_index] = False
xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True
self.n[xy_mask] = n_material
return self.n
def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,
n_material, angle=0):
'''
A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls.
'''
x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)
y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)
xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))
self.n[xy_mask] = n_material
if angle:
self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material)
return self.n
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
with open(filename, 'w') as fs:
for n_row in np.abs(self.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(filename)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile',
'x_pts': self.x_pts,
'y_pts': self.y_pts,
'x_min': self.x_min,
'x_max': self.x_max,
'y_min': self.y_min,
'y_max': self.y_max,
'filename_data': filename,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args)
def __str__(self):
return self.n.__str__()
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | _AbstractStructure.eps_func | python | def eps_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary.
'''
interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)
interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)
interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)
return interp | function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L162-L171 | null | class _AbstractStructure(with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def n(self):
'''
np.array: A grid of refractive indices representing
the refractive index profile of the structure.
'''
pass
@property
def x_pts(self):
'''
int: The number of grid points in x.
'''
return int((self.x_max - self.x_min) / self.x_step + 1)
@property
def y_pts(self):
'''
int: The number of grid points in y.
'''
return int((self.y_max - self.y_min) / self.y_step)
@property
def x_ctr(self):
'''
float: The centre distance in x.
'''
return 0.5*(self.x_max + self.x_min)
@property
def y_ctr(self):
'''
float: The centre distance in y
'''
return 0.5*(self.y_max + self.y_min)
@property
def xc(self):
'''
np.array: The centre points of the x points.
'''
return 0.5*(self.x[1:] + self.x[:-1])
@property
def yc(self):
'''
np.array: The centre points of the y points.
'''
return 0.5*(self.y[1:] + self.y[:-1])
@property
def xc_pts(self):
'''
int: The number of points in `xc`.
'''
return self.x_pts - 1
@property
def yc_pts(self):
'''
int: The number of points in `yc`.
'''
return self.y_pts - 1
@property
def xc_min(self):
'''
float: The minimum value of `xc`.
'''
return self.xc[0]
@property
def xc_max(self):
'''
float: The maximum value of `xc`.
'''
return self.xc[-1]
@property
def yc_min(self):
'''
float: The minimum value of `yc`.
'''
return self.yc[0]
@property
def yc_max(self):
'''
float: The maximum value of `yc`.
'''
return self.yc[-1]
@property
def x(self):
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x
@property
def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y
@property
def eps(self):
'''
np.array: A grid of permittivies representing
the permittivity profile of the structure.
'''
return self.n**2
@property
@property
def n_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary.
'''
return interpolate.interp2d(self.x, self.y, self.n)
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material):
angle = np.radians(angle)
trap_len = (y_top_right - y_bot_left) / np.tan(angle)
num_x_iterations = trap_len / self.x_step
y_per_iteration = num_x_iterations / self.y_pts
lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)
running_removal_float = y_per_iteration
for i, _ in enumerate(xy_mask):
if running_removal_float >= 1:
removal_int = int(round(running_removal_float))
lhs_x_start_index -= removal_int
rhs_x_stop_index += removal_int
running_removal_float -= removal_int
running_removal_float += y_per_iteration
xy_mask[i][:lhs_x_start_index] = False
xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True
self.n[xy_mask] = n_material
return self.n
def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,
n_material, angle=0):
'''
A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls.
'''
x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)
y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)
xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))
self.n[xy_mask] = n_material
if angle:
self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material)
return self.n
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
with open(filename, 'w') as fs:
for n_row in np.abs(self.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(filename)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile',
'x_pts': self.x_pts,
'y_pts': self.y_pts,
'x_min': self.x_min,
'x_max': self.x_max,
'y_min': self.y_min,
'y_max': self.y_max,
'filename_data': filename,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args)
def __str__(self):
return self.n.__str__()
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | _AbstractStructure.n_func | python | def n_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary.
'''
return interpolate.interp2d(self.x, self.y, self.n) | function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L174-L180 | null | class _AbstractStructure(with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def n(self):
'''
np.array: A grid of refractive indices representing
the refractive index profile of the structure.
'''
pass
@property
def x_pts(self):
'''
int: The number of grid points in x.
'''
return int((self.x_max - self.x_min) / self.x_step + 1)
@property
def y_pts(self):
'''
int: The number of grid points in y.
'''
return int((self.y_max - self.y_min) / self.y_step)
@property
def x_ctr(self):
'''
float: The centre distance in x.
'''
return 0.5*(self.x_max + self.x_min)
@property
def y_ctr(self):
'''
float: The centre distance in y
'''
return 0.5*(self.y_max + self.y_min)
@property
def xc(self):
'''
np.array: The centre points of the x points.
'''
return 0.5*(self.x[1:] + self.x[:-1])
@property
def yc(self):
'''
np.array: The centre points of the y points.
'''
return 0.5*(self.y[1:] + self.y[:-1])
@property
def xc_pts(self):
'''
int: The number of points in `xc`.
'''
return self.x_pts - 1
@property
def yc_pts(self):
'''
int: The number of points in `yc`.
'''
return self.y_pts - 1
@property
def xc_min(self):
'''
float: The minimum value of `xc`.
'''
return self.xc[0]
@property
def xc_max(self):
'''
float: The maximum value of `xc`.
'''
return self.xc[-1]
@property
def yc_min(self):
'''
float: The minimum value of `yc`.
'''
return self.yc[0]
@property
def yc_max(self):
'''
float: The maximum value of `yc`.
'''
return self.yc[-1]
@property
def x(self):
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x
@property
def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y
@property
def eps(self):
'''
np.array: A grid of permittivies representing
the permittivity profile of the structure.
'''
return self.n**2
@property
def eps_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary.
'''
interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)
interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)
interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)
return interp
@property
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material):
angle = np.radians(angle)
trap_len = (y_top_right - y_bot_left) / np.tan(angle)
num_x_iterations = trap_len / self.x_step
y_per_iteration = num_x_iterations / self.y_pts
lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)
running_removal_float = y_per_iteration
for i, _ in enumerate(xy_mask):
if running_removal_float >= 1:
removal_int = int(round(running_removal_float))
lhs_x_start_index -= removal_int
rhs_x_stop_index += removal_int
running_removal_float -= removal_int
running_removal_float += y_per_iteration
xy_mask[i][:lhs_x_start_index] = False
xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True
self.n[xy_mask] = n_material
return self.n
def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,
n_material, angle=0):
'''
A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls.
'''
x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)
y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)
xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))
self.n[xy_mask] = n_material
if angle:
self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material)
return self.n
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
with open(filename, 'w') as fs:
for n_row in np.abs(self.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(filename)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile',
'x_pts': self.x_pts,
'y_pts': self.y_pts,
'x_min': self.x_min,
'x_max': self.x_max,
'y_min': self.y_min,
'y_max': self.y_max,
'filename_data': filename,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args)
def __str__(self):
return self.n.__str__()
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | _AbstractStructure._add_material | python | def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,
n_material, angle=0):
'''
A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls.
'''
x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)
y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)
xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))
self.n[xy_mask] = n_material
if angle:
self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material)
return self.n | A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L207-L239 | null | class _AbstractStructure(with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def n(self):
'''
np.array: A grid of refractive indices representing
the refractive index profile of the structure.
'''
pass
@property
def x_pts(self):
'''
int: The number of grid points in x.
'''
return int((self.x_max - self.x_min) / self.x_step + 1)
@property
def y_pts(self):
'''
int: The number of grid points in y.
'''
return int((self.y_max - self.y_min) / self.y_step)
@property
def x_ctr(self):
'''
float: The centre distance in x.
'''
return 0.5*(self.x_max + self.x_min)
@property
def y_ctr(self):
'''
float: The centre distance in y
'''
return 0.5*(self.y_max + self.y_min)
@property
def xc(self):
'''
np.array: The centre points of the x points.
'''
return 0.5*(self.x[1:] + self.x[:-1])
@property
def yc(self):
'''
np.array: The centre points of the y points.
'''
return 0.5*(self.y[1:] + self.y[:-1])
@property
def xc_pts(self):
'''
int: The number of points in `xc`.
'''
return self.x_pts - 1
@property
def yc_pts(self):
'''
int: The number of points in `yc`.
'''
return self.y_pts - 1
@property
def xc_min(self):
'''
float: The minimum value of `xc`.
'''
return self.xc[0]
@property
def xc_max(self):
'''
float: The maximum value of `xc`.
'''
return self.xc[-1]
@property
def yc_min(self):
'''
float: The minimum value of `yc`.
'''
return self.yc[0]
@property
def yc_max(self):
'''
float: The maximum value of `yc`.
'''
return self.yc[-1]
@property
def x(self):
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x
@property
def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y
@property
def eps(self):
'''
np.array: A grid of permittivies representing
the permittivity profile of the structure.
'''
return self.n**2
@property
def eps_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary.
'''
interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)
interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)
interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)
return interp
@property
def n_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary.
'''
return interpolate.interp2d(self.x, self.y, self.n)
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material):
angle = np.radians(angle)
trap_len = (y_top_right - y_bot_left) / np.tan(angle)
num_x_iterations = trap_len / self.x_step
y_per_iteration = num_x_iterations / self.y_pts
lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)
running_removal_float = y_per_iteration
for i, _ in enumerate(xy_mask):
if running_removal_float >= 1:
removal_int = int(round(running_removal_float))
lhs_x_start_index -= removal_int
rhs_x_stop_index += removal_int
running_removal_float -= removal_int
running_removal_float += y_per_iteration
xy_mask[i][:lhs_x_start_index] = False
xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True
self.n[xy_mask] = n_material
return self.n
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
with open(filename, 'w') as fs:
for n_row in np.abs(self.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(filename)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile',
'x_pts': self.x_pts,
'y_pts': self.y_pts,
'x_min': self.x_min,
'x_max': self.x_max,
'y_min': self.y_min,
'y_max': self.y_max,
'filename_data': filename,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args)
def __str__(self):
return self.n.__str__()
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | _AbstractStructure.write_to_file | python | def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
with open(filename, 'w') as fs:
for n_row in np.abs(self.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(filename)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile',
'x_pts': self.x_pts,
'y_pts': self.y_pts,
'x_min': self.x_min,
'x_max': self.x_max,
'y_min': self.y_min,
'y_max': self.y_max,
'filename_data': filename,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args) | Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L241-L285 | null | class _AbstractStructure(with_metaclass(abc.ABCMeta)):
@abc.abstractproperty
def n(self):
'''
np.array: A grid of refractive indices representing
the refractive index profile of the structure.
'''
pass
@property
def x_pts(self):
'''
int: The number of grid points in x.
'''
return int((self.x_max - self.x_min) / self.x_step + 1)
@property
def y_pts(self):
'''
int: The number of grid points in y.
'''
return int((self.y_max - self.y_min) / self.y_step)
@property
def x_ctr(self):
'''
float: The centre distance in x.
'''
return 0.5*(self.x_max + self.x_min)
@property
def y_ctr(self):
'''
float: The centre distance in y
'''
return 0.5*(self.y_max + self.y_min)
@property
def xc(self):
'''
np.array: The centre points of the x points.
'''
return 0.5*(self.x[1:] + self.x[:-1])
@property
def yc(self):
'''
np.array: The centre points of the y points.
'''
return 0.5*(self.y[1:] + self.y[:-1])
@property
def xc_pts(self):
'''
int: The number of points in `xc`.
'''
return self.x_pts - 1
@property
def yc_pts(self):
'''
int: The number of points in `yc`.
'''
return self.y_pts - 1
@property
def xc_min(self):
'''
float: The minimum value of `xc`.
'''
return self.xc[0]
@property
def xc_max(self):
'''
float: The maximum value of `xc`.
'''
return self.xc[-1]
@property
def yc_min(self):
'''
float: The minimum value of `yc`.
'''
return self.yc[0]
@property
def yc_max(self):
'''
float: The maximum value of `yc`.
'''
return self.yc[-1]
@property
def x(self):
'''
np.array: The grid points in x.
'''
if None not in (self.x_min, self.x_max, self.x_step) and \
self.x_min != self.x_max:
x = np.arange(self.x_min, self.x_max+self.x_step-self.y_step*0.1, self.x_step)
else:
x = np.array([])
return x
@property
def y(self):
'''
np.array: The grid points in y.
'''
if None not in (self.y_min, self.y_max, self.y_step) and \
self.y_min != self.y_max:
y = np.arange(self.y_min, self.y_max-self.y_step*0.1, self.y_step)
else:
y = np.array([])
return y
@property
def eps(self):
'''
np.array: A grid of permittivies representing
the permittivity profile of the structure.
'''
return self.n**2
@property
def eps_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the permittivity profile of the structure,
interpolating if necessary.
'''
interp_real = interpolate.interp2d(self.x, self.y, self.eps.real)
interp_imag = interpolate.interp2d(self.x, self.y, self.eps.imag)
interp = lambda x, y: interp_real(x, y) + 1.j*interp_imag(x, y)
return interp
@property
def n_func(self):
'''
function: a function that when passed a `x` and `y` values,
returns the refractive index profile of the structure,
interpolating if necessary.
'''
return interpolate.interp2d(self.x, self.y, self.n)
def _add_triangular_sides(self, xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material):
angle = np.radians(angle)
trap_len = (y_top_right - y_bot_left) / np.tan(angle)
num_x_iterations = trap_len / self.x_step
y_per_iteration = num_x_iterations / self.y_pts
lhs_x_start_index = int(x_bot_left/ self.x_step + 0.5)
rhs_x_stop_index = int(x_top_right/ self.x_step + 1 + 0.5)
running_removal_float = y_per_iteration
for i, _ in enumerate(xy_mask):
if running_removal_float >= 1:
removal_int = int(round(running_removal_float))
lhs_x_start_index -= removal_int
rhs_x_stop_index += removal_int
running_removal_float -= removal_int
running_removal_float += y_per_iteration
xy_mask[i][:lhs_x_start_index] = False
xy_mask[i][lhs_x_start_index:rhs_x_stop_index] = True
self.n[xy_mask] = n_material
return self.n
def _add_material(self, x_bot_left, y_bot_left, x_top_right, y_top_right,
n_material, angle=0):
'''
A low-level function that allows writing a rectangle refractive
index profile to a `Structure`.
Args:
x_bot_left (float): The bottom-left x-coordinate of the
rectangle.
y_bot_left (float): The bottom-left y-coordinate of the
rectangle.
x_top_right (float): The top-right x-coordinate of the
rectangle.
y_top_right (float): The top-right y-coordinate of the
rectangle.
n_material (float): The refractive index of the points
encompassed by the defined rectangle.
angle (float): The angle in degrees of the sidewalls
of the defined rectangle. Default is 0. This
is useful for creating a ridge with angled
sidewalls.
'''
x_mask = np.logical_and(x_bot_left<=self.x, self.x<=x_top_right)
y_mask = np.logical_and(y_bot_left<=self.y, self.y<=y_top_right)
xy_mask = np.kron(y_mask, x_mask).reshape((y_mask.size, x_mask.size))
self.n[xy_mask] = n_material
if angle:
self._add_triangular_sides(xy_mask, angle, y_top_right, y_bot_left,
x_top_right, x_bot_left, n_material)
return self.n
def __str__(self):
return self.n.__str__()
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | Slabs.add_slab | python | def add_slab(self, height, n_background=1., position='top'):
'''
Creates and adds a :class:`Slab` object.
Args:
height (float): Height of the slab.
n_background (float): The nominal refractive
index of the slab. Default is 1 (air).
Returns:
str: The name of the slab.
'''
assert position in ('top', 'bottom')
name = str(self.slab_count)
if not callable(n_background):
n_back = lambda wl: n_background
else:
n_back = n_background
height_discretised = self.y_step*((height // self.y_step) + 1)
y_min = self._next_start
y_max = y_min + height_discretised
self.slabs[name] = Slab(name, self.x_step, self.y_step, self.x_max,
y_max, self.x_min, y_min, n_back, self._wl)
self.y_max = y_max
self._next_start = y_min + height_discretised
self.slab_count += 1
if position == 'bottom':
slabs = {}
for k in self.slabs.keys():
slabs[str(int(k)+1)] = self.slabs[k]
slabs['0'] = slabs.pop(str(self.slab_count))
self.slabs = slabs
return name | Creates and adds a :class:`Slab` object.
Args:
height (float): Height of the slab.
n_background (float): The nominal refractive
index of the slab. Default is 1 (air).
Returns:
str: The name of the slab. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L351-L390 | null | class Slabs(_AbstractStructure):
'''
Class to implement device refractive index
profile cross-section designs.
:class:`Slabs` is a collection of :class:`Slab` objects. Each
slab has a fixed height (usually less than the
maximum height of the desired simulation window),
and is as wide as the simulation window.
:class:`Slabs` objects can be index using `[name]` to return
the various :class:`Slab` objects. The bottom slab is
returned first and so on up to the top slab.
.. image:: ../images/slabs.svg
:width: 200%
Args:
wavelength (float): The wavelength the structure
operates at.
y_step (float): The step in y.
x_step (float): The step in x.
x_max (float): The maximum x-value.
x_min (float): The minimum x-value. Default is 0.
Attributes:
slabs (dict): The key is the name of the slab,
and the value is the :class:`Slab` object.
slab_count (int): The number of :class:`Slab` objects
added so far.
'''
def __init__(self, wavelength, y_step, x_step, x_max, x_min=0.):
_AbstractStructure.__init__(self)
self._wl = wavelength
self.x_min = x_min
self.x_max = x_max
self.x_step = x_step
self.y_step = y_step
self.y_min = 0
self.slabs = {}
self.slab_count = 0
self._next_start = 0.
def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for name, slab in self.slabs.items():
const_args = slab._const_args
mat_args = slab._mat_params
const_args[8] = wavelength
s = Slab(*const_args)
for mat_arg in mat_args:
s.add_material(*mat_arg)
self.slabs[name] = s
self._wl = wavelength
@property
def n(self):
'''
np.array: The refractive index profile matrix
of the current slab.
'''
try:
n_mat = self.slabs['0'].n
for s in range(1, self.slab_count):
n_mat = np.vstack((self.slabs[str(s)].n, n_mat))
except KeyError:
n_mat = None
return n_mat
def __getitem__(self, slab_name):
return self.slabs[str(slab_name)]
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | Slabs.change_wavelength | python | def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for name, slab in self.slabs.items():
const_args = slab._const_args
mat_args = slab._mat_params
const_args[8] = wavelength
s = Slab(*const_args)
for mat_arg in mat_args:
s.add_material(*mat_arg)
self.slabs[name] = s
self._wl = wavelength | Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L392-L415 | null | class Slabs(_AbstractStructure):
'''
Class to implement device refractive index
profile cross-section designs.
:class:`Slabs` is a collection of :class:`Slab` objects. Each
slab has a fixed height (usually less than the
maximum height of the desired simulation window),
and is as wide as the simulation window.
:class:`Slabs` objects can be index using `[name]` to return
the various :class:`Slab` objects. The bottom slab is
returned first and so on up to the top slab.
.. image:: ../images/slabs.svg
:width: 200%
Args:
wavelength (float): The wavelength the structure
operates at.
y_step (float): The step in y.
x_step (float): The step in x.
x_max (float): The maximum x-value.
x_min (float): The minimum x-value. Default is 0.
Attributes:
slabs (dict): The key is the name of the slab,
and the value is the :class:`Slab` object.
slab_count (int): The number of :class:`Slab` objects
added so far.
'''
def __init__(self, wavelength, y_step, x_step, x_max, x_min=0.):
_AbstractStructure.__init__(self)
self._wl = wavelength
self.x_min = x_min
self.x_max = x_max
self.x_step = x_step
self.y_step = y_step
self.y_min = 0
self.slabs = {}
self.slab_count = 0
self._next_start = 0.
def add_slab(self, height, n_background=1., position='top'):
'''
Creates and adds a :class:`Slab` object.
Args:
height (float): Height of the slab.
n_background (float): The nominal refractive
index of the slab. Default is 1 (air).
Returns:
str: The name of the slab.
'''
assert position in ('top', 'bottom')
name = str(self.slab_count)
if not callable(n_background):
n_back = lambda wl: n_background
else:
n_back = n_background
height_discretised = self.y_step*((height // self.y_step) + 1)
y_min = self._next_start
y_max = y_min + height_discretised
self.slabs[name] = Slab(name, self.x_step, self.y_step, self.x_max,
y_max, self.x_min, y_min, n_back, self._wl)
self.y_max = y_max
self._next_start = y_min + height_discretised
self.slab_count += 1
if position == 'bottom':
slabs = {}
for k in self.slabs.keys():
slabs[str(int(k)+1)] = self.slabs[k]
slabs['0'] = slabs.pop(str(self.slab_count))
self.slabs = slabs
return name
@property
def n(self):
'''
np.array: The refractive index profile matrix
of the current slab.
'''
try:
n_mat = self.slabs['0'].n
for s in range(1, self.slab_count):
n_mat = np.vstack((self.slabs[str(s)].n, n_mat))
except KeyError:
n_mat = None
return n_mat
def __getitem__(self, slab_name):
return self.slabs[str(slab_name)]
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | Slabs.n | python | def n(self):
'''
np.array: The refractive index profile matrix
of the current slab.
'''
try:
n_mat = self.slabs['0'].n
for s in range(1, self.slab_count):
n_mat = np.vstack((self.slabs[str(s)].n, n_mat))
except KeyError:
n_mat = None
return n_mat | np.array: The refractive index profile matrix
of the current slab. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L418-L429 | null | class Slabs(_AbstractStructure):
'''
Class to implement device refractive index
profile cross-section designs.
:class:`Slabs` is a collection of :class:`Slab` objects. Each
slab has a fixed height (usually less than the
maximum height of the desired simulation window),
and is as wide as the simulation window.
:class:`Slabs` objects can be index using `[name]` to return
the various :class:`Slab` objects. The bottom slab is
returned first and so on up to the top slab.
.. image:: ../images/slabs.svg
:width: 200%
Args:
wavelength (float): The wavelength the structure
operates at.
y_step (float): The step in y.
x_step (float): The step in x.
x_max (float): The maximum x-value.
x_min (float): The minimum x-value. Default is 0.
Attributes:
slabs (dict): The key is the name of the slab,
and the value is the :class:`Slab` object.
slab_count (int): The number of :class:`Slab` objects
added so far.
'''
def __init__(self, wavelength, y_step, x_step, x_max, x_min=0.):
_AbstractStructure.__init__(self)
self._wl = wavelength
self.x_min = x_min
self.x_max = x_max
self.x_step = x_step
self.y_step = y_step
self.y_min = 0
self.slabs = {}
self.slab_count = 0
self._next_start = 0.
def add_slab(self, height, n_background=1., position='top'):
'''
Creates and adds a :class:`Slab` object.
Args:
height (float): Height of the slab.
n_background (float): The nominal refractive
index of the slab. Default is 1 (air).
Returns:
str: The name of the slab.
'''
assert position in ('top', 'bottom')
name = str(self.slab_count)
if not callable(n_background):
n_back = lambda wl: n_background
else:
n_back = n_background
height_discretised = self.y_step*((height // self.y_step) + 1)
y_min = self._next_start
y_max = y_min + height_discretised
self.slabs[name] = Slab(name, self.x_step, self.y_step, self.x_max,
y_max, self.x_min, y_min, n_back, self._wl)
self.y_max = y_max
self._next_start = y_min + height_discretised
self.slab_count += 1
if position == 'bottom':
slabs = {}
for k in self.slabs.keys():
slabs[str(int(k)+1)] = self.slabs[k]
slabs['0'] = slabs.pop(str(self.slab_count))
self.slabs = slabs
return name
def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for name, slab in self.slabs.items():
const_args = slab._const_args
mat_args = slab._mat_params
const_args[8] = wavelength
s = Slab(*const_args)
for mat_arg in mat_args:
s.add_material(*mat_arg)
self.slabs[name] = s
self._wl = wavelength
@property
def __getitem__(self, slab_name):
return self.slabs[str(slab_name)]
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | Slab.add_material | python | def add_material(self, x_min, x_max, n, angle=0):
'''
Add a refractive index between two x-points.
Args:
x_min (float): The start x-point.
x_max (float): The stop x-point.
n (float, function): Refractive index between
`x_min` and `x_max`. Either a constant (`float`), or
a function that accepts one parameters, the
wavelength, and returns a float of the refractive
index. This is useful when doing wavelength
sweeps and solving for the group velocity. The
function provided could be a Sellmeier equation.
angle (float): Angle in degrees of the slope of the
sidewalls at `x_min` and `x_max`. This is useful
for defining a ridge with angled sidewalls.
'''
self._mat_params.append([x_min, x_max, n, angle])
if not callable(n):
n_mat = lambda wl: n
else:
n_mat = n
Structure._add_material(self, x_min, self.y_min, x_max, self.y_max, n_mat(self._wl), angle)
return self.n | Add a refractive index between two x-points.
Args:
x_min (float): The start x-point.
x_max (float): The stop x-point.
n (float, function): Refractive index between
`x_min` and `x_max`. Either a constant (`float`), or
a function that accepts one parameters, the
wavelength, and returns a float of the refractive
index. This is useful when doing wavelength
sweeps and solving for the group velocity. The
function provided could be a Sellmeier equation.
angle (float): Angle in degrees of the slope of the
sidewalls at `x_min` and `x_max`. This is useful
for defining a ridge with angled sidewalls. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L479-L505 | [
"n_mat = lambda wl: n\n"
] | class Slab(Structure):
'''
A :class:`Slab` represents a horizontal slice of
the refractive index profile.
A :class:`Slabs` object composes many :class:`Slab` objects.
The more :class:`Slab` are added, the more horizontal
slices are added. A :class:`Slab` has a chosen fixed
height, and a background (nominal) refractive
index. A slab can then be customised to include
a desired design.
Args:
name (str): The name of the slab.
x_step (float): The step in x.
y_step (float): The step in y.
x_max (float): The maximum x-value.
y_max (float): The maximum y-value.
x_min (float): The minimum x-value.
y_min (float): The minimum x-value.
n_background (float): The nominal refractive
index.
wavelength (float): The wavelength the structure
operates at.
Attributes:
name (str): The name of the :class:`Slab` object.
position (int): A unique identifier for the
:class:`Slab` object.
'''
position = 0
def __init__(self, name, x_step, y_step, x_max, y_max, x_min, y_min,
n_background, wavelength):
self._wl = wavelength
self.name = name
self.position = Slab.position
Slab.position += 1
Structure.__init__(self, x_step, y_step, x_max, y_max, x_min, y_min,
n_background(self._wl))
self._const_args = [name, x_step, y_step, x_max, y_max, x_min, y_min, n_background, wavelength]
self._mat_params = []
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | StructureAni.write_to_file | python | def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
dir_plot = 'material_index/'
if not os.path.exists(dir_plot):
os.makedirs(dir_plot)
for axis, name in zip(self.axes, self.axes_str):
root, ext = os.path.splitext(filename)
fn = dir_plot + root + '_'+ name + ext
with open(fn, 'w') as fs:
for n_row in np.abs(axis.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(fn)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile: %s' % name,
'x_pts': self.xx.x_pts,
'y_pts': self.xx.y_pts,
'x_min': self.xx.x_min,
'x_max': self.xx.x_max,
'y_min': self.xx.y_min,
'y_max': self.xx.y_max,
'filename_data': fn,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args, silent=False) | Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L666-L716 | null | class StructureAni():
r"""
Anisottropic structure object.
This is used with the fully-vectorial simulation when
an anisotropic material is being used.
The form of the refractive index is
.. math::
n = \begin{bmatrix}
n_{xx} & n_{xy} & 0 \\
n_{yx} & n_{yy} & 0 \\
0 & 0 & n_{zz}
\end{bmatrix}.
Args:
structure_xx (Structure): The structure with refractive
index, :math:`n_{xx}`.
structure_yy (Structure): The structure with refractive
index, :math:`n_{yy}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters.
structure_zz (Structure): The structure with refractive
index, :math:`n_{zz}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters.
structure_xy (None, Structure): The structure with refractive
index, :math:`n_{yx}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters. Default is `None`.
structure_yx (None, Structure): The structure with refractive
index, :math:`n_{yx}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters. Default is `None`.
"""
def __init__(self, structure_xx, structure_yy, structure_zz,
structure_xy=None, structure_yx=None):
self.xx = structure_xx
self.yy = structure_yy
self.zz = structure_zz
if not structure_xy or not structure_yx:
struct_dummy = Structure(self.xx.x_step, self.xx.y_step,
self.xx.x_max, self.xx.y_max,
self.xx.x_min, self.xx.y_min,
n_background=0.)
struct_dummy._wl = self.xx._wl
if structure_xy:
self.xy = structure_xy
else:
self.xy = struct_dummy
if structure_yx:
self.yx = structure_yx
else:
self.yx = struct_dummy
assert self.xx._wl == self.xy._wl == self.yx._wl == \
self.yy._wl == self.zz._wl
self._wl = structure_xx._wl
self.axes = (self.xx, self.xy, self.yx, self.yy, self.zz)
self.axes_str = ('xx', 'xy', 'yx', 'yy', 'zz')
@property
def n(self):
return [a.n for a in self.axes]
@property
def x_step(self):
return self.xx.x_step
@property
def y_step(self):
return self.xx.y_step
@property
def x_pts(self):
return int((self.xx.x_max - self.xx.x_min) / self.xx.x_step + 1)
@property
def y_pts(self):
return int((self.xx.y_max - self.xx.y_min) / self.xx.y_step)
@property
def x_ctr(self):
return 0.5*(self.xx.x_max + self.xx.x_min)
@property
def y_ctr(self):
return 0.5*(self.xx.y_max + self.xx.y_min)
@property
def xc(self):
return 0.5*(self.xx.x[1:] + self.xx.x[:-1])
@property
def yc(self):
return 0.5*(self.xx.y[1:] + self.xx.y[:-1])
@property
def xc_pts(self):
return self.xx.x_pts - 1
@property
def yc_pts(self):
return self.xx.y_pts - 1
@property
def xc_min(self):
return self.xx.xc[0]
@property
def xc_max(self):
return self.xx.xc[-1]
@property
def yc_min(self):
return self.xx.yc[0]
@property
def yc_max(self):
return self.xx.yc[-1]
@property
def x(self):
if None not in (self.xx.x_min, self.xx.x_max, self.xx.x_step) and \
self.xx.x_min != self.xx.x_max:
x = np.arange(self.xx.x_min, self.xx.x_max+self.xx.x_step-self.xx.y_step*0.1, self.xx.x_step)
else:
x = np.array([])
return x
@property
def y(self):
if None not in (self.xx.y_min, self.xx.y_max, self.xx.y_step) and \
self.xx.y_min != self.xx.y_max:
y = np.arange(self.xx.y_min, self.xx.y_max-self.xx.y_step*0.1, self.xx.y_step)
else:
y = np.array([])
return y
@property
def eps(self):
eps_ani = [a.n**2 for a in self.axes]
return eps_ani
@property
def eps_func(self):
return lambda x,y: tuple(axis.eps_func(x,y) for axis in self.axes)
@property
def n_func(self):
return lambda x,y: tuple(axis.n_func(x,y) for axis in self.axes)
def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for axis in self.axes:
if issubclass(type(axis), Slabs):
axis.change_wavelength(wavelength)
self.xx, self.xy, self.yx, self.yy, self.zz = self.axes
self._wl = wavelength
|
jtambasco/modesolverpy | modesolverpy/structure_base.py | StructureAni.change_wavelength | python | def change_wavelength(self, wavelength):
'''
Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength.
'''
for axis in self.axes:
if issubclass(type(axis), Slabs):
axis.change_wavelength(wavelength)
self.xx, self.xy, self.yx, self.yy, self.zz = self.axes
self._wl = wavelength | Changes the wavelength of the structure.
This will affect the mode solver and potentially
the refractive indices used (provided functions
were provided as refractive indices).
Args:
wavelength (float): The new wavelength. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/structure_base.py#L718-L733 | null | class StructureAni():
r"""
Anisottropic structure object.
This is used with the fully-vectorial simulation when
an anisotropic material is being used.
The form of the refractive index is
.. math::
n = \begin{bmatrix}
n_{xx} & n_{xy} & 0 \\
n_{yx} & n_{yy} & 0 \\
0 & 0 & n_{zz}
\end{bmatrix}.
Args:
structure_xx (Structure): The structure with refractive
index, :math:`n_{xx}`.
structure_yy (Structure): The structure with refractive
index, :math:`n_{yy}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters.
structure_zz (Structure): The structure with refractive
index, :math:`n_{zz}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters.
structure_xy (None, Structure): The structure with refractive
index, :math:`n_{yx}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters. Default is `None`.
structure_yx (None, Structure): The structure with refractive
index, :math:`n_{yx}`. Presumably the same structure
as `structure_xx`, but with different refractive index
parameters. Default is `None`.
"""
def __init__(self, structure_xx, structure_yy, structure_zz,
structure_xy=None, structure_yx=None):
self.xx = structure_xx
self.yy = structure_yy
self.zz = structure_zz
if not structure_xy or not structure_yx:
struct_dummy = Structure(self.xx.x_step, self.xx.y_step,
self.xx.x_max, self.xx.y_max,
self.xx.x_min, self.xx.y_min,
n_background=0.)
struct_dummy._wl = self.xx._wl
if structure_xy:
self.xy = structure_xy
else:
self.xy = struct_dummy
if structure_yx:
self.yx = structure_yx
else:
self.yx = struct_dummy
assert self.xx._wl == self.xy._wl == self.yx._wl == \
self.yy._wl == self.zz._wl
self._wl = structure_xx._wl
self.axes = (self.xx, self.xy, self.yx, self.yy, self.zz)
self.axes_str = ('xx', 'xy', 'yx', 'yy', 'zz')
@property
def n(self):
return [a.n for a in self.axes]
@property
def x_step(self):
return self.xx.x_step
@property
def y_step(self):
return self.xx.y_step
@property
def x_pts(self):
return int((self.xx.x_max - self.xx.x_min) / self.xx.x_step + 1)
@property
def y_pts(self):
return int((self.xx.y_max - self.xx.y_min) / self.xx.y_step)
@property
def x_ctr(self):
return 0.5*(self.xx.x_max + self.xx.x_min)
@property
def y_ctr(self):
return 0.5*(self.xx.y_max + self.xx.y_min)
@property
def xc(self):
return 0.5*(self.xx.x[1:] + self.xx.x[:-1])
@property
def yc(self):
return 0.5*(self.xx.y[1:] + self.xx.y[:-1])
@property
def xc_pts(self):
return self.xx.x_pts - 1
@property
def yc_pts(self):
return self.xx.y_pts - 1
@property
def xc_min(self):
return self.xx.xc[0]
@property
def xc_max(self):
return self.xx.xc[-1]
@property
def yc_min(self):
return self.xx.yc[0]
@property
def yc_max(self):
return self.xx.yc[-1]
@property
def x(self):
if None not in (self.xx.x_min, self.xx.x_max, self.xx.x_step) and \
self.xx.x_min != self.xx.x_max:
x = np.arange(self.xx.x_min, self.xx.x_max+self.xx.x_step-self.xx.y_step*0.1, self.xx.x_step)
else:
x = np.array([])
return x
@property
def y(self):
if None not in (self.xx.y_min, self.xx.y_max, self.xx.y_step) and \
self.xx.y_min != self.xx.y_max:
y = np.arange(self.xx.y_min, self.xx.y_max-self.xx.y_step*0.1, self.xx.y_step)
else:
y = np.array([])
return y
@property
def eps(self):
eps_ani = [a.n**2 for a in self.axes]
return eps_ani
@property
def eps_func(self):
return lambda x,y: tuple(axis.eps_func(x,y) for axis in self.axes)
@property
def n_func(self):
return lambda x,y: tuple(axis.n_func(x,y) for axis in self.axes)
def write_to_file(self, filename='material_index.dat', plot=True):
'''
Write the refractive index profile to file.
Args:
filename (str): The nominal filename the refractive
index data should be saved to.
plot (bool): `True` if plots should be generates,
otherwise `False`. Default is `True`.
'''
path = os.path.dirname(sys.modules[__name__].__file__) + '/'
dir_plot = 'material_index/'
if not os.path.exists(dir_plot):
os.makedirs(dir_plot)
for axis, name in zip(self.axes, self.axes_str):
root, ext = os.path.splitext(filename)
fn = dir_plot + root + '_'+ name + ext
with open(fn, 'w') as fs:
for n_row in np.abs(axis.n[::-1]):
n_str = ','.join([str(v) for v in n_row])
fs.write(n_str+'\n')
if plot:
filename_image_prefix, _ = os.path.splitext(fn)
filename_image = filename_image_prefix + '.png'
args = {
'title': 'Refractive Index Profile: %s' % name,
'x_pts': self.xx.x_pts,
'y_pts': self.xx.y_pts,
'x_min': self.xx.x_min,
'x_max': self.xx.x_max,
'y_min': self.xx.y_min,
'y_max': self.xx.y_max,
'filename_data': fn,
'filename_image': filename_image
}
if MPL:
heatmap = np.loadtxt(args['filename_data'], delimiter=',')
plt.clf()
plt.title(args['title'])
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.imshow(np.flipud(heatmap),
extent=(args['x_min'], args['x_max'], args['y_min'], args['y_max']),
aspect="auto")
plt.colorbar()
plt.savefig(filename_image)
else:
gp.gnuplot(path+'structure.gpi', args, silent=False)
|
jtambasco/modesolverpy | modesolverpy/_mode_solver_lib.py | trapz2 | python | def trapz2(f, x=None, y=None, dx=1.0, dy=1.0):
return numpy.trapz(numpy.trapz(f, x=y, dx=dy), x=x, dx=dx) | Double integrate. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/_mode_solver_lib.py#L22-L24 | null | # pylint: disable=line-too-long,too-many-locals,too-many-statements,too-many-branches
# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
# pylint: disable=attribute-defined-outside-init,too-many-instance-attributes
# pylint: disable=arguments-differ,too-many-arguments
"""Finite Difference Modesolver.
@see: Fallahkhair, "Vector Finite Difference Modesolver for Anisotropic Dielectric Waveguides", JLT 2007 <http://www.photonics.umd.edu/pubs/journal-articles/JA-D/anisotropic-modesolver.pdf>}
@see: http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=12734&objectType=FILE
"""
from __future__ import print_function
from builtins import zip
from builtins import str
from builtins import range
import numpy
import scipy
import scipy.optimize
import collections as col
def centered1d(x):
    """Return the midpoints of consecutive samples of *x*.

    For a length-N array this yields a length N-1 array of
    cell-centre values.
    """
    return 0.5 * (x[:-1] + x[1:])
def centered2d(x):
    """Average each 2x2 neighbourhood of *x* onto the cell-centre grid.

    For an (M, N) array this yields an (M-1, N-1) array whose entries
    are the mean of the four surrounding corner samples.
    """
    corner_sum = x[1:, 1:] + x[1:, :-1] + x[:-1, 1:] + x[:-1, :-1]
    return corner_sum / 4.
class _ModeSolverSemiVectorial():
"""
This function calculates the modes of a dielectric waveguide
using the semivectorial finite difference method.
It is slightly faster than the full-vectorial VFDModeSolver,
but it does not accept non-isotropic permittivity. For example,
birefringent materials, which have
different refractive indices along different dimensions cannot be used.
It is adapted from the svmodes.m matlab code of Thomas Murphy and co-workers.
Parameters
----------
wl : float
optical wavelength
units are arbitrary, but must be self-consistent. It's recommended to just work in microns.
x : 1D array of floats
Array of x-values
y : 1D array of floats
Array of y-values
epsfunc : function
This is a function that provides the relative permittivity (square of the refractive index)
as a function of the x and y position. The function must be of the form:
``myRelativePermittivity(x,y)``
The function can either return a single float, corresponding the an isotropic refractive index,
or, it may a length-5 tuple. In the tuple case, the relative permittivity is given in the form
(epsxx, epsxy, epsyx, epsyy, epszz).
boundary : str
This is a string that identifies the type of boundary conditions applied.
The following options are available:
'A' - Hx is antisymmetric, Hy is symmetric.
'S' - Hx is symmetric and, Hy is antisymmetric.
'0' - Hx and Hy are zero immediately outside of the boundary.
The string identifies all four boundary conditions, in the order: North, south, east, west.
For example, boundary='000A'
method : str
must be 'Ex', 'Ey', or 'scalar'
this identifies the field that will be calculated.
Returns
-------
self : an instance of the SVFDModeSolver class
Typically self.solve() will be called in order to actually find the modes.
"""
def __init__(self, wl, structure, boundary='0000', method='Ex'):
# Polarisation bug fix.
assert method in ('Ex', 'Ey'), 'Invalid polarisation method.'
if method == 'Ex':
method = 'Ey'
elif method == 'Ey':
method = 'Ex'
self.wl = wl
self.x = structure.y
self.y = structure.x
self.boundary = boundary
self.method = method
self.structure = structure
def build_matrix(self):
from scipy.sparse import coo_matrix
wl = self.wl
x = self.x
y = self.y
structure = self.structure
boundary = self.boundary
method = self.method
dx = numpy.diff(x)
dy = numpy.diff(y)
dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)
xc = (x[:-1] + x[1:]) / 2
yc = (y[:-1] + y[1:]) / 2
eps = structure.eps_func(yc, xc)
eps = numpy.c_[eps[:, 0:1], eps, eps[:, -1:]]
eps = numpy.r_[eps[0:1, :], eps, eps[-1:, :]]
nx = len(xc)
ny = len(yc)
self.nx = nx
self.ny = ny
k = 2 * numpy.pi / wl
ones_nx = numpy.ones((nx, 1))
ones_ny = numpy.ones((1, ny))
n = numpy.dot(ones_nx, 0.5 * (dy[:, 2:] + dy[:, 1:-1])).flatten()
s = numpy.dot(ones_nx, 0.5 * (dy[:, 0:-2] + dy[:, 1:-1])).flatten()
e = numpy.dot(0.5 * (dx[2:, :] + dx[1:-1, :]), ones_ny).flatten()
w = numpy.dot(0.5 * (dx[0:-2, :] + dx[1:-1, :]), ones_ny).flatten()
p = numpy.dot(dx[1:-1, :], ones_ny).flatten()
q = numpy.dot(ones_nx, dy[:, 1:-1]).flatten()
en = eps[1:-1, 2:].flatten()
es = eps[1:-1, 0:-2].flatten()
ee = eps[2:, 1:-1].flatten()
ew = eps[0:-2, 1:-1].flatten()
ep = eps[1:-1, 1:-1].flatten()
# three methods: Ex, Ey and scalar
if method == 'Ex':
# Ex
An = 2 / n / (n + s)
As = 2 / s / (n + s)
Ae = 8 * (p * (ep - ew) + 2 * w * ew) * ee / \
((p * (ep - ee) + 2 * e * ee) * (p ** 2 * (ep - ew) + 4 * w ** 2 * ew) +
(p * (ep - ew) + 2 * w * ew) * (p ** 2 * (ep - ee) + 4 * e ** 2 * ee))
Aw = 8 * (p * (ep - ee) + 2 * e * ee) * ew / \
((p * (ep - ee) + 2 * e * ee) * (p ** 2 * (ep - ew) + 4 * w ** 2 * ew) +
(p * (ep - ew) + 2 * w * ew) * (p ** 2 * (ep - ee) + 4 * e ** 2 * ee))
Ap = ep * k ** 2 - An - As - Ae * ep / ee - Aw * ep / ew
elif method == 'Ey':
# Ey
An = 8 * (q * (ep - es) + 2 * s * es) * en / \
((q * (ep - en) + 2 * n * en) * (q ** 2 * (ep - es) + 4 * s ** 2 * es) +
(q * (ep - es) + 2 * s * es) * (q ** 2 * (ep - en) + 4 * n ** 2 * en))
As = 8 * (q * (ep - en) + 2 * n * en) * es / \
((q * (ep - en) + 2 * n * en) * (q ** 2 * (ep - es) + 4 * s ** 2 * es) +
(q * (ep - es) + 2 * s * es) * (q ** 2 * (ep - en) + 4 * n ** 2 * en))
Ae = 2 / e / (e + w)
Aw = 2 / w / (e + w)
Ap = ep * k ** 2 - An * ep / en - As * ep / es - Ae - Aw
elif method == 'scalar':
# scalar
An = 2 / n / (n + s)
As = 2 / s / (n + s)
Ae = 2 / e / (e + w)
Aw = 2 / w / (e + w)
Ap = ep * k ** 2 - An - As - Ae - Aw
else:
raise ValueError('unknown method')
ii = numpy.arange(nx * ny).reshape(nx, ny)
# north boundary
ib = ii[:, -1]
if boundary[0] == 'S':
Ap[ib] += An[ib]
elif boundary[0] == 'A':
Ap[ib] -= An[ib]
# else:
# raise ValueError('unknown boundary')
# south
ib = ii[:, 0]
if boundary[1] == 'S':
Ap[ib] += As[ib]
elif boundary[1] == 'A':
Ap[ib] -= As[ib]
# else:
# raise ValueError('unknown boundary')
# east
ib = ii[-1, :]
if boundary[2] == 'S':
Ap[ib] += Ae[ib]
elif boundary[2] == 'A':
Ap[ib] -= Ae[ib]
# else:
# raise ValueError('unknown boundary')
# west
ib = ii[0, :]
if boundary[3] == 'S':
Ap[ib] += Aw[ib]
elif boundary[3] == 'A':
Ap[ib] -= Aw[ib]
# else:
# raise ValueError('unknown boundary')
iall = ii.flatten()
i_n = ii[:, 1:].flatten()
i_s = ii[:, :-1].flatten()
i_e = ii[1:, :].flatten()
i_w = ii[:-1, :].flatten()
I = numpy.r_[iall, i_w, i_e, i_s, i_n]
J = numpy.r_[iall, i_e, i_w, i_n, i_s]
V = numpy.r_[Ap[iall], Ae[i_w], Aw[i_e], An[i_s], As[i_n]]
A = coo_matrix((V, (I, J))).tocsr()
return A
def solve(self, neigs, tol=0, mode_profiles=True, initial_mode_guess=None):
from scipy.sparse.linalg import eigen
self.nmodes = neigs
self.tol = tol
A = self.build_matrix()
eigs = eigen.eigs(A,
k=neigs,
which='LR',
tol=0.001,
ncv=None,
v0 = initial_mode_guess,
return_eigenvectors=mode_profiles)
if mode_profiles:
eigvals, eigvecs = eigs
else:
eigvals = eigs
eigvecs = None
neff = self.wl * scipy.sqrt(eigvals) / (2 * numpy.pi)
if mode_profiles:
phi = []
for ieig in range(neigs):
tmp = eigvecs[:, ieig].reshape(self.nx, self.ny)
phi.append(tmp)
# sort and save the modes
idx = numpy.flipud(numpy.argsort(neff))
self.neff = neff[idx]
if mode_profiles:
tmp = []
for i in idx:
tmp.append(phi[i])
if self.method == 'scalar':
self.phi = tmp
elif self.method == 'Ex':
self.Ex = tmp
elif self.method == 'Ey':
self.Ey = tmp
self.modes = tmp
return self
def __str__(self):
descr = 'Semi-Vectorial Finite Difference Modesolver\n\tmethod: %s\n' % self.method
return descr
class _ModeSolverVectorial():
"""
The VFDModeSolver class computes the electric and magnetic fields for modes of a dielectric
waveguide using the "Vector Finite Difference (VFD)" method, as described in
A. B. Fallahkhair, K. S. Li and T. E. Murphy, "Vector Finite Difference Modesolver for
Anisotropic Dielectric Waveguides", J. Lightwave Technol. 26(11), 1423-1431, (2008).
Parameters
----------
wl : float
The wavelength of the optical radiation (units are arbitrary, but must be self-consistent
        between all inputs. Recommendation is to just use microns for everything)
x : 1D array of floats
Array of x-values
y : 1D array of floats
Array of y-values
epsfunc : function
This is a function that provides the relative permittivity (square of the refractive index)
as a function of the x and y position. The function must be of the form:
``myRelativePermittivity(x,y)``
The function can either return a single float, corresponding the an isotropic refractive index,
        or, it may be a length-5 tuple. In the tuple case, the relative permittivity is given in the form
(epsxx, epsxy, epsyx, epsyy, epszz).
The light is `z` propagating.
boundary : str
This is a string that identifies the type of boundary conditions applied.
The following options are available:
'A' - Hx is antisymmetric, Hy is symmetric.
'S' - Hx is symmetric and, Hy is antisymmetric.
'0' - Hx and Hy are zero immediately outside of the boundary.
The string identifies all four boundary conditions, in the order: North, south, east, west.
For example, boundary='000A'
Returns
-------
self : an instance of the VFDModeSolver class
Typically self.solve() will be called in order to actually find the modes.
"""
def __init__(self, wl, structure, boundary):
self.wl = wl
self.x = structure.y
self.y = structure.x
self.epsfunc = structure.eps_func
self.boundary = boundary
def build_matrix(self):
    """Assemble the full-vectorial finite-difference operator.

    Builds the sparse matrix whose eigenvalues are beta**2 (beta: the
    propagation constant) and whose eigenvectors are the transverse
    magnetic-field profiles, Hx stacked above Hy, following
    Fallahkhair, Li and Murphy, JLT 26(11), 1423-1431 (2008).

    Returns
    -------
    scipy.sparse.csr_matrix
        Operator of shape (2*nx*ny, 2*nx*ny); also stores ``self.nx``
        and ``self.ny``.
    """
    from scipy.sparse import coo_matrix
    wl = self.wl
    x = self.x
    y = self.y
    epsfunc = self.epsfunc
    boundary = self.boundary
    # Grid spacings, padded by duplicating the end cells so that every
    # node has four neighbours.
    dx = numpy.diff(x)
    dy = numpy.diff(y)
    dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
    dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)
    # Permittivity sampled at the cell centres, padded on all four
    # sides with duplicated edge values.
    xc = (x[:-1] + x[1:]) / 2
    yc = (y[:-1] + y[1:]) / 2
    tmp = epsfunc(yc, xc)
    if isinstance(tmp, tuple):
        tmp = [numpy.c_[t[:, 0:1], t, t[:, -1:]] for t in tmp]
        tmp = [numpy.r_[t[0:1, :], t, t[-1:, :]] for t in tmp]
        # NOTE(review): the unpack order here is reversed relative to
        # compute_other_fields — presumably deliberate because the x/y
        # axes are swapped in __init__; confirm against the upstream
        # reference implementation.
        epsyy, epsyx, epsxy, epsxx, epszz = tmp
    else:
        # Isotropic case: a single array gives the diagonal tensor
        # entries; the off-diagonal terms are zero.
        tmp = numpy.c_[tmp[:, 0:1], tmp, tmp[:, -1:]]
        tmp = numpy.r_[tmp[0:1, :], tmp, tmp[-1:, :]]
        epsxx = epsyy = epszz = tmp
        epsxy = epsyx = numpy.zeros_like(epsxx)
    nx = len(x)
    ny = len(y)
    self.nx = nx
    self.ny = ny
    k = 2 * numpy.pi / wl  # free-space wavenumber
    ones_nx = numpy.ones((nx, 1))
    ones_ny = numpy.ones((1, ny))
    # Distances to the north/south/east/west neighbours, one value per
    # grid node.
    n = numpy.dot(ones_nx, dy[:, 1:]).flatten()
    s = numpy.dot(ones_nx, dy[:, :-1]).flatten()
    e = numpy.dot(dx[1:, :], ones_ny).flatten()
    w = numpy.dot(dx[:-1, :], ones_ny).flatten()
    # Permittivity tensor components in the four cells surrounding each
    # node: 1=NE, 2=NW, 3=SW, 4=SE (per the JLT 2008 paper's labelling
    # convention — TODO confirm orientation).
    exx1 = epsxx[:-1, 1:].flatten()
    exx2 = epsxx[:-1, :-1].flatten()
    exx3 = epsxx[1:, :-1].flatten()
    exx4 = epsxx[1:, 1:].flatten()
    eyy1 = epsyy[:-1, 1:].flatten()
    eyy2 = epsyy[:-1, :-1].flatten()
    eyy3 = epsyy[1:, :-1].flatten()
    eyy4 = epsyy[1:, 1:].flatten()
    exy1 = epsxy[:-1, 1:].flatten()
    exy2 = epsxy[:-1, :-1].flatten()
    exy3 = epsxy[1:, :-1].flatten()
    exy4 = epsxy[1:, 1:].flatten()
    eyx1 = epsyx[:-1, 1:].flatten()
    eyx2 = epsyx[:-1, :-1].flatten()
    eyx3 = epsyx[1:, :-1].flatten()
    eyx4 = epsyx[1:, 1:].flatten()
    ezz1 = epszz[:-1, 1:].flatten()
    ezz2 = epszz[:-1, :-1].flatten()
    ezz3 = epszz[1:, :-1].flatten()
    ezz4 = epszz[1:, 1:].flatten()
    # Permittivity-weighted spacing sums reused throughout the stencil.
    ns21 = n * eyy2 + s * eyy1
    ns34 = n * eyy3 + s * eyy4
    ew14 = e * exx1 + w * exx4
    ew23 = e * exx2 + w * exx3
    # Nine-point stencil coefficients for the four operator quadrants:
    # axx* act on Hx, ayy* on Hy, and axy*/ayx* are the cross-coupling
    # terms. Taken verbatim from the JLT 2008 derivation.
    axxn = ((2 * eyy4 * e - eyx4 * n) * (eyy3 / ezz4) / ns34 +
            (2 * eyy1 * w + eyx1 * n) * (eyy2 / ezz1) / ns21) / (n * (e + w))
    axxs = ((2 * eyy3 * e + eyx3 * s) * (eyy4 / ezz3) / ns34 +
            (2 * eyy2 * w - eyx2 * s) * (eyy1 / ezz2) / ns21) / (s * (e + w))
    ayye = (2 * n * exx4 - e * exy4) * exx1 / ezz4 / e / ew14 / \
        (n + s) + (2 * s * exx3 + e * exy3) * \
        exx2 / ezz3 / e / ew23 / (n + s)
    ayyw = (2 * exx1 * n + exy1 * w) * exx4 / ezz1 / w / ew14 / \
        (n + s) + (2 * exx2 * s - exy2 * w) * \
        exx3 / ezz2 / w / ew23 / (n + s)
    axxe = 2 / (e * (e + w)) + \
        (eyy4 * eyx3 / ezz3 - eyy3 * eyx4 / ezz4) / (e + w) / ns34
    axxw = 2 / (w * (e + w)) + \
        (eyy2 * eyx1 / ezz1 - eyy1 * eyx2 / ezz2) / (e + w) / ns21
    ayyn = 2 / (n * (n + s)) + \
        (exx4 * exy1 / ezz1 - exx1 * exy4 / ezz4) / (n + s) / ew14
    ayys = 2 / (s * (n + s)) + \
        (exx2 * exy3 / ezz3 - exx3 * exy2 / ezz2) / (n + s) / ew23
    # Diagonal (corner-neighbour) couplings.
    axxne = +eyx4 * eyy3 / ezz4 / (e + w) / ns34
    axxse = -eyx3 * eyy4 / ezz3 / (e + w) / ns34
    axxnw = -eyx1 * eyy2 / ezz1 / (e + w) / ns21
    axxsw = +eyx2 * eyy1 / ezz2 / (e + w) / ns21
    ayyne = +exy4 * exx1 / ezz4 / (n + s) / ew14
    ayyse = -exy3 * exx2 / ezz3 / (n + s) / ew23
    ayynw = -exy1 * exx4 / ezz1 / (n + s) / ew14
    ayysw = +exy2 * exx3 / ezz2 / (n + s) / ew23
    # Central coefficients include the k^2 * eps term.
    axxp = -axxn - axxs - axxe - axxw - axxne - axxse - axxnw - axxsw + k ** 2 * \
        (n + s) * \
        (eyy4 * eyy3 * e / ns34 + eyy1 * eyy2 * w / ns21) / (e + w)
    ayyp = -ayyn - ayys - ayye - ayyw - ayyne - ayyse - ayynw - ayysw + k ** 2 * \
        (e + w) * \
        (exx1 * exx4 * n / ew14 + exx2 * exx3 * s / ew23) / (n + s)
    axyn = (eyy3 * eyy4 / ezz4 / ns34 - eyy2 * eyy1 / ezz1 /
            ns21 + s * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34) / (e + w)
    axys = (eyy1 * eyy2 / ezz2 / ns21 - eyy4 * eyy3 / ezz3 /
            ns34 + n * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34) / (e + w)
    ayxe = (exx1 * exx4 / ezz4 / ew14 - exx2 * exx3 / ezz3 /
            ew23 + w * (exx2 * exx4 - exx1 * exx3) / ew23 / ew14) / (n + s)
    ayxw = (exx3 * exx2 / ezz2 / ew23 - exx4 * exx1 / ezz1 /
            ew14 + e * (exx4 * exx2 - exx1 * exx3) / ew23 / ew14) / (n + s)
    axye = (eyy4 * (1 + eyy3 / ezz4) - eyy3 * (1 + eyy4 / ezz4)) / ns34 / (e + w) - \
        (2 * eyx1 * eyy2 / ezz1 * n * w / ns21 +
         2 * eyx2 * eyy1 / ezz2 * s * w / ns21 +
         2 * eyx4 * eyy3 / ezz4 * n * e / ns34 +
         2 * eyx3 * eyy4 / ezz3 * s * e / ns34 +
         2 * eyy1 * eyy2 * (1. / ezz1 - 1. / ezz2) * w ** 2 / ns21) / e / (e + w) ** 2
    axyw = (eyy2 * (1 + eyy1 / ezz2) - eyy1 * (1 + eyy2 / ezz2)) / ns21 / (e + w) - \
        (2 * eyx1 * eyy2 / ezz1 * n * e / ns21 +
         2 * eyx2 * eyy1 / ezz2 * s * e / ns21 +
         2 * eyx4 * eyy3 / ezz4 * n * w / ns34 +
         2 * eyx3 * eyy4 / ezz3 * s * w / ns34 +
         2 * eyy3 * eyy4 * (1. / ezz3 - 1. / ezz4) * e ** 2 / ns34) / w / (e + w) ** 2
    ayxn = (exx4 * (1 + exx1 / ezz4) - exx1 * (1 + exx4 / ezz4)) / ew14 / (n + s) - \
        (2 * exy3 * exx2 / ezz3 * e * s / ew23 +
         2 * exy2 * exx3 / ezz2 * w * n / ew23 +
         2 * exy4 * exx1 / ezz4 * e * s / ew14 +
         2 * exy1 * exx4 / ezz1 * w * n / ew14 +
         2 * exx3 * exx2 * (1. / ezz3 - 1. / ezz2) * s ** 2 / ew23) / n / (n + s) ** 2
    ayxs = (exx2 * (1 + exx3 / ezz2) - exx3 * (1 + exx2 / ezz2)) / ew23 / (n + s) - \
        (2 * exy3 * exx2 / ezz3 * e * n / ew23 +
         2 * exy2 * exx3 / ezz2 * w * n / ew23 +
         2 * exy4 * exx1 / ezz4 * e * s / ew14 +
         2 * exy1 * exx4 / ezz1 * w * s / ew14 +
         2 * exx1 * exx4 * (1. / ezz1 - 1. / ezz4) * n ** 2 / ew14) / s / (n + s) ** 2
    axyne = +eyy3 * (1 - eyy4 / ezz4) / (e + w) / ns34
    axyse = -eyy4 * (1 - eyy3 / ezz3) / (e + w) / ns34
    axynw = -eyy2 * (1 - eyy1 / ezz1) / (e + w) / ns21
    axysw = +eyy1 * (1 - eyy2 / ezz2) / (e + w) / ns21
    ayxne = +exx1 * (1 - exx4 / ezz4) / (n + s) / ew14
    ayxse = -exx2 * (1 - exx3 / ezz3) / (n + s) / ew23
    ayxnw = -exx4 * (1 - exx1 / ezz1) / (n + s) / ew14
    ayxsw = +exx3 * (1 - exx2 / ezz2) / (n + s) / ew23
    axyp = -(axyn + axys + axye + axyw + axyne + axyse + axynw + axysw) - k ** 2 * (w * (n * eyx1 *
                                                                                         eyy2 + s * eyx2 * eyy1) / ns21 + e * (s * eyx3 * eyy4 + n * eyx4 * eyy3) / ns34) / (e + w)
    ayxp = -(ayxn + ayxs + ayxe + ayxw + ayxne + ayxse + ayxnw + ayxsw) - k ** 2 * (n * (w * exy1 *
                                                                                         exx4 + e * exy4 * exx1) / ew14 + s * (w * exy2 * exx3 + e * exy3 * exx2) / ew23) / (n + s)
    ii = numpy.arange(nx * ny).reshape(nx, ny)
    # Fold each boundary condition into the interior coefficients:
    # 'S' mirrors the out-of-domain neighbour with +1, 'A' with -1, and
    # '0' discards it (field clamped to zero outside). Note the sign is
    # flipped between the Hx-row (axx*/ayx*) and Hy-row (ayy*/axy*)
    # terms, reflecting the opposite symmetry of the two components.
    # NORTH boundary
    ib = ii[:, -1]
    if boundary[0] == 'S':
        sign = 1
    elif boundary[0] == 'A':
        sign = -1
    elif boundary[0] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxs[ib] += sign * axxn[ib]
    axxse[ib] += sign * axxne[ib]
    axxsw[ib] += sign * axxnw[ib]
    ayxs[ib] += sign * ayxn[ib]
    ayxse[ib] += sign * ayxne[ib]
    ayxsw[ib] += sign * ayxnw[ib]
    ayys[ib] -= sign * ayyn[ib]
    ayyse[ib] -= sign * ayyne[ib]
    ayysw[ib] -= sign * ayynw[ib]
    axys[ib] -= sign * axyn[ib]
    axyse[ib] -= sign * axyne[ib]
    axysw[ib] -= sign * axynw[ib]
    # SOUTH boundary
    ib = ii[:, 0]
    if boundary[1] == 'S':
        sign = 1
    elif boundary[1] == 'A':
        sign = -1
    elif boundary[1] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxn[ib] += sign * axxs[ib]
    axxne[ib] += sign * axxse[ib]
    axxnw[ib] += sign * axxsw[ib]
    ayxn[ib] += sign * ayxs[ib]
    ayxne[ib] += sign * ayxse[ib]
    ayxnw[ib] += sign * ayxsw[ib]
    ayyn[ib] -= sign * ayys[ib]
    ayyne[ib] -= sign * ayyse[ib]
    ayynw[ib] -= sign * ayysw[ib]
    axyn[ib] -= sign * axys[ib]
    axyne[ib] -= sign * axyse[ib]
    axynw[ib] -= sign * axysw[ib]
    # EAST boundary
    ib = ii[-1, :]
    if boundary[2] == 'S':
        sign = 1
    elif boundary[2] == 'A':
        sign = -1
    elif boundary[2] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxw[ib] += sign * axxe[ib]
    axxnw[ib] += sign * axxne[ib]
    axxsw[ib] += sign * axxse[ib]
    ayxw[ib] += sign * ayxe[ib]
    ayxnw[ib] += sign * ayxne[ib]
    ayxsw[ib] += sign * ayxse[ib]
    ayyw[ib] -= sign * ayye[ib]
    ayynw[ib] -= sign * ayyne[ib]
    ayysw[ib] -= sign * ayyse[ib]
    axyw[ib] -= sign * axye[ib]
    axynw[ib] -= sign * axyne[ib]
    axysw[ib] -= sign * axyse[ib]
    # WEST boundary
    ib = ii[0, :]
    if boundary[3] == 'S':
        sign = 1
    elif boundary[3] == 'A':
        sign = -1
    elif boundary[3] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxe[ib] += sign * axxw[ib]
    axxne[ib] += sign * axxnw[ib]
    axxse[ib] += sign * axxsw[ib]
    ayxe[ib] += sign * ayxw[ib]
    ayxne[ib] += sign * ayxnw[ib]
    ayxse[ib] += sign * ayxsw[ib]
    ayye[ib] -= sign * ayyw[ib]
    ayyne[ib] -= sign * ayynw[ib]
    ayyse[ib] -= sign * ayysw[ib]
    axye[ib] -= sign * axyw[ib]
    axyne[ib] -= sign * axynw[ib]
    axyse[ib] -= sign * axysw[ib]
    # Assemble sparse matrix: four (nx*ny x nx*ny) quadrants
    # [[Axx, Axy], [Ayx, Ayy]] built in COO form, then converted to CSR.
    iall = ii.flatten()
    i_s = ii[:, :-1].flatten()
    i_n = ii[:, 1:].flatten()
    i_e = ii[1:, :].flatten()
    i_w = ii[:-1, :].flatten()
    i_ne = ii[1:, 1:].flatten()
    i_se = ii[1:, :-1].flatten()
    i_sw = ii[:-1, :-1].flatten()
    i_nw = ii[:-1, 1:].flatten()
    Ixx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
    Jxx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
    Vxx = numpy.r_[axxp[iall], axxe[i_w], axxw[i_e], axxn[i_s], axxs[
        i_n], axxsw[i_ne], axxnw[i_se], axxne[i_sw], axxse[i_nw]]
    Ixy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
    Jxy = numpy.r_[
        iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
    Vxy = numpy.r_[axyp[iall], axye[i_w], axyw[i_e], axyn[i_s], axys[
        i_n], axysw[i_ne], axynw[i_se], axyne[i_sw], axyse[i_nw]]
    Iyx = numpy.r_[
        iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny
    Jyx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
    Vyx = numpy.r_[ayxp[iall], ayxe[i_w], ayxw[i_e], ayxn[i_s], ayxs[
        i_n], ayxsw[i_ne], ayxnw[i_se], ayxne[i_sw], ayxse[i_nw]]
    Iyy = numpy.r_[
        iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny
    Jyy = numpy.r_[
        iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
    Vyy = numpy.r_[ayyp[iall], ayye[i_w], ayyw[i_e], ayyn[i_s], ayys[
        i_n], ayysw[i_ne], ayynw[i_se], ayyne[i_sw], ayyse[i_nw]]
    I = numpy.r_[Ixx, Ixy, Iyx, Iyy]
    J = numpy.r_[Jxx, Jxy, Jyx, Jyy]
    V = numpy.r_[Vxx, Vxy, Vyx, Vyy]
    A = coo_matrix((V, (I, J))).tocsr()
    return A
def compute_other_fields(self, neffs, Hxs, Hys):
    """Derive the remaining field components for each solved mode.

    Given the eigenpairs ``(neff, Hx, Hy)`` produced by ``solve``, this
    assembles a sparse operator ``B`` such that ``Hz = B * [Hx; Hy] / 1j``
    and then recovers the electric field components from the discretised
    curl relations on the cell-centred grid.

    Parameters
    ----------
    neffs : sequence
        Effective indices of the modes.
    Hxs, Hys : sequence of 2D arrays
        Transverse magnetic-field profiles, one pair per mode, on the
        (x, y) node grid.

    Returns
    -------
    tuple of lists
        ``(Hzs, Exs, Eys, Ezs)``: one 2D array per mode for each
        component.  Hz lives on the node grid; the E components live on
        the cell-centred (xc, yc) grid.
    """
    from scipy.sparse import coo_matrix

    wl = self.wl
    x = self.x
    y = self.y
    epsfunc = self.epsfunc
    boundary = self.boundary

    Hzs = []
    Exs = []
    Eys = []
    Ezs = []
    for neff, Hx, Hy in zip(neffs, Hxs, Hys):
        dx = numpy.diff(x)
        dy = numpy.diff(y)

        # Replicate the edge cell sizes so every node has all four
        # neighbour spacings defined.
        dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
        dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)

        # Cell-centre coordinates.
        xc = (x[:-1] + x[1:]) / 2
        yc = (y[:-1] + y[1:]) / 2

        # Permittivity sampled at the cell centres, padded by edge
        # replication so each node is surrounded by four cells.
        tmp = epsfunc(yc, xc)
        if isinstance(tmp, tuple):
            # Anisotropic case: full tensor (xx, xy, yx, yy, zz).
            tmp = [numpy.c_[t[:, 0:1], t, t[:, -1:]] for t in tmp]
            tmp = [numpy.r_[t[0:1, :], t, t[-1:, :]] for t in tmp]
            epsxx, epsxy, epsyx, epsyy, epszz = tmp
        else:
            # Isotropic case: diagonal tensor, zero off-diagonal terms.
            tmp = numpy.c_[tmp[:, 0:1], tmp, tmp[:, -1:]]
            tmp = numpy.r_[tmp[0:1, :], tmp, tmp[-1:, :]]
            epsxx = epsyy = epszz = tmp
            epsxy = epsyx = numpy.zeros_like(epsxx)

        nx = len(x)
        ny = len(y)

        k = 2 * numpy.pi / wl  # free-space wavenumber

        ones_nx = numpy.ones((nx, 1))
        ones_ny = numpy.ones((1, ny))

        # Distances from each node to its north/south/east/west neighbours.
        n = numpy.dot(ones_nx, dy[:, 1:]).flatten()
        s = numpy.dot(ones_nx, dy[:, :-1]).flatten()
        e = numpy.dot(dx[1:, :], ones_ny).flatten()
        w = numpy.dot(dx[:-1, :], ones_ny).flatten()

        # Permittivity tensor components in the four cells (1..4)
        # surrounding each node.
        exx1 = epsxx[:-1, 1:].flatten()
        exx2 = epsxx[:-1, :-1].flatten()
        exx3 = epsxx[1:, :-1].flatten()
        exx4 = epsxx[1:, 1:].flatten()
        eyy1 = epsyy[:-1, 1:].flatten()
        eyy2 = epsyy[:-1, :-1].flatten()
        eyy3 = epsyy[1:, :-1].flatten()
        eyy4 = epsyy[1:, 1:].flatten()
        exy1 = epsxy[:-1, 1:].flatten()
        exy2 = epsxy[:-1, :-1].flatten()
        exy3 = epsxy[1:, :-1].flatten()
        exy4 = epsxy[1:, 1:].flatten()
        eyx1 = epsyx[:-1, 1:].flatten()
        eyx2 = epsyx[:-1, :-1].flatten()
        eyx3 = epsyx[1:, :-1].flatten()
        eyx4 = epsyx[1:, 1:].flatten()
        ezz1 = epszz[:-1, 1:].flatten()
        ezz2 = epszz[:-1, :-1].flatten()
        ezz3 = epszz[1:, :-1].flatten()
        ezz4 = epszz[1:, 1:].flatten()

        b = neff * k  # modal propagation constant

        # Finite-difference stencil coefficients relating Hz at each node
        # to Hx (bzx*) and Hy (bzy*) at the node and its eight neighbours.
        bzxne = (0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * eyx4 / ezz4 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy3 * eyy1 * w * eyy2 +
                 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (1 - exx4 / ezz4) / ezz3 / ezz2 / (w * exx3 + e * exx2) / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx1 * s) / b
        bzxse = (-0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * eyx3 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy1 * w * eyy2 +
                 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (1 - exx3 / ezz3) / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * n * exx1 * exx4) / b
        bzxnw = (-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * eyx1 / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy2 * e -
                 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (1 - exx1 / ezz1) / ezz3 / ezz2 / (w * exx3 + e * exx2) / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx4 * s) / b
        bzxsw = (0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * eyx2 / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * e -
                 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (1 - exx2 / ezz2) / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx3 * n * exx1 * exx4) / b
        bzxn = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * n * ezz1 * ezz2 / eyy1 * (2 * eyy1 / ezz1 / n ** 2 + eyx1 / ezz1 / n / w) + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * n * ezz4 * ezz3 / eyy4 * (2 * eyy4 / ezz4 / n ** 2 - eyx4 / ezz4 / n / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * ((1 - exx1 / ezz1) / n / w - exy1 / ezz1 *
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                      (2. / n ** 2 - 2 / n ** 2 * s / (n + s))) / exx1 * ezz1 * w + (ezz4 - ezz1) * s / n / (n + s) + 0.5 * ezz1 * (-(1 - exx4 / ezz4) / n / e - exy4 / ezz4 * (2. / n ** 2 - 2 / n ** 2 * s / (n + s))) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (-ezz3 * exy2 / n / (n + s) / exx2 * w + (ezz3 - ezz2) * s / n / (n + s) - ezz2 * exy3 / n / (n + s) / exx3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzxs = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * s * ezz2 * ezz1 / eyy2 * (2 * eyy2 / ezz2 / s ** 2 - eyx2 / ezz2 / s / w) + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * s * ezz3 * ezz4 / eyy3 * (2 * eyy3 / ezz3 / s ** 2 + eyx3 / ezz3 / s / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (-ezz4 * exy1 / s / (n + s) / exx1 * w - (ezz4 - ezz1)
                                                                                                                                                                                                                                                                                                                                                                                                                        * n / s / (n + s) - ezz1 * exy4 / s / (n + s) / exx4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-(1 - exx2 / ezz2) / s / w - exy2 / ezz2 * (2. / s ** 2 - 2 / s ** 2 * n / (n + s))) / exx2 * ezz2 * w - (ezz3 - ezz2) * n / s / (n + s) + 0.5 * ezz2 * ((1 - exx3 / ezz3) / s / e - exy3 / ezz3 * (2. / s ** 2 - 2 / s ** 2 * n / (n + s))) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzxe = ((n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (2. / e ** 2 - eyx4 / ezz4 / n / e) + 0.5 * s * ezz3 * ezz4 / eyy3 * (2. / e ** 2 + eyx3 / ezz3 / s / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e +
                (-0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz1 * (1 - exx4 / ezz4) / n / exx4 * ezz4 - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz2 * (1 - exx3 / ezz3) / s / exx3 * ezz3) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzxw = ((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (2. / w ** 2 + eyx1 / ezz1 / n / w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (2. / w ** 2 - eyx2 / ezz2 / s / w)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e +
                (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz4 * (1 - exx1 / ezz1) / n / exx1 * ezz1 + 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz3 * (1 - exx2 / ezz2) / s / exx2 * ezz2) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzxp = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (-2. / w ** 2 - 2 * eyy1 / ezz1 / n ** 2 + k ** 2 * eyy1 - eyx1 / ezz1 / n / w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (-2. / w ** 2 - 2 * eyy2 / ezz2 / s ** 2 + k ** 2 * eyy2 + eyx2 / ezz2 / s / w)) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-2. / e ** 2 - 2 * eyy4 / ezz4 / n ** 2 + k ** 2 * eyy4 + eyx4 / ezz4 / n / e) + 0.5 * s * ezz3 * ezz4 / eyy3 * (-2. / e ** 2 - 2 * eyy3 / ezz3 / s ** 2 + k ** 2 * eyy3 - eyx3 / ezz3 / s / e))) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (-k **
                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                   2 * exy1 - (1 - exx1 / ezz1) / n / w - exy1 / ezz1 * (-2. / n ** 2 - 2 / n ** 2 * (n - s) / s)) / exx1 * ezz1 * w + (ezz4 - ezz1) * (n - s) / n / s + 0.5 * ezz1 * (-k ** 2 * exy4 + (1 - exx4 / ezz4) / n / e - exy4 / ezz4 * (-2. / n ** 2 - 2 / n ** 2 * (n - s) / s)) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-k ** 2 * exy2 + (1 - exx2 / ezz2) / s / w - exy2 / ezz2 * (-2. / s ** 2 + 2 / s ** 2 * (n - s) / n)) / exx2 * ezz2 * w + (ezz3 - ezz2) * (n - s) / n / s + 0.5 * ezz2 * (-k ** 2 * exy3 - (1 - exx3 / ezz3) / s / e - exy3 / ezz3 * (-2. / s ** 2 + 2 / s ** 2 * (n - s) / n)) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzyne = (0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (1 - eyy4 / ezz4) / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy3 * eyy1 * w *
                 eyy2 + 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * exy4 / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx1 * s) / b
        bzyse = (-0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (1 - eyy3 / ezz3) / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy1 * w *
                 eyy2 + 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * exy3 / ezz3 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * n * exx1 * exx4) / b
        bzynw = (-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (1 - eyy1 / ezz1) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 *
                 eyy2 * e - 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * exy1 / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx4 * s) / b
        bzysw = (0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (1 - eyy2 / ezz2) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 *
                 e - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * exy2 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx3 * n * exx1 * exx4) / b
        bzyn = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * ezz1 * ezz2 / eyy1 * (1 - eyy1 / ezz1) / w - 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * ezz4 * ezz3 / eyy4 * (1 - eyy4 / ezz4) / e) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w *
                eyy2 * e + (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (2. / n ** 2 + exy1 / ezz1 / n / w) / exx1 * ezz1 * w + 0.5 * ezz1 * (2. / n ** 2 - exy4 / ezz4 / n / e) / exx4 * ezz4 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzys = ((-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * ezz2 * ezz1 / eyy2 * (1 - eyy2 / ezz2) / w + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * ezz3 * ezz4 / eyy3 * (1 - eyy3 / ezz3) / e) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w *
                eyy2 * e - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (2. / s ** 2 - exy2 / ezz2 / s / w) / exx2 * ezz2 * w + 0.5 * ezz2 * (2. / s ** 2 + exy3 / ezz3 / s / e) / exx3 * ezz3 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzye = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (-n * ezz2 / eyy1 * eyx1 / e / (e + w) + (ezz1 - ezz2) * w / e / (e + w) - s * ezz1 / eyy2 * eyx2 / e / (e + w)) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-(1 - eyy4 / ezz4) / n / e - eyx4 / ezz4 * (2. / e ** 2 - 2 / e ** 2 * w / (e + w))) + 0.5 * s * ezz3 * ezz4 / eyy3 * ((1 - eyy3 / ezz3) / s / e - eyx3 / ezz3 * (2. / e ** 2 - 2 / e ** 2 * w / (e + w))) + (ezz4 - ezz3) * w / e / (e + w))) / ezz4 /
                ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz1 * (2 * exx4 / ezz4 / e ** 2 - exy4 / ezz4 / n / e) / exx4 * ezz4 * e - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz2 * (2 * exx3 / ezz3 / e ** 2 + exy3 / ezz3 / s / e) / exx3 * ezz3 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzyw = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * ((1 - eyy1 / ezz1) / n / w - eyx1 / ezz1 * (2. / w ** 2 - 2 / w ** 2 * e / (e + w))) - (ezz1 - ezz2) * e / w / (e + w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (-(1 - eyy2 / ezz2) / s / w - eyx2 / ezz2 * (2. / w ** 2 - 2 / w ** 2 * e / (e + w)))) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (-n * ezz3 / eyy4 * eyx4 / w / (e + w) - s * ezz4 / eyy3 * eyx3 / w / (e + w) - (ezz4 - ezz3) * e / w / (e + w))) / ezz4 /
                ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz4 * (2 * exx1 / ezz1 / w ** 2 + exy1 / ezz1 / n / w) / exx1 * ezz1 * w - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz3 * (2 * exx2 / ezz2 / w ** 2 - exy2 / ezz2 / s / w) / exx2 * ezz2 * w) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
        bzyp = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (-k ** 2 * eyx1 - (1 - eyy1 / ezz1) / n / w - eyx1 / ezz1 * (-2. / w ** 2 + 2 / w ** 2 * (e - w) / e)) + (ezz1 - ezz2) * (e - w) / e / w + 0.5 * s * ezz2 * ezz1 / eyy2 * (-k ** 2 * eyx2 + (1 - eyy2 / ezz2) / s / w - eyx2 / ezz2 * (-2. / w ** 2 + 2 / w ** 2 * (e - w) / e))) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-k ** 2 * eyx4 + (1 - eyy4 / ezz4) / n / e - eyx4 / ezz4 * (-2. / e ** 2 - 2 / e ** 2 * (e - w) / w)) + 0.5 * s * ezz3 * ezz4 / eyy3 * (-k ** 2 * eyx3 - (1 - eyy3 / ezz3) / s / e - eyx3 / ezz3 * (-2. / e ** 2 - 2 / e ** 2 * (e - w) / w)) + (ezz4 - ezz3) * (e - w) / e / w)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) /
                ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (-2. / n ** 2 - 2 * exx1 / ezz1 / w ** 2 + k ** 2 * exx1 - exy1 / ezz1 / n / w) / exx1 * ezz1 * w + 0.5 * ezz1 * (-2. / n ** 2 - 2 * exx4 / ezz4 / e ** 2 + k ** 2 * exx4 + exy4 / ezz4 / n / e) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-2. / s ** 2 - 2 * exx2 / ezz2 / w ** 2 + k ** 2 * exx2 + exy2 / ezz2 / s / w) / exx2 * ezz2 * w + 0.5 * ezz2 * (-2. / s ** 2 - 2 * exx3 / ezz3 / e ** 2 + k ** 2 * exx3 - exy3 / ezz3 / s / e) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b

        # Linear index of each (x, y) node.
        ii = numpy.arange(nx * ny).reshape(nx, ny)

        # Fold the stencil onto itself at each edge according to the
        # requested symmetry: 'S' symmetric, 'A' antisymmetric, '0' zero
        # field just outside the boundary.
        # NORTH boundary
        ib = ii[:, -1]
        if boundary[0] == 'S':
            sign = 1
        elif boundary[0] == 'A':
            sign = -1
        elif boundary[0] == '0':
            sign = 0
        else:
            raise ValueError('unknown boundary conditions')
        bzxs[ib] += sign * bzxn[ib]
        bzxse[ib] += sign * bzxne[ib]
        bzxsw[ib] += sign * bzxnw[ib]
        bzys[ib] -= sign * bzyn[ib]
        bzyse[ib] -= sign * bzyne[ib]
        bzysw[ib] -= sign * bzynw[ib]

        # SOUTH boundary
        ib = ii[:, 0]
        if boundary[1] == 'S':
            sign = 1
        elif boundary[1] == 'A':
            sign = -1
        elif boundary[1] == '0':
            sign = 0
        else:
            raise ValueError('unknown boundary conditions')
        bzxn[ib] += sign * bzxs[ib]
        bzxne[ib] += sign * bzxse[ib]
        bzxnw[ib] += sign * bzxsw[ib]
        bzyn[ib] -= sign * bzys[ib]
        bzyne[ib] -= sign * bzyse[ib]
        bzynw[ib] -= sign * bzysw[ib]

        # EAST boundary
        ib = ii[-1, :]
        if boundary[2] == 'S':
            sign = 1
        elif boundary[2] == 'A':
            sign = -1
        elif boundary[2] == '0':
            sign = 0
        else:
            raise ValueError('unknown boundary conditions')
        bzxw[ib] += sign * bzxe[ib]
        bzxnw[ib] += sign * bzxne[ib]
        bzxsw[ib] += sign * bzxse[ib]
        bzyw[ib] -= sign * bzye[ib]
        bzynw[ib] -= sign * bzyne[ib]
        bzysw[ib] -= sign * bzyse[ib]

        # WEST boundary
        ib = ii[0, :]
        if boundary[3] == 'S':
            sign = 1
        elif boundary[3] == 'A':
            sign = -1
        elif boundary[3] == '0':
            sign = 0
        else:
            raise ValueError('unknown boundary conditions')
        bzxe[ib] += sign * bzxw[ib]
        bzxne[ib] += sign * bzxnw[ib]
        bzxse[ib] += sign * bzxsw[ib]
        bzye[ib] -= sign * bzyw[ib]
        bzyne[ib] -= sign * bzynw[ib]
        bzyse[ib] -= sign * bzysw[ib]

        # Assemble sparse matrix
        iall = ii.flatten()
        i_s = ii[:, :-1].flatten()
        i_n = ii[:, 1:].flatten()
        i_e = ii[1:, :].flatten()
        i_w = ii[:-1, :].flatten()
        i_ne = ii[1:, 1:].flatten()
        i_se = ii[1:, :-1].flatten()
        i_sw = ii[:-1, :-1].flatten()
        i_nw = ii[:-1, 1:].flatten()

        Izx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
        Jzx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
        Vzx = numpy.r_[bzxp[iall], bzxe[i_w], bzxw[i_e], bzxn[i_s], bzxs[
            i_n], bzxsw[i_ne], bzxnw[i_se], bzxne[i_sw], bzxse[i_nw]]

        Izy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
        Jzy = numpy.r_[
            iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
        Vzy = numpy.r_[bzyp[iall], bzye[i_w], bzyw[i_e], bzyn[i_s], bzys[
            i_n], bzysw[i_ne], bzynw[i_se], bzyne[i_sw], bzyse[i_nw]]

        I = numpy.r_[Izx, Izy]
        J = numpy.r_[Jzx, Jzy]
        V = numpy.r_[Vzx, Vzy]

        B = coo_matrix((V, (I, J))).tocsr()

        # Hz from the transverse H components (the 1/1j undoes the
        # i factor folded into the stencil).
        HxHy = numpy.r_[Hx, Hy]
        Hz = B * HxHy.ravel() / 1j
        Hz = Hz.reshape(Hx.shape)

        # Permittivity on the cell-centred (xc, yc) grid.
        exx = epsxx[1:-1, 1:-1]
        exy = epsxy[1:-1, 1:-1]
        eyx = epsyx[1:-1, 1:-1]
        eyy = epsyy[1:-1, 1:-1]
        ezz = epszz[1:-1, 1:-1]
        edet = (exx * eyy - exy * eyx)

        h = e.reshape(nx, ny)[:-1, :-1]
        v = n.reshape(nx, ny)[:-1, :-1]

        # D field on the cell-centred (xc, yc) grid from the curl of H.
        Dx = neff * centered2d(Hy) + (
            Hz[:-1, 1:] + Hz[1:, 1:] - Hz[:-1, :-1] - Hz[1:, :-1]) / (2j * k * v)
        Dy = -neff * centered2d(Hx) - (
            Hz[1:, :-1] + Hz[1:, 1:] - Hz[:-1, 1:] - Hz[:-1, :-1]) / (2j * k * h)
        Dz = ((Hy[1:, :-1] + Hy[1:, 1:] - Hy[:-1, 1:] - Hy[:-1, :-1]) / (2 * h) -
              (Hx[:-1, 1:] + Hx[1:, 1:] - Hx[:-1, :-1] - Hx[1:, :-1]) / (2 * v)) / (1j * k)

        # E = inverse(eps) * D for the 2x2 transverse tensor block.
        Ex = (eyy * Dx - exy * Dy) / edet
        Ey = (exx * Dy - eyx * Dx) / edet
        Ez = Dz / ezz

        Hzs.append(Hz)
        Exs.append(Ex)
        Eys.append(Ey)
        Ezs.append(Ez)

    return (Hzs, Exs, Eys, Ezs)
def solve(self, neigs=4, tol=0, guess=None, mode_profiles=True, initial_mode_guess=None):
    """
    This function finds the eigenmodes.

    Parameters
    ----------
    neigs : int
        number of eigenmodes to find
    tol : float
        Relative accuracy for eigenvalues.  The default value of 0 implies
        machine precision.
    guess : float
        a guess for the refractive index. Only finds eigenvectors with an
        effective refractive index higher than this value.
    mode_profiles : bool
        If False, only the effective indices are computed (no field
        profiles and no ``self.modes``).
    initial_mode_guess : array or None
        Starting vector for the iterative eigensolver (``v0`` of ``eigs``).

    Returns
    -------
    self : an instance of the VFDModeSolver class
        obtain the fields of interest for specific modes using, for example:
        solver = EMpy.modesolvers.FD.VFDModeSolver(wavelength, x, y, epsf, boundary).solve()
        Ex = solver.modes[0].Ex
        Ey = solver.modes[0].Ey
        Ez = solver.modes[0].Ez
    """
    # Public API entry point (scipy.sparse.linalg.eigen is a deprecated
    # namespace removed in recent SciPy releases).
    from scipy.sparse.linalg import eigs

    self.nmodes = neigs
    self.tol = tol

    A = self.build_matrix()

    if guess is not None:
        # Shift-invert around (guess * k)**2 so the solver homes in on
        # modes with effective index near the supplied guess.
        k = 2 * numpy.pi / self.wl
        shift = (guess * k) ** 2
    else:
        shift = None

    # BUG FIX: honour the caller's `tol` rather than a hard-coded 0.001.
    result = eigs(A,
                  k=neigs,
                  which='LR',
                  tol=tol,
                  ncv=None,
                  v0=initial_mode_guess,
                  return_eigenvectors=mode_profiles,
                  sigma=shift)

    # BUG FIX: with return_eigenvectors=False, eigs returns only the
    # eigenvalue array; unpacking into two names would raise.
    if mode_profiles:
        eigvals, eigvecs = result
    else:
        eigvals = result

    # Eigenvalues are (neff * k)**2; recover neff.  numpy.sqrt handles the
    # complex eigenvalues (scipy.sqrt was removed from modern SciPy).
    neffs = self.wl * numpy.sqrt(eigvals) / (2 * numpy.pi)

    if mode_profiles:
        Hxs = []
        Hys = []
        nx = self.nx
        ny = self.ny
        for ieig in range(neigs):
            Hxs.append(eigvecs[:nx * ny, ieig].reshape(nx, ny))
            Hys.append(eigvecs[nx * ny:, ieig].reshape(nx, ny))

    # Sort the modes by decreasing effective index.
    idx = numpy.flipud(numpy.argsort(neffs))
    neffs = neffs[idx]
    self.neff = neffs

    if mode_profiles:
        Hxs = [Hxs[i] for i in idx]
        Hys = [Hys[i] for i in idx]

        [Hzs, Exs, Eys, Ezs] = self.compute_other_fields(neffs, Hxs, Hys)

        self.modes = []
        for (neff, Hx, Hy, Hz, Ex, Ey, Ez) in zip(neffs, Hxs, Hys, Hzs, Exs, Eys, Ezs):
            self.modes.append(
                FDMode(self.wl, self.x, self.y, neff, Ey, Ex, Ez, Hy, Hx, Hz).normalize())

    return self
def __str__(self):
    """Return a short, human-readable description of this solver."""
    return 'Vectorial Finite Difference Modesolver\n'
class FDMode():
    """Container for one finite-difference eigenmode.

    Stores the effective index, the grids and all six field components,
    and offers normalisation plus TE/TM intensity helpers.
    """

    def __init__(self, wl, x, y, neff, Ex, Ey, Ez, Hx, Hy, Hz):
        """Store the wavelength, grids, effective index and field arrays.

        E components are expected on the cell-centred grid, H components
        on the node grid (as produced by the solver).
        """
        self.wl = wl
        self.x = x
        self.y = y
        self.neff = neff
        self.Ex = Ex
        self.Ey = Ey
        self.Ez = Ez
        self.Hx = Hx
        self.Hy = Hy
        self.Hz = Hz

        # Name -> array lookup; shares the same array objects as the
        # attributes above.
        self.fields = col.OrderedDict({
            'Ex': Ex,
            'Ey': Ey,
            'Ez': Ez,
            'Hx': Hx,
            'Hy': Hy,
            'Hz': Hz
        })

    def norm(self):
        """Return the square root of the mode's total integrated intensity."""
        x = centered1d(self.x)
        y = centered1d(self.y)
        # BUG FIX: scipy.sqrt was an alias of numpy.sqrt and has been
        # removed from the SciPy namespace; use numpy.sqrt directly.
        return numpy.sqrt(trapz2(self.intensity(), x=x, y=y))

    def normalize(self):
        """Scale all six field components so the mode has unit power.

        Returns self to allow chaining (e.g. ``FDMode(...).normalize()``).
        NOTE(review): relies on the fields being numpy arrays so that the
        in-place ``/=`` keeps ``self.fields`` in sync -- confirm callers
        never pass plain scalars.
        """
        n = self.norm()
        self.Ex /= n
        self.Ey /= n
        self.Ez /= n
        self.Hx /= n
        self.Hy /= n
        self.Hz /= n
        return self

    def intensityTETM(self, x=None, y=None):
        """Return the (TE, TM) intensity components.

        If `x` and `y` are given, the intensities are interpolated onto
        that grid; otherwise they are returned on the cell-centred grid.
        """
        I_TE = self.Ex * centered2d(numpy.conj(self.Hy)) / 2.
        I_TM = -self.Ey * centered2d(numpy.conj(self.Hx)) / 2.
        if x is None and y is None:
            return (I_TE, I_TM)
        else:
            x0 = centered1d(self.x)
            y0 = centered1d(self.y)
            I_TE_ = interp2(x, y, x0, y0, I_TE)
            I_TM_ = interp2(x, y, x0, y0, I_TM)
            return (I_TE_, I_TM_)

    def intensity(self, x=None, y=None):
        """Return the total (TE + TM) intensity, optionally interpolated."""
        I_TE, I_TM = self.intensityTETM(x, y)
        return I_TE + I_TM
|
jtambasco/modesolverpy | modesolverpy/_mode_solver_lib.py | _ModeSolverVectorial.solve | python | def solve(self, neigs=4, tol=0, guess=None, mode_profiles=True, initial_mode_guess=None):
from scipy.sparse.linalg import eigen
self.nmodes = neigs
self.tol = tol
A = self.build_matrix()
if guess is not None:
# calculate shift for eigs function
k = 2 * numpy.pi / self.wl
shift = (guess * k) ** 2
else:
shift = None
[eigvals, eigvecs] = eigen.eigs(A,
k=neigs,
which='LR',
tol=0.001,
ncv=None,
v0 = initial_mode_guess,
return_eigenvectors=mode_profiles,
sigma=shift)
neffs = self.wl * scipy.sqrt(eigvals) / (2 * numpy.pi)
if mode_profiles:
Hxs = []
Hys = []
nx = self.nx
ny = self.ny
for ieig in range(neigs):
Hxs.append(eigvecs[:nx * ny, ieig].reshape(nx, ny))
Hys.append(eigvecs[nx * ny:, ieig].reshape(nx, ny))
# sort the modes
idx = numpy.flipud(numpy.argsort(neffs))
neffs = neffs[idx]
self.neff = neffs
if mode_profiles:
tmpx = []
tmpy = []
for i in idx:
tmpx.append(Hxs[i])
tmpy.append(Hys[i])
Hxs = tmpx
Hys = tmpy
[Hzs, Exs, Eys, Ezs] = self.compute_other_fields(neffs, Hxs, Hys)
self.modes = []
for (neff, Hx, Hy, Hz, Ex, Ey, Ez) in zip(neffs, Hxs, Hys, Hzs, Exs, Eys, Ezs):
self.modes.append(
FDMode(self.wl, self.x, self.y, neff, Ey, Ex, Ez, Hy, Hx, Hz).normalize())
return self | This function finds the eigenmodes.
Parameters
----------
neigs : int
number of eigenmodes to find
tol : float
Relative accuracy for eigenvalues. The default value of 0 implies machine precision.
guess : float
a guess for the refractive index. Only finds eigenvectors with an effective refractive index
higher than this value.
Returns
-------
self : an instance of the VFDModeSolver class
obtain the fields of interest for specific modes using, for example:
solver = EMpy.modesolvers.FD.VFDModeSolver(wavelength, x, y, epsf, boundary).solve()
Ex = solver.modes[0].Ex
Ey = solver.modes[0].Ey
Ez = solver.modes[0].Ez | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/_mode_solver_lib.py#L926-L1003 | [
"def build_matrix(self):\n\n from scipy.sparse import coo_matrix\n\n wl = self.wl\n x = self.x\n y = self.y\n epsfunc = self.epsfunc\n boundary = self.boundary\n\n dx = numpy.diff(x)\n dy = numpy.diff(y)\n\n dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)\n dy = numpy.r_[dy[0], dy, dy[... | class _ModeSolverVectorial():
"""
The VFDModeSolver class computes the electric and magnetic fields for modes of a dielectric
waveguide using the "Vector Finite Difference (VFD)" method, as described in
A. B. Fallahkhair, K. S. Li and T. E. Murphy, "Vector Finite Difference Modesolver for
Anisotropic Dielectric Waveguides", J. Lightwave Technol. 26(11), 1423-1431, (2008).
Parameters
----------
wl : float
The wavelength of the optical radiation (units are arbitrary, but must be self-consistent
between all inputs. Recommandation is to just use micron for everthing)
x : 1D array of floats
Array of x-values
y : 1D array of floats
Array of y-values
epsfunc : function
This is a function that provides the relative permittivity (square of the refractive index)
as a function of the x and y position. The function must be of the form:
``myRelativePermittivity(x,y)``
The function can either return a single float, corresponding to an isotropic refractive index,
or it may return a length-5 tuple. In the tuple case, the relative permittivity is given in the form
(epsxx, epsxy, epsyx, epsyy, epszz).
The light is `z` propagating.
boundary : str
This is a string that identifies the type of boundary conditions applied.
The following options are available:
'A' - Hx is antisymmetric, Hy is symmetric.
'S' - Hx is symmetric and, Hy is antisymmetric.
'0' - Hx and Hy are zero immediately outside of the boundary.
The string identifies all four boundary conditions, in the order: North, south, east, west.
For example, boundary='000A'
Returns
-------
self : an instance of the VFDModeSolver class
Typically self.solve() will be called in order to actually find the modes.
"""
def __init__(self, wl, structure, boundary):
    """Record the wavelength, grids, permittivity function and boundaries.

    Note the deliberate axis swap: the solver's internal ``x``/``y`` grids
    are taken from the structure's ``y``/``x`` grids respectively.
    """
    self.wl = wl
    # The solver operates on a transposed grid relative to the structure.
    self.x, self.y = structure.y, structure.x
    self.epsfunc = structure.eps_func
    self.boundary = boundary
def build_matrix(self):
    """Assemble the sparse operator A of the vector FD eigenproblem.

    Discretises the coupled transverse-H eigenvalue problem
    ``A * [Hx; Hy] = (neff * k)**2 * [Hx; Hy]`` on the non-uniform
    (x, y) grid, following Fallahkhair, Li and Murphy, J. Lightwave
    Technol. 26(11), 1423-1431 (2008).

    Returns
    -------
    scipy.sparse.csr_matrix
        The (2*nx*ny) x (2*nx*ny) system matrix.
    """
    from scipy.sparse import coo_matrix

    wl = self.wl
    x = self.x
    y = self.y
    epsfunc = self.epsfunc
    boundary = self.boundary

    dx = numpy.diff(x)
    dy = numpy.diff(y)

    # Replicate the edge cell sizes so every node has all four
    # neighbour spacings defined.
    dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
    dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)

    # Cell-centre coordinates.
    xc = (x[:-1] + x[1:]) / 2
    yc = (y[:-1] + y[1:]) / 2

    # Permittivity sampled at the cell centres, padded by edge
    # replication so each node is surrounded by four cells.
    tmp = epsfunc(yc, xc)
    if isinstance(tmp, tuple):
        tmp = [numpy.c_[t[:, 0:1], t, t[:, -1:]] for t in tmp]
        tmp = [numpy.r_[t[0:1, :], t, t[-1:, :]] for t in tmp]
        # NOTE(review): unpack order (yy, yx, xy, xx, zz) differs from
        # compute_other_fields -- presumably this accounts for the x/y
        # transposition done in __init__; confirm against the reference
        # implementation.
        epsyy, epsyx, epsxy, epsxx, epszz = tmp
    else:
        # Isotropic case: diagonal tensor, zero off-diagonal terms.
        tmp = numpy.c_[tmp[:, 0:1], tmp, tmp[:, -1:]]
        tmp = numpy.r_[tmp[0:1, :], tmp, tmp[-1:, :]]
        epsxx = epsyy = epszz = tmp
        epsxy = epsyx = numpy.zeros_like(epsxx)

    nx = len(x)
    ny = len(y)

    # Cached for later use by solve().
    self.nx = nx
    self.ny = ny

    k = 2 * numpy.pi / wl  # free-space wavenumber

    ones_nx = numpy.ones((nx, 1))
    ones_ny = numpy.ones((1, ny))

    # Distances from each node to its north/south/east/west neighbours.
    n = numpy.dot(ones_nx, dy[:, 1:]).flatten()
    s = numpy.dot(ones_nx, dy[:, :-1]).flatten()
    e = numpy.dot(dx[1:, :], ones_ny).flatten()
    w = numpy.dot(dx[:-1, :], ones_ny).flatten()

    # Permittivity tensor components in the four cells (1..4)
    # surrounding each node.
    exx1 = epsxx[:-1, 1:].flatten()
    exx2 = epsxx[:-1, :-1].flatten()
    exx3 = epsxx[1:, :-1].flatten()
    exx4 = epsxx[1:, 1:].flatten()
    eyy1 = epsyy[:-1, 1:].flatten()
    eyy2 = epsyy[:-1, :-1].flatten()
    eyy3 = epsyy[1:, :-1].flatten()
    eyy4 = epsyy[1:, 1:].flatten()
    exy1 = epsxy[:-1, 1:].flatten()
    exy2 = epsxy[:-1, :-1].flatten()
    exy3 = epsxy[1:, :-1].flatten()
    exy4 = epsxy[1:, 1:].flatten()
    eyx1 = epsyx[:-1, 1:].flatten()
    eyx2 = epsyx[:-1, :-1].flatten()
    eyx3 = epsyx[1:, :-1].flatten()
    eyx4 = epsyx[1:, 1:].flatten()
    ezz1 = epszz[:-1, 1:].flatten()
    ezz2 = epszz[:-1, :-1].flatten()
    ezz3 = epszz[1:, :-1].flatten()
    ezz4 = epszz[1:, 1:].flatten()

    # Frequently reused weighted-permittivity sums.
    ns21 = n * eyy2 + s * eyy1
    ns34 = n * eyy3 + s * eyy4
    ew14 = e * exx1 + w * exx4
    ew23 = e * exx2 + w * exx3

    # Nine-point stencil coefficients of the four operator blocks
    # (axx*: Hx->Hx, ayy*: Hy->Hy, axy*/ayx*: cross-coupling).
    axxn = ((2 * eyy4 * e - eyx4 * n) * (eyy3 / ezz4) / ns34 +
            (2 * eyy1 * w + eyx1 * n) * (eyy2 / ezz1) / ns21) / (n * (e + w))
    axxs = ((2 * eyy3 * e + eyx3 * s) * (eyy4 / ezz3) / ns34 +
            (2 * eyy2 * w - eyx2 * s) * (eyy1 / ezz2) / ns21) / (s * (e + w))
    ayye = (2 * n * exx4 - e * exy4) * exx1 / ezz4 / e / ew14 / \
        (n + s) + (2 * s * exx3 + e * exy3) * \
        exx2 / ezz3 / e / ew23 / (n + s)
    ayyw = (2 * exx1 * n + exy1 * w) * exx4 / ezz1 / w / ew14 / \
        (n + s) + (2 * exx2 * s - exy2 * w) * \
        exx3 / ezz2 / w / ew23 / (n + s)
    axxe = 2 / (e * (e + w)) + \
        (eyy4 * eyx3 / ezz3 - eyy3 * eyx4 / ezz4) / (e + w) / ns34
    axxw = 2 / (w * (e + w)) + \
        (eyy2 * eyx1 / ezz1 - eyy1 * eyx2 / ezz2) / (e + w) / ns21
    ayyn = 2 / (n * (n + s)) + \
        (exx4 * exy1 / ezz1 - exx1 * exy4 / ezz4) / (n + s) / ew14
    ayys = 2 / (s * (n + s)) + \
        (exx2 * exy3 / ezz3 - exx3 * exy2 / ezz2) / (n + s) / ew23

    axxne = +eyx4 * eyy3 / ezz4 / (e + w) / ns34
    axxse = -eyx3 * eyy4 / ezz3 / (e + w) / ns34
    axxnw = -eyx1 * eyy2 / ezz1 / (e + w) / ns21
    axxsw = +eyx2 * eyy1 / ezz2 / (e + w) / ns21

    ayyne = +exy4 * exx1 / ezz4 / (n + s) / ew14
    ayyse = -exy3 * exx2 / ezz3 / (n + s) / ew23
    ayynw = -exy1 * exx4 / ezz1 / (n + s) / ew14
    ayysw = +exy2 * exx3 / ezz2 / (n + s) / ew23

    # Central (diagonal) coefficients.
    axxp = -axxn - axxs - axxe - axxw - axxne - axxse - axxnw - axxsw + k ** 2 * \
        (n + s) * \
        (eyy4 * eyy3 * e / ns34 + eyy1 * eyy2 * w / ns21) / (e + w)
    ayyp = -ayyn - ayys - ayye - ayyw - ayyne - ayyse - ayynw - ayysw + k ** 2 * \
        (e + w) * \
        (exx1 * exx4 * n / ew14 + exx2 * exx3 * s / ew23) / (n + s)
    axyn = (eyy3 * eyy4 / ezz4 / ns34 - eyy2 * eyy1 / ezz1 /
            ns21 + s * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34) / (e + w)
    axys = (eyy1 * eyy2 / ezz2 / ns21 - eyy4 * eyy3 / ezz3 /
            ns34 + n * (eyy2 * eyy4 - eyy1 * eyy3) / ns21 / ns34) / (e + w)
    ayxe = (exx1 * exx4 / ezz4 / ew14 - exx2 * exx3 / ezz3 /
            ew23 + w * (exx2 * exx4 - exx1 * exx3) / ew23 / ew14) / (n + s)
    ayxw = (exx3 * exx2 / ezz2 / ew23 - exx4 * exx1 / ezz1 /
            ew14 + e * (exx4 * exx2 - exx1 * exx3) / ew23 / ew14) / (n + s)

    axye = (eyy4 * (1 + eyy3 / ezz4) - eyy3 * (1 + eyy4 / ezz4)) / ns34 / (e + w) - \
        (2 * eyx1 * eyy2 / ezz1 * n * w / ns21 +
         2 * eyx2 * eyy1 / ezz2 * s * w / ns21 +
         2 * eyx4 * eyy3 / ezz4 * n * e / ns34 +
         2 * eyx3 * eyy4 / ezz3 * s * e / ns34 +
         2 * eyy1 * eyy2 * (1. / ezz1 - 1. / ezz2) * w ** 2 / ns21) / e / (e + w) ** 2
    axyw = (eyy2 * (1 + eyy1 / ezz2) - eyy1 * (1 + eyy2 / ezz2)) / ns21 / (e + w) - \
        (2 * eyx1 * eyy2 / ezz1 * n * e / ns21 +
         2 * eyx2 * eyy1 / ezz2 * s * e / ns21 +
         2 * eyx4 * eyy3 / ezz4 * n * w / ns34 +
         2 * eyx3 * eyy4 / ezz3 * s * w / ns34 +
         2 * eyy3 * eyy4 * (1. / ezz3 - 1. / ezz4) * e ** 2 / ns34) / w / (e + w) ** 2
    ayxn = (exx4 * (1 + exx1 / ezz4) - exx1 * (1 + exx4 / ezz4)) / ew14 / (n + s) - \
        (2 * exy3 * exx2 / ezz3 * e * s / ew23 +
         2 * exy2 * exx3 / ezz2 * w * n / ew23 +
         2 * exy4 * exx1 / ezz4 * e * s / ew14 +
         2 * exy1 * exx4 / ezz1 * w * n / ew14 +
         2 * exx3 * exx2 * (1. / ezz3 - 1. / ezz2) * s ** 2 / ew23) / n / (n + s) ** 2
    ayxs = (exx2 * (1 + exx3 / ezz2) - exx3 * (1 + exx2 / ezz2)) / ew23 / (n + s) - \
        (2 * exy3 * exx2 / ezz3 * e * n / ew23 +
         2 * exy2 * exx3 / ezz2 * w * n / ew23 +
         2 * exy4 * exx1 / ezz4 * e * s / ew14 +
         2 * exy1 * exx4 / ezz1 * w * s / ew14 +
         2 * exx1 * exx4 * (1. / ezz1 - 1. / ezz4) * n ** 2 / ew14) / s / (n + s) ** 2

    axyne = +eyy3 * (1 - eyy4 / ezz4) / (e + w) / ns34
    axyse = -eyy4 * (1 - eyy3 / ezz3) / (e + w) / ns34
    axynw = -eyy2 * (1 - eyy1 / ezz1) / (e + w) / ns21
    axysw = +eyy1 * (1 - eyy2 / ezz2) / (e + w) / ns21

    ayxne = +exx1 * (1 - exx4 / ezz4) / (n + s) / ew14
    ayxse = -exx2 * (1 - exx3 / ezz3) / (n + s) / ew23
    ayxnw = -exx4 * (1 - exx1 / ezz1) / (n + s) / ew14
    ayxsw = +exx3 * (1 - exx2 / ezz2) / (n + s) / ew23

    axyp = -(axyn + axys + axye + axyw + axyne + axyse + axynw + axysw) - k ** 2 * (w * (n * eyx1 *
                                                                                         eyy2 + s * eyx2 * eyy1) / ns21 + e * (s * eyx3 * eyy4 + n * eyx4 * eyy3) / ns34) / (e + w)
    ayxp = -(ayxn + ayxs + ayxe + ayxw + ayxne + ayxse + ayxnw + ayxsw) - k ** 2 * (n * (w * exy1 *
                                                                                         exx4 + e * exy4 * exx1) / ew14 + s * (w * exy2 * exx3 + e * exy3 * exx2) / ew23) / (n + s)

    # Linear index of each (x, y) node.
    ii = numpy.arange(nx * ny).reshape(nx, ny)

    # Fold the stencil onto itself at each edge according to the
    # requested symmetry: 'S' symmetric, 'A' antisymmetric, '0' zero
    # field just outside the boundary.
    # NORTH boundary
    ib = ii[:, -1]
    if boundary[0] == 'S':
        sign = 1
    elif boundary[0] == 'A':
        sign = -1
    elif boundary[0] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxs[ib] += sign * axxn[ib]
    axxse[ib] += sign * axxne[ib]
    axxsw[ib] += sign * axxnw[ib]
    ayxs[ib] += sign * ayxn[ib]
    ayxse[ib] += sign * ayxne[ib]
    ayxsw[ib] += sign * ayxnw[ib]
    ayys[ib] -= sign * ayyn[ib]
    ayyse[ib] -= sign * ayyne[ib]
    ayysw[ib] -= sign * ayynw[ib]
    axys[ib] -= sign * axyn[ib]
    axyse[ib] -= sign * axyne[ib]
    axysw[ib] -= sign * axynw[ib]

    # SOUTH boundary
    ib = ii[:, 0]
    if boundary[1] == 'S':
        sign = 1
    elif boundary[1] == 'A':
        sign = -1
    elif boundary[1] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxn[ib] += sign * axxs[ib]
    axxne[ib] += sign * axxse[ib]
    axxnw[ib] += sign * axxsw[ib]
    ayxn[ib] += sign * ayxs[ib]
    ayxne[ib] += sign * ayxse[ib]
    ayxnw[ib] += sign * ayxsw[ib]
    ayyn[ib] -= sign * ayys[ib]
    ayyne[ib] -= sign * ayyse[ib]
    ayynw[ib] -= sign * ayysw[ib]
    axyn[ib] -= sign * axys[ib]
    axyne[ib] -= sign * axyse[ib]
    axynw[ib] -= sign * axysw[ib]

    # EAST boundary
    ib = ii[-1, :]
    if boundary[2] == 'S':
        sign = 1
    elif boundary[2] == 'A':
        sign = -1
    elif boundary[2] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxw[ib] += sign * axxe[ib]
    axxnw[ib] += sign * axxne[ib]
    axxsw[ib] += sign * axxse[ib]
    ayxw[ib] += sign * ayxe[ib]
    ayxnw[ib] += sign * ayxne[ib]
    ayxsw[ib] += sign * ayxse[ib]
    ayyw[ib] -= sign * ayye[ib]
    ayynw[ib] -= sign * ayyne[ib]
    ayysw[ib] -= sign * ayyse[ib]
    axyw[ib] -= sign * axye[ib]
    axynw[ib] -= sign * axyne[ib]
    axysw[ib] -= sign * axyse[ib]

    # WEST boundary
    ib = ii[0, :]
    if boundary[3] == 'S':
        sign = 1
    elif boundary[3] == 'A':
        sign = -1
    elif boundary[3] == '0':
        sign = 0
    else:
        raise ValueError('unknown boundary conditions')
    axxe[ib] += sign * axxw[ib]
    axxne[ib] += sign * axxnw[ib]
    axxse[ib] += sign * axxsw[ib]
    ayxe[ib] += sign * ayxw[ib]
    ayxne[ib] += sign * ayxnw[ib]
    ayxse[ib] += sign * ayxsw[ib]
    ayye[ib] -= sign * ayyw[ib]
    ayyne[ib] -= sign * ayynw[ib]
    ayyse[ib] -= sign * ayysw[ib]
    axye[ib] -= sign * axyw[ib]
    axyne[ib] -= sign * axynw[ib]
    axyse[ib] -= sign * axysw[ib]

    # Assemble sparse matrix
    iall = ii.flatten()
    i_s = ii[:, :-1].flatten()
    i_n = ii[:, 1:].flatten()
    i_e = ii[1:, :].flatten()
    i_w = ii[:-1, :].flatten()
    i_ne = ii[1:, 1:].flatten()
    i_se = ii[1:, :-1].flatten()
    i_sw = ii[:-1, :-1].flatten()
    i_nw = ii[:-1, 1:].flatten()

    Ixx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
    Jxx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
    Vxx = numpy.r_[axxp[iall], axxe[i_w], axxw[i_e], axxn[i_s], axxs[
        i_n], axxsw[i_ne], axxnw[i_se], axxne[i_sw], axxse[i_nw]]

    Ixy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
    Jxy = numpy.r_[
        iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
    Vxy = numpy.r_[axyp[iall], axye[i_w], axyw[i_e], axyn[i_s], axys[
        i_n], axysw[i_ne], axynw[i_se], axyne[i_sw], axyse[i_nw]]

    Iyx = numpy.r_[
        iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny
    Jyx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
    Vyx = numpy.r_[ayxp[iall], ayxe[i_w], ayxw[i_e], ayxn[i_s], ayxs[
        i_n], ayxsw[i_ne], ayxnw[i_se], ayxne[i_sw], ayxse[i_nw]]

    Iyy = numpy.r_[
        iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw] + nx * ny
    Jyy = numpy.r_[
        iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
    Vyy = numpy.r_[ayyp[iall], ayye[i_w], ayyw[i_e], ayyn[i_s], ayys[
        i_n], ayysw[i_ne], ayynw[i_se], ayyne[i_sw], ayyse[i_nw]]

    I = numpy.r_[Ixx, Ixy, Iyx, Iyy]
    J = numpy.r_[Jxx, Jxy, Jyx, Jyy]
    V = numpy.r_[Vxx, Vxy, Vyx, Vyy]

    A = coo_matrix((V, (I, J))).tocsr()

    return A
def compute_other_fields(self, neffs, Hxs, Hys):
from scipy.sparse import coo_matrix
wl = self.wl
x = self.x
y = self.y
epsfunc = self.epsfunc
boundary = self.boundary
Hzs = []
Exs = []
Eys = []
Ezs = []
for neff, Hx, Hy in zip(neffs, Hxs, Hys):
dx = numpy.diff(x)
dy = numpy.diff(y)
dx = numpy.r_[dx[0], dx, dx[-1]].reshape(-1, 1)
dy = numpy.r_[dy[0], dy, dy[-1]].reshape(1, -1)
xc = (x[:-1] + x[1:]) / 2
yc = (y[:-1] + y[1:]) / 2
tmp = epsfunc(yc, xc)
if isinstance(tmp, tuple):
tmp = [numpy.c_[t[:, 0:1], t, t[:, -1:]] for t in tmp]
tmp = [numpy.r_[t[0:1, :], t, t[-1:, :]] for t in tmp]
epsxx, epsxy, epsyx, epsyy, epszz = tmp
else:
tmp = numpy.c_[tmp[:, 0:1], tmp, tmp[:, -1:]]
tmp = numpy.r_[tmp[0:1, :], tmp, tmp[-1:, :]]
epsxx = epsyy = epszz = tmp
epsxy = epsyx = numpy.zeros_like(epsxx)
nx = len(x)
ny = len(y)
k = 2 * numpy.pi / wl
ones_nx = numpy.ones((nx, 1))
ones_ny = numpy.ones((1, ny))
n = numpy.dot(ones_nx, dy[:, 1:]).flatten()
s = numpy.dot(ones_nx, dy[:, :-1]).flatten()
e = numpy.dot(dx[1:, :], ones_ny).flatten()
w = numpy.dot(dx[:-1, :], ones_ny).flatten()
exx1 = epsxx[:-1, 1:].flatten()
exx2 = epsxx[:-1, :-1].flatten()
exx3 = epsxx[1:, :-1].flatten()
exx4 = epsxx[1:, 1:].flatten()
eyy1 = epsyy[:-1, 1:].flatten()
eyy2 = epsyy[:-1, :-1].flatten()
eyy3 = epsyy[1:, :-1].flatten()
eyy4 = epsyy[1:, 1:].flatten()
exy1 = epsxy[:-1, 1:].flatten()
exy2 = epsxy[:-1, :-1].flatten()
exy3 = epsxy[1:, :-1].flatten()
exy4 = epsxy[1:, 1:].flatten()
eyx1 = epsyx[:-1, 1:].flatten()
eyx2 = epsyx[:-1, :-1].flatten()
eyx3 = epsyx[1:, :-1].flatten()
eyx4 = epsyx[1:, 1:].flatten()
ezz1 = epszz[:-1, 1:].flatten()
ezz2 = epszz[:-1, :-1].flatten()
ezz3 = epszz[1:, :-1].flatten()
ezz4 = epszz[1:, 1:].flatten()
b = neff * k
bzxne = (0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * eyx4 / ezz4 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy3 * eyy1 * w * eyy2 +
0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (1 - exx4 / ezz4) / ezz3 / ezz2 / (w * exx3 + e * exx2) / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx1 * s) / b
bzxse = (-0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * eyx3 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy1 * w * eyy2 +
0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (1 - exx3 / ezz3) / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * n * exx1 * exx4) / b
bzxnw = (-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * eyx1 / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy2 * e -
0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (1 - exx1 / ezz1) / ezz3 / ezz2 / (w * exx3 + e * exx2) / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx4 * s) / b
bzxsw = (0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * eyx2 / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * e -
0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (1 - exx2 / ezz2) / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx3 * n * exx1 * exx4) / b
bzxn = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * n * ezz1 * ezz2 / eyy1 * (2 * eyy1 / ezz1 / n ** 2 + eyx1 / ezz1 / n / w) + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * n * ezz4 * ezz3 / eyy4 * (2 * eyy4 / ezz4 / n ** 2 - eyx4 / ezz4 / n / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * ((1 - exx1 / ezz1) / n / w - exy1 / ezz1 *
(2. / n ** 2 - 2 / n ** 2 * s / (n + s))) / exx1 * ezz1 * w + (ezz4 - ezz1) * s / n / (n + s) + 0.5 * ezz1 * (-(1 - exx4 / ezz4) / n / e - exy4 / ezz4 * (2. / n ** 2 - 2 / n ** 2 * s / (n + s))) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (-ezz3 * exy2 / n / (n + s) / exx2 * w + (ezz3 - ezz2) * s / n / (n + s) - ezz2 * exy3 / n / (n + s) / exx3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzxs = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * s * ezz2 * ezz1 / eyy2 * (2 * eyy2 / ezz2 / s ** 2 - eyx2 / ezz2 / s / w) + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * s * ezz3 * ezz4 / eyy3 * (2 * eyy3 / ezz3 / s ** 2 + eyx3 / ezz3 / s / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (-ezz4 * exy1 / s / (n + s) / exx1 * w - (ezz4 - ezz1)
* n / s / (n + s) - ezz1 * exy4 / s / (n + s) / exx4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-(1 - exx2 / ezz2) / s / w - exy2 / ezz2 * (2. / s ** 2 - 2 / s ** 2 * n / (n + s))) / exx2 * ezz2 * w - (ezz3 - ezz2) * n / s / (n + s) + 0.5 * ezz2 * ((1 - exx3 / ezz3) / s / e - exy3 / ezz3 * (2. / s ** 2 - 2 / s ** 2 * n / (n + s))) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzxe = ((n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (2. / e ** 2 - eyx4 / ezz4 / n / e) + 0.5 * s * ezz3 * ezz4 / eyy3 * (2. / e ** 2 + eyx3 / ezz3 / s / e)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e +
(-0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz1 * (1 - exx4 / ezz4) / n / exx4 * ezz4 - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz2 * (1 - exx3 / ezz3) / s / exx3 * ezz3) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzxw = ((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (2. / w ** 2 + eyx1 / ezz1 / n / w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (2. / w ** 2 - eyx2 / ezz2 / s / w)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e +
(0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz4 * (1 - exx1 / ezz1) / n / exx1 * ezz1 + 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz3 * (1 - exx2 / ezz2) / s / exx2 * ezz2) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzxp = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (-2. / w ** 2 - 2 * eyy1 / ezz1 / n ** 2 + k ** 2 * eyy1 - eyx1 / ezz1 / n / w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (-2. / w ** 2 - 2 * eyy2 / ezz2 / s ** 2 + k ** 2 * eyy2 + eyx2 / ezz2 / s / w)) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-2. / e ** 2 - 2 * eyy4 / ezz4 / n ** 2 + k ** 2 * eyy4 + eyx4 / ezz4 / n / e) + 0.5 * s * ezz3 * ezz4 / eyy3 * (-2. / e ** 2 - 2 * eyy3 / ezz3 / s ** 2 + k ** 2 * eyy3 - eyx3 / ezz3 / s / e))) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (-k **
2 * exy1 - (1 - exx1 / ezz1) / n / w - exy1 / ezz1 * (-2. / n ** 2 - 2 / n ** 2 * (n - s) / s)) / exx1 * ezz1 * w + (ezz4 - ezz1) * (n - s) / n / s + 0.5 * ezz1 * (-k ** 2 * exy4 + (1 - exx4 / ezz4) / n / e - exy4 / ezz4 * (-2. / n ** 2 - 2 / n ** 2 * (n - s) / s)) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-k ** 2 * exy2 + (1 - exx2 / ezz2) / s / w - exy2 / ezz2 * (-2. / s ** 2 + 2 / s ** 2 * (n - s) / n)) / exx2 * ezz2 * w + (ezz3 - ezz2) * (n - s) / n / s + 0.5 * ezz2 * (-k ** 2 * exy3 - (1 - exx3 / ezz3) / s / e - exy3 / ezz3 * (-2. / s ** 2 + 2 / s ** 2 * (n - s) / n)) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzyne = (0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (1 - eyy4 / ezz4) / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy3 * eyy1 * w *
eyy2 + 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * exy4 / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx1 * s) / b
bzyse = (-0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (1 - eyy3 / ezz3) / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy1 * w *
eyy2 + 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * exy3 / ezz3 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * n * exx1 * exx4) / b
bzynw = (-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (1 - eyy1 / ezz1) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 *
eyy2 * e - 0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * exy1 / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * exx4 * s) / b
bzysw = (0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (1 - eyy2 / ezz2) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 *
e - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * exy2 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx3 * n * exx1 * exx4) / b
bzyn = ((0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * ezz1 * ezz2 / eyy1 * (1 - eyy1 / ezz1) / w - 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * ezz4 * ezz3 / eyy4 * (1 - eyy4 / ezz4) / e) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w *
eyy2 * e + (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (2. / n ** 2 + exy1 / ezz1 / n / w) / exx1 * ezz1 * w + 0.5 * ezz1 * (2. / n ** 2 - exy4 / ezz4 / n / e) / exx4 * ezz4 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzys = ((-0.5 * (-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * ezz2 * ezz1 / eyy2 * (1 - eyy2 / ezz2) / w + 0.5 * (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * ezz3 * ezz4 / eyy3 * (1 - eyy3 / ezz3) / e) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w *
eyy2 * e - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (2. / s ** 2 - exy2 / ezz2 / s / w) / exx2 * ezz2 * w + 0.5 * ezz2 * (2. / s ** 2 + exy3 / ezz3 / s / e) / exx3 * ezz3 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzye = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (-n * ezz2 / eyy1 * eyx1 / e / (e + w) + (ezz1 - ezz2) * w / e / (e + w) - s * ezz1 / eyy2 * eyx2 / e / (e + w)) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-(1 - eyy4 / ezz4) / n / e - eyx4 / ezz4 * (2. / e ** 2 - 2 / e ** 2 * w / (e + w))) + 0.5 * s * ezz3 * ezz4 / eyy3 * ((1 - eyy3 / ezz3) / s / e - eyx3 / ezz3 * (2. / e ** 2 - 2 / e ** 2 * w / (e + w))) + (ezz4 - ezz3) * w / e / (e + w))) / ezz4 /
ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz1 * (2 * exx4 / ezz4 / e ** 2 - exy4 / ezz4 / n / e) / exx4 * ezz4 * e - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz2 * (2 * exx3 / ezz3 / e ** 2 + exy3 / ezz3 / s / e) / exx3 * ezz3 * e) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzyw = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * ((1 - eyy1 / ezz1) / n / w - eyx1 / ezz1 * (2. / w ** 2 - 2 / w ** 2 * e / (e + w))) - (ezz1 - ezz2) * e / w / (e + w) + 0.5 * s * ezz2 * ezz1 / eyy2 * (-(1 - eyy2 / ezz2) / s / w - eyx2 / ezz2 * (2. / w ** 2 - 2 / w ** 2 * e / (e + w)))) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (-n * ezz3 / eyy4 * eyx4 / w / (e + w) - s * ezz4 / eyy3 * eyx3 / w / (e + w) - (ezz4 - ezz3) * e / w / (e + w))) / ezz4 /
ezz3 / (n * eyy3 + s * eyy4) / ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + (0.5 * (ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * ezz4 * (2 * exx1 / ezz1 / w ** 2 + exy1 / ezz1 / n / w) / exx1 * ezz1 * w - 0.5 * (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * ezz3 * (2 * exx2 / ezz2 / w ** 2 - exy2 / ezz2 / s / w) / exx2 * ezz2 * w) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
bzyp = (((-n * ezz4 * ezz3 / eyy4 - s * ezz3 * ezz4 / eyy3) * (0.5 * n * ezz1 * ezz2 / eyy1 * (-k ** 2 * eyx1 - (1 - eyy1 / ezz1) / n / w - eyx1 / ezz1 * (-2. / w ** 2 + 2 / w ** 2 * (e - w) / e)) + (ezz1 - ezz2) * (e - w) / e / w + 0.5 * s * ezz2 * ezz1 / eyy2 * (-k ** 2 * eyx2 + (1 - eyy2 / ezz2) / s / w - eyx2 / ezz2 * (-2. / w ** 2 + 2 / w ** 2 * (e - w) / e))) + (n * ezz1 * ezz2 / eyy1 + s * ezz2 * ezz1 / eyy2) * (0.5 * n * ezz4 * ezz3 / eyy4 * (-k ** 2 * eyx4 + (1 - eyy4 / ezz4) / n / e - eyx4 / ezz4 * (-2. / e ** 2 - 2 / e ** 2 * (e - w) / w)) + 0.5 * s * ezz3 * ezz4 / eyy3 * (-k ** 2 * eyx3 - (1 - eyy3 / ezz3) / s / e - eyx3 / ezz3 * (-2. / e ** 2 - 2 / e ** 2 * (e - w) / w)) + (ezz4 - ezz3) * (e - w) / e / w)) / ezz4 / ezz3 / (n * eyy3 + s * eyy4) /
ezz2 / ezz1 / (n * eyy2 + s * eyy1) / (e + w) * eyy4 * eyy3 * eyy1 * w * eyy2 * e + ((ezz3 / exx2 * ezz2 * w + ezz2 / exx3 * ezz3 * e) * (0.5 * ezz4 * (-2. / n ** 2 - 2 * exx1 / ezz1 / w ** 2 + k ** 2 * exx1 - exy1 / ezz1 / n / w) / exx1 * ezz1 * w + 0.5 * ezz1 * (-2. / n ** 2 - 2 * exx4 / ezz4 / e ** 2 + k ** 2 * exx4 + exy4 / ezz4 / n / e) / exx4 * ezz4 * e) - (ezz4 / exx1 * ezz1 * w + ezz1 / exx4 * ezz4 * e) * (0.5 * ezz3 * (-2. / s ** 2 - 2 * exx2 / ezz2 / w ** 2 + k ** 2 * exx2 + exy2 / ezz2 / s / w) / exx2 * ezz2 * w + 0.5 * ezz2 * (-2. / s ** 2 - 2 * exx3 / ezz3 / e ** 2 + k ** 2 * exx3 - exy3 / ezz3 / s / e) / exx3 * ezz3 * e)) / ezz3 / ezz2 / (w * exx3 + e * exx2) / ezz4 / ezz1 / (w * exx4 + e * exx1) / (n + s) * exx2 * exx3 * n * exx1 * exx4 * s) / b
ii = numpy.arange(nx * ny).reshape(nx, ny)
# NORTH boundary
ib = ii[:, -1]
if boundary[0] == 'S':
sign = 1
elif boundary[0] == 'A':
sign = -1
elif boundary[0] == '0':
sign = 0
else:
raise ValueError('unknown boundary conditions')
bzxs[ib] += sign * bzxn[ib]
bzxse[ib] += sign * bzxne[ib]
bzxsw[ib] += sign * bzxnw[ib]
bzys[ib] -= sign * bzyn[ib]
bzyse[ib] -= sign * bzyne[ib]
bzysw[ib] -= sign * bzynw[ib]
# SOUTH boundary
ib = ii[:, 0]
if boundary[1] == 'S':
sign = 1
elif boundary[1] == 'A':
sign = -1
elif boundary[1] == '0':
sign = 0
else:
raise ValueError('unknown boundary conditions')
bzxn[ib] += sign * bzxs[ib]
bzxne[ib] += sign * bzxse[ib]
bzxnw[ib] += sign * bzxsw[ib]
bzyn[ib] -= sign * bzys[ib]
bzyne[ib] -= sign * bzyse[ib]
bzynw[ib] -= sign * bzysw[ib]
# EAST boundary
ib = ii[-1, :]
if boundary[2] == 'S':
sign = 1
elif boundary[2] == 'A':
sign = -1
elif boundary[2] == '0':
sign = 0
else:
raise ValueError('unknown boundary conditions')
bzxw[ib] += sign * bzxe[ib]
bzxnw[ib] += sign * bzxne[ib]
bzxsw[ib] += sign * bzxse[ib]
bzyw[ib] -= sign * bzye[ib]
bzynw[ib] -= sign * bzyne[ib]
bzysw[ib] -= sign * bzyse[ib]
# WEST boundary
ib = ii[0, :]
if boundary[3] == 'S':
sign = 1
elif boundary[3] == 'A':
sign = -1
elif boundary[3] == '0':
sign = 0
else:
raise ValueError('unknown boundary conditions')
bzxe[ib] += sign * bzxw[ib]
bzxne[ib] += sign * bzxnw[ib]
bzxse[ib] += sign * bzxsw[ib]
bzye[ib] -= sign * bzyw[ib]
bzyne[ib] -= sign * bzynw[ib]
bzyse[ib] -= sign * bzysw[ib]
# Assemble sparse matrix
iall = ii.flatten()
i_s = ii[:, :-1].flatten()
i_n = ii[:, 1:].flatten()
i_e = ii[1:, :].flatten()
i_w = ii[:-1, :].flatten()
i_ne = ii[1:, 1:].flatten()
i_se = ii[1:, :-1].flatten()
i_sw = ii[:-1, :-1].flatten()
i_nw = ii[:-1, 1:].flatten()
Izx = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
Jzx = numpy.r_[iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se]
Vzx = numpy.r_[bzxp[iall], bzxe[i_w], bzxw[i_e], bzxn[i_s], bzxs[
i_n], bzxsw[i_ne], bzxnw[i_se], bzxne[i_sw], bzxse[i_nw]]
Izy = numpy.r_[iall, i_w, i_e, i_s, i_n, i_ne, i_se, i_sw, i_nw]
Jzy = numpy.r_[
iall, i_e, i_w, i_n, i_s, i_sw, i_nw, i_ne, i_se] + nx * ny
Vzy = numpy.r_[bzyp[iall], bzye[i_w], bzyw[i_e], bzyn[i_s], bzys[
i_n], bzysw[i_ne], bzynw[i_se], bzyne[i_sw], bzyse[i_nw]]
I = numpy.r_[Izx, Izy]
J = numpy.r_[Jzx, Jzy]
V = numpy.r_[Vzx, Vzy]
B = coo_matrix((V, (I, J))).tocsr()
HxHy = numpy.r_[Hx, Hy]
Hz = B * HxHy.ravel() / 1j
Hz = Hz.reshape(Hx.shape)
# in xc e yc
exx = epsxx[1:-1, 1:-1]
exy = epsxy[1:-1, 1:-1]
eyx = epsyx[1:-1, 1:-1]
eyy = epsyy[1:-1, 1:-1]
ezz = epszz[1:-1, 1:-1]
edet = (exx * eyy - exy * eyx)
h = e.reshape(nx, ny)[:-1, :-1]
v = n.reshape(nx, ny)[:-1, :-1]
# in xc e yc
Dx = neff * centered2d(Hy) + (
Hz[:-1, 1:] + Hz[1:, 1:] - Hz[:-1, :-1] - Hz[1:, :-1]) / (2j * k * v)
Dy = -neff * centered2d(Hx) - (
Hz[1:, :-1] + Hz[1:, 1:] - Hz[:-1, 1:] - Hz[:-1, :-1]) / (2j * k * h)
Dz = ((Hy[1:, :-1] + Hy[1:, 1:] - Hy[:-1, 1:] - Hy[:-1, :-1]) / (2 * h) -
(Hx[:-1, 1:] + Hx[1:, 1:] - Hx[:-1, :-1] - Hx[1:, :-1]) / (2 * v)) / (1j * k)
Ex = (eyy * Dx - exy * Dy) / edet
Ey = (exx * Dy - eyx * Dx) / edet
Ez = Dz / ezz
Hzs.append(Hz)
Exs.append(Ex)
Eys.append(Ey)
Ezs.append(Ez)
return (Hzs, Exs, Eys, Ezs)
def __str__(self):
descr = 'Vectorial Finite Difference Modesolver\n'
return descr
|
jtambasco/modesolverpy | modesolverpy/design.py | directional_coupler_lc | python | def directional_coupler_lc(wavelength_nm, n_eff_1, n_eff_2):
'''
Calculates the coherence length (100% power transfer) of a
directional coupler.
Args:
wavelength_nm (float): The wavelength in [nm] the
directional coupler should operate at.
n_eff_1 (float): n_eff of the fundamental (even)
supermode of the directional coupler.
n_eff_2 (float): n_eff of the first-order (odd)
supermode of the directional coupler.
Returns:
float: The length [um] the directional coupler
needs to be to achieve 100% power transfer.
'''
wavelength_m = wavelength_nm * 1.e-9
dn_eff = (n_eff_1 - n_eff_2).real
lc_m = wavelength_m / (2. * dn_eff)
lc_um = lc_m * 1.e6
return lc_um | Calculates the coherence length (100% power transfer) of a
directional coupler.
Args:
wavelength_nm (float): The wavelength in [nm] the
directional coupler should operate at.
n_eff_1 (float): n_eff of the fundamental (even)
supermode of the directional coupler.
n_eff_2 (float): n_eff of the first-order (odd)
supermode of the directional coupler.
Returns:
float: The length [um] the directional coupler
needs to be to achieve 100% power transfer. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/design.py#L4-L26 | null | import numpy as np
def grating_coupler_period(wavelength,
n_eff,
n_clad,
incidence_angle_deg,
diffration_order=1):
'''
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
'''
k0 = 2. * np.pi / wavelength
beta = n_eff.real * k0
n_inc = n_clad
grating_period = (2.*np.pi*diffration_order) \
/ (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))
return grating_period
def loss(n, wavelength):
kappa = n.imag
alpha = 4.34 * 4 * np.pi * np.abs(
kappa) / wavelength # 4.34 = 10*np.log10(np.e) -> [dB/m] = 4.34 [/m]
return alpha # [db/um] if working in [um]
def qpm_wavenumber(pmp_n,
pmp_l,
sig_n,
sig_l,
idl_n,
idl_l,
period_qpm,
type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
k_qpm = pi2 / period_qpm
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
k_mismatch = k_idl * sgn_1 + k_sig * sgn_2 + k_qpm - k_pmp
return k_mismatch
def qpm_period(pmp_n, pmp_l, sig_n, sig_l, idl_n, idl_l, type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
k_qpm = k_pmp - k_idl * sgn_1 - k_sig * sgn_2
l_qpm = pi2 / k_qpm
return l_qpm
|
jtambasco/modesolverpy | modesolverpy/design.py | grating_coupler_period | python | def grating_coupler_period(wavelength,
n_eff,
n_clad,
incidence_angle_deg,
diffration_order=1):
'''
Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at.
'''
k0 = 2. * np.pi / wavelength
beta = n_eff.real * k0
n_inc = n_clad
grating_period = (2.*np.pi*diffration_order) \
/ (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))
return grating_period | Calculate the period needed for a grating coupler.
Args:
wavelength (float): The target wavelength for the
grating coupler.
n_eff (float): The effective index of the mode
of a waveguide with the width of the grating
coupler.
n_clad (float): The refractive index of the cladding.
incidence_angle_deg (float): The incidence angle
the grating coupler should operate at [degrees].
diffration_order (int): The grating order the coupler
should work at. Default is 1st order (1).
Returns:
float: The period needed for the grating coupler
in the same units as the wavelength was given at. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/design.py#L29-L60 | null | import numpy as np
def directional_coupler_lc(wavelength_nm, n_eff_1, n_eff_2):
'''
Calculates the coherence length (100% power transfer) of a
directional coupler.
Args:
wavelength_nm (float): The wavelength in [nm] the
directional coupler should operate at.
n_eff_1 (float): n_eff of the fundamental (even)
supermode of the directional coupler.
n_eff_2 (float): n_eff of the first-order (odd)
supermode of the directional coupler.
Returns:
float: The length [um] the directional coupler
needs to be to achieve 100% power transfer.
'''
wavelength_m = wavelength_nm * 1.e-9
dn_eff = (n_eff_1 - n_eff_2).real
lc_m = wavelength_m / (2. * dn_eff)
lc_um = lc_m * 1.e6
return lc_um
def loss(n, wavelength):
kappa = n.imag
alpha = 4.34 * 4 * np.pi * np.abs(
kappa) / wavelength # 4.34 = 10*np.log10(np.e) -> [dB/m] = 4.34 [/m]
return alpha # [db/um] if working in [um]
def qpm_wavenumber(pmp_n,
pmp_l,
sig_n,
sig_l,
idl_n,
idl_l,
period_qpm,
type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
k_qpm = pi2 / period_qpm
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
k_mismatch = k_idl * sgn_1 + k_sig * sgn_2 + k_qpm - k_pmp
return k_mismatch
def qpm_period(pmp_n, pmp_l, sig_n, sig_l, idl_n, idl_l, type='forward'):
pi2 = np.pi * 2
k_pmp = pmp_n * pi2 / pmp_l
k_sig = sig_n * pi2 / sig_l
k_idl = idl_n * pi2 / idl_l
if type == 'forward':
sgn_1 = 1
sgn_2 = 1
elif type == 'forward_backward':
sgn_1 = 1
sgn_2 = -1
elif type == 'backward':
sgn_1 = -1
sgn_2 = -1
k_qpm = k_pmp - k_idl * sgn_1 - k_sig * sgn_2
l_qpm = pi2 / k_qpm
return l_qpm
|
jtambasco/modesolverpy | modesolverpy/coupling_efficiency.py | reflection | python | def reflection(n1, n2):
'''
Calculate the power reflection at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
float: The percentage of reflected power.
'''
r = abs((n1-n2) / (n1+n2))**2
return r | Calculate the power reflection at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
float: The percentage of reflected power. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/coupling_efficiency.py#L25-L38 | null | import numpy as np
def _make_gaussian(x_pts, y_pts, mfd, x_offset=0, y_offset=0):
x0 = (x_pts[-1]+x_pts[0])/2 + x_offset
y0 = (y_pts[-1]+y_pts[0])/2 + y_offset
xx, yy = np.meshgrid(x_pts, y_pts)
sigma = mfd * 0.707 / 2.355
sigma_x = sigma
sigma_y = sigma
gaus_2d = np.exp(-((xx-x0)**2/(2*sigma_x**2)+
(yy-y0)**2/(2*sigma_y**2)))
gaus_2d /= np.sum(gaus_2d)
return gaus_2d
def _overlap(mode, gaussian):
mode_1 = mode
mode_2 = np.sqrt(gaussian) # square-root for E-field (not power)
eta = np.abs(np.sum(np.conj(mode_1)*mode_2))**2 / \
(np.sum(np.abs(mode_1)**2) * np.sum(np.abs(mode_2)**2))
return eta
def transmission(n1, n2):
'''
Calculate the power transmission at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
float: The percentage of transmitted power.
'''
return 1-reflection(n1, n2)
def coupling_efficiency(mode_solver, fibre_mfd,
fibre_offset_x=0, fibre_offset_y=0,
n_eff_fibre=1.441):
'''
Finds the coupling efficiency between a solved
fundamental mode and a fibre of given MFD.
Args:
mode_solver (_ModeSolver): Mode solver that
has found a fundamental mode.
fibre_mfd (float): The mode-field diameter
(MFD) of the fibre.
fibre_offset_x (float): Offset the fibre
from the centre position of the window
in x. Default is 0 (no offset).
fibre_offset_y (float): Offset the fibre
from the centre position of the window
in y. Default is 0 (no offset).
n_eff_fibre (float): The effective index
of the fibre mode. Default is 1.441.
Returns:
float: The power coupling efficiency.
'''
etas = []
gaus = _make_gaussian(mode_solver._structure.xc, mode_solver._structure.yc,
fibre_mfd, fibre_offset_x, fibre_offset_y)
for mode, n_eff in zip(mode_solver.modes, mode_solver.n_effs):
o = abs(_overlap(mode, gaus))
t = abs(transmission(n_eff, n_eff_fibre))
eta = o * t
etas.append(eta)
return etas
|
jtambasco/modesolverpy | modesolverpy/coupling_efficiency.py | coupling_efficiency | python | def coupling_efficiency(mode_solver, fibre_mfd,
fibre_offset_x=0, fibre_offset_y=0,
n_eff_fibre=1.441):
'''
Finds the coupling efficiency between a solved
fundamental mode and a fibre of given MFD.
Args:
mode_solver (_ModeSolver): Mode solver that
has found a fundamental mode.
fibre_mfd (float): The mode-field diameter
(MFD) of the fibre.
fibre_offset_x (float): Offset the fibre
from the centre position of the window
in x. Default is 0 (no offset).
fibre_offset_y (float): Offset the fibre
from the centre position of the window
in y. Default is 0 (no offset).
n_eff_fibre (float): The effective index
of the fibre mode. Default is 1.441.
Returns:
float: The power coupling efficiency.
'''
etas = []
gaus = _make_gaussian(mode_solver._structure.xc, mode_solver._structure.yc,
fibre_mfd, fibre_offset_x, fibre_offset_y)
for mode, n_eff in zip(mode_solver.modes, mode_solver.n_effs):
o = abs(_overlap(mode, gaus))
t = abs(transmission(n_eff, n_eff_fibre))
eta = o * t
etas.append(eta)
return etas | Finds the coupling efficiency between a solved
fundamental mode and a fibre of given MFD.
Args:
mode_solver (_ModeSolver): Mode solver that
has found a fundamental mode.
fibre_mfd (float): The mode-field diameter
(MFD) of the fibre.
fibre_offset_x (float): Offset the fibre
from the centre position of the window
in x. Default is 0 (no offset).
fibre_offset_y (float): Offset the fibre
from the centre position of the window
in y. Default is 0 (no offset).
n_eff_fibre (float): The effective index
of the fibre mode. Default is 1.441.
Returns:
float: The power coupling efficiency. | train | https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/coupling_efficiency.py#L54-L89 | [
"def _make_gaussian(x_pts, y_pts, mfd, x_offset=0, y_offset=0):\n x0 = (x_pts[-1]+x_pts[0])/2 + x_offset\n y0 = (y_pts[-1]+y_pts[0])/2 + y_offset\n xx, yy = np.meshgrid(x_pts, y_pts)\n\n sigma = mfd * 0.707 / 2.355\n sigma_x = sigma\n sigma_y = sigma\n\n gaus_2d = np.exp(-((xx-x0)**2/(2*sigma_x... | import numpy as np
def _make_gaussian(x_pts, y_pts, mfd, x_offset=0, y_offset=0):
x0 = (x_pts[-1]+x_pts[0])/2 + x_offset
y0 = (y_pts[-1]+y_pts[0])/2 + y_offset
xx, yy = np.meshgrid(x_pts, y_pts)
sigma = mfd * 0.707 / 2.355
sigma_x = sigma
sigma_y = sigma
gaus_2d = np.exp(-((xx-x0)**2/(2*sigma_x**2)+
(yy-y0)**2/(2*sigma_y**2)))
gaus_2d /= np.sum(gaus_2d)
return gaus_2d
def _overlap(mode, gaussian):
mode_1 = mode
mode_2 = np.sqrt(gaussian) # square-root for E-field (not power)
eta = np.abs(np.sum(np.conj(mode_1)*mode_2))**2 / \
(np.sum(np.abs(mode_1)**2) * np.sum(np.abs(mode_2)**2))
return eta
def reflection(n1, n2):
'''
Calculate the power reflection at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
float: The percentage of reflected power.
'''
r = abs((n1-n2) / (n1+n2))**2
return r
def transmission(n1, n2):
'''
Calculate the power transmission at the interface
of two refractive index materials.
Args:
n1 (float): Refractive index of material 1.
n2 (float): Refractive index of material 2.
Returns:
float: The percentage of transmitted power.
'''
return 1-reflection(n1, n2)
|
openvax/topiary | topiary/cli/protein_changes.py | transcript_sort_key | python | def transcript_sort_key(transcript):
return (
-len(transcript.protein_sequence),
-len(transcript.sequence),
transcript.name
) | Key function used to sort transcripts. Taking the negative of
protein sequence length and nucleotide sequence length so that
the transcripts with longest sequences come first in the list. This couldn't
be accomplished with `reverse=True` since we're also sorting by
transcript name (which places TP53-001 before TP53-002). | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/cli/protein_changes.py#L44-L56 | null | # Copyright (c) 2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pyensembl import ensembl_grch38
from varcode import EffectCollection
from varcode.effects import Substitution
from varcode.reference import infer_genome
import re
def add_protein_change_args(arg_parser):
protein_change_group = arg_parser.add_argument_group(
title="Protein Changes",
description="Input protein changes without associated genomic variants")
protein_change_group.add_argument(
"--protein-change",
default=[],
nargs=2,
action="append",
help="Protein modification without genomic variant (e.g. EGFR T790M)")
return arg_parser
def genome_from_args(args):
    """Resolve the reference genome requested on the commandline.

    Falls back to the GRCh38 Ensembl release when no ``--genome`` argument
    was supplied, on the assumption that the genome can be inferred from
    the file(s) being loaded.
    """
    requested = args.genome
    if not requested:
        # Nothing specified: default to GRCh38.
        return ensembl_grch38
    return infer_genome(requested)
def best_transcript(transcripts):
    """
    Given a set of coding transcripts, choose the one with the longest
    protein sequence and in cases of ties use the following tie-breaking
    criteria:
        - transcript sequence (including UTRs)
        - transcript name (so TP53-001 should come before TP53-202)

    Parameters
    ----------
    transcripts : list of pyensembl.Transcript
        Must be non-empty; elements are expected to have a protein sequence.

    Returns
    -------
    pyensembl.Transcript

    Raises
    ------
    ValueError
        If no transcripts are given.
    """
    # Raise instead of `assert` so the check survives `python -O`,
    # which strips assert statements.
    if len(transcripts) == 0:
        raise ValueError("Expected at least one transcript")
    sorted_list = sorted(transcripts, key=transcript_sort_key)
    return sorted_list[0]
def protein_change_effects_from_args(args):
    """Build varcode effects for protein changes given without genomic variants.

    Each ``--protein-change`` argument is a (gene name, substitution) pair
    such as ``EGFR T790M``. For every parseable pair, look up all coding
    transcripts of the gene whose protein sequence actually carries the
    stated reference residues at the stated (1-based) position, pick the
    best one and construct a Substitution effect with no attached variant.

    Unparseable changes, unknown gene names, and genes with no compatible
    transcript are skipped with a warning.

    Parameters
    ----------
    args : argparse.Namespace
        Uses ``args.protein_change`` plus the genome-selection arguments
        consumed by ``genome_from_args``.

    Returns
    -------
    varcode.EffectCollection
    """
    genome = genome_from_args(args)
    valid_gene_names = set(genome.gene_names())
    # e.g. "T790M" -> ref="T", base1_pos="790", alt="M"
    substitution_regex = re.compile("([A-Z]+)([0-9]+)([A-Z]+)")
    effects = []
    for gene_name, protein_change_string in args.protein_change:
        match_obj = substitution_regex.match(protein_change_string)
        if match_obj is None:
            # logging.warn is deprecated; logging.warning is the supported API
            logging.warning(
                "Unable to parse protein modification: '%s'" % protein_change_string)
            continue
        ref, base1_pos, alt = match_obj.groups()
        base1_pos = int(base1_pos)
        if gene_name not in valid_gene_names:
            logging.warning("Invalid gene name '%s' in protein modification: '%s'" % (
                gene_name, protein_change_string))
            continue
        candidate_transcripts = []
        for candidate_gene in genome.genes_by_name(gene_name):
            for candidate_transcript in candidate_gene.transcripts:
                if not candidate_transcript.is_protein_coding:
                    continue
                protein_sequence = candidate_transcript.protein_sequence
                if protein_sequence is None:
                    continue
                if len(protein_sequence) < (base1_pos + len(ref) - 1):
                    # protein sequence too short for this modification
                    # e.g. EGFR T790M can't happen in an EGFR transcript
                    # with only 789 amino acids
                    continue
                seq_at_pos = protein_sequence[base1_pos - 1: base1_pos + len(ref) - 1]
                if seq_at_pos != ref:
                    # if this transcript doesn't have the same reference amino
                    # acids as the change then skip it and use a different
                    # transcript
                    continue
                candidate_transcripts.append(candidate_transcript)
        if len(candidate_transcripts) > 0:
            transcript = best_transcript(candidate_transcripts)
            effects.append(Substitution(
                variant=None,
                transcript=transcript,
                aa_ref=ref,
                aa_alt=alt,
                aa_mutation_start_offset=base1_pos - 1))
    return EffectCollection(effects)
|
openvax/topiary | topiary/cli/protein_changes.py | best_transcript | python | def best_transcript(transcripts):
assert len(transcripts) > 0
sorted_list = sorted(transcripts, key=transcript_sort_key)
return sorted_list[0] | Given a set of coding transcripts, choose the one with the longest
protein sequence and in cases of ties use the following tie-breaking
criteria:
- transcript sequence (including UTRs)
- transcript name (so TP53-001 should come before TP53-202) | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/cli/protein_changes.py#L58-L68 | null | # Copyright (c) 2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pyensembl import ensembl_grch38
from varcode import EffectCollection
from varcode.effects import Substitution
from varcode.reference import infer_genome
import re
def add_protein_change_args(arg_parser):
    """Register the ``--protein-change`` commandline option on *arg_parser*.

    The option takes a gene name and a protein change (e.g. ``EGFR T790M``)
    and may be repeated; parsed values accumulate into a list of pairs.
    Returns the same parser so calls can be chained.
    """
    group = arg_parser.add_argument_group(
        title="Protein Changes",
        description="Input protein changes without associated genomic variants")
    group.add_argument(
        "--protein-change",
        nargs=2,
        action="append",
        default=[],
        help="Protein modification without genomic variant (e.g. EGFR T790M)")
    return arg_parser
def genome_from_args(args):
    """Resolve the reference genome requested on the commandline.

    Falls back to the GRCh38 Ensembl release when no ``--genome`` argument
    was supplied, on the assumption that the genome can be inferred from
    the file(s) being loaded.
    """
    requested = args.genome
    if not requested:
        # Nothing specified: default to GRCh38.
        return ensembl_grch38
    return infer_genome(requested)
def transcript_sort_key(transcript):
    """Sort key placing longer transcripts first, then ordering by name.

    Protein-sequence length and nucleotide-sequence length are negated so
    the longest transcripts sort to the front without ``reverse=True``,
    which would also have reversed the alphabetical tie-break on the
    transcript name (TP53-001 should still precede TP53-002).
    """
    protein_len = len(transcript.protein_sequence)
    cdna_len = len(transcript.sequence)
    return (-protein_len, -cdna_len, transcript.name)
def protein_change_effects_from_args(args):
    """Build varcode effects for protein changes given without genomic variants.

    Each ``--protein-change`` argument is a (gene name, substitution) pair
    such as ``EGFR T790M``. For every parseable pair, look up all coding
    transcripts of the gene whose protein sequence actually carries the
    stated reference residues at the stated (1-based) position, pick the
    best one and construct a Substitution effect with no attached variant.

    Unparseable changes, unknown gene names, and genes with no compatible
    transcript are skipped with a warning.

    Parameters
    ----------
    args : argparse.Namespace
        Uses ``args.protein_change`` plus the genome-selection arguments
        consumed by ``genome_from_args``.

    Returns
    -------
    varcode.EffectCollection
    """
    genome = genome_from_args(args)
    valid_gene_names = set(genome.gene_names())
    # e.g. "T790M" -> ref="T", base1_pos="790", alt="M"
    substitution_regex = re.compile("([A-Z]+)([0-9]+)([A-Z]+)")
    effects = []
    for gene_name, protein_change_string in args.protein_change:
        match_obj = substitution_regex.match(protein_change_string)
        if match_obj is None:
            # logging.warn is deprecated; logging.warning is the supported API
            logging.warning(
                "Unable to parse protein modification: '%s'" % protein_change_string)
            continue
        ref, base1_pos, alt = match_obj.groups()
        base1_pos = int(base1_pos)
        if gene_name not in valid_gene_names:
            logging.warning("Invalid gene name '%s' in protein modification: '%s'" % (
                gene_name, protein_change_string))
            continue
        candidate_transcripts = []
        for candidate_gene in genome.genes_by_name(gene_name):
            for candidate_transcript in candidate_gene.transcripts:
                if not candidate_transcript.is_protein_coding:
                    continue
                protein_sequence = candidate_transcript.protein_sequence
                if protein_sequence is None:
                    continue
                if len(protein_sequence) < (base1_pos + len(ref) - 1):
                    # protein sequence too short for this modification
                    # e.g. EGFR T790M can't happen in an EGFR transcript
                    # with only 789 amino acids
                    continue
                seq_at_pos = protein_sequence[base1_pos - 1: base1_pos + len(ref) - 1]
                if seq_at_pos != ref:
                    # if this transcript doesn't have the same reference amino
                    # acids as the change then skip it and use a different
                    # transcript
                    continue
                candidate_transcripts.append(candidate_transcript)
        if len(candidate_transcripts) > 0:
            transcript = best_transcript(candidate_transcripts)
            effects.append(Substitution(
                variant=None,
                transcript=transcript,
                aa_ref=ref,
                aa_alt=alt,
                aa_mutation_start_offset=base1_pos - 1))
    return EffectCollection(effects)
|
openvax/topiary | topiary/cli/args.py | predict_epitopes_from_args | python | def predict_epitopes_from_args(args):
mhc_model = mhc_binding_predictor_from_args(args)
variants = variant_collection_from_args(args)
gene_expression_dict = rna_gene_expression_dict_from_args(args)
transcript_expression_dict = rna_transcript_expression_dict_from_args(args)
predictor = TopiaryPredictor(
mhc_model=mhc_model,
padding_around_mutation=args.padding_around_mutation,
ic50_cutoff=args.ic50_cutoff,
percentile_cutoff=args.percentile_cutoff,
min_transcript_expression=args.rna_min_transcript_expression,
min_gene_expression=args.rna_min_gene_expression,
only_novel_epitopes=args.only_novel_epitopes,
raise_on_error=not args.skip_variant_errors)
return predictor.predict_from_variants(
variants=variants,
transcript_expression_dict=transcript_expression_dict,
gene_expression_dict=gene_expression_dict) | Returns an epitope collection from the given commandline arguments.
Parameters
----------
args : argparse.Namespace
Parsed commandline arguments for Topiary | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/cli/args.py#L68-L94 | [
"def rna_gene_expression_dict_from_args(args):\n \"\"\"\n Returns a dictionary mapping Ensembl gene IDs to FPKM expression values\n or None if neither Cufflinks tracking file nor StringTie GTF file specified\n in the commandline arguments.\n \"\"\"\n if args.rna_gene_fpkm_tracking_file:\n r... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common commandline arguments used by scripts
"""
from __future__ import print_function, division, absolute_import
from argparse import ArgumentParser
from mhctools.cli import add_mhc_args, mhc_binding_predictor_from_args
from varcode.cli import add_variant_args, variant_collection_from_args
from .filtering import add_filter_args
from .rna import (
add_rna_args,
rna_gene_expression_dict_from_args,
rna_transcript_expression_dict_from_args,
)
from .sequence import add_sequence_args
from .errors import add_error_args
from .outputs import add_output_args
from .protein_changes import add_protein_change_args
from ..predictor import TopiaryPredictor
def create_arg_parser(
rna=True,
mhc=True,
variants=True,
protein_changes=True,
filters=True,
sequence_options=True,
error_options=True,
output=True):
arg_parser = ArgumentParser()
if rna:
add_rna_args(arg_parser)
if mhc:
add_mhc_args(arg_parser)
if variants:
add_variant_args(arg_parser)
if protein_changes:
add_protein_change_args(arg_parser)
if filters:
add_filter_args(arg_parser)
if sequence_options:
add_sequence_args(arg_parser)
if error_options:
add_error_args(arg_parser)
if output:
add_output_args(arg_parser)
return arg_parser
# keeping global instance for backwards compatibility with existing code
arg_parser = create_arg_parser()
|
openvax/topiary | topiary/sequence_helpers.py | protein_subsequences_around_mutations | python | def protein_subsequences_around_mutations(effects, padding_around_mutation):
protein_subsequences = {}
protein_subsequence_start_offsets = {}
for effect in effects:
protein_sequence = effect.mutant_protein_sequence
# some effects will lack a mutant protein sequence since
# they are either silent or unpredictable
if protein_sequence:
mutation_start = effect.aa_mutation_start_offset
mutation_end = effect.aa_mutation_end_offset
seq_start_offset = max(
0,
mutation_start - padding_around_mutation)
# some pseudogenes have stop codons in the reference sequence,
# if we try to use them for epitope prediction we should trim
# the sequence to not include the stop character '*'
first_stop_codon_index = protein_sequence.find("*")
if first_stop_codon_index < 0:
first_stop_codon_index = len(protein_sequence)
seq_end_offset = min(
first_stop_codon_index,
mutation_end + padding_around_mutation)
subsequence = protein_sequence[seq_start_offset:seq_end_offset]
protein_subsequences[effect] = subsequence
protein_subsequence_start_offsets[effect] = seq_start_offset
return protein_subsequences, protein_subsequence_start_offsets | From each effect get a mutant protein sequence and pull out a subsequence
around the mutation (based on the given padding). Returns a dictionary
of subsequences and a dictionary of subsequence start offsets. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/sequence_helpers.py#L19-L50 | null | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from typechecks import require_integer
def check_padding_around_mutation(given_padding, epitope_lengths):
    """Validate (or derive) the amount of sequence kept around a mutation.

    When the user supplies no padding, return the minimum needed to build
    candidate epitopes of every requested length; otherwise check that the
    supplied value is an integer at least that large and return it.
    """
    minimum = max(epitope_lengths) - 1
    if not given_padding:
        # No user-specified padding: fall back to the minimum required.
        return minimum
    require_integer(given_padding, "Padding around mutation")
    if given_padding < minimum:
        raise ValueError(
            "Padding around mutation %d cannot be less than %d "
            "for epitope lengths %s" % (
                given_padding,
                minimum,
                epitope_lengths))
    return given_padding
def contains_mutant_residues(
        peptide_start_in_protein,
        peptide_length,
        mutation_start_in_protein,
        mutation_end_in_protein):
    """Return True if the peptide window overlaps the mutated interval.

    The mutation interval is half-open, so the peptide must start strictly
    before ``mutation_end_in_protein`` and end at or after
    ``mutation_start_in_protein``.
    """
    last_peptide_residue = peptide_start_in_protein + peptide_length - 1
    if peptide_start_in_protein >= mutation_end_in_protein:
        return False
    return last_peptide_residue >= mutation_start_in_protein
def peptide_mutation_interval(
        peptide_start_in_protein,
        peptide_length,
        mutation_start_in_protein,
        mutation_end_in_protein):
    """
    Half-open interval of mutated residues within the peptide, computed from
    the mutation's coordinates in the full protein sequence.

    Parameters
    ----------
    peptide_start_in_protein : int
        Position of the first peptide residue within the protein
        (starting from 0)

    peptide_length : int

    mutation_start_in_protein : int
        Position of the first mutated residue starting from 0. In the case of a
        deletion, the position where the first residue had been.

    mutation_end_in_protein : int
        Position of the last mutated residue in the mutant protein. In the case
        of a deletion, this is equal to the mutation_start_in_protein.

    Returns
    -------
    (int, int) half-open [start, end) offsets relative to the peptide.

    Raises
    ------
    ValueError when the peptide does not overlap the mutation.
    """
    if peptide_start_in_protein > mutation_end_in_protein:
        raise ValueError("Peptide starts after mutation")
    if peptide_start_in_protein + peptide_length < mutation_start_in_protein:
        raise ValueError("Peptide ends before mutation")

    def clamp(protein_position):
        # translate to peptide coordinates, clipped to [0, peptide_length]
        offset = protein_position - peptide_start_in_protein
        return min(peptide_length, max(0, offset))

    return (clamp(mutation_start_in_protein), clamp(mutation_end_in_protein))
|
openvax/topiary | topiary/sequence_helpers.py | check_padding_around_mutation | python | def check_padding_around_mutation(given_padding, epitope_lengths):
min_required_padding = max(epitope_lengths) - 1
if not given_padding:
return min_required_padding
else:
require_integer(given_padding, "Padding around mutation")
if given_padding < min_required_padding:
raise ValueError(
"Padding around mutation %d cannot be less than %d "
"for epitope lengths %s" % (
given_padding,
min_required_padding,
epitope_lengths))
return given_padding | If user doesn't provide any padding around the mutation we need
to at least include enough of the surrounding non-mutated
esidues to construct candidate epitopes of the specified lengths. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/sequence_helpers.py#L52-L70 | null | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from typechecks import require_integer
def protein_subsequences_around_mutations(effects, padding_around_mutation):
    """Extract a window of mutant protein sequence around each mutation.

    For every effect with a predictable mutant protein sequence, slice out
    the mutated region plus up to ``padding_around_mutation`` residues on
    each side (truncated at the sequence boundaries and at the first stop
    codon, since some pseudogenes contain '*' in the reference sequence).

    Returns a pair of dicts keyed by effect: the subsequence itself and the
    offset of that subsequence within the full mutant protein.
    """
    subsequences = {}
    start_offsets = {}
    for effect in effects:
        full_sequence = effect.mutant_protein_sequence
        if not full_sequence:
            # silent / unpredictable effects lack a mutant protein sequence
            continue
        window_start = max(
            0,
            effect.aa_mutation_start_offset - padding_around_mutation)
        # Trim so the subsequence never includes a stop character.
        stop_index = full_sequence.find("*")
        if stop_index < 0:
            stop_index = len(full_sequence)
        window_end = min(
            stop_index,
            effect.aa_mutation_end_offset + padding_around_mutation)
        subsequences[effect] = full_sequence[window_start:window_end]
        start_offsets[effect] = window_start
    return subsequences, start_offsets
def contains_mutant_residues(
        peptide_start_in_protein,
        peptide_length,
        mutation_start_in_protein,
        mutation_end_in_protein):
    """Return True if the peptide window overlaps the mutated interval.

    The mutation interval is half-open, so the peptide must start strictly
    before ``mutation_end_in_protein`` and end at or after
    ``mutation_start_in_protein``.
    """
    last_peptide_residue = peptide_start_in_protein + peptide_length - 1
    if peptide_start_in_protein >= mutation_end_in_protein:
        return False
    return last_peptide_residue >= mutation_start_in_protein
def peptide_mutation_interval(
        peptide_start_in_protein,
        peptide_length,
        mutation_start_in_protein,
        mutation_end_in_protein):
    """
    Half-open interval of mutated residues within the peptide, computed from
    the mutation's coordinates in the full protein sequence.

    Parameters
    ----------
    peptide_start_in_protein : int
        Position of the first peptide residue within the protein
        (starting from 0)

    peptide_length : int

    mutation_start_in_protein : int
        Position of the first mutated residue starting from 0. In the case of a
        deletion, the position where the first residue had been.

    mutation_end_in_protein : int
        Position of the last mutated residue in the mutant protein. In the case
        of a deletion, this is equal to the mutation_start_in_protein.

    Returns
    -------
    (int, int) half-open [start, end) offsets relative to the peptide.

    Raises
    ------
    ValueError when the peptide does not overlap the mutation.
    """
    if peptide_start_in_protein > mutation_end_in_protein:
        raise ValueError("Peptide starts after mutation")
    if peptide_start_in_protein + peptide_length < mutation_start_in_protein:
        raise ValueError("Peptide ends before mutation")

    def clamp(protein_position):
        # translate to peptide coordinates, clipped to [0, peptide_length]
        offset = protein_position - peptide_start_in_protein
        return min(peptide_length, max(0, offset))

    return (clamp(mutation_start_in_protein), clamp(mutation_end_in_protein))
|
openvax/topiary | topiary/sequence_helpers.py | peptide_mutation_interval | python | def peptide_mutation_interval(
peptide_start_in_protein,
peptide_length,
mutation_start_in_protein,
mutation_end_in_protein):
if peptide_start_in_protein > mutation_end_in_protein:
raise ValueError("Peptide starts after mutation")
elif peptide_start_in_protein + peptide_length < mutation_start_in_protein:
raise ValueError("Peptide ends before mutation")
# need a half-open start/end interval
peptide_mutation_start_offset = min(
peptide_length,
max(0, mutation_start_in_protein - peptide_start_in_protein))
peptide_mutation_end_offset = min(
peptide_length,
max(0, mutation_end_in_protein - peptide_start_in_protein))
return (peptide_mutation_start_offset, peptide_mutation_end_offset) | Half-open interval of mutated residues in the peptide, determined
from the mutation interval in the original protein sequence.
Parameters
----------
peptide_start_in_protein : int
Position of the first peptide residue within the protein
(starting from 0)
peptide_length : int
mutation_start_in_protein : int
Position of the first mutated residue starting from 0. In the case of a
deletion, the position where the first residue had been.
mutation_end_in_protein : int
Position of the last mutated residue in the mutant protein. In the case
of a deletion, this is equal to the mutation_start_in_protein.
) | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/sequence_helpers.py#L83-L121 | null | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from typechecks import require_integer
def protein_subsequences_around_mutations(effects, padding_around_mutation):
    """Extract a window of mutant protein sequence around each mutation.

    For every effect with a predictable mutant protein sequence, slice out
    the mutated region plus up to ``padding_around_mutation`` residues on
    each side (truncated at the sequence boundaries and at the first stop
    codon, since some pseudogenes contain '*' in the reference sequence).

    Returns a pair of dicts keyed by effect: the subsequence itself and the
    offset of that subsequence within the full mutant protein.
    """
    subsequences = {}
    start_offsets = {}
    for effect in effects:
        full_sequence = effect.mutant_protein_sequence
        if not full_sequence:
            # silent / unpredictable effects lack a mutant protein sequence
            continue
        window_start = max(
            0,
            effect.aa_mutation_start_offset - padding_around_mutation)
        # Trim so the subsequence never includes a stop character.
        stop_index = full_sequence.find("*")
        if stop_index < 0:
            stop_index = len(full_sequence)
        window_end = min(
            stop_index,
            effect.aa_mutation_end_offset + padding_around_mutation)
        subsequences[effect] = full_sequence[window_start:window_end]
        start_offsets[effect] = window_start
    return subsequences, start_offsets
def check_padding_around_mutation(given_padding, epitope_lengths):
    """Validate (or derive) the amount of sequence kept around a mutation.

    When the user supplies no padding, return the minimum needed to build
    candidate epitopes of every requested length; otherwise check that the
    supplied value is an integer at least that large and return it.
    """
    minimum = max(epitope_lengths) - 1
    if not given_padding:
        # No user-specified padding: fall back to the minimum required.
        return minimum
    require_integer(given_padding, "Padding around mutation")
    if given_padding < minimum:
        raise ValueError(
            "Padding around mutation %d cannot be less than %d "
            "for epitope lengths %s" % (
                given_padding,
                minimum,
                epitope_lengths))
    return given_padding
def contains_mutant_residues(
        peptide_start_in_protein,
        peptide_length,
        mutation_start_in_protein,
        mutation_end_in_protein):
    """Return True if the peptide window overlaps the mutated interval.

    The mutation interval is half-open, so the peptide must start strictly
    before ``mutation_end_in_protein`` and end at or after
    ``mutation_start_in_protein``.
    """
    last_peptide_residue = peptide_start_in_protein + peptide_length - 1
    if peptide_start_in_protein >= mutation_end_in_protein:
        return False
    return last_peptide_residue >= mutation_start_in_protein
|
openvax/topiary | topiary/filters.py | apply_filter | python | def apply_filter(
filter_fn,
collection,
result_fn=None,
filter_name="",
collection_name=""):
n_before = len(collection)
filtered = [x for x in collection if filter_fn(x)]
n_after = len(filtered)
if not collection_name:
collection_name = collection.__class__.__name__
logging.info(
"%s filtering removed %d/%d entries of %s",
filter_name,
(n_before - n_after),
n_before,
collection_name)
return result_fn(filtered) if result_fn else collection.__class__(filtered) | Apply filter to effect collection and print number of dropped elements
Parameters
---------- | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/filters.py#L24-L47 | null | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for filtering variants, effects, and epitope predictions
"""
from __future__ import print_function, division, absolute_import
import logging
from varcode import NonsilentCodingMutation
def filter_silent_and_noncoding_effects(effects):
    """Drop effects that do not modify the protein sequence.

    Parameters
    ----------
    effects : varcode.EffectCollection

    Returns
    -------
    varcode.EffectCollection restricted to non-silent coding mutations.
    """
    def is_nonsilent_coding(effect):
        return isinstance(effect, NonsilentCodingMutation)

    return apply_filter(
        filter_fn=is_nonsilent_coding,
        collection=effects,
        result_fn=effects.clone_with_new_elements,
        filter_name="Silent mutation")
def apply_variant_expression_filters(
        variants,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """Filter variants by gene and transcript expression thresholds.

    Either expression dict may be falsy, in which case the corresponding
    filter is skipped entirely.

    Parameters
    ----------
    variants : varcode.VariantCollection
    gene_expression_dict : dict
    gene_expression_threshold : float
    transcript_expression_dict : dict
    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def gene_expressed(variant):
            # keep the variant if any overlapping gene is expressed
            return any(
                gene_expression_dict.get(gene_id, 0.0) >=
                gene_expression_threshold
                for gene_id in variant.gene_ids)
        variants = apply_filter(
            gene_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name="Variant gene expression (min=%0.4f)" % gene_expression_threshold)
    if transcript_expression_dict:
        def transcript_expressed(variant):
            # keep the variant if any overlapping transcript is expressed
            return any(
                transcript_expression_dict.get(transcript_id, 0.0) >=
                transcript_expression_threshold
                for transcript_id in variant.transcript_ids)
        variants = apply_filter(
            transcript_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name=(
                "Variant transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return variants
def apply_effect_expression_filters(
        effects,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """Filter varcode effects by gene and transcript expression thresholds.

    Either expression dict may be falsy, in which case the corresponding
    filter is skipped entirely.

    Parameters
    ----------
    effects : varcode.EffectCollection
    gene_expression_dict : dict
    gene_expression_threshold : float
    transcript_expression_dict : dict
    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def gene_expressed(effect):
            # effects carry a single gene_id
            return (
                gene_expression_dict.get(effect.gene_id, 0.0) >=
                gene_expression_threshold)
        effects = apply_filter(
            gene_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
    if transcript_expression_dict:
        def transcript_expressed(effect):
            # effects carry a single transcript_id
            return (
                transcript_expression_dict.get(effect.transcript_id, 0.0) >=
                transcript_expression_threshold)
        effects = apply_filter(
            transcript_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name=(
                "Effect transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return effects
|
openvax/topiary | topiary/filters.py | filter_silent_and_noncoding_effects | python | def filter_silent_and_noncoding_effects(effects):
return apply_filter(
filter_fn=lambda effect: isinstance(effect, NonsilentCodingMutation),
collection=effects,
result_fn=effects.clone_with_new_elements,
filter_name="Silent mutation") | Keep only variant effects which result in modified proteins.
Parameters
----------
effects : varcode.EffectCollection | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/filters.py#L49-L61 | [
"def apply_filter(\n filter_fn,\n collection,\n result_fn=None,\n filter_name=\"\",\n collection_name=\"\"):\n \"\"\"\n Apply filter to effect collection and print number of dropped elements\n\n Parameters\n ----------\n \"\"\"\n n_before = len(collection)\n f... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for filtering variants, effects, and epitope predictions
"""
from __future__ import print_function, division, absolute_import
import logging
from varcode import NonsilentCodingMutation
def apply_filter(
        filter_fn,
        collection,
        result_fn=None,
        filter_name="",
        collection_name=""):
    """Keep only elements passing *filter_fn*, logging how many were dropped.

    Parameters
    ----------
    filter_fn : callable
        Predicate applied to every element; truthy results are kept.
    collection : sized iterable
    result_fn : callable, optional
        Used to rebuild the result; defaults to the collection's own class.
    filter_name : str
        Label used in the log message.
    collection_name : str
        Defaults to the collection's class name.
    """
    total = len(collection)
    kept = [element for element in collection if filter_fn(element)]
    label = collection_name or collection.__class__.__name__
    logging.info(
        "%s filtering removed %d/%d entries of %s",
        filter_name,
        total - len(kept),
        total,
        label)
    if result_fn:
        return result_fn(kept)
    return collection.__class__(kept)
def apply_variant_expression_filters(
        variants,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """Filter variants by gene and transcript expression thresholds.

    Either expression dict may be falsy, in which case the corresponding
    filter is skipped entirely.

    Parameters
    ----------
    variants : varcode.VariantCollection
    gene_expression_dict : dict
    gene_expression_threshold : float
    transcript_expression_dict : dict
    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def gene_expressed(variant):
            # keep the variant if any overlapping gene is expressed
            return any(
                gene_expression_dict.get(gene_id, 0.0) >=
                gene_expression_threshold
                for gene_id in variant.gene_ids)
        variants = apply_filter(
            gene_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name="Variant gene expression (min=%0.4f)" % gene_expression_threshold)
    if transcript_expression_dict:
        def transcript_expressed(variant):
            # keep the variant if any overlapping transcript is expressed
            return any(
                transcript_expression_dict.get(transcript_id, 0.0) >=
                transcript_expression_threshold
                for transcript_id in variant.transcript_ids)
        variants = apply_filter(
            transcript_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name=(
                "Variant transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return variants
def apply_effect_expression_filters(
        effects,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """Filter varcode effects by gene and transcript expression thresholds.

    Either expression dict may be falsy, in which case the corresponding
    filter is skipped entirely.

    Parameters
    ----------
    effects : varcode.EffectCollection
    gene_expression_dict : dict
    gene_expression_threshold : float
    transcript_expression_dict : dict
    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def gene_expressed(effect):
            # effects carry a single gene_id
            return (
                gene_expression_dict.get(effect.gene_id, 0.0) >=
                gene_expression_threshold)
        effects = apply_filter(
            gene_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
    if transcript_expression_dict:
        def transcript_expressed(effect):
            # effects carry a single transcript_id
            return (
                transcript_expression_dict.get(effect.transcript_id, 0.0) >=
                transcript_expression_threshold)
        effects = apply_filter(
            transcript_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name=(
                "Effect transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return effects
|
openvax/topiary | topiary/filters.py | apply_variant_expression_filters | python | def apply_variant_expression_filters(
variants,
gene_expression_dict,
gene_expression_threshold,
transcript_expression_dict,
transcript_expression_threshold):
if gene_expression_dict:
variants = apply_filter(
lambda variant: any(
gene_expression_dict.get(gene_id, 0.0) >=
gene_expression_threshold
for gene_id in variant.gene_ids
),
variants,
result_fn=variants.clone_with_new_elements,
filter_name="Variant gene expression (min=%0.4f)" % gene_expression_threshold)
if transcript_expression_dict:
variants = apply_filter(
lambda variant: any(
transcript_expression_dict.get(transcript_id, 0.0) >=
transcript_expression_threshold
for transcript_id in variant.transcript_ids
),
variants,
result_fn=variants.clone_with_new_elements,
filter_name=(
"Variant transcript expression (min=%0.4f)" % (
transcript_expression_threshold,)))
return variants | Filter a collection of variants by gene and transcript expression thresholds
Parameters
----------
variants : varcode.VariantCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/filters.py#L64-L107 | [
"def apply_filter(\n filter_fn,\n collection,\n result_fn=None,\n filter_name=\"\",\n collection_name=\"\"):\n \"\"\"\n Apply filter to effect collection and print number of dropped elements\n\n Parameters\n ----------\n \"\"\"\n n_before = len(collection)\n f... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for filtering variants, effects, and epitope predictions
"""
from __future__ import print_function, division, absolute_import
import logging
from varcode import NonsilentCodingMutation
def apply_filter(
        filter_fn,
        collection,
        result_fn=None,
        filter_name="",
        collection_name=""):
    """Keep only the elements of `collection` for which `filter_fn` is truthy.

    Logs how many entries were dropped. The surviving elements are wrapped
    via `result_fn` when given, otherwise rebuilt with the collection's
    own class.

    Parameters
    ----------
    filter_fn : callable
        Predicate applied to each element; truthy results are kept.
    collection : iterable
        Elements to filter; its class is the fallback result constructor.
    result_fn : callable, optional
        Constructor applied to the list of surviving elements.
    filter_name : str, optional
        Label used in the log message.
    collection_name : str, optional
        Name used in the log message; defaults to the collection's class name.
    """
    total = len(collection)
    kept = [element for element in collection if filter_fn(element)]
    n_dropped = total - len(kept)
    label = collection_name or collection.__class__.__name__
    # Lazy %-style args: formatting only happens if INFO logging is enabled.
    logging.info(
        "%s filtering removed %d/%d entries of %s",
        filter_name,
        n_dropped,
        total,
        label)
    if result_fn:
        return result_fn(kept)
    return collection.__class__(kept)
def filter_silent_and_noncoding_effects(effects):
    """Drop variant effects that leave the protein sequence unchanged.

    Only effects which are instances of NonsilentCodingMutation survive.

    Parameters
    ----------
    effects : varcode.EffectCollection
    """
    def is_nonsilent_coding(effect):
        return isinstance(effect, NonsilentCodingMutation)

    return apply_filter(
        filter_fn=is_nonsilent_coding,
        collection=effects,
        result_fn=effects.clone_with_new_elements,
        filter_name="Silent mutation")
def apply_effect_expression_filters(
        effects,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """Filter varcode effects by gene- and transcript-level expression.

    Each filter is applied only when its expression dictionary is provided
    (non-empty); IDs missing from a dictionary are treated as 0.0 expression.

    Parameters
    ----------
    effects : varcode.EffectCollection

    gene_expression_dict : dict

    gene_expression_threshold : float

    transcript_expression_dict : dict

    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def gene_expressed(effect):
            expression = gene_expression_dict.get(effect.gene_id, 0.0)
            return expression >= gene_expression_threshold

        effects = apply_filter(
            gene_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
    if transcript_expression_dict:
        def transcript_expressed(effect):
            expression = transcript_expression_dict.get(effect.transcript_id, 0.0)
            return expression >= transcript_expression_threshold

        effects = apply_filter(
            transcript_expressed,
            effects,
            result_fn=effects.clone_with_new_elements,
            filter_name=(
                "Effect transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return effects
|
openvax/topiary | topiary/filters.py | apply_effect_expression_filters | python | def apply_effect_expression_filters(
effects,
gene_expression_dict,
gene_expression_threshold,
transcript_expression_dict,
transcript_expression_threshold):
if gene_expression_dict:
effects = apply_filter(
lambda effect: (
gene_expression_dict.get(effect.gene_id, 0.0) >=
gene_expression_threshold),
effects,
result_fn=effects.clone_with_new_elements,
filter_name="Effect gene expression (min = %0.4f)" % gene_expression_threshold)
if transcript_expression_dict:
effects = apply_filter(
lambda effect: (
transcript_expression_dict.get(effect.transcript_id, 0.0) >=
transcript_expression_threshold
),
effects,
result_fn=effects.clone_with_new_elements,
filter_name=(
"Effect transcript expression (min=%0.4f)" % (
transcript_expression_threshold,)))
return effects | Filter collection of varcode effects by given gene
and transcript expression thresholds.
Parameters
----------
effects : varcode.EffectCollection
gene_expression_dict : dict
gene_expression_threshold : float
transcript_expression_dict : dict
transcript_expression_threshold : float | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/filters.py#L109-L151 | [
"def apply_filter(\n filter_fn,\n collection,\n result_fn=None,\n filter_name=\"\",\n collection_name=\"\"):\n \"\"\"\n Apply filter to effect collection and print number of dropped elements\n\n Parameters\n ----------\n \"\"\"\n n_before = len(collection)\n f... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for filtering variants, effects, and epitope predictions
"""
from __future__ import print_function, division, absolute_import
import logging
from varcode import NonsilentCodingMutation
def apply_filter(
        filter_fn,
        collection,
        result_fn=None,
        filter_name="",
        collection_name=""):
    """
    Apply filter to effect collection and print number of dropped elements

    Parameters
    ----------
    filter_fn : callable
        Predicate applied to each element; truthy results are kept.
    collection : iterable
        Elements to filter; its class rebuilds the result when
        `result_fn` is not given.
    result_fn : callable, optional
        Constructor applied to the list of surviving elements.
    filter_name : str, optional
        Label used in the log message.
    collection_name : str, optional
        Name used in the log message; defaults to the collection's
        class name.
    """
    n_before = len(collection)
    filtered = [x for x in collection if filter_fn(x)]
    n_after = len(filtered)
    if not collection_name:
        collection_name = collection.__class__.__name__
    # Lazy %-style logging args: formatting happens only if INFO is enabled.
    logging.info(
        "%s filtering removed %d/%d entries of %s",
        filter_name,
        (n_before - n_after),
        n_before,
        collection_name)
    return result_fn(filtered) if result_fn else collection.__class__(filtered)
def filter_silent_and_noncoding_effects(effects):
    """
    Keep only variant effects which result in modified proteins.

    Parameters
    ----------
    effects : varcode.EffectCollection
        Predicted protein-level effects of variants.

    Returns a new collection (via `clone_with_new_elements`) containing
    only effects that are instances of NonsilentCodingMutation.
    """
    return apply_filter(
        # NonsilentCodingMutation is the varcode base class for every
        # effect that changes the protein sequence; anything else is
        # silent or noncoding.
        filter_fn=lambda effect: isinstance(effect, NonsilentCodingMutation),
        collection=effects,
        result_fn=effects.clone_with_new_elements,
        filter_name="Silent mutation")
def apply_variant_expression_filters(
        variants,
        gene_expression_dict,
        gene_expression_threshold,
        transcript_expression_dict,
        transcript_expression_threshold):
    """Filter a collection of variants by expression thresholds.

    A variant survives a filter when at least one of its overlapping genes
    (respectively, transcripts) meets the corresponding threshold; IDs
    missing from a dictionary count as 0.0 expression. Each filter is
    applied only when its expression dictionary is provided (non-empty).

    Parameters
    ----------
    variants : varcode.VariantCollection

    gene_expression_dict : dict

    gene_expression_threshold : float

    transcript_expression_dict : dict

    transcript_expression_threshold : float
    """
    if gene_expression_dict:
        def any_gene_expressed(variant):
            return any(
                gene_expression_dict.get(gene_id, 0.0) >=
                gene_expression_threshold
                for gene_id in variant.gene_ids)

        variants = apply_filter(
            any_gene_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name="Variant gene expression (min=%0.4f)" % gene_expression_threshold)
    if transcript_expression_dict:
        def any_transcript_expressed(variant):
            return any(
                transcript_expression_dict.get(transcript_id, 0.0) >=
                transcript_expression_threshold
                for transcript_id in variant.transcript_ids)

        variants = apply_filter(
            any_transcript_expressed,
            variants,
            result_fn=variants.clone_with_new_elements,
            filter_name=(
                "Variant transcript expression (min=%0.4f)" % (
                    transcript_expression_threshold,)))
    return variants
|
openvax/topiary | topiary/rna/gtf.py | _get_gtf_column | python | def _get_gtf_column(column_name, gtf_path, df):
if column_name in df.columns:
return list(df[column_name])
else:
raise ValueError(
"Missing '%s' in columns of %s, available: %s" % (
column_name,
gtf_path,
list(df.columns))) | Helper function which returns a dictionary column or raises a ValueError
about the absence of that column in a GTF file. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/gtf.py#L22-L35 | null | # Copyright (c) 2017-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import gtfparse
def load_transcript_fpkm_dict_from_gtf(
        gtf_path,
        transcript_id_column_name="reference_id",
        fpkm_column_name="FPKM",
        feature_column_name="feature"):
    """Map Ensembl transcript IDs to FPKM values from a StringTie GTF.

    Loads a GTF file generated by StringTie (which contains transcript-level
    abundance estimates) and returns a dict of transcript ID -> FPKM,
    keeping only rows whose feature is "transcript" and whose reference
    transcript ID is present and non-empty.
    """
    df = gtfparse.read_gtf(
        gtf_path,
        column_converters={fpkm_column_name: float})
    transcript_ids = _get_gtf_column(transcript_id_column_name, gtf_path, df)
    fpkm_values = _get_gtf_column(fpkm_column_name, gtf_path, df)
    features = _get_gtf_column(feature_column_name, gtf_path, df)
    logging.info("Loaded %d rows from %s" % (len(transcript_ids), gtf_path))
    logging.info("Found %s transcript entries" % sum(
        feature == "transcript" for feature in features))
    result = {}
    for transcript_id, fpkm, feature in zip(
            transcript_ids, fpkm_values, features):
        # Skip exon/other rows and rows lacking a reference transcript ID.
        if feature != "transcript":
            continue
        if transcript_id is None or len(transcript_id) == 0:
            continue
        result[transcript_id] = float(fpkm)
    logging.info("Keeping %d transcript rows with reference IDs" % (
        len(result),))
    return result
|
openvax/topiary | topiary/rna/gtf.py | load_transcript_fpkm_dict_from_gtf | python | def load_transcript_fpkm_dict_from_gtf(
gtf_path,
transcript_id_column_name="reference_id",
fpkm_column_name="FPKM",
feature_column_name="feature"):
df = gtfparse.read_gtf(
gtf_path,
column_converters={fpkm_column_name: float})
transcript_ids = _get_gtf_column(transcript_id_column_name, gtf_path, df)
fpkm_values = _get_gtf_column(fpkm_column_name, gtf_path, df)
features = _get_gtf_column(feature_column_name, gtf_path, df)
logging.info("Loaded %d rows from %s" % (len(transcript_ids), gtf_path))
logging.info("Found %s transcript entries" % sum(
feature == "transcript" for feature in features))
result = {
transcript_id: float(fpkm)
for (transcript_id, fpkm, feature)
in zip(transcript_ids, fpkm_values, features)
if (
(transcript_id is not None) and
(len(transcript_id) > 0) and
(feature == "transcript")
)
}
logging.info("Keeping %d transcript rows with reference IDs" % (
len(result),))
return result | Load a GTF file generated by StringTie which contains transcript-level
quantification of abundance. Returns a dictionary mapping Ensembl
IDs of transcripts to FPKM values. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/gtf.py#L37-L68 | [
"def _get_gtf_column(column_name, gtf_path, df):\n \"\"\"\n Helper function which returns a dictionary column or raises an ValueError\n abou the absence of that column in a GTF file.\n \"\"\"\n if column_name in df.columns:\n return list(df[column_name])\n\n else:\n raise ValueError(... | # Copyright (c) 2017-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import gtfparse
def _get_gtf_column(column_name, gtf_path, df):
    """
    Helper function which returns the values of one DataFrame column as a
    list, or raises a ValueError about the absence of that column in the
    GTF file at `gtf_path` (the path is only used in the error message).
    """
    if column_name in df.columns:
        return list(df[column_name])

    else:
        raise ValueError(
            "Missing '%s' in columns of %s, available: %s" % (
                column_name,
                gtf_path,
                list(df.columns)))
|
openvax/topiary | topiary/predictor.py | TopiaryPredictor.predict_from_named_sequences | python | def predict_from_named_sequences(
self, name_to_sequence_dict):
df = self.mhc_model.predict_subsequences_dataframe(name_to_sequence_dict)
return df.rename(
columns={
"length": "peptide_length",
"offset": "peptide_offset"}) | Parameters
----------
name_to_sequence_dict : (str->str) dict
Dictionary mapping sequence names to amino acid sequences
Returns pandas.DataFrame with the following columns:
- source_sequence_name
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/predictor.py#L91-L113 | null | class TopiaryPredictor(object):
    def __init__(
            self,
            mhc_model,
            padding_around_mutation=None,
            ic50_cutoff=None,
            percentile_cutoff=None,
            min_gene_expression=0.0,
            min_transcript_expression=0.0,
            only_novel_epitopes=False,
            raise_on_error=True):
        """
        Parameters
        ----------
        mhc_model : mhctools.BasePredictor
            Any instance of a peptide-MHC binding affinity predictor

        padding_around_mutation : int
            How many residues surrounding a mutation to consider including in a
            candidate epitope. Default is the minimum size necessary for epitope
            length of the mhc model.

        min_gene_expression : float, optional
            If gene expression values are provided, only keep effects on
            genes with expression above this threshold.

        min_transcript_expression : float, optional
            If transcript expression values are provided, only keep effects on
            transcripts with expression above this threshold.

        ic50_cutoff : float, optional
            Maximum predicted IC50 value for a peptide to be considered a binder.

        percentile_cutoff : float, optional
            Maximum percentile rank of IC50 values for a peptide to be considered
            a binder.

        only_novel_epitopes : bool, optional
            If True, then drop peptides which don't contain a mutation.
            TODO: make this also check that peptide doesn't occur elsewhere in
            the reference ligandome

        raise_on_error : bool
            Raise an exception if error is encountered or skip
            the variant or peptide which generated the error.
        """
        self.mhc_model = mhc_model
        # Validate/derive the padding from the model's supported peptide
        # lengths (per the docstring, defaults to the minimum necessary).
        self.padding_around_mutation = check_padding_around_mutation(
            given_padding=padding_around_mutation,
            epitope_lengths=self.mhc_model.default_peptide_lengths)
        self.ic50_cutoff = ic50_cutoff
        self.percentile_cutoff = percentile_cutoff
        self.min_transcript_expression = min_transcript_expression
        self.min_gene_expression = min_gene_expression
        self.only_novel_epitopes = only_novel_epitopes
        self.raise_on_error = raise_on_error
def predict_from_sequences(self, sequences):
"""
Predict MHC ligands for sub-sequences of each input sequence.
Parameters
----------
sequences : list of str
Multiple amino acid sequences (without any names or IDs)
Returns DataFrame with the following fields:
- source_sequence
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
"""
# make each sequence its own unique ID
sequence_dict = {
seq: seq
for seq in sequences
}
df = self.predict_from_named_sequences(sequence_dict)
return df.rename(columns={"source_sequence_name": "source_sequence"})
    def predict_from_mutation_effects(
            self,
            effects,
            transcript_expression_dict=None,
            gene_expression_dict=None):
        """Given a Varcode.EffectCollection of predicted protein effects,
        return predicted epitopes around each mutation.

        Parameters
        ----------
        effects : Varcode.EffectCollection

        transcript_expression_dict : dict
            Dictionary mapping transcript IDs to RNA expression estimates. Used
            both for transcript expression filtering and for selecting the
            most abundant transcript for a particular variant. If omitted then
            transcript selection is done using priority of variant effects and
            transcript length.

        gene_expression_dict : dict, optional
            Dictionary mapping gene IDs to RNA expression estimates

        Returns DataFrame with the following columns:
            - variant
            - gene
            - gene_id
            - transcript_id
            - transcript_name
            - effect
            - effect_type
            - peptide
            - peptide_offset
            - peptide_length
            - allele
            - affinity
            - percentile_rank
            - prediction_method_name
            - contains_mutant_residues
            - mutation_start_in_peptide
            - mutation_end_in_peptide

        Optionally will also include the following columns if corresponding
        expression dictionary inputs are provided:
            - gene_expression
            - transcript_expression
        """
        # we only care about effects which impact the coding sequence of a
        # protein
        effects = filter_silent_and_noncoding_effects(effects)
        effects = apply_effect_expression_filters(
            effects,
            transcript_expression_dict=transcript_expression_dict,
            transcript_expression_threshold=self.min_transcript_expression,
            gene_expression_dict=gene_expression_dict,
            gene_expression_threshold=self.min_gene_expression)
        # group by variants, so that we end up with only one mutant
        # sequence per mutation
        variant_effect_groups = effects.groupby_variant()
        if len(variant_effect_groups) == 0:
            logging.warn("No candidates for MHC binding prediction")
            return []
        if transcript_expression_dict:
            # if expression data is available, then for each variant
            # keep the effect annotation for the most abundant transcript
            top_effects = [
                variant_effects.top_expression_effect(
                    transcript_expression_dict)
                for variant_effects in variant_effect_groups.values()
            ]
        else:
            # if no transcript abundance data is available, then
            # for each variant keep the effect with the most significant
            # predicted effect on the protein sequence, along with using
            # transcript/CDS length as a tie-breaker for effects with the same
            # priority.
            top_effects = [
                variant_effects.top_priority_effect()
                for variant_effects in variant_effect_groups.values()
            ]
        # 1) dictionary mapping varcode effect objects to subsequences
        # around each mutation
        # 2) dictionary mapping varcode effect to start offset of subsequence
        # within the full mutant protein sequence
        effect_to_subsequence_dict, effect_to_offset_dict = \
            protein_subsequences_around_mutations(
                effects=top_effects,
                padding_around_mutation=self.padding_around_mutation)
        # since we know that each set of variant effects has been
        # reduced to a single 'top priority' effect, we can uniquely
        # identify each variant sequence by its original genomic variant
        variant_string_to_effect_dict = {
            effect.variant.short_description: effect
            for effect in effect_to_subsequence_dict.keys()
        }
        variant_string_to_subsequence_dict = {
            effect.variant.short_description: subseq
            for (effect, subseq) in effect_to_subsequence_dict.items()
        }
        variant_string_to_offset_dict = {
            effect.variant.short_description: subseq_offset
            for (effect, subseq_offset) in effect_to_offset_dict.items()
        }
        df = self.predict_from_named_sequences(variant_string_to_subsequence_dict)
        logging.info("MHC predictor returned %d peptide binding predictions" % (
            len(df)))
        # since we used variant descriptions as the name of each sequence
        # let's rename that column to be more informative
        df = df.rename(columns={"source_sequence_name": "variant"})

        # adjust offset to be relative to start of protein, rather
        # than whatever subsequence we used for prediction
        def compute_peptide_offset_relative_to_protein(row):
            subsequence_offset = variant_string_to_offset_dict[row.variant]
            return row.peptide_offset + subsequence_offset
        df["peptide_offset"] = df.apply(
            compute_peptide_offset_relative_to_protein,
            axis=1)
        # optional binding-strength filters; both cutoffs are upper bounds
        if self.ic50_cutoff:
            df = df[df.affinity <= self.ic50_cutoff]
            logging.info("Kept %d predictions after filtering affinity <= %f" % (
                len(df), self.ic50_cutoff))
        if self.percentile_cutoff:
            df = df[df.percentile_rank <= self.percentile_cutoff]
            logging.info("Kept %d predictions after filtering percentile <= %f" % (
                len(df), self.percentile_cutoff))
        # per-row annotation columns, accumulated in insertion order so the
        # output DataFrame has a stable column layout
        extra_columns = OrderedDict([
            ('gene', []),
            ('gene_id', []),
            ('transcript_id', []),
            ('transcript_name', []),
            ('effect', []),
            ('effect_type', []),
            ('contains_mutant_residues', []),
            ('mutation_start_in_peptide', []),
            ('mutation_end_in_peptide', []),
        ])
        if gene_expression_dict is not None:
            extra_columns["gene_expression"] = []
        if transcript_expression_dict is not None:
            extra_columns["transcript_expression"] = []
        for _, row in df.iterrows():
            effect = variant_string_to_effect_dict[row.variant]
            mutation_start_in_protein = effect.aa_mutation_start_offset
            mutation_end_in_protein = effect.aa_mutation_end_offset
            peptide_length = len(row.peptide)
            is_mutant = contains_mutant_residues(
                peptide_start_in_protein=row.peptide_offset,
                peptide_length=peptide_length,
                mutation_start_in_protein=mutation_start_in_protein,
                mutation_end_in_protein=mutation_end_in_protein)
            if is_mutant:
                mutation_start_in_peptide, mutation_end_in_peptide = peptide_mutation_interval(
                    peptide_start_in_protein=row.peptide_offset,
                    peptide_length=peptide_length,
                    mutation_start_in_protein=mutation_start_in_protein,
                    mutation_end_in_protein=mutation_end_in_protein)
            else:
                mutation_start_in_peptide = mutation_end_in_peptide = None
            extra_columns["gene"].append(effect.gene_name)
            gene_id = effect.gene_id
            extra_columns["gene_id"].append(gene_id)
            if gene_expression_dict is not None:
                extra_columns["gene_expression"].append(
                    gene_expression_dict.get(gene_id, 0.0))
            transcript_id = effect.transcript_id
            extra_columns["transcript_id"].append(transcript_id)
            extra_columns["transcript_name"].append(effect.transcript_name)
            if transcript_expression_dict is not None:
                extra_columns["transcript_expression"].append(
                    transcript_expression_dict.get(transcript_id, 0.0))
            extra_columns["effect"].append(effect.short_description)
            extra_columns["effect_type"].append(effect.__class__.__name__)
            extra_columns["contains_mutant_residues"].append(is_mutant)
            extra_columns["mutation_start_in_peptide"].append(mutation_start_in_peptide)
            extra_columns["mutation_end_in_peptide"].append(mutation_end_in_peptide)
        for col, values in extra_columns.items():
            df[col] = values
        # TODO: add extra boolean field
        #     novel = is_mutant | not_in_reference
        # Requires keeping a quick lookup structure for all peptides in
        # the reference proteome
        if self.only_novel_epitopes:
            df = df[df.contains_mutant_residues]
        return df
    def predict_from_variants(
            self,
            variants,
            transcript_expression_dict=None,
            gene_expression_dict=None):
        """
        Predict epitopes from a Variant collection, filtering options, and
        optional gene and transcript expression data.

        Parameters
        ----------
        variants : varcode.VariantCollection

        transcript_expression_dict : dict
            Maps from Ensembl transcript IDs to FPKM expression values.

        gene_expression_dict : dict, optional
            Maps from Ensembl gene IDs to FPKM expression values.

        Returns DataFrame with the following columns:
            - variant
            - gene
            - gene_id
            - transcript_id
            - transcript_name
            - effect
            - effect_type
            - peptide
            - peptide_offset
            - peptide_length
            - allele
            - affinity
            - percentile_rank
            - prediction_method_name
            - contains_mutant_residues
            - mutation_start_in_peptide
            - mutation_end_in_peptide

        Optionally will also include the following columns if corresponding
        expression dictionary inputs are provided:
            - gene_expression
            - transcript_expression
        """
        # pre-filter variants by checking if any of the genes or
        # transcripts they overlap have sufficient expression.
        # I'm tolerating the redundancy of this code since it's much cheaper
        # to filter a variant *before* trying to predict its impact/effect
        # on the protein sequence.
        variants = apply_variant_expression_filters(
            variants,
            transcript_expression_dict=transcript_expression_dict,
            transcript_expression_threshold=self.min_transcript_expression,
            gene_expression_dict=gene_expression_dict,
            gene_expression_threshold=self.min_gene_expression)
        effects = variants.effects(raise_on_error=self.raise_on_error)
        return self.predict_from_mutation_effects(
            effects=effects,
            transcript_expression_dict=transcript_expression_dict,
            gene_expression_dict=gene_expression_dict)
|
openvax/topiary | topiary/predictor.py | TopiaryPredictor.predict_from_sequences | python | def predict_from_sequences(self, sequences):
# make each sequence its own unique ID
sequence_dict = {
seq: seq
for seq in sequences
}
df = self.predict_from_named_sequences(sequence_dict)
return df.rename(columns={"source_sequence_name": "source_sequence"}) | Predict MHC ligands for sub-sequences of each input sequence.
Parameters
----------
sequences : list of str
Multiple amino acid sequences (without any names or IDs)
Returns DataFrame with the following fields:
- source_sequence
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/predictor.py#L115-L140 | [
"def predict_from_named_sequences(\n self, name_to_sequence_dict):\n \"\"\"\n Parameters\n ----------\n name_to_sequence_dict : (str->str) dict\n Dictionary mapping sequence names to amino acid sequences\n\n Returns pandas.DataFrame with the following columns:\n - source_sequence... | class TopiaryPredictor(object):
def __init__(
self,
mhc_model,
padding_around_mutation=None,
ic50_cutoff=None,
percentile_cutoff=None,
min_gene_expression=0.0,
min_transcript_expression=0.0,
only_novel_epitopes=False,
raise_on_error=True):
"""
Parameters
----------
mhc_model : mhctools.BasePredictor
Any instance of a peptide-MHC binding affinity predictor
padding_around_mutation : int
How many residues surrounding a mutation to consider including in a
candidate epitope. Default is the minimum size necessary for epitope
length of the mhc model.
min_gene_expression : float, optional
If gene expression values are provided, only keep effects on
genes with expression above this threshold.
min_transcript_expression : float, optional
If transcript expression values are provided, only keep effects on
transcripts with expression above this threshold.
ic50_cutoff : float, optional
Maximum predicted IC50 value for a peptide to be considered a binder.
percentile_cutoff : float, optional
Maximum percentile rank of IC50 values for a peptide to be considered
a binder.
only_novel_epitopes : bool, optional
If True, then drop peptides which either don't contain a mutation.
TODO: make this also check that peptide doesn't occur elsewhere in
the reference ligandome
raise_on_error : bool
Raise an exception if error is encountered or skip
the variant or peptide which generated the error.
"""
self.mhc_model = mhc_model
self.padding_around_mutation = check_padding_around_mutation(
given_padding=padding_around_mutation,
epitope_lengths=self.mhc_model.default_peptide_lengths)
self.ic50_cutoff = ic50_cutoff
self.percentile_cutoff = percentile_cutoff
self.min_transcript_expression = min_transcript_expression
self.min_gene_expression = min_gene_expression
self.only_novel_epitopes = only_novel_epitopes
self.raise_on_error = raise_on_error
    def predict_from_named_sequences(
            self, name_to_sequence_dict):
        """
        Run the binding model over all sub-sequences of the named sequences.

        Parameters
        ----------
        name_to_sequence_dict : (str->str) dict
            Dictionary mapping sequence names to amino acid sequences

        Returns pandas.DataFrame with the following columns:
            - source_sequence_name
            - peptide
            - peptide_offset
            - peptide_length
            - allele
            - affinity
            - percentile_rank
            - prediction_method_name
        """
        df = self.mhc_model.predict_subsequences_dataframe(name_to_sequence_dict)
        # The predictor's DataFrame uses "length"/"offset" column names;
        # rename to the peptide_* names used throughout topiary's output.
        return df.rename(
            columns={
                "length": "peptide_length",
                "offset": "peptide_offset"})
def predict_from_mutation_effects(
self,
effects,
transcript_expression_dict=None,
gene_expression_dict=None):
"""Given a Varcode.EffectCollection of predicted protein effects,
return predicted epitopes around each mutation.
Parameters
----------
effects : Varcode.EffectCollection
transcript_expression_dict : dict
Dictionary mapping transcript IDs to RNA expression estimates. Used
both for transcript expression filtering and for selecting the
most abundant transcript for a particular variant. If omitted then
transcript selection is done using priority of variant effects and
transcript length.
gene_expression_dict : dict, optional
Dictionary mapping gene IDs to RNA expression estimates
Returns DataFrame with the following columns:
- variant
- gene
- gene_id
- transcript_id
- transcript_name
- effect
- effect_type
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
- contains_mutant_residues
- mutation_start_in_peptide
- mutation_end_in_peptide
Optionall will also include the following columns if corresponding
expression dictionary inputs are provided:
- gene_expression
- transcript_expression
"""
# we only care about effects which impact the coding sequence of a
# protein
effects = filter_silent_and_noncoding_effects(effects)
effects = apply_effect_expression_filters(
effects,
transcript_expression_dict=transcript_expression_dict,
transcript_expression_threshold=self.min_transcript_expression,
gene_expression_dict=gene_expression_dict,
gene_expression_threshold=self.min_gene_expression)
# group by variants, so that we end up with only one mutant
# sequence per mutation
variant_effect_groups = effects.groupby_variant()
if len(variant_effect_groups) == 0:
logging.warn("No candidates for MHC binding prediction")
return []
if transcript_expression_dict:
# if expression data is available, then for each variant
# keep the effect annotation for the most abundant transcript
top_effects = [
variant_effects.top_expression_effect(
transcript_expression_dict)
for variant_effects in variant_effect_groups.values()
]
else:
# if no transcript abundance data is available, then
# for each variant keep the effect with the most significant
# predicted effect on the protein sequence, along with using
# transcript/CDS length as a tie-breaker for effects with the same
# priority.
top_effects = [
variant_effects.top_priority_effect()
for variant_effects in variant_effect_groups.values()
]
# 1) dictionary mapping varcode effect objects to subsequences
# around each mutation
# 2) dictionary mapping varcode effect to start offset of subsequence
# within the full mutant protein sequence
effect_to_subsequence_dict, effect_to_offset_dict = \
protein_subsequences_around_mutations(
effects=top_effects,
padding_around_mutation=self.padding_around_mutation)
# since we know that each set of variant effects has been
# reduced to a single 'top priority' effect, we can uniquely
# identify each variant sequence by its original genomic variant
variant_string_to_effect_dict = {
effect.variant.short_description: effect
for effect in effect_to_subsequence_dict.keys()
}
variant_string_to_subsequence_dict = {
effect.variant.short_description: subseq
for (effect, subseq) in effect_to_subsequence_dict.items()
}
variant_string_to_offset_dict = {
effect.variant.short_description: subseq_offset
for (effect, subseq_offset) in effect_to_offset_dict.items()
}
df = self.predict_from_named_sequences(variant_string_to_subsequence_dict)
logging.info("MHC predictor returned %d peptide binding predictions" % (
len(df)))
# since we used variant descrptions as the name of each sequence
# let's rename that column to be more informative
df = df.rename(columns={"source_sequence_name": "variant"})
# adjust offset to be relative to start of protein, rather
# than whatever subsequence we used for prediction
def compute_peptide_offset_relative_to_protein(row):
subsequence_offset = variant_string_to_offset_dict[row.variant]
return row.peptide_offset + subsequence_offset
df["peptide_offset"] = df.apply(
compute_peptide_offset_relative_to_protein,
axis=1)
if self.ic50_cutoff:
df = df[df.affinity <= self.ic50_cutoff]
logging.info("Kept %d predictions after filtering affinity <= %f" % (
len(df), self.ic50_cutoff))
if self.percentile_cutoff:
df = df[df.percentile_rank <= self.percentile_cutoff]
logging.info("Kept %d predictions after filtering percentile <= %f" % (
len(df), self.percentile_cutoff))
extra_columns = OrderedDict([
('gene', []),
('gene_id', []),
('transcript_id', []),
('transcript_name', []),
('effect', []),
('effect_type', []),
('contains_mutant_residues', []),
('mutation_start_in_peptide', []),
('mutation_end_in_peptide', []),
])
if gene_expression_dict is not None:
extra_columns["gene_expression"] = []
if transcript_expression_dict is not None:
extra_columns["transcript_expression"] = []
for _, row in df.iterrows():
effect = variant_string_to_effect_dict[row.variant]
mutation_start_in_protein = effect.aa_mutation_start_offset
mutation_end_in_protein = effect.aa_mutation_end_offset
peptide_length = len(row.peptide)
is_mutant = contains_mutant_residues(
peptide_start_in_protein=row.peptide_offset,
peptide_length=peptide_length,
mutation_start_in_protein=mutation_start_in_protein,
mutation_end_in_protein=mutation_end_in_protein)
if is_mutant:
mutation_start_in_peptide, mutation_end_in_peptide = peptide_mutation_interval(
peptide_start_in_protein=row.peptide_offset,
peptide_length=peptide_length,
mutation_start_in_protein=mutation_start_in_protein,
mutation_end_in_protein=mutation_end_in_protein)
else:
mutation_start_in_peptide = mutation_end_in_peptide = None
extra_columns["gene"].append(effect.gene_name)
gene_id = effect.gene_id
extra_columns["gene_id"].append(gene_id)
if gene_expression_dict is not None:
extra_columns["gene_expression"].append(
gene_expression_dict.get(gene_id, 0.0))
transcript_id = effect.transcript_id
extra_columns["transcript_id"].append(transcript_id)
extra_columns["transcript_name"].append(effect.transcript_name)
if transcript_expression_dict is not None:
extra_columns["transcript_expression"].append(
transcript_expression_dict.get(transcript_id, 0.0))
extra_columns["effect"].append(effect.short_description)
extra_columns["effect_type"].append(effect.__class__.__name__)
extra_columns["contains_mutant_residues"].append(is_mutant)
extra_columns["mutation_start_in_peptide"].append(mutation_start_in_peptide)
extra_columns["mutation_end_in_peptide"].append(mutation_end_in_peptide)
for col, values in extra_columns.items():
df[col] = values
# TODO: add extra boolean field
# novel = is_mutant | not_in_reference
# Requires keeping a quick lookup structure for all peptides in
# the reference proteome
if self.only_novel_epitopes:
df = df[df.contains_mutant_residues]
return df
def predict_from_variants(
        self,
        variants,
        transcript_expression_dict=None,
        gene_expression_dict=None):
    """Predict MHC-binding epitopes for a collection of genomic variants.

    Expression filtering is applied twice on purpose: once here on the raw
    variants (cheap, avoids computing protein effects for under-expressed
    genes/transcripts) and again downstream on the annotated effects.

    Parameters
    ----------
    variants : varcode.VariantCollection
    transcript_expression_dict : dict, optional
        Maps Ensembl transcript IDs to FPKM expression values.
    gene_expression_dict : dict, optional
        Maps Ensembl gene IDs to FPKM expression values.

    Returns
    -------
    pandas.DataFrame with one row per predicted peptide/allele pair,
    annotated with variant, gene, transcript, effect, binding affinity,
    percentile rank and mutation-interval columns. Expression columns
    (gene_expression, transcript_expression) are included only when the
    corresponding expression dictionaries are provided.
    """
    # Drop variants whose overlapping genes/transcripts fall below the
    # expression thresholds configured on this predictor, before doing
    # any (much more expensive) effect prediction.
    expressed_variants = apply_variant_expression_filters(
        variants,
        transcript_expression_dict=transcript_expression_dict,
        transcript_expression_threshold=self.min_transcript_expression,
        gene_expression_dict=gene_expression_dict,
        gene_expression_threshold=self.min_gene_expression)
    # Annotate each surviving variant with its predicted protein effects.
    effects = expressed_variants.effects(raise_on_error=self.raise_on_error)
    # Delegate annotation + MHC binding prediction to the effect-based
    # entry point.
    return self.predict_from_mutation_effects(
        effects=effects,
        transcript_expression_dict=transcript_expression_dict,
        gene_expression_dict=gene_expression_dict)
|
openvax/topiary | topiary/predictor.py | TopiaryPredictor.predict_from_mutation_effects | python | def predict_from_mutation_effects(
        self,
        effects,
        transcript_expression_dict=None,
        gene_expression_dict=None):
    """Predict MHC-binding epitopes around each mutation in ``effects``.

    Returns a pandas.DataFrame with one row per (peptide, allele) binding
    prediction, annotated with the source variant, gene/transcript
    identifiers, effect description, and the mutated interval within each
    peptide.  Expression columns are added only when the corresponding
    expression dictionaries are provided.

    NOTE(review): when no candidate effects survive filtering this returns
    an empty *list* rather than an empty DataFrame -- callers expecting a
    DataFrame should be aware of the inconsistent return type.
    """
    # we only care about effects which impact the coding sequence of a
    # protein
    effects = filter_silent_and_noncoding_effects(effects)
    # drop effects whose gene/transcript expression falls below the
    # thresholds configured on this predictor
    effects = apply_effect_expression_filters(
        effects,
        transcript_expression_dict=transcript_expression_dict,
        transcript_expression_threshold=self.min_transcript_expression,
        gene_expression_dict=gene_expression_dict,
        gene_expression_threshold=self.min_gene_expression)
    # group by variants, so that we end up with only one mutant
    # sequence per mutation
    variant_effect_groups = effects.groupby_variant()
    if len(variant_effect_groups) == 0:
        # NOTE(review): logging.warn is a deprecated alias of
        # logging.warning
        logging.warn("No candidates for MHC binding prediction")
        return []
    if transcript_expression_dict:
        # if expression data is available, then for each variant
        # keep the effect annotation for the most abundant transcript
        top_effects = [
            variant_effects.top_expression_effect(
                transcript_expression_dict)
            for variant_effects in variant_effect_groups.values()
        ]
    else:
        # if no transcript abundance data is available, then
        # for each variant keep the effect with the most significant
        # predicted effect on the protein sequence, along with using
        # transcript/CDS length as a tie-breaker for effects with the same
        # priority.
        top_effects = [
            variant_effects.top_priority_effect()
            for variant_effects in variant_effect_groups.values()
        ]
    # 1) dictionary mapping varcode effect objects to subsequences
    # around each mutation
    # 2) dictionary mapping varcode effect to start offset of subsequence
    # within the full mutant protein sequence
    effect_to_subsequence_dict, effect_to_offset_dict = \
        protein_subsequences_around_mutations(
            effects=top_effects,
            padding_around_mutation=self.padding_around_mutation)
    # since we know that each set of variant effects has been
    # reduced to a single 'top priority' effect, we can uniquely
    # identify each variant sequence by its original genomic variant
    variant_string_to_effect_dict = {
        effect.variant.short_description: effect
        for effect in effect_to_subsequence_dict.keys()
    }
    variant_string_to_subsequence_dict = {
        effect.variant.short_description: subseq
        for (effect, subseq) in effect_to_subsequence_dict.items()
    }
    variant_string_to_offset_dict = {
        effect.variant.short_description: subseq_offset
        for (effect, subseq_offset) in effect_to_offset_dict.items()
    }
    # run the MHC binding predictor over every mutant subsequence
    df = self.predict_from_named_sequences(variant_string_to_subsequence_dict)
    logging.info("MHC predictor returned %d peptide binding predictions" % (
        len(df)))
    # since we used variant descriptions as the name of each sequence
    # let's rename that column to be more informative
    df = df.rename(columns={"source_sequence_name": "variant"})
    # adjust offset to be relative to start of protein, rather
    # than whatever subsequence we used for prediction
    def compute_peptide_offset_relative_to_protein(row):
        # shift by where the predicted subsequence starts in the protein
        subsequence_offset = variant_string_to_offset_dict[row.variant]
        return row.peptide_offset + subsequence_offset
    df["peptide_offset"] = df.apply(
        compute_peptide_offset_relative_to_protein,
        axis=1)
    if self.ic50_cutoff:
        # keep only peptides predicted to bind at least this strongly
        df = df[df.affinity <= self.ic50_cutoff]
        logging.info("Kept %d predictions after filtering affinity <= %f" % (
            len(df), self.ic50_cutoff))
    if self.percentile_cutoff:
        df = df[df.percentile_rank <= self.percentile_cutoff]
        logging.info("Kept %d predictions after filtering percentile <= %f" % (
            len(df), self.percentile_cutoff))
    # per-row annotation columns, collected in insertion order so the
    # output DataFrame has a stable column layout
    extra_columns = OrderedDict([
        ('gene', []),
        ('gene_id', []),
        ('transcript_id', []),
        ('transcript_name', []),
        ('effect', []),
        ('effect_type', []),
        ('contains_mutant_residues', []),
        ('mutation_start_in_peptide', []),
        ('mutation_end_in_peptide', []),
    ])
    if gene_expression_dict is not None:
        extra_columns["gene_expression"] = []
    if transcript_expression_dict is not None:
        extra_columns["transcript_expression"] = []
    for _, row in df.iterrows():
        effect = variant_string_to_effect_dict[row.variant]
        mutation_start_in_protein = effect.aa_mutation_start_offset
        mutation_end_in_protein = effect.aa_mutation_end_offset
        peptide_length = len(row.peptide)
        # does this peptide actually overlap the mutated residues?
        is_mutant = contains_mutant_residues(
            peptide_start_in_protein=row.peptide_offset,
            peptide_length=peptide_length,
            mutation_start_in_protein=mutation_start_in_protein,
            mutation_end_in_protein=mutation_end_in_protein)
        if is_mutant:
            # half-open [start, end) interval of mutated residues
            # within the peptide
            mutation_start_in_peptide, mutation_end_in_peptide = peptide_mutation_interval(
                peptide_start_in_protein=row.peptide_offset,
                peptide_length=peptide_length,
                mutation_start_in_protein=mutation_start_in_protein,
                mutation_end_in_protein=mutation_end_in_protein)
        else:
            mutation_start_in_peptide = mutation_end_in_peptide = None
        extra_columns["gene"].append(effect.gene_name)
        gene_id = effect.gene_id
        extra_columns["gene_id"].append(gene_id)
        if gene_expression_dict is not None:
            # genes absent from the dict are reported as 0.0 expression
            extra_columns["gene_expression"].append(
                gene_expression_dict.get(gene_id, 0.0))
        transcript_id = effect.transcript_id
        extra_columns["transcript_id"].append(transcript_id)
        extra_columns["transcript_name"].append(effect.transcript_name)
        if transcript_expression_dict is not None:
            extra_columns["transcript_expression"].append(
                transcript_expression_dict.get(transcript_id, 0.0))
        extra_columns["effect"].append(effect.short_description)
        extra_columns["effect_type"].append(effect.__class__.__name__)
        extra_columns["contains_mutant_residues"].append(is_mutant)
        extra_columns["mutation_start_in_peptide"].append(mutation_start_in_peptide)
        extra_columns["mutation_end_in_peptide"].append(mutation_end_in_peptide)
    for col, values in extra_columns.items():
        df[col] = values
    # TODO: add extra boolean field
    # novel = is_mutant | not_in_reference
    # Requires keeping a quick lookup structure for all peptides in
    # the reference proteome
    if self.only_novel_epitopes:
        # drop peptides which don't overlap any mutated residues
        df = df[df.contains_mutant_residues]
    return df | Given a Varcode.EffectCollection of predicted protein effects,
return predicted epitopes around each mutation.
Parameters
----------
effects : Varcode.EffectCollection
transcript_expression_dict : dict
Dictionary mapping transcript IDs to RNA expression estimates. Used
both for transcript expression filtering and for selecting the
most abundant transcript for a particular variant. If omitted then
transcript selection is done using priority of variant effects and
transcript length.
gene_expression_dict : dict, optional
Dictionary mapping gene IDs to RNA expression estimates
Returns DataFrame with the following columns:
- variant
- gene
- gene_id
- transcript_id
- transcript_name
- effect
- effect_type
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
- contains_mutant_residues
- mutation_start_in_peptide
- mutation_end_in_peptide
Optionall will also include the following columns if corresponding
expression dictionary inputs are provided:
- gene_expression
- transcript_expression | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/predictor.py#L142-L345 | [
"def peptide_mutation_interval(\n peptide_start_in_protein,\n peptide_length,\n mutation_start_in_protein,\n mutation_end_in_protein):\n \"\"\"\n Half-open interval of mutated residues in the peptide, determined\n from the mutation interval in the original protein sequence.\n\n ... | class TopiaryPredictor(object):
def __init__(
self,
mhc_model,
padding_around_mutation=None,
ic50_cutoff=None,
percentile_cutoff=None,
min_gene_expression=0.0,
min_transcript_expression=0.0,
only_novel_epitopes=False,
raise_on_error=True):
"""
Parameters
----------
mhc_model : mhctools.BasePredictor
Any instance of a peptide-MHC binding affinity predictor
padding_around_mutation : int
How many residues surrounding a mutation to consider including in a
candidate epitope. Default is the minimum size necessary for epitope
length of the mhc model.
min_gene_expression : float, optional
If gene expression values are provided, only keep effects on
genes with expression above this threshold.
min_transcript_expression : float, optional
If transcript expression values are provided, only keep effects on
transcripts with expression above this threshold.
ic50_cutoff : float, optional
Maximum predicted IC50 value for a peptide to be considered a binder.
percentile_cutoff : float, optional
Maximum percentile rank of IC50 values for a peptide to be considered
a binder.
only_novel_epitopes : bool, optional
If True, then drop peptides which either don't contain a mutation.
TODO: make this also check that peptide doesn't occur elsewhere in
the reference ligandome
raise_on_error : bool
Raise an exception if error is encountered or skip
the variant or peptide which generated the error.
"""
self.mhc_model = mhc_model
self.padding_around_mutation = check_padding_around_mutation(
given_padding=padding_around_mutation,
epitope_lengths=self.mhc_model.default_peptide_lengths)
self.ic50_cutoff = ic50_cutoff
self.percentile_cutoff = percentile_cutoff
self.min_transcript_expression = min_transcript_expression
self.min_gene_expression = min_gene_expression
self.only_novel_epitopes = only_novel_epitopes
self.raise_on_error = raise_on_error
def predict_from_named_sequences(
self, name_to_sequence_dict):
"""
Parameters
----------
name_to_sequence_dict : (str->str) dict
Dictionary mapping sequence names to amino acid sequences
Returns pandas.DataFrame with the following columns:
- source_sequence_name
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
"""
df = self.mhc_model.predict_subsequences_dataframe(name_to_sequence_dict)
return df.rename(
columns={
"length": "peptide_length",
"offset": "peptide_offset"})
def predict_from_sequences(self, sequences):
"""
Predict MHC ligands for sub-sequences of each input sequence.
Parameters
----------
sequences : list of str
Multiple amino acid sequences (without any names or IDs)
Returns DataFrame with the following fields:
- source_sequence
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
"""
# make each sequence its own unique ID
sequence_dict = {
seq: seq
for seq in sequences
}
df = self.predict_from_named_sequences(sequence_dict)
return df.rename(columns={"source_sequence_name": "source_sequence"})
def predict_from_variants(
self,
variants,
transcript_expression_dict=None,
gene_expression_dict=None):
"""
Predict epitopes from a Variant collection, filtering options, and
optional gene and transcript expression data.
Parameters
----------
variants : varcode.VariantCollection
transcript_expression_dict : dict
Maps from Ensembl transcript IDs to FPKM expression values.
gene_expression_dict : dict, optional
Maps from Ensembl gene IDs to FPKM expression values.
Returns DataFrame with the following columns:
- variant
- gene
- gene_id
- transcript_id
- transcript_name
- effect
- effect_type
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
- contains_mutant_residues
- mutation_start_in_peptide
- mutation_end_in_peptide
Optionally will also include the following columns if corresponding
expression dictionary inputs are provided:
- gene_expression
- transcript_expression
"""
# pre-filter variants by checking if any of the genes or
# transcripts they overlap have sufficient expression.
# I'm tolerating the redundancy of this code since it's much cheaper
# to filter a variant *before* trying to predict its impact/effect
# on the protein sequence.
variants = apply_variant_expression_filters(
variants,
transcript_expression_dict=transcript_expression_dict,
transcript_expression_threshold=self.min_transcript_expression,
gene_expression_dict=gene_expression_dict,
gene_expression_threshold=self.min_gene_expression)
effects = variants.effects(raise_on_error=self.raise_on_error)
return self.predict_from_mutation_effects(
effects=effects,
transcript_expression_dict=transcript_expression_dict,
gene_expression_dict=gene_expression_dict)
|
openvax/topiary | topiary/predictor.py | TopiaryPredictor.predict_from_variants | python | def predict_from_variants(
        self,
        variants,
        transcript_expression_dict=None,
        gene_expression_dict=None):
    """Predict MHC-binding epitopes from a varcode.VariantCollection.

    The optional dicts map Ensembl gene/transcript IDs to FPKM values
    and drive expression-based filtering.
    """
    # pre-filter variants by checking if any of the genes or
    # transcripts they overlap have sufficient expression.
    # I'm tolerating the redundancy of this code since it's much cheaper
    # to filter a variant *before* trying to predict its impact/effect
    # on the protein sequence.
    variants = apply_variant_expression_filters(
        variants,
        transcript_expression_dict=transcript_expression_dict,
        transcript_expression_threshold=self.min_transcript_expression,
        gene_expression_dict=gene_expression_dict,
        gene_expression_threshold=self.min_gene_expression)
    # compute predicted protein-level effects for the surviving variants
    effects = variants.effects(raise_on_error=self.raise_on_error)
    # delegate annotation + binding prediction to the effect-based entry point
    return self.predict_from_mutation_effects(
        effects=effects,
        transcript_expression_dict=transcript_expression_dict,
        gene_expression_dict=gene_expression_dict) | Predict epitopes from a Variant collection, filtering options, and
optional gene and transcript expression data.
Parameters
----------
variants : varcode.VariantCollection
transcript_expression_dict : dict
Maps from Ensembl transcript IDs to FPKM expression values.
gene_expression_dict : dict, optional
Maps from Ensembl gene IDs to FPKM expression values.
Returns DataFrame with the following columns:
- variant
- gene
- gene_id
- transcript_id
- transcript_name
- effect
- effect_type
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
- contains_mutant_residues
- mutation_start_in_peptide
- mutation_end_in_peptide
Optionally will also include the following columns if corresponding
expression dictionary inputs are provided:
- gene_expression
- transcript_expression | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/predictor.py#L347-L407 | [
"def apply_variant_expression_filters(\n variants,\n gene_expression_dict,\n gene_expression_threshold,\n transcript_expression_dict,\n transcript_expression_threshold):\n \"\"\"\n Filter a collection of variants by gene and transcript expression thresholds\n\n Parameters... | class TopiaryPredictor(object):
def __init__(
self,
mhc_model,
padding_around_mutation=None,
ic50_cutoff=None,
percentile_cutoff=None,
min_gene_expression=0.0,
min_transcript_expression=0.0,
only_novel_epitopes=False,
raise_on_error=True):
"""
Parameters
----------
mhc_model : mhctools.BasePredictor
Any instance of a peptide-MHC binding affinity predictor
padding_around_mutation : int
How many residues surrounding a mutation to consider including in a
candidate epitope. Default is the minimum size necessary for epitope
length of the mhc model.
min_gene_expression : float, optional
If gene expression values are provided, only keep effects on
genes with expression above this threshold.
min_transcript_expression : float, optional
If transcript expression values are provided, only keep effects on
transcripts with expression above this threshold.
ic50_cutoff : float, optional
Maximum predicted IC50 value for a peptide to be considered a binder.
percentile_cutoff : float, optional
Maximum percentile rank of IC50 values for a peptide to be considered
a binder.
only_novel_epitopes : bool, optional
If True, then drop peptides which either don't contain a mutation.
TODO: make this also check that peptide doesn't occur elsewhere in
the reference ligandome
raise_on_error : bool
Raise an exception if error is encountered or skip
the variant or peptide which generated the error.
"""
self.mhc_model = mhc_model
self.padding_around_mutation = check_padding_around_mutation(
given_padding=padding_around_mutation,
epitope_lengths=self.mhc_model.default_peptide_lengths)
self.ic50_cutoff = ic50_cutoff
self.percentile_cutoff = percentile_cutoff
self.min_transcript_expression = min_transcript_expression
self.min_gene_expression = min_gene_expression
self.only_novel_epitopes = only_novel_epitopes
self.raise_on_error = raise_on_error
def predict_from_named_sequences(
self, name_to_sequence_dict):
"""
Parameters
----------
name_to_sequence_dict : (str->str) dict
Dictionary mapping sequence names to amino acid sequences
Returns pandas.DataFrame with the following columns:
- source_sequence_name
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
"""
df = self.mhc_model.predict_subsequences_dataframe(name_to_sequence_dict)
return df.rename(
columns={
"length": "peptide_length",
"offset": "peptide_offset"})
def predict_from_sequences(self, sequences):
"""
Predict MHC ligands for sub-sequences of each input sequence.
Parameters
----------
sequences : list of str
Multiple amino acid sequences (without any names or IDs)
Returns DataFrame with the following fields:
- source_sequence
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
"""
# make each sequence its own unique ID
sequence_dict = {
seq: seq
for seq in sequences
}
df = self.predict_from_named_sequences(sequence_dict)
return df.rename(columns={"source_sequence_name": "source_sequence"})
def predict_from_mutation_effects(
self,
effects,
transcript_expression_dict=None,
gene_expression_dict=None):
"""Given a Varcode.EffectCollection of predicted protein effects,
return predicted epitopes around each mutation.
Parameters
----------
effects : Varcode.EffectCollection
transcript_expression_dict : dict
Dictionary mapping transcript IDs to RNA expression estimates. Used
both for transcript expression filtering and for selecting the
most abundant transcript for a particular variant. If omitted then
transcript selection is done using priority of variant effects and
transcript length.
gene_expression_dict : dict, optional
Dictionary mapping gene IDs to RNA expression estimates
Returns DataFrame with the following columns:
- variant
- gene
- gene_id
- transcript_id
- transcript_name
- effect
- effect_type
- peptide
- peptide_offset
- peptide_length
- allele
- affinity
- percentile_rank
- prediction_method_name
- contains_mutant_residues
- mutation_start_in_peptide
- mutation_end_in_peptide
Optionally will also include the following columns if corresponding
expression dictionary inputs are provided:
- gene_expression
- transcript_expression
"""
# we only care about effects which impact the coding sequence of a
# protein
effects = filter_silent_and_noncoding_effects(effects)
effects = apply_effect_expression_filters(
effects,
transcript_expression_dict=transcript_expression_dict,
transcript_expression_threshold=self.min_transcript_expression,
gene_expression_dict=gene_expression_dict,
gene_expression_threshold=self.min_gene_expression)
# group by variants, so that we end up with only one mutant
# sequence per mutation
variant_effect_groups = effects.groupby_variant()
if len(variant_effect_groups) == 0:
logging.warn("No candidates for MHC binding prediction")
return []
if transcript_expression_dict:
# if expression data is available, then for each variant
# keep the effect annotation for the most abundant transcript
top_effects = [
variant_effects.top_expression_effect(
transcript_expression_dict)
for variant_effects in variant_effect_groups.values()
]
else:
# if no transcript abundance data is available, then
# for each variant keep the effect with the most significant
# predicted effect on the protein sequence, along with using
# transcript/CDS length as a tie-breaker for effects with the same
# priority.
top_effects = [
variant_effects.top_priority_effect()
for variant_effects in variant_effect_groups.values()
]
# 1) dictionary mapping varcode effect objects to subsequences
# around each mutation
# 2) dictionary mapping varcode effect to start offset of subsequence
# within the full mutant protein sequence
effect_to_subsequence_dict, effect_to_offset_dict = \
protein_subsequences_around_mutations(
effects=top_effects,
padding_around_mutation=self.padding_around_mutation)
# since we know that each set of variant effects has been
# reduced to a single 'top priority' effect, we can uniquely
# identify each variant sequence by its original genomic variant
variant_string_to_effect_dict = {
effect.variant.short_description: effect
for effect in effect_to_subsequence_dict.keys()
}
variant_string_to_subsequence_dict = {
effect.variant.short_description: subseq
for (effect, subseq) in effect_to_subsequence_dict.items()
}
variant_string_to_offset_dict = {
effect.variant.short_description: subseq_offset
for (effect, subseq_offset) in effect_to_offset_dict.items()
}
df = self.predict_from_named_sequences(variant_string_to_subsequence_dict)
logging.info("MHC predictor returned %d peptide binding predictions" % (
len(df)))
# since we used variant descriptions as the name of each sequence
# let's rename that column to be more informative
df = df.rename(columns={"source_sequence_name": "variant"})
# adjust offset to be relative to start of protein, rather
# than whatever subsequence we used for prediction
def compute_peptide_offset_relative_to_protein(row):
subsequence_offset = variant_string_to_offset_dict[row.variant]
return row.peptide_offset + subsequence_offset
df["peptide_offset"] = df.apply(
compute_peptide_offset_relative_to_protein,
axis=1)
if self.ic50_cutoff:
df = df[df.affinity <= self.ic50_cutoff]
logging.info("Kept %d predictions after filtering affinity <= %f" % (
len(df), self.ic50_cutoff))
if self.percentile_cutoff:
df = df[df.percentile_rank <= self.percentile_cutoff]
logging.info("Kept %d predictions after filtering percentile <= %f" % (
len(df), self.percentile_cutoff))
extra_columns = OrderedDict([
('gene', []),
('gene_id', []),
('transcript_id', []),
('transcript_name', []),
('effect', []),
('effect_type', []),
('contains_mutant_residues', []),
('mutation_start_in_peptide', []),
('mutation_end_in_peptide', []),
])
if gene_expression_dict is not None:
extra_columns["gene_expression"] = []
if transcript_expression_dict is not None:
extra_columns["transcript_expression"] = []
for _, row in df.iterrows():
effect = variant_string_to_effect_dict[row.variant]
mutation_start_in_protein = effect.aa_mutation_start_offset
mutation_end_in_protein = effect.aa_mutation_end_offset
peptide_length = len(row.peptide)
is_mutant = contains_mutant_residues(
peptide_start_in_protein=row.peptide_offset,
peptide_length=peptide_length,
mutation_start_in_protein=mutation_start_in_protein,
mutation_end_in_protein=mutation_end_in_protein)
if is_mutant:
mutation_start_in_peptide, mutation_end_in_peptide = peptide_mutation_interval(
peptide_start_in_protein=row.peptide_offset,
peptide_length=peptide_length,
mutation_start_in_protein=mutation_start_in_protein,
mutation_end_in_protein=mutation_end_in_protein)
else:
mutation_start_in_peptide = mutation_end_in_peptide = None
extra_columns["gene"].append(effect.gene_name)
gene_id = effect.gene_id
extra_columns["gene_id"].append(gene_id)
if gene_expression_dict is not None:
extra_columns["gene_expression"].append(
gene_expression_dict.get(gene_id, 0.0))
transcript_id = effect.transcript_id
extra_columns["transcript_id"].append(transcript_id)
extra_columns["transcript_name"].append(effect.transcript_name)
if transcript_expression_dict is not None:
extra_columns["transcript_expression"].append(
transcript_expression_dict.get(transcript_id, 0.0))
extra_columns["effect"].append(effect.short_description)
extra_columns["effect_type"].append(effect.__class__.__name__)
extra_columns["contains_mutant_residues"].append(is_mutant)
extra_columns["mutation_start_in_peptide"].append(mutation_start_in_peptide)
extra_columns["mutation_end_in_peptide"].append(mutation_end_in_peptide)
for col, values in extra_columns.items():
df[col] = values
# TODO: add extra boolean field
# novel = is_mutant | not_in_reference
# Requires keeping a quick lookup structure for all peptides in
# the reference proteome
if self.only_novel_epitopes:
df = df[df.contains_mutant_residues]
return df
|
openvax/topiary | topiary/cli/script.py | main | python | def main(args_list=None):
    """Command-line entry point: parse args, predict epitopes, write outputs."""
    # args_list defaults to sys.argv[1:] inside parse_args
    args = parse_args(args_list)
    # echo the parsed arguments so runs are reproducible from logs
    print("Topiary commandline arguments:")
    print(args)
    # run the full epitope prediction pipeline, then write results
    df = predict_epitopes_from_args(args)
    write_outputs(df, args)
    print("Total count: %d" % len(df)) | Script entry-point to predict neo-epitopes from genomic variants using
Topiary. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/cli/script.py#L45-L55 | [
"def parse_args(args_list=None):\n if args_list is None:\n args_list = sys.argv[1:]\n return arg_parser.parse_args(args_list)\n",
"def predict_epitopes_from_args(args):\n \"\"\"\n Returns an epitope collection from the given commandline arguments.\n\n Parameters\n ----------\n args : a... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to generate epitope predictions from somatic cancer variants
and (optionally) tumor RNA-seq data.
Example usage:
topiary \
--mhc-predictor netmhcpan
--mhc-alleles-file HLA.txt
--vcf somatic.vcf
--rna-gene-fpkm-file genes.fpkm_tracking
--rna-transcript-fpkm-file isoforms.fpkm_tracking
--filter-ic50 500
--filter-percentile 2
--output results.csv
"""
from __future__ import print_function, division, absolute_import
import sys
from .args import arg_parser, predict_epitopes_from_args
from .outputs import write_outputs
def parse_args(args_list=None):
if args_list is None:
args_list = sys.argv[1:]
return arg_parser.parse_args(args_list)
|
openvax/topiary | topiary/cli/rna.py | rna_transcript_expression_dict_from_args | python | def rna_transcript_expression_dict_from_args(args):
    """Build a transcript-ID -> FPKM dict from whichever RNA input was given."""
    # a Cufflinks FPKM tracking file takes precedence over a StringTie GTF
    if args.rna_transcript_fpkm_tracking_file:
        return load_cufflinks_fpkm_dict(args.rna_transcript_fpkm_tracking_file)
    elif args.rna_transcript_fpkm_gtf_file:
        return load_transcript_fpkm_dict_from_gtf(
            args.rna_transcript_fpkm_gtf_file)
    else:
        # no transcript expression input supplied on the command line
        return None | Returns a dictionary mapping Ensembl transcript IDs to FPKM expression
values or None if neither Cufflinks tracking file nor StringTie GTF file
were specified. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/cli/rna.py#L75-L87 | [
"def load_cufflinks_fpkm_dict(*args, **kwargs):\n \"\"\"\n Returns dictionary mapping feature identifier (either transcript or gene ID)\n to FPKM expression value.\n \"\"\"\n return {\n row.id: row.fpkm\n for (_, row)\n in load_cufflinks_dataframe(*args, **kwargs).iterrows()\n ... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Common commandline arguments for filtering by gene/transcript expression
"""
from __future__ import print_function, division, absolute_import
from ..rna import (
load_cufflinks_fpkm_dict,
load_transcript_fpkm_dict_from_gtf
)
def add_rna_args(arg_parser):
rna_group = arg_parser.add_argument_group(
title="RNA-Seq",
description="Transcript and gene abundance quantification")
rna_group.add_argument(
"--rna-transcript-fpkm-tracking-file",
help="".join([
"Cufflinks tracking file (FPKM estimates for Ensembl transcripts). ",
"Used both for expression filtering and selecting the most abundant ",
"transcript to use for determining a mutant protein sequence."]))
rna_group.add_argument(
"--rna-transcript-fpkm-gtf-file",
help="".join([
"GTF file containing FPKM estimates for Ensembl transcripts.",
"Used both for expression filtering and selecting the most abundant ",
"transcript to use for determining a mutant protein sequence."]))
rna_group.add_argument(
"--rna-min-transcript-expression",
help="Minimum FPKM for transcript expression",
default=0.0,
type=float)
rna_group.add_argument(
"--rna-gene-fpkm-tracking-file",
help="Cufflinks tracking file (FPKM estimates for Ensembl genes)",
required=False)
rna_group.add_argument(
"--rna-min-gene-expression",
help="Minimum FPKM for gene expression",
default=0.0,
type=float)
return rna_group
def rna_gene_expression_dict_from_args(args):
"""
Returns a dictionary mapping Ensembl gene IDs to FPKM expression values
or None if neither Cufflinks tracking file nor StringTie GTF file specified
in the commandline arguments.
"""
if args.rna_gene_fpkm_tracking_file:
return load_cufflinks_fpkm_dict(args.rna_gene_fpkm_tracking_file)
else:
return None
|
openvax/topiary | topiary/rna/common.py | infer_delimiter | python | def infer_delimiter(filename, comment_char="#", n_lines=3):
    """Sniff the delimiter of a tabular text file.

    Reads the first ``n_lines`` non-comment lines and returns whichever
    candidate pattern (tab, comma, or whitespace run) splits every sampled
    line into the same number (>1) of fields.

    Raises ValueError when the file has fewer than ``n_lines`` usable
    lines or when no candidate delimiter fits.
    """
    lines = []
    with open(filename, "r") as f:
        for line in f:
            # skip comment lines entirely; they don't count toward n_lines
            if line.startswith(comment_char):
                continue
            if len(lines) < n_lines:
                lines.append(line)
            else:
                break
    if len(lines) < n_lines:
        raise ValueError(
            "Not enough lines in %s to infer delimiter" % filename)
    # candidates are regex patterns consumed by re.split below
    candidate_delimiters = ["\t", ",", "\s+"]
    for candidate_delimiter in candidate_delimiters:
        counts = [len(re.split(candidate_delimiter, line)) for line in lines]
        first_line_count = counts[0]
        # accept only if every sampled line yields the same field count
        if all(c == first_line_count for c in counts) and first_line_count > 1:
            return candidate_delimiter
    raise ValueError("Could not determine delimiter for %s" % filename) | Given a file which contains data separated by one of the following:
- commas
- tabs
- spaces
Return the most likely separator by sniffing the first few lines
of the file's contents. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/common.py#L19-L46 | null | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import re
def check_required_columns(df, filename, required_columns):
    """
    Ensure that all required columns are present in the given dataframe,
    otherwise raise an exception.
    """
    present = set(df.columns)
    # Report the first missing column in the caller's requested order,
    # matching the original first-failure behavior.
    missing = [name for name in required_columns if name not in present]
    if missing:
        raise ValueError("FPKM tracking file %s missing column '%s'" % (
            filename,
            missing[0]))
openvax/topiary | topiary/rna/common.py | check_required_columns | python | def check_required_columns(df, filename, required_columns):
available_columns = set(df.columns)
for column_name in required_columns:
if column_name not in available_columns:
raise ValueError("FPKM tracking file %s missing column '%s'" % (
filename,
column_name)) | Ensure that all required columns are present in the given dataframe,
otherwise raise an exception. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/common.py#L49-L59 | null | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import re
def infer_delimiter(filename, comment_char="#", n_lines=3):
    """
    Given a file which contains data separated by one of the following:
        - commas
        - tabs
        - spaces
    Return the most likely separator by sniffing the first few lines
    of the file's contents.

    Parameters
    ----------
    filename : str
        Path to the delimited text file.
    comment_char : str, optional
        Lines starting with this prefix are skipped (default "#").
    n_lines : int, optional
        Number of data lines to sample (default 3).

    Raises
    ------
    ValueError
        If the file has fewer than `n_lines` non-comment lines, or no
        candidate delimiter splits every sampled line into the same
        number (>1) of fields.
    """
    lines = []
    with open(filename, "r") as f:
        for line in f:
            if line.startswith(comment_char):
                continue
            lines.append(line)
            if len(lines) >= n_lines:
                break
    if len(lines) < n_lines:
        raise ValueError(
            "Not enough lines in %s to infer delimiter" % filename)
    # Fix: the "\s+" pattern must be a raw string literal; "\s" is an
    # invalid escape sequence in a plain string (DeprecationWarning since
    # Python 3.6, a SyntaxError in future versions).
    candidate_delimiters = ["\t", ",", r"\s+"]
    for candidate_delimiter in candidate_delimiters:
        # A delimiter is plausible only if it splits every sampled line
        # into the same number of fields, and more than one field.
        counts = [len(re.split(candidate_delimiter, line)) for line in lines]
        first_line_count = counts[0]
        if all(c == first_line_count for c in counts) and first_line_count > 1:
            return candidate_delimiter
    raise ValueError("Could not determine delimiter for %s" % filename)
|
openvax/topiary | topiary/rna/cufflinks.py | load_cufflinks_dataframe | python | def load_cufflinks_dataframe(
filename,
id_column=ID_COLUMN,
fpkm_column=FPKM_COLUMN,
status_column=STATUS_COLUMN,
locus_column=LOCUS_COLUMN,
gene_names_column=GENE_NAMES_COLUMN,
drop_failed=True,
drop_lowdata=False,
drop_hidata=True,
replace_hidata_fpkm_value=None,
drop_nonchromosomal_loci=False,
drop_novel=False,
sep=None):
if sep is None:
sep = infer_delimiter(filename)
df = pd.read_csv(filename, sep=sep, engine="c")
required_columns = {
status_column,
locus_column,
id_column,
gene_names_column,
fpkm_column
}
check_required_columns(df, filename, required_columns)
for flag, status_value in [
(drop_failed, "FAIL"),
(drop_lowdata, "LOWDATA"),
(drop_hidata, "HIDATA")]:
mask = df[status_column] == status_value
mask_count = mask.sum()
total_count = len(df)
if flag and mask_count > 0:
verb_str = "Dropping"
df = df[~mask]
else:
verb_str = "Keeping"
logging.info(
"%s %d/%d entries from %s with status=%s",
verb_str,
mask_count,
total_count,
filename,
status_value)
if drop_nonchromosomal_loci:
loci = df[locus_column]
chromosomal_loci = loci.str.startswith("chr")
n_dropped = (~chromosomal_loci).sum()
if n_dropped > 0:
logging.info("Dropping %d/%d non-chromosomal loci from %s" % (
n_dropped, len(df), filename))
df = df[chromosomal_loci]
if replace_hidata_fpkm_value:
hidata_mask = df[status_column] == "HIDATA"
n_hidata = hidata_mask.sum()
logging.info(
"Setting FPKM=%s for %d/%d entries with status=HIDATA",
replace_hidata_fpkm_value,
n_hidata,
len(df))
df[fpkm_column][hidata_mask] = replace_hidata_fpkm_value
if len(df) == 0:
raise ValueError("Empty FPKM tracking file: %s" % filename)
ids = df[id_column]
known = ids.str.startswith("ENS")
if known.sum() == 0:
raise ValueError("No Ensembl IDs found in %s" % filename)
if drop_novel:
n_dropped = (~known).sum()
if n_dropped > 0:
logging.info(
"Dropping %d/%d novel entries from %s",
n_dropped,
len(df),
filename)
df = df[known]
known = np.ones(len(df), dtype='bool')
loci = df[locus_column]
chromosomes, starts, ends = parse_locus_column(df[locus_column])
# gene names are given either as "-" or a comma separated list
# e.g. "BRAF1,PFAM2"
gene_names_strings = df[gene_names_column].copy()
gene_names_strings[gene_names_strings == "-"] = ""
# split each entry into a list of zero or more strings
gene_names_lists = gene_names_strings.str.split(",")
return pd.DataFrame({
"id": df[id_column],
"novel": ~known,
"fpkm": df[fpkm_column],
"chr": chromosomes,
"start": starts,
"end": ends,
"gene_names": gene_names_lists
}) | Loads a Cufflinks tracking file, which contains expression levels
(in FPKM: Fragments Per Kilobase of transcript per Million fragments)
for transcript isoforms or whole genes. These transcripts/genes may be
previously known (in which case they have an Ensembl ID) or a novel
assembly from the RNA-Seq data (in which case their IDs look like "CUFF.1")
Parameters
----------
filename : str
Filename of tracking file e.g. "genes.tracking_fpkm"
id_column : str, optional
fpkm_column : str, optional
status_column : str, optional
Name of column which indicates the FPKM estimate status. The column
name is typically "FPKM_status". Possible contained within this column
will be OK, FAIL, LOWDATA, HIDATA.
locus_column : str, optional
gene_names_column : str, optional
drop_failed : bool, optional
Drop rows whose FPKM status is "FAIL" (default=True)
drop_lowdata : bool, optional
Drop rows whose FPKM status is "LOWDATA", meaning that Cufflinks thought
there were too few reads to accurately estimate the FPKM (default=False)
drop_hidata : bool, optional
Drop rows whose FPKM status is "HIDATA", meaning that too many
fragments aligned to a feature for Cufflinks to process. Dropping
the most expressed genes seems like a stupid idea so: default=False
replace_hidata_fpkm_value : float, optional
If drop_hidata=False, the HIDATA entries will still have an FPKM=0.0,
this argument lets you replace the FPKM with some known constant.
drop_nonchromosomal_loci : bool, optional
Drop rows whose location isn't on a canonical chromosome
i.e. doesn't start with "chr" (default=False)
drop_novel : bool, optional
Drop genes or isoforms that aren't found in Ensembl (default = False)
sep : str, optional
Separator between data fields in the FPKM tracking file
(default is to infer whether the file uses comma or whitespace)
Returns DataFrame with columns:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/cufflinks.py#L45-L212 | [
"def infer_delimiter(filename, comment_char=\"#\", n_lines=3):\n \"\"\"\n Given a file which contains data separated by one of the following:\n - commas\n - tabs\n - spaces\n Return the most likely separator by sniffing the first few lines\n of the file's contents.\n \"\"\"\n ... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import pandas as pd
import numpy as np
from .common import infer_delimiter, check_required_columns
def parse_locus_column(loci):
    """
    Split a Series of locus strings such as "chr1:100-200" (or "1:100-200")
    into three Series: chromosome name (any "chr" prefix dropped),
    integer start position, and integer end position.

    Parameters
    ----------
    loci : pandas.Series of str

    Returns
    -------
    (chromosomes, starts, ends) : tuple of three pandas.Series
    """
    # Fix: use raw string literals -- "\d" is an invalid escape sequence in
    # a plain string (DeprecationWarning since Python 3.6).
    # capture all characters before ':' (drop 'chr' if present)
    chromosomes = loci.str.extract(r"(?:chr)?([^:]*):.*", expand=False)
    # capture all characters after e.g. 'chr1:', which look like '132-394'
    ranges = loci.str.extract(r"(?:chr)?[^:]*:(.*)", expand=False)
    # capture all numbers before the dash
    starts = ranges.str.extract(r"(\d*)-\d*", expand=False).astype(int)
    # capture all numbers after the dash
    ends = ranges.str.extract(r"\d*-(\d*)", expand=False).astype(int)
    return chromosomes, starts, ends
# default column names from cufflinks tracking files
# for gene and isoform expression levels
STATUS_COLUMN = "FPKM_status"
ID_COLUMN = "tracking_id"
FPKM_COLUMN = "FPKM"
LOCUS_COLUMN = "locus"
GENE_NAMES_COLUMN = "gene_short_name"
def load_cufflinks_dict(*args, **kwargs):
    """
    Returns dictionary mapping feature identifier (either transcript or gene ID)
    to a DataFrame row with fields:
        id : str
        novel : bool
        fpkm : float
        chr : str
        start : int
        end : int
        gene_names : str list
    """
    # All arguments are forwarded unchanged to load_cufflinks_dataframe.
    df = load_cufflinks_dataframe(*args, **kwargs)
    rows_by_id = {}
    for _, row in df.iterrows():
        rows_by_id[row.id] = row
    return rows_by_id
def load_cufflinks_fpkm_dict(*args, **kwargs):
"""
Returns dictionary mapping feature identifier (either transcript or gene ID)
to FPKM expression value.
"""
return {
row.id: row.fpkm
for (_, row)
in load_cufflinks_dataframe(*args, **kwargs).iterrows()
}
|
openvax/topiary | topiary/rna/cufflinks.py | load_cufflinks_dict | python | def load_cufflinks_dict(*args, **kwargs):
return {
row.id: row
for (_, row)
in load_cufflinks_dataframe(*args, **kwargs).iterrows()
} | Returns dictionary mapping feature identifier (either transcript or gene ID)
to a DataFrame row with fields:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/cufflinks.py#L215-L231 | [
"def load_cufflinks_dataframe(\n filename,\n id_column=ID_COLUMN,\n fpkm_column=FPKM_COLUMN,\n status_column=STATUS_COLUMN,\n locus_column=LOCUS_COLUMN,\n gene_names_column=GENE_NAMES_COLUMN,\n drop_failed=True,\n drop_lowdata=False,\n drop_hidata=True,... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import pandas as pd
import numpy as np
from .common import infer_delimiter, check_required_columns
def parse_locus_column(loci):
# capture all characters before ':' (drop 'chr' if present)
chromosomes = loci.str.extract("(?:chr)?([^:]*):.*", expand=False)
# capture all characters after e.g. 'chr1:', which look like '132-394'
ranges = loci.str.extract("(?:chr)?[^:]*:(.*)", expand=False)
# capture all numbers before the dash
starts = ranges.str.extract("(\d*)-\d*", expand=False).astype(int)
# capture all numbers after the dash
ends = ranges.str.extract("\d*-(\d*)", expand=False).astype(int)
return chromosomes, starts, ends
# default column names from cufflinks tracking files
# for gene and isoform expression levels
STATUS_COLUMN = "FPKM_status"
ID_COLUMN = "tracking_id"
FPKM_COLUMN = "FPKM"
LOCUS_COLUMN = "locus"
GENE_NAMES_COLUMN = "gene_short_name"
def load_cufflinks_dataframe(
        filename,
        id_column=ID_COLUMN,
        fpkm_column=FPKM_COLUMN,
        status_column=STATUS_COLUMN,
        locus_column=LOCUS_COLUMN,
        gene_names_column=GENE_NAMES_COLUMN,
        drop_failed=True,
        drop_lowdata=False,
        drop_hidata=True,
        replace_hidata_fpkm_value=None,
        drop_nonchromosomal_loci=False,
        drop_novel=False,
        sep=None):
    """
    Loads a Cufflinks tracking file, which contains expression levels
    (in FPKM: Fragments Per Kilobase of transcript per Million fragments)
    for transcript isoforms or whole genes. These transcripts/genes may be
    previously known (in which case they have an Ensembl ID) or a novel
    assembly from the RNA-Seq data (in which case their IDs look like "CUFF.1")

    Parameters
    ----------
    filename : str
        Filename of tracking file e.g. "genes.tracking_fpkm"
    id_column : str, optional
    fpkm_column : str, optional
    status_column : str, optional
        Name of column which indicates the FPKM estimate status. The column
        name is typically "FPKM_status". Possible contained within this column
        will be OK, FAIL, LOWDATA, HIDATA.
    locus_column : str, optional
    gene_names_column : str, optional
    drop_failed : bool, optional
        Drop rows whose FPKM status is "FAIL" (default=True)
    drop_lowdata : bool, optional
        Drop rows whose FPKM status is "LOWDATA", meaning that Cufflinks thought
        there were too few reads to accurately estimate the FPKM (default=False)
    drop_hidata : bool, optional
        Drop rows whose FPKM status is "HIDATA", meaning that too many
        fragments aligned to a feature for Cufflinks to process. Dropping
        the most expressed genes seems like a stupid idea so: default=False
    replace_hidata_fpkm_value : float, optional
        If drop_hidata=False, the HIDATA entries will still have an FPKM=0.0,
        this argument lets you replace the FPKM with some known constant.
    drop_nonchromosomal_loci : bool, optional
        Drop rows whose location isn't on a canonical chromosome
        i.e. doesn't start with "chr" (default=False)
    drop_novel : bool, optional
        Drop genes or isoforms that aren't found in Ensembl (default = False)
    sep : str, optional
        Separator between data fields in the FPKM tracking file
        (default is to infer whether the file uses comma or whitespace)

    Returns DataFrame with columns:
        id : str
        novel : bool
        fpkm : float
        chr : str
        start : int
        end : int
        gene_names : str list
    """
    if sep is None:
        sep = infer_delimiter(filename)
    df = pd.read_csv(filename, sep=sep, engine="c")
    required_columns = {
        status_column,
        locus_column,
        id_column,
        gene_names_column,
        fpkm_column
    }
    check_required_columns(df, filename, required_columns)
    # Optionally filter rows by FPKM estimation status, logging what was
    # kept or dropped for each status value.
    for flag, status_value in [
            (drop_failed, "FAIL"),
            (drop_lowdata, "LOWDATA"),
            (drop_hidata, "HIDATA")]:
        mask = df[status_column] == status_value
        mask_count = mask.sum()
        total_count = len(df)
        if flag and mask_count > 0:
            verb_str = "Dropping"
            df = df[~mask]
        else:
            verb_str = "Keeping"
        logging.info(
            "%s %d/%d entries from %s with status=%s",
            verb_str,
            mask_count,
            total_count,
            filename,
            status_value)
    if drop_nonchromosomal_loci:
        loci = df[locus_column]
        chromosomal_loci = loci.str.startswith("chr")
        n_dropped = (~chromosomal_loci).sum()
        if n_dropped > 0:
            logging.info("Dropping %d/%d non-chromosomal loci from %s" % (
                n_dropped, len(df), filename))
            df = df[chromosomal_loci]
    # Bug fix: test `is not None` instead of truthiness, so that a falsy
    # replacement value such as 0.0 is honored rather than silently skipped.
    if replace_hidata_fpkm_value is not None:
        hidata_mask = df[status_column] == "HIDATA"
        n_hidata = hidata_mask.sum()
        logging.info(
            "Setting FPKM=%s for %d/%d entries with status=HIDATA",
            replace_hidata_fpkm_value,
            n_hidata,
            len(df))
        # Bug fix: .loc instead of chained indexing (df[col][mask] = v),
        # which can assign to a temporary copy (SettingWithCopyWarning).
        df.loc[hidata_mask, fpkm_column] = replace_hidata_fpkm_value
    if len(df) == 0:
        raise ValueError("Empty FPKM tracking file: %s" % filename)
    ids = df[id_column]
    # Known (non-novel) features have Ensembl identifiers ("ENS..." prefix);
    # novel Cufflinks assemblies get IDs like "CUFF.1".
    known = ids.str.startswith("ENS")
    if known.sum() == 0:
        raise ValueError("No Ensembl IDs found in %s" % filename)
    if drop_novel:
        n_dropped = (~known).sum()
        if n_dropped > 0:
            logging.info(
                "Dropping %d/%d novel entries from %s",
                n_dropped,
                len(df),
                filename)
            df = df[known]
            # After dropping, everything that remains is known.
            known = np.ones(len(df), dtype='bool')
    chromosomes, starts, ends = parse_locus_column(df[locus_column])
    # gene names are given either as "-" or a comma separated list
    # e.g. "BRAF1,PFAM2"
    gene_names_strings = df[gene_names_column].copy()
    gene_names_strings[gene_names_strings == "-"] = ""
    # split each entry into a list of zero or more strings
    gene_names_lists = gene_names_strings.str.split(",")
    return pd.DataFrame({
        "id": df[id_column],
        "novel": ~known,
        "fpkm": df[fpkm_column],
        "chr": chromosomes,
        "start": starts,
        "end": ends,
        "gene_names": gene_names_lists
    })
def load_cufflinks_fpkm_dict(*args, **kwargs):
    """
    Returns dictionary mapping feature identifier (either transcript or gene ID)
    to FPKM expression value.
    """
    # All arguments are forwarded unchanged to load_cufflinks_dataframe.
    fpkm_by_id = {}
    for _, row in load_cufflinks_dataframe(*args, **kwargs).iterrows():
        fpkm_by_id[row.id] = row.fpkm
    return fpkm_by_id
|
openvax/topiary | topiary/rna/cufflinks.py | load_cufflinks_fpkm_dict | python | def load_cufflinks_fpkm_dict(*args, **kwargs):
return {
row.id: row.fpkm
for (_, row)
in load_cufflinks_dataframe(*args, **kwargs).iterrows()
} | Returns dictionary mapping feature identifier (either transcript or gene ID)
to FPKM expression value. | train | https://github.com/openvax/topiary/blob/04f0077bc4bf1ad350a0e78c26fa48c55fe7813b/topiary/rna/cufflinks.py#L234-L243 | [
"def load_cufflinks_dataframe(\n filename,\n id_column=ID_COLUMN,\n fpkm_column=FPKM_COLUMN,\n status_column=STATUS_COLUMN,\n locus_column=LOCUS_COLUMN,\n gene_names_column=GENE_NAMES_COLUMN,\n drop_failed=True,\n drop_lowdata=False,\n drop_hidata=True,... | # Copyright (c) 2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
import pandas as pd
import numpy as np
from .common import infer_delimiter, check_required_columns
def parse_locus_column(loci):
# capture all characters before ':' (drop 'chr' if present)
chromosomes = loci.str.extract("(?:chr)?([^:]*):.*", expand=False)
# capture all characters after e.g. 'chr1:', which look like '132-394'
ranges = loci.str.extract("(?:chr)?[^:]*:(.*)", expand=False)
# capture all numbers before the dash
starts = ranges.str.extract("(\d*)-\d*", expand=False).astype(int)
# capture all numbers after the dash
ends = ranges.str.extract("\d*-(\d*)", expand=False).astype(int)
return chromosomes, starts, ends
# default column names from cufflinks tracking files
# for gene and isoform expression levels
STATUS_COLUMN = "FPKM_status"
ID_COLUMN = "tracking_id"
FPKM_COLUMN = "FPKM"
LOCUS_COLUMN = "locus"
GENE_NAMES_COLUMN = "gene_short_name"
def load_cufflinks_dataframe(
filename,
id_column=ID_COLUMN,
fpkm_column=FPKM_COLUMN,
status_column=STATUS_COLUMN,
locus_column=LOCUS_COLUMN,
gene_names_column=GENE_NAMES_COLUMN,
drop_failed=True,
drop_lowdata=False,
drop_hidata=True,
replace_hidata_fpkm_value=None,
drop_nonchromosomal_loci=False,
drop_novel=False,
sep=None):
"""
Loads a Cufflinks tracking file, which contains expression levels
(in FPKM: Fragments Per Kilobase of transcript per Million fragments)
for transcript isoforms or whole genes. These transcripts/genes may be
previously known (in which case they have an Ensembl ID) or a novel
assembly from the RNA-Seq data (in which case their IDs look like "CUFF.1")
Parameters
----------
filename : str
Filename of tracking file e.g. "genes.tracking_fpkm"
id_column : str, optional
fpkm_column : str, optional
status_column : str, optional
Name of column which indicates the FPKM estimate status. The column
name is typically "FPKM_status". Possible contained within this column
will be OK, FAIL, LOWDATA, HIDATA.
locus_column : str, optional
gene_names_column : str, optional
drop_failed : bool, optional
Drop rows whose FPKM status is "FAIL" (default=True)
drop_lowdata : bool, optional
Drop rows whose FPKM status is "LOWDATA", meaning that Cufflinks thought
there were too few reads to accurately estimate the FPKM (default=False)
drop_hidata : bool, optional
Drop rows whose FPKM status is "HIDATA", meaning that too many
fragments aligned to a feature for Cufflinks to process. Dropping
the most expressed genes seems like a stupid idea so: default=False
replace_hidata_fpkm_value : float, optional
If drop_hidata=False, the HIDATA entries will still have an FPKM=0.0,
this argument lets you replace the FPKM with some known constant.
drop_nonchromosomal_loci : bool, optional
Drop rows whose location isn't on a canonical chromosome
i.e. doesn't start with "chr" (default=False)
drop_novel : bool, optional
Drop genes or isoforms that aren't found in Ensembl (default = False)
sep : str, optional
Separator between data fields in the FPKM tracking file
(default is to infer whether the file uses comma or whitespace)
Returns DataFrame with columns:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list
"""
if sep is None:
sep = infer_delimiter(filename)
df = pd.read_csv(filename, sep=sep, engine="c")
required_columns = {
status_column,
locus_column,
id_column,
gene_names_column,
fpkm_column
}
check_required_columns(df, filename, required_columns)
for flag, status_value in [
(drop_failed, "FAIL"),
(drop_lowdata, "LOWDATA"),
(drop_hidata, "HIDATA")]:
mask = df[status_column] == status_value
mask_count = mask.sum()
total_count = len(df)
if flag and mask_count > 0:
verb_str = "Dropping"
df = df[~mask]
else:
verb_str = "Keeping"
logging.info(
"%s %d/%d entries from %s with status=%s",
verb_str,
mask_count,
total_count,
filename,
status_value)
if drop_nonchromosomal_loci:
loci = df[locus_column]
chromosomal_loci = loci.str.startswith("chr")
n_dropped = (~chromosomal_loci).sum()
if n_dropped > 0:
logging.info("Dropping %d/%d non-chromosomal loci from %s" % (
n_dropped, len(df), filename))
df = df[chromosomal_loci]
if replace_hidata_fpkm_value:
hidata_mask = df[status_column] == "HIDATA"
n_hidata = hidata_mask.sum()
logging.info(
"Setting FPKM=%s for %d/%d entries with status=HIDATA",
replace_hidata_fpkm_value,
n_hidata,
len(df))
df[fpkm_column][hidata_mask] = replace_hidata_fpkm_value
if len(df) == 0:
raise ValueError("Empty FPKM tracking file: %s" % filename)
ids = df[id_column]
known = ids.str.startswith("ENS")
if known.sum() == 0:
raise ValueError("No Ensembl IDs found in %s" % filename)
if drop_novel:
n_dropped = (~known).sum()
if n_dropped > 0:
logging.info(
"Dropping %d/%d novel entries from %s",
n_dropped,
len(df),
filename)
df = df[known]
known = np.ones(len(df), dtype='bool')
loci = df[locus_column]
chromosomes, starts, ends = parse_locus_column(df[locus_column])
# gene names are given either as "-" or a comma separated list
# e.g. "BRAF1,PFAM2"
gene_names_strings = df[gene_names_column].copy()
gene_names_strings[gene_names_strings == "-"] = ""
# split each entry into a list of zero or more strings
gene_names_lists = gene_names_strings.str.split(",")
return pd.DataFrame({
"id": df[id_column],
"novel": ~known,
"fpkm": df[fpkm_column],
"chr": chromosomes,
"start": starts,
"end": ends,
"gene_names": gene_names_lists
})
def load_cufflinks_dict(*args, **kwargs):
"""
Returns dictionary mapping feature identifier (either transcript or gene ID)
to a DataFrame row with fields:
id : str
novel : bool
fpkm : float
chr : str
start : int
end : int
gene_names : str list
"""
return {
row.id: row
for (_, row)
in load_cufflinks_dataframe(*args, **kwargs).iterrows()
}
|
def str_brief(obj, lim=20, dots='...', use_repr=True):
    """
    Truncates a string, starting from 'lim' chars. The given object
    can be a string, or something that can be casted to a string.

    >>> import string
    >>> str_brief(string.ascii_uppercase)
    'ABCDEFGHIJKLMNOPQRST...'
    >>> str_brief(2 ** 50, lim=10, dots='0')
    '11258999060'
    """
    # Fix: `basestring` only exists on Python 2; fall back to `str` on
    # Python 3 instead of raising NameError.
    try:
        text_types = basestring  # noqa: F821 (Python 2)
    except NameError:
        text_types = str
    if isinstance(obj, text_types) or not use_repr:
        full = str(obj)
    else:
        full = repr(obj)
    postfix = []
    # Expected closing character for each opener, so a truncated result
    # still ends with balanced brackets/quotes.
    CLOSERS = {'(': ')', '{': '}', '[': ']', '"': '"', "'": "'", '<': '>'}
    for i, c in enumerate(full):
        # Allow one extra character per still-open bracket before cutting.
        if i >= lim + len(postfix):
            return full[:i] + dots + ''.join(reversed(postfix))
        if postfix and postfix[-1] == c:
            postfix.pop(-1)
            continue
        closer = CLOSERS.get(c, None)
        if closer is not None:
            postfix.append(closer)
    return full
can be a string, or something that can be casted to a string.
>>> import string
>>> str_brief(string.uppercase)
'ABCDEFGHIJKLMNOPQRST...'
>>> str_brief(2 ** 50, lim=10, dots='0')
'11258999060' | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/helpers/xml_util.py#L42-L67 | null | ##############################################################################
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import xml.etree.ElementTree as et
import xml.etree.cElementTree as cet
from contextlib import contextmanager
from xml.parsers.expat import ExpatError
Element = cet.Element
tostring = cet.tostring
class XMLException(Exception):
    """Base class for all XML-related errors raised by this module."""
    pass
class ElementNotFoundException(XMLException):
    """Raised when a required element or attribute is missing from a tree."""

    def __init__(self, xml, notFound):
        XMLException.__init__(self)
        # Keep both the tree and the missing path for the error message.
        self.xml = xml
        self.notFound = notFound

    def __str__(self):
        serialized = tostring(self.xml)
        return "Cannot parse XML (cannot find %s):\n%s" % (
            self.notFound, serialized)
class XMLSyntaxError(XMLException):
    """
    A somewhat friendlier XML Syntax Error.
    >>> fromstring('<a/>').tostring()
    '<a/>'
    >>> tag='<tag description="a very long description that will be briefed">'
    >>> fromstring(tag) # doctest: +ELLIPSIS
    Traceback (most recent call last):
    ...
    XMLSyntaxError: Malformed XML (line 1, XML '<tag description...>'): ...
    >>> fromstring('')
    Traceback (most recent call last):
    ...
    XMLSyntaxError: Malformed XML (line None, XML ''): No specific message
    >>>
    """
    # NOTE(review): the first doctest calls .tostring() on the Element
    # returned by fromstring(); ElementTree elements don't provide that
    # method -- looks like a stale example, confirm before relying on it.
    def __init__(self, original, msg, lineno):
        XMLException.__init__(self)
        self.original = original  # the offending XML text (may be None)
        # Fall back to a generic message when the parser supplied none.
        self.msg = msg or "No specific message"
        self.lineno = lineno  # line number reported by the parser, if any
    def pretty(self):
        # Multi-line, human-readable rendering of the error.
        return ("Syntax error parsing XML:\n\tLine#: %s\n\tMessage: %s\n\t"
                "XML:%r" % (self.lineno, self.msg, self.original))
    def __str__(self):
        # Single-line summary; str_brief keeps the XML excerpt short.
        return "Malformed XML (line %s, XML %r): %s" % (
            self.lineno, str_brief(self.original, lim=15), self.msg)
@contextmanager
def _translateExceptions(original):
    # Re-raise low-level parser errors (expat and both ElementTree
    # implementations) as this module's friendlier XMLSyntaxError,
    # carrying `original` (the text being parsed, or None) for context.
    try:
        yield None
    except ExpatError as e:
        raise XMLSyntaxError(original, e.args[0], e.lineno)
    except (cet.ParseError, et.ParseError) as e:
        raise XMLSyntaxError(original, e.args[0], e.lineno)
def fromstring(text):
    # Parse an XML document from a string; parser errors are re-raised
    # as XMLSyntaxError via _translateExceptions.
    with _translateExceptions(None):
        return cet.fromstring(text)
def parse(obj):
    # Parse an XML document from a filename or file-like object; parser
    # errors are re-raised as XMLSyntaxError via _translateExceptions.
    with _translateExceptions(None):
        return cet.parse(obj)
def xml_find(elem, path, attrib=None):
    """
    Find a sub-element of `elem` by `path`; if `attrib` is given, return
    that attribute's value instead of the element itself.

    Raises ElementNotFoundException when the element (or the requested
    attribute on it) is missing.
    """
    found = elem.find(path)
    if found is None:
        raise ElementNotFoundException(elem, path)
    if attrib is None:
        return found
    if attrib in found.attrib:
        return found.attrib[attrib]
    raise ElementNotFoundException(elem, "%s/@%s" % (path, attrib))
# =========================================================================
# TerminationDetectingXMLParser
# =========================================================================
class _TerminationDetectingTreeBuilder(et.TreeBuilder):
    # TreeBuilder that remembers the document's root element and raises a
    # flag once the root's closing tag is seen, i.e. the document is done.
    def __init__(self):
        et.TreeBuilder.__init__(self)
        self.root_element = None  # set on the first opened tag
        self.root_element_closed = False  # True once the root tag closes
    def start(self, tag, attrs):
        element = et.TreeBuilder.start(self, tag, attrs)
        # The first opened tag is the document root.
        if self.root_element is None:
            self.root_element = element
        return element
    def end(self, tag):
        element = et.TreeBuilder.end(self, tag)
        # Identity check: only the root element's close flips the flag.
        if self.root_element is element:
            self.root_element_closed = True
        return element
class TerminationDetectingXMLParser(object):
    """An XML parser which you can feed from a stream; knows automatically
    when the first tag was closed"
    >>> td = TerminationDetectingXMLParser()
    >>> td.feed('<a>')
    >>> td.root_element_closed
    False
    >>> td.feed('</a>')
    >>> td.root_element_closed
    True
    >>> td.close().tostring()
    '<a/>'
    >>> td = TerminationDetectingXMLParser()
    >>> td.feed('<a><b></b></a>')
    >>> td.root_element_closed
    True
    >>> td.close().tostring()
    '<a><b/></a>'
    >>> td = TerminationDetectingXMLParser()
    >>> td.feed('<a<a') #doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    ...
    ParseError: not well-formed (invalid token): line 1, column 2
    >>>
    """
    def __init__(self):
        self.tree_builder = _TerminationDetectingTreeBuilder()
        # NOTE(review): et.XMLTreeBuilder is a deprecated alias of
        # et.XMLParser that was removed in Python 3.9 -- confirm the
        # supported runtime before upgrading.
        self.xml_tree_builder = et.XMLTreeBuilder(target=self.tree_builder)
    def feed(self, chunk):
        # Incrementally feed a chunk; parse errors become XMLSyntaxError
        # carrying the offending chunk.
        with _translateExceptions(chunk):
            self.xml_tree_builder.feed(chunk)
    def close(self):
        # Finalize parsing and return the root element, round-tripped
        # through tostring/fromstring to normalize the result.
        with _translateExceptions(None):
            tree = self.xml_tree_builder.close()
            return fromstring(tostring(tree))
    @property
    def root_element_closed(self):
        # True once the document's root closing tag has been fed.
        return self.tree_builder.root_element_closed
|
IBM/pyxcli | pyxcli/mirroring/mirrored_entities.py | MirroredEntities.get_mirror_resources_by_name_map | python | def get_mirror_resources_by_name_map(self, scope=None):
""" returns a map volume_name -> volume, cg_name->cg
scope is either None or CG or Volume
"""
volumes_mirrors_by_name = dict()
cgs_mirrors_by_name = dict()
if ((scope is None) or (scope.lower() == 'volume')):
mirror_list = self.xcli_client.cmd.mirror_list(scope='Volume')
for xcli_mirror in mirror_list:
name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
volumes_mirrors_by_name[name] = xcli_mirror
if ((scope is None) or (scope.lower() == CG)):
for xcli_mirror in self.xcli_client.cmd.mirror_list(scope='CG'):
name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
cgs_mirrors_by_name[name] = xcli_mirror
res = Bunch(volumes=volumes_mirrors_by_name, cgs=cgs_mirrors_by_name)
return res | returns a map volume_name -> volume, cg_name->cg
scope is either None or CG or Volume | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/mirroring/mirrored_entities.py#L48-L64 | [
"def get_mirrored_object_name(cls, xcli_mirror, remote_name=False):\n if remote_name:\n return xcli_mirror.remote_peer_name\n return xcli_mirror.local_peer_name\n"
class MirroredEntities(object):
    """
    Convenience lookups over an XCLI client for mirrored volumes and
    consistency groups, plus hosts, clusters, pools, ports and
    snapshot groups.
    """

    xcli_client = None

    def __init__(self, xcli_client):
        self.xcli_client = xcli_client

    @classmethod
    def get_mirrored_object_name(cls, xcli_mirror, remote_name=False):
        """Return the local peer name of a mirror row (or the remote
        peer name when remote_name=True)."""
        if remote_name:
            return xcli_mirror.remote_peer_name
        return xcli_mirror.local_peer_name

    @classmethod
    def is_mirror_master(cls, xcli_mirror):
        """True when this side of the mirror currently has the Master role."""
        return xcli_mirror.current_role == 'Master'

    @classmethod
    def is_target_connected(cls, xcli_mirror):
        """True when the remote mirror target is connected."""
        return xcli_mirror.connected == 'yes'

    def get_mirror_resources_by_name_map(self, scope=None):
        """ returns a map volume_name -> volume, cg_name->cg
        scope is either None or CG or Volume
        """
        volumes_mirrors_by_name = dict()
        cgs_mirrors_by_name = dict()
        if ((scope is None) or (scope.lower() == 'volume')):
            mirror_list = self.xcli_client.cmd.mirror_list(scope='Volume')
            for xcli_mirror in mirror_list:
                name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
                volumes_mirrors_by_name[name] = xcli_mirror
        if ((scope is None) or (scope.lower() == CG)):
            for xcli_mirror in self.xcli_client.cmd.mirror_list(scope='CG'):
                name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
                cgs_mirrors_by_name[name] = xcli_mirror
        res = Bunch(volumes=volumes_mirrors_by_name, cgs=cgs_mirrors_by_name)
        return res

    def get_cg_mirrors(self):
        """Map of consistency-group name -> mirror row."""
        return self.get_mirror_resources_by_name_map(scope="CG").cgs

    def get_vol_mirrors(self):
        """Map of volume name -> mirror row."""
        return self.get_mirror_resources_by_name_map(scope="Volume").volumes

    def get_volume_by_name_map(self):
        """Map of volume name -> volume row."""
        return self.xcli_client.cmd.vol_list().as_dict('name')

    def get_volume_by_name(self, vol_name):
        """Single volume row for `vol_name`."""
        return self.xcli_client.cmd.vol_list(vol=vol_name).as_single_element

    def get_pool_by_name_map(self):
        """Map of pool name -> pool row."""
        return self.xcli_client.cmd.pool_list().as_dict('name')

    def get_pool_by_name(self, name):
        """Single pool row for `name`."""
        return self.xcli_client.cmd.pool_list(pool=name).as_single_element

    def get_hosts_by_name_map(self):
        """Map of host name -> host row."""
        return self.xcli_client.cmd.host_list().as_dict('name')

    def get_hosts_by_name(self, name):
        """Single host row for `name`."""
        return self.xcli_client.cmd.host_list(host=name).as_single_element

    def get_hosts_by_clusters(self):
        """Map of cluster name -> list of its host names."""
        clusters = dict()
        for cluster in self.xcli_client.cmd.cluster_list():
            host_list = cluster.hosts.split(',') if cluster.hosts != '' else []
            clusters[cluster.name] = host_list
        return clusters

    def get_hosts_by_ports(self):
        """Map of FC/iSCSI port name -> owning host row."""
        hosts_by_ports = dict()
        for host in self.xcli_client.cmd.host_list():
            for fc_port in host.fc_ports.split(','):
                hosts_by_ports[fc_port] = host
            for iscsi_port in host.iscsi_ports.split(','):
                hosts_by_ports[iscsi_port] = host
        return hosts_by_ports

    def get_snapshots_by_snap_groups(self):
        """Map of snapshot-group name -> list of member snapshot names."""
        snap_groups = dict()
        for volume in self.get_volume_by_name_map().values():
            if volume.sg_name != '':
                if volume.sg_name not in snap_groups:
                    snap_groups[volume.sg_name] = list()
                snap_groups[volume.sg_name].append(volume.name)
        return snap_groups

    def get_host_port_names(self, host_name):
        """ return a list of the port names of XIV host """
        port_names = list()
        host = self.get_hosts_by_name(host_name)
        fc_ports = host.fc_ports
        iscsi_ports = host.iscsi_ports
        port_names.extend(fc_ports.split(',') if fc_ports != '' else [])
        port_names.extend(iscsi_ports.split(',') if iscsi_ports != '' else [])
        return port_names

    def get_cluster_port_names(self, cluster_name):
        """ return a list of the port names under XIV CLuster """
        port_names = list()
        for host_name in self.get_hosts_by_clusters()[cluster_name]:
            # Bug fix: previously this extended with the host *row* from
            # get_hosts_by_name(), not the host's port names, so the
            # result never contained port names.
            port_names.extend(self.get_host_port_names(host_name))
        return port_names
|
IBM/pyxcli | pyxcli/mirroring/mirrored_entities.py | MirroredEntities.get_host_port_names | python | def get_host_port_names(self, host_name):
""" return a list of the port names of XIV host """
port_names = list()
host = self.get_hosts_by_name(host_name)
fc_ports = host.fc_ports
iscsi_ports = host.iscsi_ports
port_names.extend(fc_ports.split(',') if fc_ports != '' else [])
port_names.extend(iscsi_ports.split(',') if iscsi_ports != '' else [])
return port_names | return a list of the port names of XIV host | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/mirroring/mirrored_entities.py#L115-L123 | [
"def get_hosts_by_name(self, name):\n return self.xcli_client.cmd.host_list(host=name).as_single_element\n"
] | class MirroredEntities(object):
xcli_client = None
def __init__(self, xcli_client):
self.xcli_client = xcli_client
@classmethod
def get_mirrored_object_name(cls, xcli_mirror, remote_name=False):
if remote_name:
return xcli_mirror.remote_peer_name
return xcli_mirror.local_peer_name
@classmethod
def is_mirror_master(cls, xcli_mirror):
return xcli_mirror.current_role == 'Master'
@classmethod
def is_target_connected(cls, xcli_mirror):
return xcli_mirror.connected == 'yes'
def get_mirror_resources_by_name_map(self, scope=None):
""" returns a map volume_name -> volume, cg_name->cg
scope is either None or CG or Volume
"""
volumes_mirrors_by_name = dict()
cgs_mirrors_by_name = dict()
if ((scope is None) or (scope.lower() == 'volume')):
mirror_list = self.xcli_client.cmd.mirror_list(scope='Volume')
for xcli_mirror in mirror_list:
name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
volumes_mirrors_by_name[name] = xcli_mirror
if ((scope is None) or (scope.lower() == CG)):
for xcli_mirror in self.xcli_client.cmd.mirror_list(scope='CG'):
name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
cgs_mirrors_by_name[name] = xcli_mirror
res = Bunch(volumes=volumes_mirrors_by_name, cgs=cgs_mirrors_by_name)
return res
def get_cg_mirrors(self):
return self.get_mirror_resources_by_name_map(scope="CG").cgs
def get_vol_mirrors(self):
return self.get_mirror_resources_by_name_map(scope="Volume").volumes
def get_volume_by_name_map(self):
return self.xcli_client.cmd.vol_list().as_dict('name')
def get_volume_by_name(self, vol_name):
return self.xcli_client.cmd.vol_list(vol=vol_name).as_single_element
def get_pool_by_name_map(self):
return self.xcli_client.cmd.pool_list().as_dict('name')
def get_pool_by_name(self, name):
return self.xcli_client.cmd.pool_list(pool=name).as_single_element
def get_hosts_by_name_map(self):
return self.xcli_client.cmd.host_list().as_dict('name')
def get_hosts_by_name(self, name):
return self.xcli_client.cmd.host_list(host=name).as_single_element
def get_hosts_by_clusters(self):
clusters = dict()
for cluster in self.xcli_client.cmd.cluster_list():
host_list = cluster.hosts.split(',') if cluster.hosts != '' else []
clusters[cluster.name] = host_list
return clusters
def get_hosts_by_ports(self):
hosts_by_ports = dict()
for host in self.xcli_client.cmd.host_list():
for fc_port in host.fc_ports.split(','):
hosts_by_ports[fc_port] = host
for iscsi_port in host.iscsi_ports.split(','):
hosts_by_ports[iscsi_port] = host
return hosts_by_ports
def get_snapshots_by_snap_groups(self):
snap_groups = dict()
for volume in self.get_volume_by_name_map().values():
if volume.sg_name != '':
if volume.sg_name not in snap_groups:
snap_groups[volume.sg_name] = list()
snap_groups[volume.sg_name].append(volume.name)
return snap_groups
def get_host_port_names(self, host_name):
""" return a list of the port names of XIV host """
port_names = list()
host = self.get_hosts_by_name(host_name)
fc_ports = host.fc_ports
iscsi_ports = host.iscsi_ports
port_names.extend(fc_ports.split(',') if fc_ports != '' else [])
port_names.extend(iscsi_ports.split(',') if iscsi_ports != '' else [])
return port_names
def get_cluster_port_names(self, cluster_name):
""" return a list of the port names under XIV CLuster """
port_names = list()
for host_name in self.get_hosts_by_clusters()[cluster_name]:
port_names.extend(self.get_hosts_by_name(host_name))
return port_names
|
IBM/pyxcli | pyxcli/mirroring/mirrored_entities.py | MirroredEntities.get_cluster_port_names | python | def get_cluster_port_names(self, cluster_name):
""" return a list of the port names under XIV CLuster """
port_names = list()
for host_name in self.get_hosts_by_clusters()[cluster_name]:
port_names.extend(self.get_hosts_by_name(host_name))
return port_names | return a list of the port names under XIV CLuster | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/mirroring/mirrored_entities.py#L125-L130 | [
"def get_hosts_by_name(self, name):\n return self.xcli_client.cmd.host_list(host=name).as_single_element\n",
"def get_hosts_by_clusters(self):\n clusters = dict()\n for cluster in self.xcli_client.cmd.cluster_list():\n host_list = cluster.hosts.split(',') if cluster.hosts != '' else []\n cl... | class MirroredEntities(object):
xcli_client = None
def __init__(self, xcli_client):
self.xcli_client = xcli_client
@classmethod
def get_mirrored_object_name(cls, xcli_mirror, remote_name=False):
if remote_name:
return xcli_mirror.remote_peer_name
return xcli_mirror.local_peer_name
@classmethod
def is_mirror_master(cls, xcli_mirror):
return xcli_mirror.current_role == 'Master'
@classmethod
def is_target_connected(cls, xcli_mirror):
return xcli_mirror.connected == 'yes'
def get_mirror_resources_by_name_map(self, scope=None):
""" returns a map volume_name -> volume, cg_name->cg
scope is either None or CG or Volume
"""
volumes_mirrors_by_name = dict()
cgs_mirrors_by_name = dict()
if ((scope is None) or (scope.lower() == 'volume')):
mirror_list = self.xcli_client.cmd.mirror_list(scope='Volume')
for xcli_mirror in mirror_list:
name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
volumes_mirrors_by_name[name] = xcli_mirror
if ((scope is None) or (scope.lower() == CG)):
for xcli_mirror in self.xcli_client.cmd.mirror_list(scope='CG'):
name = MirroredEntities.get_mirrored_object_name(xcli_mirror)
cgs_mirrors_by_name[name] = xcli_mirror
res = Bunch(volumes=volumes_mirrors_by_name, cgs=cgs_mirrors_by_name)
return res
def get_cg_mirrors(self):
return self.get_mirror_resources_by_name_map(scope="CG").cgs
def get_vol_mirrors(self):
return self.get_mirror_resources_by_name_map(scope="Volume").volumes
def get_volume_by_name_map(self):
return self.xcli_client.cmd.vol_list().as_dict('name')
def get_volume_by_name(self, vol_name):
return self.xcli_client.cmd.vol_list(vol=vol_name).as_single_element
def get_pool_by_name_map(self):
return self.xcli_client.cmd.pool_list().as_dict('name')
def get_pool_by_name(self, name):
return self.xcli_client.cmd.pool_list(pool=name).as_single_element
def get_hosts_by_name_map(self):
return self.xcli_client.cmd.host_list().as_dict('name')
def get_hosts_by_name(self, name):
return self.xcli_client.cmd.host_list(host=name).as_single_element
def get_hosts_by_clusters(self):
clusters = dict()
for cluster in self.xcli_client.cmd.cluster_list():
host_list = cluster.hosts.split(',') if cluster.hosts != '' else []
clusters[cluster.name] = host_list
return clusters
def get_hosts_by_ports(self):
hosts_by_ports = dict()
for host in self.xcli_client.cmd.host_list():
for fc_port in host.fc_ports.split(','):
hosts_by_ports[fc_port] = host
for iscsi_port in host.iscsi_ports.split(','):
hosts_by_ports[iscsi_port] = host
return hosts_by_ports
def get_snapshots_by_snap_groups(self):
snap_groups = dict()
for volume in self.get_volume_by_name_map().values():
if volume.sg_name != '':
if volume.sg_name not in snap_groups:
snap_groups[volume.sg_name] = list()
snap_groups[volume.sg_name].append(volume.name)
return snap_groups
def get_host_port_names(self, host_name):
""" return a list of the port names of XIV host """
port_names = list()
host = self.get_hosts_by_name(host_name)
fc_ports = host.fc_ports
iscsi_ports = host.iscsi_ports
port_names.extend(fc_ports.split(',') if fc_ports != '' else [])
port_names.extend(iscsi_ports.split(',') if iscsi_ports != '' else [])
return port_names
def get_cluster_port_names(self, cluster_name):
""" return a list of the port names under XIV CLuster """
port_names = list()
for host_name in self.get_hosts_by_clusters()[cluster_name]:
port_names.extend(self.get_hosts_by_name(host_name))
return port_names
|
IBM/pyxcli | pyxcli/pool.py | XCLIClientPool.flush | python | def flush(self):
now = time.time()
to_remove = []
for k, entry in self.pool.items():
if entry.timestamp < now:
entry.client.close()
to_remove.append(k)
for k in to_remove:
del self.pool[k] | remove all stale clients from pool | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/pool.py#L70-L79 | null | class XCLIClientPool(object):
"""The XCLI client pool alleviates the need to open a new connection
every time to need to send a single command, and reduces the system's
load, as only a single connection is kept for every system. It uses
the XCLIClient multiplexing abilities, so that only a single
connection is opened to every system, over which different users can send
their commands separately; and it's thread safe, as the underlying
XCLIClient is thread safe (it will send only a single command over the
transport at any point of time).
The pool can be configured with a time-to-live for connections, so
that connections older than this TTL will be flushed and reopened.
To use the pull, import one of the built-in pool objects,
``xcli_ssl_pool`` and use the ``get`` method. For example::
from pyxcli.pool import xcli_ssl_pool
client = xcli_ssl_pool.get("admin", "mypass", "192.168.1.102")
client.cmd.vol_list()
"""
def __init__(self, connector, time_to_live=10 * 60):
self.connector = connector
self.time_to_live = time_to_live
self.pool = {}
def clear(self):
for entry in self.pool.values():
entry.client.close()
self.pool.clear()
def get(self, user, password, endpoints):
"""Gets an existing connection or opens a new one
"""
now = time.time()
# endpoints can either be str or list
if isinstance(endpoints, str):
endpoints = [endpoints]
for ep in endpoints:
if ep not in self.pool:
continue
entry = self.pool[ep]
if (not entry.client.is_connected() or
entry.timestamp + self.time_to_live < now):
xlog.debug("XCLIClientPool: clearing stale client %s",
ep)
del self.pool[ep]
entry.client.close()
continue
user_client = entry.user_clients.get(user, None)
if not user_client or not user_client.is_connected():
user_client = entry.client.get_user_client(user, password)
entry.user_clients[user] = user_client
return user_client
xlog.debug("XCLIClientPool: connecting to %s", endpoints)
client = self.connector(None, None, endpoints)
user_client = {user: client.get_user_client(user, password)}
for ep in endpoints:
self.pool[ep] = PoolEntry(client, now, user_client)
return user_client[user]
|
IBM/pyxcli | pyxcli/pool.py | XCLIClientPool.get | python | def get(self, user, password, endpoints):
now = time.time()
# endpoints can either be str or list
if isinstance(endpoints, str):
endpoints = [endpoints]
for ep in endpoints:
if ep not in self.pool:
continue
entry = self.pool[ep]
if (not entry.client.is_connected() or
entry.timestamp + self.time_to_live < now):
xlog.debug("XCLIClientPool: clearing stale client %s",
ep)
del self.pool[ep]
entry.client.close()
continue
user_client = entry.user_clients.get(user, None)
if not user_client or not user_client.is_connected():
user_client = entry.client.get_user_client(user, password)
entry.user_clients[user] = user_client
return user_client
xlog.debug("XCLIClientPool: connecting to %s", endpoints)
client = self.connector(None, None, endpoints)
user_client = {user: client.get_user_client(user, password)}
for ep in endpoints:
self.pool[ep] = PoolEntry(client, now, user_client)
return user_client[user] | Gets an existing connection or opens a new one | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/pool.py#L81-L110 | null | class XCLIClientPool(object):
"""The XCLI client pool alleviates the need to open a new connection
every time to need to send a single command, and reduces the system's
load, as only a single connection is kept for every system. It uses
the XCLIClient multiplexing abilities, so that only a single
connection is opened to every system, over which different users can send
their commands separately; and it's thread safe, as the underlying
XCLIClient is thread safe (it will send only a single command over the
transport at any point of time).
The pool can be configured with a time-to-live for connections, so
that connections older than this TTL will be flushed and reopened.
To use the pull, import one of the built-in pool objects,
``xcli_ssl_pool`` and use the ``get`` method. For example::
from pyxcli.pool import xcli_ssl_pool
client = xcli_ssl_pool.get("admin", "mypass", "192.168.1.102")
client.cmd.vol_list()
"""
def __init__(self, connector, time_to_live=10 * 60):
self.connector = connector
self.time_to_live = time_to_live
self.pool = {}
def clear(self):
for entry in self.pool.values():
entry.client.close()
self.pool.clear()
def flush(self):
"""remove all stale clients from pool"""
now = time.time()
to_remove = []
for k, entry in self.pool.items():
if entry.timestamp < now:
entry.client.close()
to_remove.append(k)
for k in to_remove:
del self.pool[k]
|
IBM/pyxcli | pyxcli/events/events.py | EventsManager.send_event | python | def send_event(self, action, properties, event_severity=EVENT_SEVERITY):
# verify properties
event_properties = dict() if (properties is None) else properties
if type(event_properties) is not dict:
raise TypeError('properties is not dict')
# prepare event
event_bunch = Bunch(
Product=self.product_name,
Version=self.product_version,
Server=self.server_name,
Platform=self.platform,
Action=action,
Properties=event_properties)
event_description = self._get_description_prefix() + \
json.dumps(event_bunch)
use_custom_event = True
if CSS_PRODUCT_EVENT in dir(self.xcli.cmd):
try:
# send css product event
log.debug("sending css_product_event "
"description=%s severity=%s",
event_description, event_severity)
self.xcli.cmd.css_product_event(severity=event_severity,
product=self.product_name,
version=self.product_version,
server=self.server_name,
platform=self.platform,
action=action,
properties=event_properties)
use_custom_event = False
except (UnrecognizedCommandError,
OperationForbiddenForUserCategoryError):
log.warning("failed css_product_event "
"description=%s severity=%s",
event_description, event_severity)
if use_custom_event:
# send custom event
log.debug("sending custom_event description=%s severity=%s",
event_description, event_severity)
self.xcli.cmd.custom_event(
description=event_description, severity=event_severity) | send css_event and if fails send custom_event instead
Args:
action (ACTIONS): the action causing the event
properties (dict): the action additional properties
event_severity (string): the event severity
Raises:
XCLIError: if the xcli.cmd.custom_event failed
KeyError: if action wasn't predefined
TypeError: if properties is not None or dict | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/events/events.py#L71-L124 | [
"def _get_description_prefix():\n return HOST_PRODUCT\n"
] | class EventsManager(object):
"""
Handle CSS event sending to XIV
"""
def __init__(self, xcli, product_name, product_version):
"""
init an EventsManager
Args:
xcli (XCLIClient): xcli client to send the event
product_name (string): the sending product's name
product_version (string): the sending product's version
Raises:
ValueError: if missing product_name or product_version
"""
self.xcli = xcli
self.product_name = product_name
self.product_version = product_version
self.server_name = getfqdn()
self.platform = get_platform_details()
# verify init params
if not self.product_name:
raise ValueError('product_name is empty')
if not self.product_version:
raise ValueError('product_version is empty')
@staticmethod
def _get_description_prefix():
return HOST_PRODUCT
|
IBM/pyxcli | pyxcli/mirroring/recovery_manager.py | RecoveryManager._create_mirror | python | def _create_mirror(self, resource_type, resource_name, target_name,
mirror_type, slave_resource_name, create_slave='no',
remote_pool=None, rpo=None, remote_rpo=None,
schedule=None, remote_schedule=None,
activate_mirror='no'):
'''creates a mirror and returns a mirror object.
resource_type must be 'vol' or 'cg',
target name must be a valid target from target_list,
mirror type must be 'sync' or 'async',
slave_resource_name would be the slave_vol or slave_cg name'''
kwargs = {
resource_type: resource_name,
'target': target_name,
'type': mirror_type,
'slave_' + resource_type: slave_resource_name,
'create_slave': create_slave,
'remote_pool': remote_pool,
'rpo': rpo,
'remote_rpo': remote_rpo,
'schedule': schedule,
'remote_schedule': remote_schedule
}
if mirror_type == 'sync':
kwargs['type'] = 'sync_best_effort'
kwargs['rpo'] = None
else:
kwargs['type'] = 'async_interval'
if kwargs['remote_schedule'] is None:
kwargs['remote_schedule'] = kwargs['schedule']
# avoids a python3 issue of the dict changing
# during iteration
keys = set(kwargs.keys()).copy()
for k in keys:
if kwargs[k] is None:
kwargs.pop(k)
logger.info('creating mirror with arguments: %s' % kwargs)
self.xcli_client.cmd.mirror_create(**kwargs)
if activate_mirror == 'yes':
logger.info('Activating mirror %s' % resource_name)
self.activate_mirror(resource_name)
return self.get_mirror_resources()[resource_name] | creates a mirror and returns a mirror object.
resource_type must be 'vol' or 'cg',
target name must be a valid target from target_list,
mirror type must be 'sync' or 'async',
slave_resource_name would be the slave_vol or slave_cg name | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/mirroring/recovery_manager.py#L273-L319 | [
"def get_mirror_resources(self):\n return self.action_entities.get_cg_mirrors()\n",
"def activate_mirror(self, resource_id):\n self._activate_mirror(cg=resource_id)\n",
"def get_mirror_resources(self):\n return self.action_entities.get_vol_mirrors()\n",
"def activate_mirror(self, vol_id):\n self._... | class RecoveryManager(object):
xcli_client = None
action_entities = None
# ============================================ INIT =================
def __init__(self, should_use_cache, xcli_client):
self.xcli_client = xcli_client
if (should_use_cache):
self.action_entities = MirroredCachedEntities(
xcli_client)
else:
self.action_entities = MirroredEntities(xcli_client)
def set_action_entities(self, action_entities):
self.action_entities = action_entities
def get_mirror_resources(self):
raise RecoveryMethodNotImplementedException("Unsupported")
def get_type_str(self):
return " - "
def get_target_by_system_id(self, system_id):
target_names = [target.name for target in
self.xcli_client.cmd.target_list().as_list
if target.system_id == system_id]
return target_names[0] if len(target_names) >= 1 else None
# ========================================== FAILOVER =============
def switch_roles(self, resource_id):
self._switch_roles(resource_id)
def verify_readiness_for_failover(self, resource_id):
logger.debug("Verify failover readiness for %s %s" %
(self.get_type_str(), resource_id))
self.verify_mirror_existence(resource_id)
self.verify_slave_consistency(resource_id)
def promote(self, resource_id):
self.verify_mirror_existence(resource_id)
xcli_mirror = self.get_mirror_resources()[resource_id]
self._promote_resource(xcli_mirror, resource_id)
def _promote_resource(self, xcli_mirror, resource_id):
type_str = self.get_type_str()
if not MirroredEntities.is_mirror_master(xcli_mirror):
logger.debug("-> Promoting and setting [r/w] attributes "
"for %s [%s]." % (type_str, resource_id,))
try:
if self._is_sync_mirror(xcli_mirror):
self.snap_target_before_possible_override(resource_id)
else:
self.duplicate_target_snapshot_before_possible_override(
resource_id)
except CommandExecutionError as e:
logger.warning("Failed to create failsafe snap group "
"for %s [%s]." % (type_str,
resource_id, str(e)))
except Exception as e:
logger.error("Unexpected exception %s", e)
raise e
self.change_role_to_master(resource_id)
else:
logger.warning("-> %s [%s] is already promoted to "
"Read-Write" % (type_str, resource_id,))
# =============================================== TEST FAILOVER ==========
def _get_snapshot_name(self, test_snapshot_prefix, resource_id,
snap_time_format):
time_tuple = time.localtime()
str_time = time.strftime(snap_time_format, time_tuple)
snapshot_name = ("%s_%s_%s" % (test_snapshot_prefix,
resource_id,
str_time))[0:CHAR_LIMIT]
return snapshot_name
def test_promote_start(self, resource_id,
test_snapshot_prefix, snap_time_format):
logger.info(
"Commence: Temporary Snapshot Copy of [%s] on array is "
"being created for testFailoverStart request" % (resource_id))
self.verify_mirror_existence(resource_id)
self._verify_resource_not_in_test_failover(
resource_id, test_snapshot_prefix)
self.verify_slave_consistency(resource_id)
self.verify_snapshot_space_for_resource(resource_id)
snapshot_name = self._get_snapshot_name(test_snapshot_prefix,
resource_id, snap_time_format)
self._create_and_unlock_snapshot(resource_id,
snapshot_name,
test_snapshot_prefix)
def test_promote_stop(self, resource_id, test_snapshot_prefix):
logger.info("Commence: Deletion of temporary Snapshot "
"%s Copy of [%s] for testFailoverStop"
"request." % (self.get_type_str(), resource_id,))
self.verify_mirror_existence(resource_id)
self._unmap_and_delete_test_snapshots(
resource_id, test_snapshot_prefix)
def _verify_resource_not_in_test_failover(self, resource_id,
test_snapshot_prefix):
if self._does_resource_have_mapped_test_snapshot(resource_id,
test_snapshot_prefix):
raise AlreadyInTestFailoverStartRecoveryException()
def _does_resource_have_mapped_test_snapshot(self, resource_id,
test_snapshot_prefix):
raise RecoveryMethodNotImplementedException("Unsupported")
def _create_and_unlock_snapshot(self, resource_id,
snapshot_name, test_snapshot_prefix):
raise RecoveryMethodNotImplementedException("Unsupported")
def _unmap_and_delete_test_snapshots(self, resource_id,
test_snapshot_prefix):
raise RecoveryMethodNotImplementedException("Unsupported")
# ============================================ REVERSE REPLICATION =======
def prepare_reverse_replication(self, resource_id, should_unmap=False):
# This method should be called on Primary before reverse_replication
logger.info("Commence: %s [%s] is being set as target for replication "
"for prepareReverseReplication"
"request." % (self.get_type_str(), resource_id,))
self.verify_mirror_existence(resource_id)
self.verify_snapshot_space_for_resource(resource_id)
self.snap_and_change_role_to_slave(resource_id)
if should_unmap:
self.unmap_all_volumes(resource_id)
def reverse_replication(self, resource_id):
# This method should be called on Secondary for reverse_replication
logger.info("Commence: Establish replication for "
"%s [%s] with Primary role for reverseReplication "
"request." % (self.get_type_str(), resource_id,))
self.verify_mirror_existence(resource_id)
self.verify_mirror_connectivity(resource_id)
self.reactivate_mirror(resource_id)
logger.debug("-> Setting Primary role for "
"%s [%s]." % (self.get_type_str(), resource_id,))
self._mirror_change_designation(resource_id, 'Primary')
# ================================================= MIRROR ACTIONS =======
def delete_mirror(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
def activate_mirror(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
def deactivate_mirror(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
def is_mirror_active(self, resource_id):
xcli_mirror = self.get_mirror_resources()[resource_id]
return self._is_mirror_active(xcli_mirror)
def reactivate_mirror(self, resource_id):
logger.debug("-> Reactivating mirror for %s %s" %
(self.get_type_str(), resource_id))
self.verify_mirror_existence(resource_id)
xcli_mirror = self.get_mirror_resources()[resource_id]
if self._is_mirror_active(xcli_mirror):
self.deactivate_mirror(resource_id)
self.activate_mirror(resource_id)
def change_role_to_master(self, resource_id):
self._change_role(resource_id, 'Master')
def change_role_to_slave(self, resource_id):
self._change_role(resource_id, 'Slave')
def start_async_job(self, xcli_mirror):
logger.debug("Starting manual sync job for %s => %s" % (
xcli_mirror.local_peer_name, xcli_mirror.remote_peer_name))
temp_schedule_name = ('temp_manual_%s' %
(xcli_mirror.local_peer_name,))[0:CHAR_LIMIT]
current_schedule = xcli_mirror.schedule_name
try:
self.xcli_client.cmd.schedule_create(
schedule=temp_schedule_name, type='manual')
self._set_mirror_schedule(xcli_mirror, temp_schedule_name)
self.xcli_client.cmd.schedule_create_tick(
schedule=temp_schedule_name)
except Exception as e:
logger.error("Failed to start manual sync job for %s => %s."
" Reason: %s" % (xcli_mirror.local_peer_name,
xcli_mirror.remote_peer_name,
str(e)))
raise (e)
finally:
self._set_mirror_schedule(xcli_mirror, current_schedule)
self.xcli_client.cmd.schedule_delete(schedule=temp_schedule_name)
def is_async_job_running(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
def _create_mirror(self, resource_type, resource_name, target_name,
mirror_type, slave_resource_name, create_slave='no',
remote_pool=None, rpo=None, remote_rpo=None,
schedule=None, remote_schedule=None,
activate_mirror='no'):
'''creates a mirror and returns a mirror object.
resource_type must be 'vol' or 'cg',
target name must be a valid target from target_list,
mirror type must be 'sync' or 'async',
slave_resource_name would be the slave_vol or slave_cg name'''
kwargs = {
resource_type: resource_name,
'target': target_name,
'type': mirror_type,
'slave_' + resource_type: slave_resource_name,
'create_slave': create_slave,
'remote_pool': remote_pool,
'rpo': rpo,
'remote_rpo': remote_rpo,
'schedule': schedule,
'remote_schedule': remote_schedule
}
if mirror_type == 'sync':
kwargs['type'] = 'sync_best_effort'
kwargs['rpo'] = None
else:
kwargs['type'] = 'async_interval'
if kwargs['remote_schedule'] is None:
kwargs['remote_schedule'] = kwargs['schedule']
# avoids a python3 issue of the dict changing
# during iteration
keys = set(kwargs.keys()).copy()
for k in keys:
if kwargs[k] is None:
kwargs.pop(k)
logger.info('creating mirror with arguments: %s' % kwargs)
self.xcli_client.cmd.mirror_create(**kwargs)
if activate_mirror == 'yes':
logger.info('Activating mirror %s' % resource_name)
self.activate_mirror(resource_name)
return self.get_mirror_resources()[resource_name]
def _set_mirror_schedule(self, xcli_mirror, new_schedule):
is_cg = (xcli_mirror.mirror_object == 'CG')
if is_cg:
self.xcli_client.cmd.mirror_change_schedule(
cg=xcli_mirror.local_peer_name, schedule=new_schedule)
else:
self.xcli_client.cmd.mirror_change_schedule(
vol=xcli_mirror.local_peer_name, schedule=new_schedule)
def _is_mirror_active(self, xcli_mirror):
return xcli_mirror.active == "yes"
def _delete_mirror(self, **kwargs):
logger.info('Deleting mirror %s' % kwargs)
self.xcli_client.cmd.mirror_delete(**kwargs)
def _deactivate_mirror(self, **kwargs):
# if we get SYNC_ALREADY_INACTIVE (status 3) it is safe to ignore it
try:
self.xcli_client.cmd.mirror_deactivate(**kwargs)
except SyncAlreadyInactiveError:
logger.warning("_deactivate_mirror got an error, "
"Synchronization is already inactive")
def _activate_mirror(self, **kwargs):
# if we get SYNC_ALREADY_ACTIVE (status 3) it is safe to ignore it
try:
self.xcli_client.cmd.mirror_activate(**kwargs)
except SyncAlreadyActiveError:
logger.warning("_activate_mirror got an error, "
"Synchronization is already active")
def _change_role(self, resource_id, role):
raise RecoveryMethodNotImplementedException("Unsupported")
def _switch_roles(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
def _mirror_change_designation(self, resource_id, new_designation_str):
raise RecoveryMethodNotImplementedException("Unsupported")
# =============================================== MAPPING ================
def unmap_all_volumes(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
def unmap_volume(self, volume_name):
logger.info("-> Remove all mappings of volume %s" % volume_name)
for mapping in self.xcli_client.cmd.vol_mapping_list(vol=volume_name):
if mapping.type == 'cluster':
ports = self.action_entities.get_cluster_port_names(
mapping.host)
logger.debug("-> Volume / Snapshot [%s] is being masked "
"from XIV cluster [%s]. Ports "
"contained %s." % (volume_name, mapping.host,
ports))
try:
self.xcli_client.cmd.unmap_vol(
vol=volume_name, cluster=mapping.host)
except XCLIError as e:
logger.error(e)
raise e
else:
ports = self.action_entities.get_host_port_names(mapping.host)
logger.debug("-> Volume / Snapshot [%s] is being masked from"
" XIV host [%s]. Ports "
"contained %s." % (volume_name, mapping.host,
ports))
self.xcli_client.cmd.unmap_vol(vol=volume_name,
host=mapping.host)
def is_volume_mapped(self, volume_name):
logger.debug("Testing if volume %s is mapped" % volume_name)
if len(self.xcli_client.cmd.vol_mapping_list(vol=volume_name)) > 0:
logger.debug("Volume %s is mapped" % volume_name)
return True
logger.debug("Volume %s is not mapped" % volume_name)
return False
def _is_vol_locked(self, volume_name):
status = self.action_entities.get_volume_by_name(volume_name).locked
return status == 'yes'
# ================================================= SNAPSHOT =============
def snap_target_before_possible_override(self, resource_id,
snap_name=None):
raise RecoveryMethodNotImplementedException("Unsupported")
def duplicate_target_snapshot_before_possible_override(self, resource_id,
snapshot_name=None):
raise RecoveryMethodNotImplementedException("Unsupported")
def snap_and_change_role_to_slave(self, resource_id):
xcli_mirror = self.get_mirror_resources()[resource_id]
type_str = self.get_type_str()
if MirroredEntities.is_mirror_master(xcli_mirror):
self.snap_target_before_possible_override(resource_id)
logger.debug(
"-> Setting %s [%s] as replication target." % (type_str,
resource_id))
if self._is_mirror_active(xcli_mirror):
self.deactivate_mirror(resource_id)
self.change_role_to_slave(resource_id)
else:
logger.warning("-> %s [%s] is already a replication target "
"target" % (type_str, resource_id))
def verify_snapshot_space_for_resource(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
# ================================================= QUERIES ==============
def is_resource_locked(self, resource_id):
raise RecoveryMethodNotImplementedException("Unsupported")
def is_mirror_slave_ready_for_failover(self, resource_id):
xcli_mirror = self.get_mirror_resources()[resource_id]
return self._is_mirror_slave_ready_for_promote(xcli_mirror)
def _is_mirror_slave_ready_for_promote(self, xcli_mirror):
return xcli_mirror.sync_state != 'Initializing'
def _is_sync_mirror(self, xcli_mirror):
return xcli_mirror.sync_type == 'sync_best_effort'
def verify_mirror_existence(self, resource_id):
if resource_id not in self.get_mirror_resources():
raise NoMirrorDefinedError()
def verify_slave_consistency(self, resource_id):
xcli_mirror = self.get_mirror_resources()[resource_id]
if not self._is_slave_consistent(xcli_mirror):
raise SlaveIsNotConsistentRecoveryException()
def is_slave_consistent(self, resource_id):
xcli_mirror = self.get_mirror_resources()[resource_id]
return self._is_slave_consistent(xcli_mirror)
def _is_slave_consistent(self, xcli_mirror):
if not MirroredEntities.is_mirror_master(xcli_mirror):
if not self._is_mirror_slave_ready_for_promote(xcli_mirror):
return False
return True
def verify_mirror_is_active(self, resource_id):
xcli_mirror = self.get_mirror_resources()[resource_id]
self._verify_mirror_is_active(xcli_mirror)
def _verify_mirror_is_active(self, xcli_mirror):
if not self._is_mirror_active(xcli_mirror):
if xcli_mirror.mirror_error == 'No_Error':
msg = 'Deactivated by an admin'
else:
msg = xcli_mirror.mirror_error
raise MirrorInactiveError(msg)
def verify_mirror_connectivity(self, resource_id):
xcli_mirror = self.get_mirror_resources()[resource_id]
self._verify_mirror_connectivity(xcli_mirror)
def _verify_mirror_connectivity(self, xcli_mirror):
if not MirroredEntities.is_target_connected(xcli_mirror):
raise NoMirrorConnectivityRecoveryException()
def _does_pool_have_required_space_for_snapshots(self, pool_name,
num_of_volumes):
pool = self.action_entities.get_pool_by_name(pool_name)
snapshot_size = int(pool.snapshot_size)
used_by_snapshots = int(pool.used_by_snapshots)
# A new snapshot requires up to 17 GB per volume in an
# XIV/Spectrum Accelerate system. The size is smaller for A9000.
if snapshot_size > used_by_snapshots + num_of_volumes * 17 + 17:
return True
return False
# ================================================= Actions ==============
def is_cg_replicated(self, local_cg_id):
return local_cg_id in self.action_entities.get_cg_mirrors()
|
IBM/pyxcli | pyxcli/helpers/exceptool.py | chained | python | def chained(wrapping_exc):
# pylint: disable=W0212
t, v, tb = sys.exc_info()
if not t:
return wrapping_exc
wrapping_exc._inner_exc = v
lines = traceback.format_exception(t, v, tb)
wrapping_exc._inner_tb = "".join(lines[1:])
return wrapping_exc | Embeds the current exception information into the given one (which
will replace the current one).
For example::
try:
...
except OSError as ex:
raise chained(MyError("database not found!")) | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/helpers/exceptool.py#L21-L39 | null | ##############################################################################
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import sys
import traceback
|
IBM/pyxcli | pyxcli/client.py | BaseXCLIClient.options | python | def options(self, **options):
self._contexts.append(self._contexts[-1].copy())
self.set_options(**options)
try:
yield
finally:
self._contexts.pop(-1) | A context-manager for setting connection options; the original
values of the options will be restored when the context-manager exits.
For example::
with c.options(gui_mode = False):
c.cmd.vol_list() | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L108-L121 | [
"def set_options(self, **options):\n \"\"\"Sets the value of the given options (as keyword arguments).\n Note that underscored in the option's name will be replaced with\n hyphens (i.e., ``c.set_options(gui_mode = True)``\n will set the option ``gui-mode``)\n \"\"\"\n opt2 = self._contexts[-1]\n ... | class BaseXCLIClient(object):
DEFAULT_OPTIONS = {}
def __init__(self):
self._contexts = [self.DEFAULT_OPTIONS.copy()]
self.cmd = CommandNamespace(weakproxy(self))
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def _populate_commands(self):
for info in self.execute("help"):
invoker = getattr(self.cmd, info.name)
invoker.__doc__ = info.description + "\nUsage: " + info.syntax
invoker.syntax = info.syntax
setattr(self.cmd, info.name, invoker)
def is_connected(self):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def execute(self, cmd, **kwargs):
"""Executes command (with the arguments) on the connected machine"""
return self.execute_remote(None, cmd, **kwargs)
def execute_remote(self, remote_target, cmd, **kwargs):
"""
Executes the given command (with the given arguments)
on the given remote target of the connected machine
"""
raise NotImplementedError()
@contextmanager
def get_option(self, name):
"""Returns the value of the given option
(or a ``KeyError`` if it does not exist)
"""
return self._contexts[-1].get(name)
def set_options(self, **options):
"""Sets the value of the given options (as keyword arguments).
Note that underscored in the option's name will be replaced with
hyphens (i.e., ``c.set_options(gui_mode = True)``
will set the option ``gui-mode``)
"""
opt2 = self._contexts[-1]
for k, v in options.items():
k2 = k.replace("_", "-")
if v is None:
opt2.pop(k2, None)
else:
opt2[k2] = v
|
IBM/pyxcli | pyxcli/client.py | BaseXCLIClient.set_options | python | def set_options(self, **options):
opt2 = self._contexts[-1]
for k, v in options.items():
k2 = k.replace("_", "-")
if v is None:
opt2.pop(k2, None)
else:
opt2[k2] = v | Sets the value of the given options (as keyword arguments).
Note that underscored in the option's name will be replaced with
hyphens (i.e., ``c.set_options(gui_mode = True)``
will set the option ``gui-mode``) | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L129-L141 | null | class BaseXCLIClient(object):
DEFAULT_OPTIONS = {}
def __init__(self):
self._contexts = [self.DEFAULT_OPTIONS.copy()]
self.cmd = CommandNamespace(weakproxy(self))
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def _populate_commands(self):
for info in self.execute("help"):
invoker = getattr(self.cmd, info.name)
invoker.__doc__ = info.description + "\nUsage: " + info.syntax
invoker.syntax = info.syntax
setattr(self.cmd, info.name, invoker)
def is_connected(self):
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def execute(self, cmd, **kwargs):
"""Executes command (with the arguments) on the connected machine"""
return self.execute_remote(None, cmd, **kwargs)
def execute_remote(self, remote_target, cmd, **kwargs):
"""
Executes the given command (with the given arguments)
on the given remote target of the connected machine
"""
raise NotImplementedError()
@contextmanager
def options(self, **options):
"""A context-manager for setting connection options; the original
values of the options will be restored when the context-manager exits.
For example::
with c.options(gui_mode = False):
c.cmd.vol_list()
"""
self._contexts.append(self._contexts[-1].copy())
self.set_options(**options)
try:
yield
finally:
self._contexts.pop(-1)
def get_option(self, name):
"""Returns the value of the given option
(or a ``KeyError`` if it does not exist)
"""
return self._contexts[-1].get(name)
|
IBM/pyxcli | pyxcli/client.py | XCLIClient.connect_ssl | python | def connect_ssl(cls, user, password, endpoints,
ca_certs=None, validate=None):
if isinstance(endpoints, basestring):
endpoints = [endpoints]
transport = SingleEndpointTransport(
SocketTransport.connect_ssl, endpoints, ca_certs=ca_certs,
validate=validate)
return cls(transport, user, password) | Creates an SSL transport to the first endpoint (aserver) to which
we successfully connect | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L200-L211 | [
"def SingleEndpointTransport(connector, endpoints, ca_certs=None,\n validate=None):\n exceptions = []\n xlog.debug(\"SingleEndpointTransport connecting %r to %r\", connector,\n endpoints)\n for ep in endpoints:\n try:\n xlog.debug(\"Attempting %r\"... | class XCLIClient(BaseXCLIClient):
"""
The class implementing the XCLI client. Use the class' factory methods
(``connect_ssl``, etc.) to create an XCLI client.
Client objects have a special attribute named ``cmd``, which can be used
to represent XCLI commands as python functions (this is the recommened way
to use this class). For example::
client = XCLIClient.connect_ssl("admin", "mypass", "192.168.1.102")
results = client.cmd.vol_list(pool = "foobar")
"""
DEFAULT_OPTIONS = {
"i-am-sure": "yes",
"gui-mode": "yes",
"force-output": "yes",
"print-header": "no",
"compress-output": "base64",
}
def __init__(self, transport, user, password, populate=True):
"""
Initializes an XCLI client over the given transport object; do not
use this directly (unless you know what you're doing) -- use one of
the factory methods.
If ``user`` is not given (``None``), the XIV machine will
not be queried (``version_get``, etc.) and the ``cmd``
namespace will not be populated. If ``populate`` is False, the
``cmd`` namespace will not be populated (by running ``help``).
"""
BaseXCLIClient.__init__(self)
self.transport = transport
self._lock = Lock()
self._cmdindex = itertools.count(1)
if user is not None:
self.set_options(user=user, password=password)
if populate:
self._populate_commands()
def is_connected(self):
return self.transport.is_connected()
def close(self):
"""
Closes the client
"""
self.transport.close()
self.transport = ClosedTransport
def reconnect(self):
"""
Reconnect as last valid connection
"""
self.transport.reconnect()
@classmethod
@classmethod
def connect_multiendpoint_ssl(cls, user, password, endpoints,
auto_discover=True, ca_certs=None,
validate=None):
"""
Creates a MultiEndpointTransport, so that if the current endpoint
(aserver) fails, it would automatically move to the next available
endpoint.
If ``auto_discover`` is ``True``, we will execute ipinterface_list
on the system to discover all management IP interfaces and add them
to the list of endpoints
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
client, transport = cls._initiate_client_for_multi_endpoint(user,
password,
endpoints,
ca_certs,
validate)
if auto_discover and user:
all_endpoints = [ipif.address for ipif in
client.cmd.ipinterface_list()
if ipif.type.lower() == "management"]
transport.add_endpoints(all_endpoints)
return client
@classmethod
def _initiate_client_for_multi_endpoint(cls, usr, pwd, endpoints,
ca_certs, validate):
while True:
try:
transport = MultiEndpointTransport(SocketTransport
.connect_ssl,
endpoints,
ca_certs=ca_certs,
validate=validate)
client = cls(transport, usr, pwd)
return client, transport
except CommandFailedAServerError:
return cls._initiate_client_for_multi_endpoint(usr, pwd,
endpoints[1:],
ca_certs,
validate)
def _dump_xcli(self, obj):
if isinstance(obj, bool):
return "yes" if obj else "no"
return str(obj)
def _build_command(self, cmd, params, options, remote_target=None):
root = etree.Element("command", id=str(self._cmdindex.next()),
type=cmd, close_on_return="no")
if remote_target:
root.attrib["remote_target"] = remote_target
for k, v in options.items():
root.append(etree.Element("option", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
for k, v in params.items():
root.append(etree.Element("argument", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
data = etree.tostring(root)
anon = data.replace(options["password"],
"XXX") if "password" in options else data
xlog.debug("SEND %s" % (anon))
return data
def _build_response(self, rootelem):
# "/command/aserver/@status"
aserver = etree.xml_find(rootelem, "aserver", "status")
if aserver != "DELIVERY_SUCCESSFUL":
raise CommandFailedAServerError.instantiate(aserver, rootelem)
try:
# "/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "administrator/command")
except ElementNotFoundException:
# "/command/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "command/administrator/command")
# "code/@value"
code = etree.xml_find(cmdroot, "code", "value")
encoding = self.get_option("compress-output")
if code != "SUCCESS":
raise CommandExecutionError.instantiate(rootelem,
cmdroot, encoding)
return XCLIResponse.instantiate(cmdroot, encoding)
def execute_remote(self, remote_target, cmd, **kwargs):
"""
Executes the given command (with the given arguments)
on the given remote target of the connected machine
"""
data = self._build_command(cmd, kwargs, self._contexts[-1],
remote_target)
with self._lock:
rootelem = self.transport.send(data)
try:
return self._build_response(rootelem)
except ElementNotFoundException:
xlog.exception("XCLIClient.execute")
raise chained(CorruptResponse(rootelem))
except Exception as e:
xlog.exception("XCLIClient.execute")
raise e
def get_user_client(self, user, password, populate=True):
"""
Returns a new client for the given user. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
return XCLIClientForUser(weakproxy(self), user, password,
populate=populate)
def get_remote_client(self, target_name, user=None, password=None):
"""
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True)
@contextmanager
def as_user(self, user, password):
"""
A context-manager for ``get_user_client``. Allows the execution
of commands as a different user with ease.
Example:
>>> c.cmd.vol_list()
>>> with c.as_user("user", "password"):
... c.cmd.vol_list()
"""
with self.options(user=user, password=password):
yield self
|
IBM/pyxcli | pyxcli/client.py | XCLIClient.connect_multiendpoint_ssl | python | def connect_multiendpoint_ssl(cls, user, password, endpoints,
auto_discover=True, ca_certs=None,
validate=None):
if isinstance(endpoints, basestring):
endpoints = [endpoints]
client, transport = cls._initiate_client_for_multi_endpoint(user,
password,
endpoints,
ca_certs,
validate)
if auto_discover and user:
all_endpoints = [ipif.address for ipif in
client.cmd.ipinterface_list()
if ipif.type.lower() == "management"]
transport.add_endpoints(all_endpoints)
return client | Creates a MultiEndpointTransport, so that if the current endpoint
(aserver) fails, it would automatically move to the next available
endpoint.
If ``auto_discover`` is ``True``, we will execute ipinterface_list
on the system to discover all management IP interfaces and add them
to the list of endpoints | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L214-L238 | null | class XCLIClient(BaseXCLIClient):
"""
The class implementing the XCLI client. Use the class' factory methods
(``connect_ssl``, etc.) to create an XCLI client.
Client objects have a special attribute named ``cmd``, which can be used
to represent XCLI commands as python functions (this is the recommened way
to use this class). For example::
client = XCLIClient.connect_ssl("admin", "mypass", "192.168.1.102")
results = client.cmd.vol_list(pool = "foobar")
"""
DEFAULT_OPTIONS = {
"i-am-sure": "yes",
"gui-mode": "yes",
"force-output": "yes",
"print-header": "no",
"compress-output": "base64",
}
def __init__(self, transport, user, password, populate=True):
"""
Initializes an XCLI client over the given transport object; do not
use this directly (unless you know what you're doing) -- use one of
the factory methods.
If ``user`` is not given (``None``), the XIV machine will
not be queried (``version_get``, etc.) and the ``cmd``
namespace will not be populated. If ``populate`` is False, the
``cmd`` namespace will not be populated (by running ``help``).
"""
BaseXCLIClient.__init__(self)
self.transport = transport
self._lock = Lock()
self._cmdindex = itertools.count(1)
if user is not None:
self.set_options(user=user, password=password)
if populate:
self._populate_commands()
def is_connected(self):
return self.transport.is_connected()
def close(self):
"""
Closes the client
"""
self.transport.close()
self.transport = ClosedTransport
def reconnect(self):
"""
Reconnect as last valid connection
"""
self.transport.reconnect()
@classmethod
def connect_ssl(cls, user, password, endpoints,
ca_certs=None, validate=None):
"""
Creates an SSL transport to the first endpoint (aserver) to which
we successfully connect
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
transport = SingleEndpointTransport(
SocketTransport.connect_ssl, endpoints, ca_certs=ca_certs,
validate=validate)
return cls(transport, user, password)
@classmethod
@classmethod
def _initiate_client_for_multi_endpoint(cls, usr, pwd, endpoints,
ca_certs, validate):
while True:
try:
transport = MultiEndpointTransport(SocketTransport
.connect_ssl,
endpoints,
ca_certs=ca_certs,
validate=validate)
client = cls(transport, usr, pwd)
return client, transport
except CommandFailedAServerError:
return cls._initiate_client_for_multi_endpoint(usr, pwd,
endpoints[1:],
ca_certs,
validate)
def _dump_xcli(self, obj):
if isinstance(obj, bool):
return "yes" if obj else "no"
return str(obj)
def _build_command(self, cmd, params, options, remote_target=None):
root = etree.Element("command", id=str(self._cmdindex.next()),
type=cmd, close_on_return="no")
if remote_target:
root.attrib["remote_target"] = remote_target
for k, v in options.items():
root.append(etree.Element("option", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
for k, v in params.items():
root.append(etree.Element("argument", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
data = etree.tostring(root)
anon = data.replace(options["password"],
"XXX") if "password" in options else data
xlog.debug("SEND %s" % (anon))
return data
def _build_response(self, rootelem):
# "/command/aserver/@status"
aserver = etree.xml_find(rootelem, "aserver", "status")
if aserver != "DELIVERY_SUCCESSFUL":
raise CommandFailedAServerError.instantiate(aserver, rootelem)
try:
# "/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "administrator/command")
except ElementNotFoundException:
# "/command/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "command/administrator/command")
# "code/@value"
code = etree.xml_find(cmdroot, "code", "value")
encoding = self.get_option("compress-output")
if code != "SUCCESS":
raise CommandExecutionError.instantiate(rootelem,
cmdroot, encoding)
return XCLIResponse.instantiate(cmdroot, encoding)
def execute_remote(self, remote_target, cmd, **kwargs):
"""
Executes the given command (with the given arguments)
on the given remote target of the connected machine
"""
data = self._build_command(cmd, kwargs, self._contexts[-1],
remote_target)
with self._lock:
rootelem = self.transport.send(data)
try:
return self._build_response(rootelem)
except ElementNotFoundException:
xlog.exception("XCLIClient.execute")
raise chained(CorruptResponse(rootelem))
except Exception as e:
xlog.exception("XCLIClient.execute")
raise e
def get_user_client(self, user, password, populate=True):
"""
Returns a new client for the given user. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
return XCLIClientForUser(weakproxy(self), user, password,
populate=populate)
def get_remote_client(self, target_name, user=None, password=None):
"""
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True)
@contextmanager
def as_user(self, user, password):
"""
A context-manager for ``get_user_client``. Allows the execution
of commands as a different user with ease.
Example:
>>> c.cmd.vol_list()
>>> with c.as_user("user", "password"):
... c.cmd.vol_list()
"""
with self.options(user=user, password=password):
yield self
|
IBM/pyxcli | pyxcli/client.py | XCLIClient.execute_remote | python | def execute_remote(self, remote_target, cmd, **kwargs):
data = self._build_command(cmd, kwargs, self._contexts[-1],
remote_target)
with self._lock:
rootelem = self.transport.send(data)
try:
return self._build_response(rootelem)
except ElementNotFoundException:
xlog.exception("XCLIClient.execute")
raise chained(CorruptResponse(rootelem))
except Exception as e:
xlog.exception("XCLIClient.execute")
raise e | Executes the given command (with the given arguments)
on the given remote target of the connected machine | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L304-L320 | [
"def chained(wrapping_exc):\n # pylint: disable=W0212\n \"\"\"\n Embeds the current exception information into the given one (which\n will replace the current one).\n For example::\n\n try:\n ...\n except OSError as ex:\n raise chained(MyError(\"database not found!... | class XCLIClient(BaseXCLIClient):
"""
The class implementing the XCLI client. Use the class' factory methods
(``connect_ssl``, etc.) to create an XCLI client.
Client objects have a special attribute named ``cmd``, which can be used
to represent XCLI commands as python functions (this is the recommened way
to use this class). For example::
client = XCLIClient.connect_ssl("admin", "mypass", "192.168.1.102")
results = client.cmd.vol_list(pool = "foobar")
"""
DEFAULT_OPTIONS = {
"i-am-sure": "yes",
"gui-mode": "yes",
"force-output": "yes",
"print-header": "no",
"compress-output": "base64",
}
def __init__(self, transport, user, password, populate=True):
"""
Initializes an XCLI client over the given transport object; do not
use this directly (unless you know what you're doing) -- use one of
the factory methods.
If ``user`` is not given (``None``), the XIV machine will
not be queried (``version_get``, etc.) and the ``cmd``
namespace will not be populated. If ``populate`` is False, the
``cmd`` namespace will not be populated (by running ``help``).
"""
BaseXCLIClient.__init__(self)
self.transport = transport
self._lock = Lock()
self._cmdindex = itertools.count(1)
if user is not None:
self.set_options(user=user, password=password)
if populate:
self._populate_commands()
def is_connected(self):
return self.transport.is_connected()
def close(self):
"""
Closes the client
"""
self.transport.close()
self.transport = ClosedTransport
def reconnect(self):
"""
Reconnect as last valid connection
"""
self.transport.reconnect()
@classmethod
def connect_ssl(cls, user, password, endpoints,
ca_certs=None, validate=None):
"""
Creates an SSL transport to the first endpoint (aserver) to which
we successfully connect
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
transport = SingleEndpointTransport(
SocketTransport.connect_ssl, endpoints, ca_certs=ca_certs,
validate=validate)
return cls(transport, user, password)
@classmethod
def connect_multiendpoint_ssl(cls, user, password, endpoints,
auto_discover=True, ca_certs=None,
validate=None):
"""
Creates a MultiEndpointTransport, so that if the current endpoint
(aserver) fails, it would automatically move to the next available
endpoint.
If ``auto_discover`` is ``True``, we will execute ipinterface_list
on the system to discover all management IP interfaces and add them
to the list of endpoints
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
client, transport = cls._initiate_client_for_multi_endpoint(user,
password,
endpoints,
ca_certs,
validate)
if auto_discover and user:
all_endpoints = [ipif.address for ipif in
client.cmd.ipinterface_list()
if ipif.type.lower() == "management"]
transport.add_endpoints(all_endpoints)
return client
@classmethod
def _initiate_client_for_multi_endpoint(cls, usr, pwd, endpoints,
ca_certs, validate):
while True:
try:
transport = MultiEndpointTransport(SocketTransport
.connect_ssl,
endpoints,
ca_certs=ca_certs,
validate=validate)
client = cls(transport, usr, pwd)
return client, transport
except CommandFailedAServerError:
return cls._initiate_client_for_multi_endpoint(usr, pwd,
endpoints[1:],
ca_certs,
validate)
def _dump_xcli(self, obj):
if isinstance(obj, bool):
return "yes" if obj else "no"
return str(obj)
def _build_command(self, cmd, params, options, remote_target=None):
root = etree.Element("command", id=str(self._cmdindex.next()),
type=cmd, close_on_return="no")
if remote_target:
root.attrib["remote_target"] = remote_target
for k, v in options.items():
root.append(etree.Element("option", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
for k, v in params.items():
root.append(etree.Element("argument", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
data = etree.tostring(root)
anon = data.replace(options["password"],
"XXX") if "password" in options else data
xlog.debug("SEND %s" % (anon))
return data
def _build_response(self, rootelem):
# "/command/aserver/@status"
aserver = etree.xml_find(rootelem, "aserver", "status")
if aserver != "DELIVERY_SUCCESSFUL":
raise CommandFailedAServerError.instantiate(aserver, rootelem)
try:
# "/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "administrator/command")
except ElementNotFoundException:
# "/command/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "command/administrator/command")
# "code/@value"
code = etree.xml_find(cmdroot, "code", "value")
encoding = self.get_option("compress-output")
if code != "SUCCESS":
raise CommandExecutionError.instantiate(rootelem,
cmdroot, encoding)
return XCLIResponse.instantiate(cmdroot, encoding)
def get_user_client(self, user, password, populate=True):
"""
Returns a new client for the given user. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
return XCLIClientForUser(weakproxy(self), user, password,
populate=populate)
def get_remote_client(self, target_name, user=None, password=None):
"""
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True)
@contextmanager
def as_user(self, user, password):
"""
A context-manager for ``get_user_client``. Allows the execution
of commands as a different user with ease.
Example:
>>> c.cmd.vol_list()
>>> with c.as_user("user", "password"):
... c.cmd.vol_list()
"""
with self.options(user=user, password=password):
yield self
|
IBM/pyxcli | pyxcli/client.py | XCLIClient.get_user_client | python | def get_user_client(self, user, password, populate=True):
return XCLIClientForUser(weakproxy(self), user, password,
populate=populate) | Returns a new client for the given user. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L322-L329 | null | class XCLIClient(BaseXCLIClient):
"""
The class implementing the XCLI client. Use the class' factory methods
(``connect_ssl``, etc.) to create an XCLI client.
Client objects have a special attribute named ``cmd``, which can be used
to represent XCLI commands as python functions (this is the recommened way
to use this class). For example::
client = XCLIClient.connect_ssl("admin", "mypass", "192.168.1.102")
results = client.cmd.vol_list(pool = "foobar")
"""
DEFAULT_OPTIONS = {
"i-am-sure": "yes",
"gui-mode": "yes",
"force-output": "yes",
"print-header": "no",
"compress-output": "base64",
}
def __init__(self, transport, user, password, populate=True):
"""
Initializes an XCLI client over the given transport object; do not
use this directly (unless you know what you're doing) -- use one of
the factory methods.
If ``user`` is not given (``None``), the XIV machine will
not be queried (``version_get``, etc.) and the ``cmd``
namespace will not be populated. If ``populate`` is False, the
``cmd`` namespace will not be populated (by running ``help``).
"""
BaseXCLIClient.__init__(self)
self.transport = transport
self._lock = Lock()
self._cmdindex = itertools.count(1)
if user is not None:
self.set_options(user=user, password=password)
if populate:
self._populate_commands()
def is_connected(self):
return self.transport.is_connected()
def close(self):
"""
Closes the client
"""
self.transport.close()
self.transport = ClosedTransport
def reconnect(self):
"""
Reconnect as last valid connection
"""
self.transport.reconnect()
@classmethod
def connect_ssl(cls, user, password, endpoints,
ca_certs=None, validate=None):
"""
Creates an SSL transport to the first endpoint (aserver) to which
we successfully connect
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
transport = SingleEndpointTransport(
SocketTransport.connect_ssl, endpoints, ca_certs=ca_certs,
validate=validate)
return cls(transport, user, password)
@classmethod
def connect_multiendpoint_ssl(cls, user, password, endpoints,
auto_discover=True, ca_certs=None,
validate=None):
"""
Creates a MultiEndpointTransport, so that if the current endpoint
(aserver) fails, it would automatically move to the next available
endpoint.
If ``auto_discover`` is ``True``, we will execute ipinterface_list
on the system to discover all management IP interfaces and add them
to the list of endpoints
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
client, transport = cls._initiate_client_for_multi_endpoint(user,
password,
endpoints,
ca_certs,
validate)
if auto_discover and user:
all_endpoints = [ipif.address for ipif in
client.cmd.ipinterface_list()
if ipif.type.lower() == "management"]
transport.add_endpoints(all_endpoints)
return client
@classmethod
def _initiate_client_for_multi_endpoint(cls, usr, pwd, endpoints,
ca_certs, validate):
while True:
try:
transport = MultiEndpointTransport(SocketTransport
.connect_ssl,
endpoints,
ca_certs=ca_certs,
validate=validate)
client = cls(transport, usr, pwd)
return client, transport
except CommandFailedAServerError:
return cls._initiate_client_for_multi_endpoint(usr, pwd,
endpoints[1:],
ca_certs,
validate)
def _dump_xcli(self, obj):
if isinstance(obj, bool):
return "yes" if obj else "no"
return str(obj)
def _build_command(self, cmd, params, options, remote_target=None):
root = etree.Element("command", id=str(self._cmdindex.next()),
type=cmd, close_on_return="no")
if remote_target:
root.attrib["remote_target"] = remote_target
for k, v in options.items():
root.append(etree.Element("option", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
for k, v in params.items():
root.append(etree.Element("argument", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
data = etree.tostring(root)
anon = data.replace(options["password"],
"XXX") if "password" in options else data
xlog.debug("SEND %s" % (anon))
return data
def _build_response(self, rootelem):
# "/command/aserver/@status"
aserver = etree.xml_find(rootelem, "aserver", "status")
if aserver != "DELIVERY_SUCCESSFUL":
raise CommandFailedAServerError.instantiate(aserver, rootelem)
try:
# "/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "administrator/command")
except ElementNotFoundException:
# "/command/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "command/administrator/command")
# "code/@value"
code = etree.xml_find(cmdroot, "code", "value")
encoding = self.get_option("compress-output")
if code != "SUCCESS":
raise CommandExecutionError.instantiate(rootelem,
cmdroot, encoding)
return XCLIResponse.instantiate(cmdroot, encoding)
def execute_remote(self, remote_target, cmd, **kwargs):
"""
Executes the given command (with the given arguments)
on the given remote target of the connected machine
"""
data = self._build_command(cmd, kwargs, self._contexts[-1],
remote_target)
with self._lock:
rootelem = self.transport.send(data)
try:
return self._build_response(rootelem)
except ElementNotFoundException:
xlog.exception("XCLIClient.execute")
raise chained(CorruptResponse(rootelem))
except Exception as e:
xlog.exception("XCLIClient.execute")
raise e
def get_remote_client(self, target_name, user=None, password=None):
"""
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True)
@contextmanager
def as_user(self, user, password):
"""
A context-manager for ``get_user_client``. Allows the execution
of commands as a different user with ease.
Example:
>>> c.cmd.vol_list()
>>> with c.as_user("user", "password"):
... c.cmd.vol_list()
"""
with self.options(user=user, password=password):
yield self
|
IBM/pyxcli | pyxcli/client.py | XCLIClient.get_remote_client | python | def get_remote_client(self, target_name, user=None, password=None):
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True) | Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L331-L341 | [
"def get_user_client(self, user, password, populate=True):\n \"\"\"\n Returns a new client for the given user. This is a lightweight\n client that only uses different credentials and shares the transport\n with the underlying client\n \"\"\"\n return XCLIClientForUser(weakproxy(self), user, passwo... | class XCLIClient(BaseXCLIClient):
"""
The class implementing the XCLI client. Use the class' factory methods
(``connect_ssl``, etc.) to create an XCLI client.
Client objects have a special attribute named ``cmd``, which can be used
to represent XCLI commands as python functions (this is the recommened way
to use this class). For example::
client = XCLIClient.connect_ssl("admin", "mypass", "192.168.1.102")
results = client.cmd.vol_list(pool = "foobar")
"""
DEFAULT_OPTIONS = {
"i-am-sure": "yes",
"gui-mode": "yes",
"force-output": "yes",
"print-header": "no",
"compress-output": "base64",
}
def __init__(self, transport, user, password, populate=True):
"""
Initializes an XCLI client over the given transport object; do not
use this directly (unless you know what you're doing) -- use one of
the factory methods.
If ``user`` is not given (``None``), the XIV machine will
not be queried (``version_get``, etc.) and the ``cmd``
namespace will not be populated. If ``populate`` is False, the
``cmd`` namespace will not be populated (by running ``help``).
"""
BaseXCLIClient.__init__(self)
self.transport = transport
self._lock = Lock()
self._cmdindex = itertools.count(1)
if user is not None:
self.set_options(user=user, password=password)
if populate:
self._populate_commands()
def is_connected(self):
return self.transport.is_connected()
def close(self):
"""
Closes the client
"""
self.transport.close()
self.transport = ClosedTransport
def reconnect(self):
"""
Reconnect as last valid connection
"""
self.transport.reconnect()
@classmethod
def connect_ssl(cls, user, password, endpoints,
ca_certs=None, validate=None):
"""
Creates an SSL transport to the first endpoint (aserver) to which
we successfully connect
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
transport = SingleEndpointTransport(
SocketTransport.connect_ssl, endpoints, ca_certs=ca_certs,
validate=validate)
return cls(transport, user, password)
@classmethod
def connect_multiendpoint_ssl(cls, user, password, endpoints,
auto_discover=True, ca_certs=None,
validate=None):
"""
Creates a MultiEndpointTransport, so that if the current endpoint
(aserver) fails, it would automatically move to the next available
endpoint.
If ``auto_discover`` is ``True``, we will execute ipinterface_list
on the system to discover all management IP interfaces and add them
to the list of endpoints
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
client, transport = cls._initiate_client_for_multi_endpoint(user,
password,
endpoints,
ca_certs,
validate)
if auto_discover and user:
all_endpoints = [ipif.address for ipif in
client.cmd.ipinterface_list()
if ipif.type.lower() == "management"]
transport.add_endpoints(all_endpoints)
return client
@classmethod
def _initiate_client_for_multi_endpoint(cls, usr, pwd, endpoints,
ca_certs, validate):
while True:
try:
transport = MultiEndpointTransport(SocketTransport
.connect_ssl,
endpoints,
ca_certs=ca_certs,
validate=validate)
client = cls(transport, usr, pwd)
return client, transport
except CommandFailedAServerError:
return cls._initiate_client_for_multi_endpoint(usr, pwd,
endpoints[1:],
ca_certs,
validate)
def _dump_xcli(self, obj):
if isinstance(obj, bool):
return "yes" if obj else "no"
return str(obj)
def _build_command(self, cmd, params, options, remote_target=None):
root = etree.Element("command", id=str(self._cmdindex.next()),
type=cmd, close_on_return="no")
if remote_target:
root.attrib["remote_target"] = remote_target
for k, v in options.items():
root.append(etree.Element("option", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
for k, v in params.items():
root.append(etree.Element("argument", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
data = etree.tostring(root)
anon = data.replace(options["password"],
"XXX") if "password" in options else data
xlog.debug("SEND %s" % (anon))
return data
def _build_response(self, rootelem):
# "/command/aserver/@status"
aserver = etree.xml_find(rootelem, "aserver", "status")
if aserver != "DELIVERY_SUCCESSFUL":
raise CommandFailedAServerError.instantiate(aserver, rootelem)
try:
# "/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "administrator/command")
except ElementNotFoundException:
# "/command/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "command/administrator/command")
# "code/@value"
code = etree.xml_find(cmdroot, "code", "value")
encoding = self.get_option("compress-output")
if code != "SUCCESS":
raise CommandExecutionError.instantiate(rootelem,
cmdroot, encoding)
return XCLIResponse.instantiate(cmdroot, encoding)
def execute_remote(self, remote_target, cmd, **kwargs):
"""
Executes the given command (with the given arguments)
on the given remote target of the connected machine
"""
data = self._build_command(cmd, kwargs, self._contexts[-1],
remote_target)
with self._lock:
rootelem = self.transport.send(data)
try:
return self._build_response(rootelem)
except ElementNotFoundException:
xlog.exception("XCLIClient.execute")
raise chained(CorruptResponse(rootelem))
except Exception as e:
xlog.exception("XCLIClient.execute")
raise e
def get_user_client(self, user, password, populate=True):
"""
Returns a new client for the given user. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
return XCLIClientForUser(weakproxy(self), user, password,
populate=populate)
@contextmanager
def as_user(self, user, password):
"""
A context-manager for ``get_user_client``. Allows the execution
of commands as a different user with ease.
Example:
>>> c.cmd.vol_list()
>>> with c.as_user("user", "password"):
... c.cmd.vol_list()
"""
with self.options(user=user, password=password):
yield self
|
IBM/pyxcli | pyxcli/client.py | XCLIClient.as_user | python | def as_user(self, user, password):
with self.options(user=user, password=password):
yield self | A context-manager for ``get_user_client``. Allows the execution
of commands as a different user with ease.
Example:
>>> c.cmd.vol_list()
>>> with c.as_user("user", "password"):
... c.cmd.vol_list() | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/client.py#L344-L358 | null | class XCLIClient(BaseXCLIClient):
"""
The class implementing the XCLI client. Use the class' factory methods
(``connect_ssl``, etc.) to create an XCLI client.
Client objects have a special attribute named ``cmd``, which can be used
to represent XCLI commands as python functions (this is the recommened way
to use this class). For example::
client = XCLIClient.connect_ssl("admin", "mypass", "192.168.1.102")
results = client.cmd.vol_list(pool = "foobar")
"""
DEFAULT_OPTIONS = {
"i-am-sure": "yes",
"gui-mode": "yes",
"force-output": "yes",
"print-header": "no",
"compress-output": "base64",
}
def __init__(self, transport, user, password, populate=True):
"""
Initializes an XCLI client over the given transport object; do not
use this directly (unless you know what you're doing) -- use one of
the factory methods.
If ``user`` is not given (``None``), the XIV machine will
not be queried (``version_get``, etc.) and the ``cmd``
namespace will not be populated. If ``populate`` is False, the
``cmd`` namespace will not be populated (by running ``help``).
"""
BaseXCLIClient.__init__(self)
self.transport = transport
self._lock = Lock()
self._cmdindex = itertools.count(1)
if user is not None:
self.set_options(user=user, password=password)
if populate:
self._populate_commands()
def is_connected(self):
return self.transport.is_connected()
def close(self):
"""
Closes the client
"""
self.transport.close()
self.transport = ClosedTransport
def reconnect(self):
"""
Reconnect as last valid connection
"""
self.transport.reconnect()
@classmethod
def connect_ssl(cls, user, password, endpoints,
ca_certs=None, validate=None):
"""
Creates an SSL transport to the first endpoint (aserver) to which
we successfully connect
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
transport = SingleEndpointTransport(
SocketTransport.connect_ssl, endpoints, ca_certs=ca_certs,
validate=validate)
return cls(transport, user, password)
@classmethod
def connect_multiendpoint_ssl(cls, user, password, endpoints,
auto_discover=True, ca_certs=None,
validate=None):
"""
Creates a MultiEndpointTransport, so that if the current endpoint
(aserver) fails, it would automatically move to the next available
endpoint.
If ``auto_discover`` is ``True``, we will execute ipinterface_list
on the system to discover all management IP interfaces and add them
to the list of endpoints
"""
if isinstance(endpoints, basestring):
endpoints = [endpoints]
client, transport = cls._initiate_client_for_multi_endpoint(user,
password,
endpoints,
ca_certs,
validate)
if auto_discover and user:
all_endpoints = [ipif.address for ipif in
client.cmd.ipinterface_list()
if ipif.type.lower() == "management"]
transport.add_endpoints(all_endpoints)
return client
@classmethod
def _initiate_client_for_multi_endpoint(cls, usr, pwd, endpoints,
ca_certs, validate):
while True:
try:
transport = MultiEndpointTransport(SocketTransport
.connect_ssl,
endpoints,
ca_certs=ca_certs,
validate=validate)
client = cls(transport, usr, pwd)
return client, transport
except CommandFailedAServerError:
return cls._initiate_client_for_multi_endpoint(usr, pwd,
endpoints[1:],
ca_certs,
validate)
def _dump_xcli(self, obj):
if isinstance(obj, bool):
return "yes" if obj else "no"
return str(obj)
def _build_command(self, cmd, params, options, remote_target=None):
root = etree.Element("command", id=str(self._cmdindex.next()),
type=cmd, close_on_return="no")
if remote_target:
root.attrib["remote_target"] = remote_target
for k, v in options.items():
root.append(etree.Element("option", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
for k, v in params.items():
root.append(etree.Element("argument", name=self._dump_xcli(k),
value=self._dump_xcli(v)))
data = etree.tostring(root)
anon = data.replace(options["password"],
"XXX") if "password" in options else data
xlog.debug("SEND %s" % (anon))
return data
def _build_response(self, rootelem):
# "/command/aserver/@status"
aserver = etree.xml_find(rootelem, "aserver", "status")
if aserver != "DELIVERY_SUCCESSFUL":
raise CommandFailedAServerError.instantiate(aserver, rootelem)
try:
# "/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "administrator/command")
except ElementNotFoundException:
# "/command/command/administrator/command"
cmdroot = etree.xml_find(rootelem, "command/administrator/command")
# "code/@value"
code = etree.xml_find(cmdroot, "code", "value")
encoding = self.get_option("compress-output")
if code != "SUCCESS":
raise CommandExecutionError.instantiate(rootelem,
cmdroot, encoding)
return XCLIResponse.instantiate(cmdroot, encoding)
def execute_remote(self, remote_target, cmd, **kwargs):
"""
Executes the given command (with the given arguments)
on the given remote target of the connected machine
"""
data = self._build_command(cmd, kwargs, self._contexts[-1],
remote_target)
with self._lock:
rootelem = self.transport.send(data)
try:
return self._build_response(rootelem)
except ElementNotFoundException:
xlog.exception("XCLIClient.execute")
raise chained(CorruptResponse(rootelem))
except Exception as e:
xlog.exception("XCLIClient.execute")
raise e
def get_user_client(self, user, password, populate=True):
"""
Returns a new client for the given user. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
return XCLIClientForUser(weakproxy(self), user, password,
populate=populate)
def get_remote_client(self, target_name, user=None, password=None):
"""
Returns a new client for the remote target. This is a lightweight
client that only uses different credentials and shares the transport
with the underlying client
"""
if user:
base = self.get_user_client(user, password, populate=False)
else:
base = weakproxy(self)
return RemoteXCLIClient(base, target_name, populate=True)
@contextmanager
|
IBM/pyxcli | pyxcli/mirroring/cg_recovery_manager.py | CGRecoveryManager.create_mirror | python | def create_mirror(self, resource_name, target_name, mirror_type,
slave_resource_name, rpo=None, remote_rpo=None,
schedule=None, remote_schedule=None,
activate_mirror='no'):
'''creates a mirror and returns a mirror object.
target name must be a valid target from target_list,
mirror type must be 'sync' or 'async',
slave_resource_name would be the slave_cg name'''
return self._create_mirror('cg', resource_name, target_name,
mirror_type, slave_resource_name, rpo=rpo,
remote_rpo=remote_rpo, schedule=schedule,
remote_schedule=remote_schedule,
activate_mirror=activate_mirror) | creates a mirror and returns a mirror object.
target name must be a valid target from target_list,
mirror type must be 'sync' or 'async',
slave_resource_name would be the slave_cg name | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/mirroring/cg_recovery_manager.py#L105-L118 | [
"def _create_mirror(self, resource_type, resource_name, target_name,\n mirror_type, slave_resource_name, create_slave='no',\n remote_pool=None, rpo=None, remote_rpo=None,\n schedule=None, remote_schedule=None,\n activate_mirror='no'):\n '''c... | class CGRecoveryManager(RecoveryManager):
# ================================================= INIT =================
def __init__(self, should_use_cache, xcli_client):
super(CGRecoveryManager, self).__init__(
should_use_cache, xcli_client)
def get_mirror_resources(self):
return self.action_entities.get_cg_mirrors()
def get_type_str(self):
return CG
def _does_resource_have_mapped_test_snapshot(self, group_id,
test_snapshot_prefix):
# Mapped is when all snapshots are mapped
groups = self.get_target_group_test_snap_groups(group_id,
test_snapshot_prefix)
for snap_group in groups:
logger.debug("Found snap group %s for cg %s" %
(snap_group.name, group_id))
all_mapped = True
snaps = self.action_entities.get_snapshots_by_snap_groups()
for snapshot in snaps[snap_group.name]:
if not self.is_volume_mapped(snapshot):
all_mapped = False
if all_mapped:
logger.debug(
"All snapshots in snap group %s are "
"mapped" % snap_group.name)
return True
return False
def _unmap_and_delete_test_snapshots(self, group_id, test_snapshot_prefix):
groups = self.get_target_group_test_snap_groups(group_id,
test_snapshot_prefix)
for snap_group in groups:
snapshots = self.action_entities.get_snapshots_by_snap_groups()
for snapshot in snapshots[snap_group.name]:
self.unmap_volume(snapshot)
self.delete_snap_group(snap_group.name)
def _create_and_unlock_snapshot(self, cg, snap_group,
test_snapshot_prefix):
existing_snap_groups = self.get_target_group_test_snap_groups(
cg, test_snapshot_prefix)
if len(existing_snap_groups) == 0:
logger.debug(
"creating and unlocking snap group"
"%s for cg %s" % (snap_group, cg))
xcli_mirror = self.get_mirror_resources()[cg]
if self._is_sync_mirror(xcli_mirror):
self.xcli_client.cmd.cg_snapshots_create(
cg=cg, snap_group=snap_group)
else:
self.xcli_client.cmd.snap_group_duplicate(
snap_group=self._get_last_replicated_snapshot_name(cg),
new_snap_group=snap_group)
self.xcli_client.cmd.snap_group_unlock(snap_group=snap_group)
else:
logger.debug("unlocking previously existing snap group"
"%s for cg %s" % (existing_snap_groups[0].name, cg))
self.xcli_client.cmd.snap_group_unlock(
snap_group=existing_snap_groups[0].name)
def get_target_group_test_snap_groups(self, group_id,
test_snapshot_prefix):
snap_groups = list()
for snap_group in self.xcli_client.cmd.snap_group_list(cg=group_id):
if snap_group.name.startswith(test_snapshot_prefix):
snap_groups.append(snap_group)
return snap_groups
# ============================================ REVERSE REPLICATION =======
# ================================================= MIRROR ACTIONS =======
def create_mirror(self, resource_name, target_name, mirror_type,
slave_resource_name, rpo=None, remote_rpo=None,
schedule=None, remote_schedule=None,
activate_mirror='no'):
'''creates a mirror and returns a mirror object.
target name must be a valid target from target_list,
mirror type must be 'sync' or 'async',
slave_resource_name would be the slave_cg name'''
return self._create_mirror('cg', resource_name, target_name,
mirror_type, slave_resource_name, rpo=rpo,
remote_rpo=remote_rpo, schedule=schedule,
remote_schedule=remote_schedule,
activate_mirror=activate_mirror)
def delete_mirror(self, resource_id):
'''delete a mirror by resource_id'''
self._delete_mirror(cg=resource_id)
def activate_mirror(self, resource_id):
self._activate_mirror(cg=resource_id)
def deactivate_mirror(self, resource_id):
self._deactivate_mirror(cg=resource_id)
def _change_role(self, resource_id, role):
self.xcli_client.cmd.mirror_change_role(cg=resource_id, new_role=role)
def _switch_roles(self, resource_id):
self.xcli_client.cmd.mirror_switch_roles(cg=resource_id)
def _mirror_change_designation(self, resource_id, new_designation_str):
self.xcli_client.cmd.mirror_change_designation(
cg=resource_id, new_designation=new_designation_str)
# =============================================== MAPPING ================
def unmap_all_volumes(self, group_id):
for volume in self.get_cg_volumes(group_id):
self.unmap_volume(volume)
# ================================================= SNAPSHOT =============
def snap_target_before_possible_override(self, cg_id,
snap_group_name=None):
if (snap_group_name is None):
snap_group_name = ("temp_synced_%s" % cg_id)[0:CHAR_LIMIT]
logger.debug("-> Replica of consistency group [%s] with key [%s] is "
"being cloned." % (cg_id, snap_group_name) +
"This is the failsafe snapshot to use in case production "
"site had failed before replication has completed."
)
snap_groups = self.action_entities.get_snapshots_by_snap_groups()
if snap_group_name not in snap_groups:
self.xcli_client.cmd.cg_snapshots_create(
cg=cg_id, snap_group=snap_group_name)
else:
self.xcli_client.cmd.cg_snapshots_create(
cg=cg_id, overwrite=snap_group_name)
def duplicate_target_snapshot_before_possible_override(
self, cg_id, snap_group_name=None):
if snap_group_name is None:
snap_group_name = ("temp_synced_%s" % cg_id)[0:CHAR_LIMIT]
logger.debug("-> Replica of consistency group [%s] with key [%s] is "
"being cloned." % (cg_id, snap_group_name) +
"This is the failsafe snapshot to use in case production "
"site had failed before replication has completed."
)
snap_groups = self.action_entities.get_snapshots_by_snap_groups()
if snap_group_name in snap_groups:
self.xcli_client.cmd.snap_group_delete(snap_group=snap_group_name)
self.xcli_client.cmd.snap_group_duplicate(
snap_group=self._get_last_replicated_snapshot_name(cg_id),
new_snap_group=snap_group_name)
def delete_snap_group(self, snap_group_name):
logger.debug("Deleting snap group %s" % snap_group_name)
self.xcli_client.cmd.snap_group_delete(snap_group=snap_group_name)
def _get_last_replicated_snapshot_name(self, cg):
for snap_group in self.xcli_client.cmd.snap_group_list(cg=cg):
if snap_group.name.startswith('last-replicated-'):
return snap_group.name
raise NoLastReplicatedSnapshotRecoveryException()
def verify_snapshot_space_for_resource(self, group_id):
number_of_volumes = len(self.xcli_client.cmd.vol_list(cg=group_id))
pool_name = self.xcli_client.cmd.cg_list(
cg=group_id).as_single_element.pool
if not self._does_pool_have_required_space_for_snapshots(
pool_name, number_of_volumes):
raise InsufficientSnapshotSpaceRecoveryException()
def is_async_job_running(self, group_id):
return len(self.xcli_client.cmd.sync_job_list(cg=group_id)) > 0
def is_resource_locked(self, group_id):
for volume in self.get_cg_volumes(group_id):
if self._is_vol_locked(volume):
return True
return False
def get_cg_volumes(self, group_id):
""" return all non snapshots volumes in cg """
for volume in self.xcli_client.cmd.vol_list(cg=group_id):
if volume.snapshot_of == '':
yield volume.name
def verify_devices_in_cg(self, devices, group):
for device in devices:
logger.debug('Verify volume %s is in cg %s' % (device, group))
if device not in self.get_cg_volumes(group):
logger.error("Device %s is not in cg %s" % (device, group))
raise NoMirrorDefinedError()
|
IBM/pyxcli | pyxcli/mirroring/cg_recovery_manager.py | CGRecoveryManager.get_cg_volumes | python | def get_cg_volumes(self, group_id):
""" return all non snapshots volumes in cg """
for volume in self.xcli_client.cmd.vol_list(cg=group_id):
if volume.snapshot_of == '':
yield volume.name | return all non snapshots volumes in cg | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/mirroring/cg_recovery_manager.py#L206-L210 | null | class CGRecoveryManager(RecoveryManager):
# ================================================= INIT =================
def __init__(self, should_use_cache, xcli_client):
super(CGRecoveryManager, self).__init__(
should_use_cache, xcli_client)
def get_mirror_resources(self):
return self.action_entities.get_cg_mirrors()
def get_type_str(self):
return CG
def _does_resource_have_mapped_test_snapshot(self, group_id,
test_snapshot_prefix):
# Mapped is when all snapshots are mapped
groups = self.get_target_group_test_snap_groups(group_id,
test_snapshot_prefix)
for snap_group in groups:
logger.debug("Found snap group %s for cg %s" %
(snap_group.name, group_id))
all_mapped = True
snaps = self.action_entities.get_snapshots_by_snap_groups()
for snapshot in snaps[snap_group.name]:
if not self.is_volume_mapped(snapshot):
all_mapped = False
if all_mapped:
logger.debug(
"All snapshots in snap group %s are "
"mapped" % snap_group.name)
return True
return False
def _unmap_and_delete_test_snapshots(self, group_id, test_snapshot_prefix):
groups = self.get_target_group_test_snap_groups(group_id,
test_snapshot_prefix)
for snap_group in groups:
snapshots = self.action_entities.get_snapshots_by_snap_groups()
for snapshot in snapshots[snap_group.name]:
self.unmap_volume(snapshot)
self.delete_snap_group(snap_group.name)
def _create_and_unlock_snapshot(self, cg, snap_group,
test_snapshot_prefix):
existing_snap_groups = self.get_target_group_test_snap_groups(
cg, test_snapshot_prefix)
if len(existing_snap_groups) == 0:
logger.debug(
"creating and unlocking snap group"
"%s for cg %s" % (snap_group, cg))
xcli_mirror = self.get_mirror_resources()[cg]
if self._is_sync_mirror(xcli_mirror):
self.xcli_client.cmd.cg_snapshots_create(
cg=cg, snap_group=snap_group)
else:
self.xcli_client.cmd.snap_group_duplicate(
snap_group=self._get_last_replicated_snapshot_name(cg),
new_snap_group=snap_group)
self.xcli_client.cmd.snap_group_unlock(snap_group=snap_group)
else:
logger.debug("unlocking previously existing snap group"
"%s for cg %s" % (existing_snap_groups[0].name, cg))
self.xcli_client.cmd.snap_group_unlock(
snap_group=existing_snap_groups[0].name)
def get_target_group_test_snap_groups(self, group_id,
test_snapshot_prefix):
snap_groups = list()
for snap_group in self.xcli_client.cmd.snap_group_list(cg=group_id):
if snap_group.name.startswith(test_snapshot_prefix):
snap_groups.append(snap_group)
return snap_groups
# ============================================ REVERSE REPLICATION =======
# ================================================= MIRROR ACTIONS =======
def create_mirror(self, resource_name, target_name, mirror_type,
slave_resource_name, rpo=None, remote_rpo=None,
schedule=None, remote_schedule=None,
activate_mirror='no'):
'''creates a mirror and returns a mirror object.
target name must be a valid target from target_list,
mirror type must be 'sync' or 'async',
slave_resource_name would be the slave_cg name'''
return self._create_mirror('cg', resource_name, target_name,
mirror_type, slave_resource_name, rpo=rpo,
remote_rpo=remote_rpo, schedule=schedule,
remote_schedule=remote_schedule,
activate_mirror=activate_mirror)
def delete_mirror(self, resource_id):
'''delete a mirror by resource_id'''
self._delete_mirror(cg=resource_id)
def activate_mirror(self, resource_id):
self._activate_mirror(cg=resource_id)
def deactivate_mirror(self, resource_id):
self._deactivate_mirror(cg=resource_id)
def _change_role(self, resource_id, role):
self.xcli_client.cmd.mirror_change_role(cg=resource_id, new_role=role)
def _switch_roles(self, resource_id):
self.xcli_client.cmd.mirror_switch_roles(cg=resource_id)
def _mirror_change_designation(self, resource_id, new_designation_str):
self.xcli_client.cmd.mirror_change_designation(
cg=resource_id, new_designation=new_designation_str)
# =============================================== MAPPING ================
def unmap_all_volumes(self, group_id):
for volume in self.get_cg_volumes(group_id):
self.unmap_volume(volume)
# ================================================= SNAPSHOT =============
def snap_target_before_possible_override(self, cg_id,
snap_group_name=None):
if (snap_group_name is None):
snap_group_name = ("temp_synced_%s" % cg_id)[0:CHAR_LIMIT]
logger.debug("-> Replica of consistency group [%s] with key [%s] is "
"being cloned." % (cg_id, snap_group_name) +
"This is the failsafe snapshot to use in case production "
"site had failed before replication has completed."
)
snap_groups = self.action_entities.get_snapshots_by_snap_groups()
if snap_group_name not in snap_groups:
self.xcli_client.cmd.cg_snapshots_create(
cg=cg_id, snap_group=snap_group_name)
else:
self.xcli_client.cmd.cg_snapshots_create(
cg=cg_id, overwrite=snap_group_name)
def duplicate_target_snapshot_before_possible_override(
self, cg_id, snap_group_name=None):
if snap_group_name is None:
snap_group_name = ("temp_synced_%s" % cg_id)[0:CHAR_LIMIT]
logger.debug("-> Replica of consistency group [%s] with key [%s] is "
"being cloned." % (cg_id, snap_group_name) +
"This is the failsafe snapshot to use in case production "
"site had failed before replication has completed."
)
snap_groups = self.action_entities.get_snapshots_by_snap_groups()
if snap_group_name in snap_groups:
self.xcli_client.cmd.snap_group_delete(snap_group=snap_group_name)
self.xcli_client.cmd.snap_group_duplicate(
snap_group=self._get_last_replicated_snapshot_name(cg_id),
new_snap_group=snap_group_name)
def delete_snap_group(self, snap_group_name):
logger.debug("Deleting snap group %s" % snap_group_name)
self.xcli_client.cmd.snap_group_delete(snap_group=snap_group_name)
def _get_last_replicated_snapshot_name(self, cg):
for snap_group in self.xcli_client.cmd.snap_group_list(cg=cg):
if snap_group.name.startswith('last-replicated-'):
return snap_group.name
raise NoLastReplicatedSnapshotRecoveryException()
def verify_snapshot_space_for_resource(self, group_id):
number_of_volumes = len(self.xcli_client.cmd.vol_list(cg=group_id))
pool_name = self.xcli_client.cmd.cg_list(
cg=group_id).as_single_element.pool
if not self._does_pool_have_required_space_for_snapshots(
pool_name, number_of_volumes):
raise InsufficientSnapshotSpaceRecoveryException()
def is_async_job_running(self, group_id):
return len(self.xcli_client.cmd.sync_job_list(cg=group_id)) > 0
def is_resource_locked(self, group_id):
for volume in self.get_cg_volumes(group_id):
if self._is_vol_locked(volume):
return True
return False
def get_cg_volumes(self, group_id):
""" return all non snapshots volumes in cg """
for volume in self.xcli_client.cmd.vol_list(cg=group_id):
if volume.snapshot_of == '':
yield volume.name
def verify_devices_in_cg(self, devices, group):
for device in devices:
logger.debug('Verify volume %s is in cg %s' % (device, group))
if device not in self.get_cg_volumes(group):
logger.error("Device %s is not in cg %s" % (device, group))
raise NoMirrorDefinedError()
|
IBM/pyxcli | pyxcli/transports.py | SocketTransport._certificate_required | python | def _certificate_required(cls, hostname, port=XCLI_DEFAULT_PORT,
ca_certs=None, validate=None):
'''
returns true if connection should verify certificate
'''
if not ca_certs:
return False
xlog.debug("CONNECT SSL %s:%s, cert_file=%s",
hostname, port, ca_certs)
certificate = ssl.get_server_certificate((hostname, port),
ca_certs=None)
# handle XIV pre-defined certifications
# if a validation function was given - we let the user check
# the certificate himself, with the user's own validate function.
# if the validate returned True - the user checked the cert
# and we don't need check it, so we return false.
if validate:
return not validate(certificate)
return True | returns true if connection should verify certificate | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/transports.py#L161-L180 | null | class SocketTransport(object):
MAX_IO_CHUNK = 16000
def __init__(self, sock):
self.sock = sock
# The following host and port will be used for reconnect
try:
h, p = self.sock.getpeername()
self.host = h
self.port = p
self.connect_timout = self.sock.gettimeout()
except Exception as e:
xlog.debug(e)
raise e
def __repr__(self):
try:
h, p = self.sock.getpeername()
except IOError:
return "<%s disconnected>" % (self.__class__.__name__,)
else:
ssl = "(ssl)" if hasattr(self.sock, "getpeercert") else "(no ssl)"
return "<%s connected to %s:%s %s>" % (self.__class__.__name__,
h, p, ssl)
@classmethod
def connect(cls, hostname, port, timeout=5.0):
xlog.debug("CONNECT (non SSL) %s:%s", hostname, port)
sock = socket.socket()
sock.settimeout(timeout)
sock.connect((hostname, port))
return cls(sock)
@classmethod
@classmethod
def connect_ssl(cls, hostname, port=XCLI_DEFAULT_PORT, timeout=5.0,
ca_certs=None, validate=None):
certificate_required = cls._certificate_required(hostname,
port, ca_certs,
validate)
xlog.debug("CONNECT SSL %s:%s", hostname, port)
if certificate_required:
sock = ssl.wrap_socket(
socket.socket(),
ca_certs=ca_certs,
cert_reqs=ssl.CERT_REQUIRED)
else:
sock = ssl.wrap_socket(socket.socket())
sock.settimeout(timeout)
sock.connect((hostname, port))
return cls(sock)
def close(self):
try:
self.sock.shutdown(socket.SHUT_RDWR)
except IOError:
pass
self.sock.close()
self.sock = ClosedFile
def is_connected(self):
try:
self.sock.getpeername()
except IOError:
return False
else:
return True
def fileno(self):
return self.sock.fileno()
def send(self, data, timeout=None):
while data:
chunk = data[:self.MAX_IO_CHUNK]
sent = self.sock.send(chunk)
data = data[sent:]
parser = TerminationDetectingXMLParser()
raw = ""
try:
while not parser.root_element_closed:
chunk = self.sock.recv(self.MAX_IO_CHUNK)
if not chunk:
break
raw += chunk
parser.feed(chunk)
return parser.close()
except XMLException as ex:
xlog.exception("Termination-detecting parser failed, %s", ex)
if not self.is_connected():
ex = chained(DisconnectedWhileReceivingData())
else:
ex = chained(CorruptResponse(str(ex), raw))
self.close()
raise ex
def reconnect(self):
if self.is_connected():
self.close()
self.sock = ssl.wrap_socket(socket.socket())
self.sock.settimeout(self.connect_timout)
self.sock.connect((self.host, self.port))
|
IBM/pyxcli | pyxcli/response.py | _populate_bunch_with_element | python | def _populate_bunch_with_element(element):
if 'value' in element.attrib:
return element.get('value')
current_bunch = Bunch()
if element.get('id'):
current_bunch['nextra_element_id'] = element.get('id')
for subelement in element.getchildren():
current_bunch[subelement.tag] = _populate_bunch_with_element(
subelement)
return current_bunch | Helper function to recursively populates a Bunch from an XML tree.
Returns leaf XML elements as a simple value, branch elements are returned
as Bunches containing their subelements as value or recursively generated
Bunch members. | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/response.py#L122-L138 | [
"def _populate_bunch_with_element(element):\n \"\"\"\n Helper function to recursively populates a Bunch from an XML tree.\n Returns leaf XML elements as a simple value, branch elements are returned\n as Bunches containing their subelements as value or recursively generated\n Bunch members.\n \"\"\... | ##############################################################################
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
""" IBM XCLI response Module
.. module: response
:Description: Provides a response object for the IBM XCLI Client Classes. The
object helps to analyse the results returned by the Accelerate storage
arrays.
"""
from bunch import Bunch
from pyxcli.helpers import xml_util as etree
class XCLIResponse(object):
RETURN_PATH = "return"
def __init__(self, cmdroot):
self.response_etree = cmdroot
@classmethod
def instantiate(cls, cmdroot, encoding):
compressed = cmdroot.find("compressed_return")
if compressed is not None:
text = compressed.attrib["value"]
raw = text.decode(encoding).decode("zlib")
cmdroot.append(etree.fromstring("<return>%s</return>" % (raw,)))
cmdroot.remove(compressed)
return cls(cmdroot)
@property
def as_return_etree(self):
return self.response_etree.find(self.RETURN_PATH)
@property
def contained_element_types(self):
return set(subelement.tag for subelement in
self.as_return_etree.getchildren())
# @ReservedAssignment
def all(self, element_type=None, response_path=None):
"""
Generates Bunches, each representing a single subelement of the
response. If an element_type is requested, only elements whose
tag matches the element_type are returned. If the response has no
subelements (for example, in a <return>-less command), yields None.
"""
path = self.RETURN_PATH
if response_path is not None:
path += "/" + response_path
response_element = self.response_etree.find(path)
if response_element is None:
return
for subelement in self.response_etree.find(path).getchildren():
if element_type is None or subelement.tag == element_type:
yield _populate_bunch_with_element(subelement)
@property
def as_single_element(self):
"""
Processes the response as a single-element response,
like config_get or system_counters_get.
If there is more then one element in the response or no
elements this raises a ResponseError
"""
if self.as_return_etree is None:
return None
if len(self.as_return_etree.getchildren()) == 1:
return _populate_bunch_with_element(self.as_return_etree.
getchildren()[0])
return _populate_bunch_with_element(self.as_return_etree)
@property
def as_list(self, element_type=None, response_path=None):
return list(self.all(element_type, response_path))
def as_dict(self, key, element_type=None, response_path=None):
result = {}
for element in self.all(element_type, response_path):
result[getattr(element, key)] = element
return result
def __iter__(self):
return self.all()
def __len__(self):
return len(self.as_list)
def __getitem__(self, item):
if isinstance(item, basestring):
return self.all(item)
elif isinstance(item, (int, long)):
return list(self.all())[item]
else:
raise TypeError("'item' can be a string or an int", item)
def __nonzero__(self):
return any(self.all())
__bool__ = __nonzero__
def __str__(self):
return etree.tostring(self.response_etree)
|
IBM/pyxcli | pyxcli/response.py | XCLIResponse.all | python | def all(self, element_type=None, response_path=None):
path = self.RETURN_PATH
if response_path is not None:
path += "/" + response_path
response_element = self.response_etree.find(path)
if response_element is None:
return
for subelement in self.response_etree.find(path).getchildren():
if element_type is None or subelement.tag == element_type:
yield _populate_bunch_with_element(subelement) | Generates Bunches, each representing a single subelement of the
response. If an element_type is requested, only elements whose
tag matches the element_type are returned. If the response has no
subelements (for example, in a <return>-less command), yields None. | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/response.py#L57-L72 | [
"def _populate_bunch_with_element(element):\n \"\"\"\n Helper function to recursively populates a Bunch from an XML tree.\n Returns leaf XML elements as a simple value, branch elements are returned\n as Bunches containing their subelements as value or recursively generated\n Bunch members.\n \"\"\... | class XCLIResponse(object):
RETURN_PATH = "return"
def __init__(self, cmdroot):
self.response_etree = cmdroot
@classmethod
def instantiate(cls, cmdroot, encoding):
compressed = cmdroot.find("compressed_return")
if compressed is not None:
text = compressed.attrib["value"]
raw = text.decode(encoding).decode("zlib")
cmdroot.append(etree.fromstring("<return>%s</return>" % (raw,)))
cmdroot.remove(compressed)
return cls(cmdroot)
@property
def as_return_etree(self):
return self.response_etree.find(self.RETURN_PATH)
@property
def contained_element_types(self):
return set(subelement.tag for subelement in
self.as_return_etree.getchildren())
# @ReservedAssignment
@property
def as_single_element(self):
"""
Processes the response as a single-element response,
like config_get or system_counters_get.
If there is more then one element in the response or no
elements this raises a ResponseError
"""
if self.as_return_etree is None:
return None
if len(self.as_return_etree.getchildren()) == 1:
return _populate_bunch_with_element(self.as_return_etree.
getchildren()[0])
return _populate_bunch_with_element(self.as_return_etree)
@property
def as_list(self, element_type=None, response_path=None):
return list(self.all(element_type, response_path))
def as_dict(self, key, element_type=None, response_path=None):
result = {}
for element in self.all(element_type, response_path):
result[getattr(element, key)] = element
return result
def __iter__(self):
return self.all()
def __len__(self):
return len(self.as_list)
def __getitem__(self, item):
if isinstance(item, basestring):
return self.all(item)
elif isinstance(item, (int, long)):
return list(self.all())[item]
else:
raise TypeError("'item' can be a string or an int", item)
def __nonzero__(self):
return any(self.all())
__bool__ = __nonzero__
def __str__(self):
return etree.tostring(self.response_etree)
|
IBM/pyxcli | pyxcli/response.py | XCLIResponse.as_single_element | python | def as_single_element(self):
if self.as_return_etree is None:
return None
if len(self.as_return_etree.getchildren()) == 1:
return _populate_bunch_with_element(self.as_return_etree.
getchildren()[0])
return _populate_bunch_with_element(self.as_return_etree) | Processes the response as a single-element response,
like config_get or system_counters_get.
If there is more than one element in the response or no
elements this raises a ResponseError | train | https://github.com/IBM/pyxcli/blob/7d8ece1dcc16f50246740a447aa81b94a0dbced4/pyxcli/response.py#L75-L87 | [
"def _populate_bunch_with_element(element):\n \"\"\"\n Helper function to recursively populates a Bunch from an XML tree.\n Returns leaf XML elements as a simple value, branch elements are returned\n as Bunches containing their subelements as value or recursively generated\n Bunch members.\n \"\"\... | class XCLIResponse(object):
RETURN_PATH = "return"
def __init__(self, cmdroot):
self.response_etree = cmdroot
@classmethod
def instantiate(cls, cmdroot, encoding):
compressed = cmdroot.find("compressed_return")
if compressed is not None:
text = compressed.attrib["value"]
raw = text.decode(encoding).decode("zlib")
cmdroot.append(etree.fromstring("<return>%s</return>" % (raw,)))
cmdroot.remove(compressed)
return cls(cmdroot)
@property
def as_return_etree(self):
return self.response_etree.find(self.RETURN_PATH)
@property
def contained_element_types(self):
return set(subelement.tag for subelement in
self.as_return_etree.getchildren())
# @ReservedAssignment
def all(self, element_type=None, response_path=None):
"""
Generates Bunches, each representing a single subelement of the
response. If an element_type is requested, only elements whose
tag matches the element_type are returned. If the response has no
subelements (for example, in a <return>-less command), yields None.
"""
path = self.RETURN_PATH
if response_path is not None:
path += "/" + response_path
response_element = self.response_etree.find(path)
if response_element is None:
return
for subelement in self.response_etree.find(path).getchildren():
if element_type is None or subelement.tag == element_type:
yield _populate_bunch_with_element(subelement)
@property
@property
def as_list(self, element_type=None, response_path=None):
return list(self.all(element_type, response_path))
def as_dict(self, key, element_type=None, response_path=None):
result = {}
for element in self.all(element_type, response_path):
result[getattr(element, key)] = element
return result
def __iter__(self):
return self.all()
def __len__(self):
return len(self.as_list)
def __getitem__(self, item):
if isinstance(item, basestring):
return self.all(item)
elif isinstance(item, (int, long)):
return list(self.all())[item]
else:
raise TypeError("'item' can be a string or an int", item)
def __nonzero__(self):
return any(self.all())
__bool__ = __nonzero__
def __str__(self):
return etree.tostring(self.response_etree)
|
ReadabilityHoldings/python-readability-api | readability/utils.py | cast_datetime_filter | python | def cast_datetime_filter(value):
if isinstance(value, str):
dtime = parse_datetime(value)
elif isinstance(value, datetime):
dtime = value
else:
raise ValueError('Received value of type {0}'.format(type(value)))
return dtime.isoformat() | Cast a datetime filter value.
:param value: string representation of a value that needs to be casted to
a `datetime` object. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/utils.py#L46-L61 | null | # -*- coding: utf-8 -*-
"""
readability.utils
~~~~~~~~~~~~~~~~~
This module provides various utils to the rest of the package.
"""
import logging
from datetime import datetime
from dateutil.parser import parse as parse_datetime
logger = logging.getLogger(__name__)
# map of filter names to a data type. This is used to map names to a
# casting function when needed.
filter_type_map = {
'added_since': 'datetime',
'added_until': 'datetime',
'archive': 'int',
'archived_since': 'datetime',
'archived_until': 'datetime',
'exclude_accessibility': 'string',
'favorite': 'int',
'favorited_since': 'datetime',
'favorited_until': 'datetime',
'domain': 'string',
'only_delete': 'int',
'opened_since': 'datetime',
'opened_until': 'datetime',
'order': 'string',
'page': 'int',
'per_page': 'int',
'tags': 'string',
'updated_since': 'datetime',
'updated_until': 'datetime',
}
def cast_integer_filter(value):
"""Cast an integer filter value.
These are usually booleans in Python but they need to be sent as
1s and 0s to the API.
:param value: boolean value that needs to be casted to an int
"""
return int(value)
def filter_args_to_dict(filter_dict, accepted_filter_keys=[]):
"""Cast and validate filter args.
:param filter_dict: Filter kwargs
:param accepted_filter_keys: List of keys that are acceptable to use.
"""
out_dict = {}
for k, v in filter_dict.items():
# make sure that the filter k is acceptable
# and that there is a value associated with the key
if k not in accepted_filter_keys or v is None:
logger.debug(
'Filter was not in accepted_filter_keys or value is None.')
# skip it
continue
filter_type = filter_type_map.get(k, None)
if filter_type is None:
logger.debug('Filter key not foud in map.')
# hmm, this was an acceptable filter type but not in the map...
# Going to skip it.
continue
# map of casting functions to filter types
filter_cast_map = {
'int': cast_integer_filter,
'datetime': cast_datetime_filter
}
cast_function = filter_cast_map.get(filter_type, None)
# if we get a cast function, call it with v. If not, just use v.
if cast_function:
out_value = cast_function(v)
else:
out_value = v
out_dict[k] = out_value
return out_dict
|
ReadabilityHoldings/python-readability-api | readability/utils.py | filter_args_to_dict | python | def filter_args_to_dict(filter_dict, accepted_filter_keys=[]):
out_dict = {}
for k, v in filter_dict.items():
# make sure that the filter k is acceptable
# and that there is a value associated with the key
if k not in accepted_filter_keys or v is None:
logger.debug(
'Filter was not in accepted_filter_keys or value is None.')
# skip it
continue
filter_type = filter_type_map.get(k, None)
if filter_type is None:
logger.debug('Filter key not foud in map.')
# hmm, this was an acceptable filter type but not in the map...
# Going to skip it.
continue
# map of casting functions to filter types
filter_cast_map = {
'int': cast_integer_filter,
'datetime': cast_datetime_filter
}
cast_function = filter_cast_map.get(filter_type, None)
# if we get a cast function, call it with v. If not, just use v.
if cast_function:
out_value = cast_function(v)
else:
out_value = v
out_dict[k] = out_value
return out_dict | Cast and validate filter args.
:param filter_dict: Filter kwargs
:param accepted_filter_keys: List of keys that are acceptable to use. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/utils.py#L75-L113 | null | # -*- coding: utf-8 -*-
"""
readability.utils
~~~~~~~~~~~~~~~~~
This module provides various utils to the rest of the package.
"""
import logging
from datetime import datetime
from dateutil.parser import parse as parse_datetime
logger = logging.getLogger(__name__)
# map of filter names to a data type. This is used to map names to a
# casting function when needed.
filter_type_map = {
'added_since': 'datetime',
'added_until': 'datetime',
'archive': 'int',
'archived_since': 'datetime',
'archived_until': 'datetime',
'exclude_accessibility': 'string',
'favorite': 'int',
'favorited_since': 'datetime',
'favorited_until': 'datetime',
'domain': 'string',
'only_delete': 'int',
'opened_since': 'datetime',
'opened_until': 'datetime',
'order': 'string',
'page': 'int',
'per_page': 'int',
'tags': 'string',
'updated_since': 'datetime',
'updated_until': 'datetime',
}
def cast_datetime_filter(value):
"""Cast a datetime filter value.
:param value: string representation of a value that needs to be casted to
a `datetime` object.
"""
if isinstance(value, str):
dtime = parse_datetime(value)
elif isinstance(value, datetime):
dtime = value
else:
raise ValueError('Received value of type {0}'.format(type(value)))
return dtime.isoformat()
def cast_integer_filter(value):
"""Cast an integer filter value.
Theses are usually booleans in Python but they need to be sent as
1s and 0s to the API.
:param value: boolean value that needs to be casted to an int
"""
return int(value)
|
ReadabilityHoldings/python-readability-api | readability/auth.py | xauth | python | def xauth(base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
username = xargs.get('username') or required_from_env('READABILITY_USERNAME')
password = xargs.get('password') or required_from_env('READABILITY_PASSWORD')
client = Client(consumer_key, client_secret=consumer_secret, signature_type='BODY')
url = base_url_template.format(ACCESS_TOKEN_URL)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
params = {
'x_auth_username': username,
'x_auth_password': password,
'x_auth_mode': 'client_auth'
}
uri, headers, body = client.sign(url,
http_method='POST',
body=urlencode(params),
headers=headers)
response = requests.post(uri, data=body)
logger.debug('POST to %s.', uri)
token = parse_qs(response.content)
try:
# The indexes below are a little weird. parse_qs above gives us
# back a dict where each value is a list. We want the first value
# in those lists.
token = (token[b'oauth_token'][0].decode(), token[b'oauth_token_secret'][0].decode())
except KeyError:
raise ValueError('Invalid Credentials.')
return token | Returns an OAuth token tuple that can be used with clients.ReaderClient.
:param base_url_template: Template for generating Readability API urls.
:param consumer_key: Readability consumer key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Readability consumer secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param username: A username, otherwise read from READABILITY_USERNAME.
:param password: A password, otherwise read from READABILITY_PASSWORD. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/auth.py#L37-L79 | [
"def required_from_env(key):\n \"\"\"\n Retrieve a required variable from the current environment variables.\n\n Raises a ValueError if the env variable is not found or has no value.\n\n \"\"\"\n val = os.environ.get(key)\n if not val:\n raise ValueError(\n \"Required argument '{... | # -*- coding: utf-8 -*-
"""
readability.auth
~~~~~~~~~~~~~~~~
This module provides the xauth functionality for the Readability
Reader API.
"""
from __future__ import unicode_literals
import logging
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
import requests
from oauthlib.oauth1 import Client
from readability.clients import DEFAULT_READER_URL_TEMPLATE
from readability.core import required_from_env
logger = logging.getLogger(__name__)
ACCESS_TOKEN_URL = 'oauth/access_token/'
|
ReadabilityHoldings/python-readability-api | readability/core.py | required_from_env | python | def required_from_env(key):
val = os.environ.get(key)
if not val:
raise ValueError(
"Required argument '{}' not supplied and not found in environment variables".format(key))
return val | Retrieve a required variable from the current environment variables.
Raises a ValueError if the env variable is not found or has no value. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/core.py#L4-L15 | null | import os
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.get | python | def get(self, url):
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url) | Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L75-L82 | null | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark whose tags to retrieve.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to which the tags are applied.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark from which to remove the tag.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID of the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrieves the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.post | python | def post(self, url, post_params=None):
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params) | Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L84-L93 | null | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.delete | python | def delete(self, url):
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url) | Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L95-L102 | null | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.get_article | python | def get_article(self, article_id):
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url) | Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L119-L126 | [
"def get(self, url):\n \"\"\"\n Make a HTTP GET request to the Reader API.\n\n :param url: url to which to make a GET request.\n \"\"\"\n logger.debug('Making GET request to %s', url)\n return self.oauth_session.get(url)\n",
"def _generate_url(self, resource, query_params=None):\n \"\"\"\n ... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.get_bookmarks | python | def get_bookmarks(self, **filters):
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url) | Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L128-L152 | [
"def filter_args_to_dict(filter_dict, accepted_filter_keys=[]):\n \"\"\"Cast and validate filter args.\n\n :param filter_dict: Filter kwargs\n :param accepted_filter_keys: List of keys that are acceptable to use.\n\n \"\"\"\n out_dict = {}\n for k, v in filter_dict.items():\n # make sure th... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.get_bookmark | python | def get_bookmark(self, bookmark_id):
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url) | Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L154-L163 | [
"def get(self, url):\n \"\"\"\n Make a HTTP GET request to the Reader API.\n\n :param url: url to which to make a GET request.\n \"\"\"\n logger.debug('Making GET request to %s', url)\n return self.oauth_session.get(url)\n",
"def _generate_url(self, resource, query_params=None):\n \"\"\"\n ... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.add_bookmark | python | def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params) | Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L165-L182 | [
"def post(self, url, post_params=None):\n \"\"\"\n Make a HTTP POST request to the Reader API.\n\n :param url: url to which to make a POST request.\n :param post_params: parameters to be sent in the request's body.\n \"\"\"\n params = urlencode(post_params)\n logger.debug('Making POST request t... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.update_bookmark | python | def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params) | Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L184-L206 | [
"def post(self, url, post_params=None):\n \"\"\"\n Make a HTTP POST request to the Reader API.\n\n :param url: url to which to make a POST request.\n :param post_params: parameters to be sent in the request's body.\n \"\"\"\n params = urlencode(post_params)\n logger.debug('Making POST request t... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.delete_bookmark | python | def delete_bookmark(self, bookmark_id):
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url) | Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L237-L246 | [
"def delete(self, url):\n \"\"\"\n Make a HTTP DELETE request to the Readability API.\n\n :param url: The url to which to send a DELETE request.\n \"\"\"\n logger.debug('Making DELETE request to %s', url)\n return self.oauth_session.delete(url)\n",
"def _generate_url(self, resource, query_params... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.get_bookmark_tags | python | def get_bookmark_tags(self, bookmark_id):
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url) | Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L248-L257 | [
"def get(self, url):\n \"\"\"\n Make a HTTP GET request to the Reader API.\n\n :param url: url to which to make a GET request.\n \"\"\"\n logger.debug('Making GET request to %s', url)\n return self.oauth_session.get(url)\n",
"def _generate_url(self, resource, query_params=None):\n \"\"\"\n ... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.add_tags_to_bookmark | python | def add_tags_to_bookmark(self, bookmark_id, tags):
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params) | Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L259-L270 | [
"def post(self, url, post_params=None):\n \"\"\"\n Make a HTTP POST request to the Reader API.\n\n :param url: url to which to make a POST request.\n :param post_params: parameters to be sent in the request's body.\n \"\"\"\n params = urlencode(post_params)\n logger.debug('Making POST request t... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.delete_tag_from_bookmark | python | def delete_tag_from_bookmark(self, bookmark_id, tag_id):
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url) | Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L272-L282 | [
"def delete(self, url):\n \"\"\"\n Make a HTTP DELETE request to the Readability API.\n\n :param url: The url to which to send a DELETE request.\n \"\"\"\n logger.debug('Making DELETE request to %s', url)\n return self.oauth_session.delete(url)\n",
"def _generate_url(self, resource, query_params... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def get_tag(self, tag_id):
"""
Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve.
"""
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ReaderClient.get_tag | python | def get_tag(self, tag_id):
url = self._generate_url('tags/{0}'.format(tag_id))
return self.get(url) | Get a single tag represented by `tag_id`.
The requested tag must belong to the current user.
:param tag_id: ID fo the tag to retrieve. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L284-L293 | [
"def get(self, url):\n \"\"\"\n Make a HTTP GET request to the Reader API.\n\n :param url: url to which to make a GET request.\n \"\"\"\n logger.debug('Making GET request to %s', url)\n return self.oauth_session.get(url)\n",
"def _generate_url(self, resource, query_params=None):\n \"\"\"\n ... | class ReaderClient(object):
"""
Client for interacting with the Readability Reader API.
Docs can be found at `http://www.readability.com/developers/api/reader`.
"""
def __init__(self, token_key, token_secret,
base_url_template=DEFAULT_READER_URL_TEMPLATE, **xargs):
"""
Initialize the ReaderClient.
:param consumer_key: Reader API key, otherwise read from READABILITY_CONSUMER_KEY.
:param consumer_secret: Reader API secret, otherwise read from READABILITY_CONSUMER_SECRET.
:param token_key: Readability user token key
:param token_secret: Readability user token secret
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
consumer_key = xargs.get('consumer_key') or required_from_env('READABILITY_CONSUMER_KEY')
consumer_secret = xargs.get('consumer_secret') or required_from_env('READABILITY_CONSUMER_SECRET')
self.base_url_template = base_url_template
self.oauth_session = OAuth1Session(consumer_key, consumer_secret, token_key, token_secret)
def get(self, url):
"""
Make a HTTP GET request to the Reader API.
:param url: url to which to make a GET request.
"""
logger.debug('Making GET request to %s', url)
return self.oauth_session.get(url)
def post(self, url, post_params=None):
"""
Make a HTTP POST request to the Reader API.
:param url: url to which to make a POST request.
:param post_params: parameters to be sent in the request's body.
"""
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return self.oauth_session.post(url, data=params)
def delete(self, url):
"""
Make a HTTP DELETE request to the Readability API.
:param url: The url to which to send a DELETE request.
"""
logger.debug('Making DELETE request to %s', url)
return self.oauth_session.delete(url)
def _generate_url(self, resource, query_params=None):
"""
Generate a Readability URL to the given resource.
:param resource: the path to the resource that the request should
go to.
:param query_params (optional): a dict of query params that should
be added to the url.
"""
if query_params:
resource = '{0}?{1}'.format(
resource, urlencode(query_params))
return self.base_url_template.format(resource)
def get_article(self, article_id):
"""
Get a single article represented by `article_id`.
:param article_id: ID of the article to retrieve.
"""
url = self._generate_url('articles/{0}'.format(article_id))
return self.get(url)
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url)
def get_bookmark(self, bookmark_id):
"""
Get a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to retrieve.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.get(url)
def add_bookmark(self, url, favorite=False, archive=False, allow_duplicates=True):
"""
Adds given bookmark to the authenticated user.
:param url: URL of the article to bookmark
:param favorite: whether or not the bookmark should be favorited
:param archive: whether or not the bookmark should be archived
:param allow_duplicates: whether or not to allow duplicate bookmarks to
be created for a given url
"""
rdb_url = self._generate_url('bookmarks')
params = {
"url": url,
"favorite": int(favorite),
"archive": int(archive),
"allow_duplicates": int(allow_duplicates)
}
return self.post(rdb_url, params)
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
"""
Updates given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to update.
:param favorite (optional): Whether this article is favorited or not.
:param archive (optional): Whether this article is archived or not.
:param read_percent (optional): The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
params = {}
if favorite is not None:
params['favorite'] = 1 if favorite == True else 0
if archive is not None:
params['archive'] = 1 if archive == True else 0
if read_percent is not None:
try:
params['read_percent'] = float(read_percent)
except ValueError:
pass
return self.post(rdb_url, params)
def favorite_bookmark(self, bookmark_id):
"""
Favorites given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to favorite.
"""
return self.update_bookmark(bookmark_id, favorite=True)
def archive_bookmark(self, bookmark_id):
"""
Archives given bookmark. The requested bookmark must belong to the
current user.
:param bookmark_id: ID of the bookmark to archive.
"""
return self.update_bookmark(bookmark_id, archive=True)
def set_read_percent_of_bookmark(self, bookmark_id, read_percent):
"""
Set the read percentage of given bookmark. The requested bookmark must
belong to the current user.
:param bookmark_id: ID of the bookmark to update.
:param read_percent: The read progress made in this article,
where 1.0 means the bottom and 0.0 means the very top.
"""
return self.update_bookmark(bookmark_id, read_percent=read_percent)
def delete_bookmark(self, bookmark_id):
"""
Delete a single bookmark represented by `bookmark_id`.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
return self.delete(url)
def get_bookmark_tags(self, bookmark_id):
"""
Retrieve tags that have been applied to a bookmark.
The requested bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
return self.get(url)
def add_tags_to_bookmark(self, bookmark_id, tags):
"""
Add tags to to a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
:param tags: Comma separated tags to be applied.
"""
url = self._generate_url('bookmarks/{0}/tags'.format(bookmark_id))
params = dict(tags=tags)
return self.post(url, params)
def delete_tag_from_bookmark(self, bookmark_id, tag_id):
"""
Remove a single tag from a bookmark.
The identified bookmark must belong to the current user.
:param bookmark_id: ID of the bookmark to delete.
"""
url = self._generate_url('bookmarks/{0}/tags/{1}'.format(
bookmark_id, tag_id))
return self.delete(url)
def get_tags(self):
"""
Get all tags belonging to the current user.
"""
url = self._generate_url('tags')
return self.get(url)
def get_user(self):
"""
Retrives the current user.
"""
url = self._generate_url('users/_current')
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ParserClient.post | python | def post(self, url, post_params=None):
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params) | Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L350-L360 | null | class ParserClient(object):
"""
Client for interacting with the Readability Parser API.
Docs can be found at `http://www.readability.com/developers/api/parser`.
"""
def __init__(self, base_url_template=DEFAULT_PARSER_URL_TEMPLATE, **xargs):
"""
Initialize client.
:param token: parser API token, otherwise read from READABILITY_PARSER_TOKEN.
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
logger.debug('Initializing ParserClient with base url template %s',
base_url_template)
self.token = xargs.get('token', None) or required_from_env('READABILITY_PARSER_TOKEN')
self.base_url_template = base_url_template
def get(self, url):
"""
Make an HTTP GET request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making GET request to %s', url)
return requests.get(url)
def head(self, url):
"""
Make an HTTP HEAD request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making HEAD request to %s', url)
return requests.head(url)
def _generate_url(self, resource, query_params=None):
"""
Build the url to resource.
:param resource: Name of the resource that is being called. Options are
`''` (empty string) for root resource, `'parser'`, `'confidence'`.
:param query_params: Data to be passed as query parameters.
"""
resource = '{resource}?token={token}'.format(resource=resource, token=self.token)
if query_params:
resource += "&{}".format(urlencode(query_params))
return self.base_url_template.format(resource)
def get_root(self):
"""
Send a GET request to the root resource of the Parser API.
"""
url = self._generate_url('')
return self.get(url)
def get_article(self, url=None, article_id=None, max_pages=25):
"""
Send a GET request to the `parser` endpoint of the parser API to get
back the representation of an article.
The article can be identified by either a URL or an id that exists
in Readability.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
:param max_pages: The maximum number of pages to parse and combine.
The default is 25.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
query_params['max_pages'] = max_pages
url = self._generate_url('parser', query_params=query_params)
return self.get(url)
def post_article_content(self, content, url, max_pages=25):
"""
POST content to be parsed to the Parser API.
Note: Even when POSTing content, a url must still be provided.
:param content: the content to be parsed
:param url: the url that represents the content
:param max_pages (optional): the maximum number of pages to parse
and combine. Default is 25.
"""
params = {
'doc': content,
'max_pages': max_pages
}
url = self._generate_url('parser', {"url": url})
return self.post(url, post_params=params)
def get_article_status(self, url=None, article_id=None):
"""
Send a HEAD request to the `parser` endpoint to the parser API to
get the articles status.
Returned is a `requests.Response` object. The id and status for the
article can be extracted from the `X-Article-Id` and `X-Article-Status`
headers.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('parser', query_params=query_params)
return self.head(url)
def get_confidence(self, url=None, article_id=None):
"""
Send a GET request to the `confidence` endpoint of the Parser API.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('confidence', query_params=query_params)
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ParserClient._generate_url | python | def _generate_url(self, resource, query_params=None):
resource = '{resource}?token={token}'.format(resource=resource, token=self.token)
if query_params:
resource += "&{}".format(urlencode(query_params))
return self.base_url_template.format(resource) | Build the url to resource.
:param resource: Name of the resource that is being called. Options are
`''` (empty string) for root resource, `'parser'`, `'confidence'`.
:param query_params: Data to be passed as query parameters. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L362-L373 | null | class ParserClient(object):
"""
Client for interacting with the Readability Parser API.
Docs can be found at `http://www.readability.com/developers/api/parser`.
"""
def __init__(self, base_url_template=DEFAULT_PARSER_URL_TEMPLATE, **xargs):
"""
Initialize client.
:param token: parser API token, otherwise read from READABILITY_PARSER_TOKEN.
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
logger.debug('Initializing ParserClient with base url template %s',
base_url_template)
self.token = xargs.get('token', None) or required_from_env('READABILITY_PARSER_TOKEN')
self.base_url_template = base_url_template
def get(self, url):
"""
Make an HTTP GET request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making GET request to %s', url)
return requests.get(url)
def head(self, url):
"""
Make an HTTP HEAD request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making HEAD request to %s', url)
return requests.head(url)
def post(self, url, post_params=None):
"""
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
"""
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params)
def get_root(self):
"""
Send a GET request to the root resource of the Parser API.
"""
url = self._generate_url('')
return self.get(url)
def get_article(self, url=None, article_id=None, max_pages=25):
"""
Send a GET request to the `parser` endpoint of the parser API to get
back the representation of an article.
The article can be identified by either a URL or an id that exists
in Readability.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
:param max_pages: The maximum number of pages to parse and combine.
The default is 25.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
query_params['max_pages'] = max_pages
url = self._generate_url('parser', query_params=query_params)
return self.get(url)
def post_article_content(self, content, url, max_pages=25):
"""
POST content to be parsed to the Parser API.
Note: Even when POSTing content, a url must still be provided.
:param content: the content to be parsed
:param url: the url that represents the content
:param max_pages (optional): the maximum number of pages to parse
and combine. Default is 25.
"""
params = {
'doc': content,
'max_pages': max_pages
}
url = self._generate_url('parser', {"url": url})
return self.post(url, post_params=params)
def get_article_status(self, url=None, article_id=None):
"""
Send a HEAD request to the `parser` endpoint to the parser API to
get the articles status.
Returned is a `requests.Response` object. The id and status for the
article can be extracted from the `X-Article-Id` and `X-Article-Status`
headers.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('parser', query_params=query_params)
return self.head(url)
def get_confidence(self, url=None, article_id=None):
"""
Send a GET request to the `confidence` endpoint of the Parser API.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('confidence', query_params=query_params)
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ParserClient.get_article | python | def get_article(self, url=None, article_id=None, max_pages=25):
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
query_params['max_pages'] = max_pages
url = self._generate_url('parser', query_params=query_params)
return self.get(url) | Send a GET request to the `parser` endpoint of the parser API to get
back the representation of an article.
The article can be identified by either a URL or an id that exists
in Readability.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
:param max_pages: The maximum number of pages to parse and combine.
The default is 25. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L382-L405 | [
"def get(self, url):\n \"\"\"\n Make an HTTP GET request to the Parser API.\n\n :param url: url to which to make the request\n \"\"\"\n logger.debug('Making GET request to %s', url)\n return requests.get(url)\n",
"def _generate_url(self, resource, query_params=None):\n \"\"\"\n Build the u... | class ParserClient(object):
"""
Client for interacting with the Readability Parser API.
Docs can be found at `http://www.readability.com/developers/api/parser`.
"""
def __init__(self, base_url_template=DEFAULT_PARSER_URL_TEMPLATE, **xargs):
"""
Initialize client.
:param token: parser API token, otherwise read from READABILITY_PARSER_TOKEN.
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
logger.debug('Initializing ParserClient with base url template %s',
base_url_template)
self.token = xargs.get('token', None) or required_from_env('READABILITY_PARSER_TOKEN')
self.base_url_template = base_url_template
def get(self, url):
"""
Make an HTTP GET request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making GET request to %s', url)
return requests.get(url)
def head(self, url):
"""
Make an HTTP HEAD request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making HEAD request to %s', url)
return requests.head(url)
def post(self, url, post_params=None):
"""
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
"""
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params)
def _generate_url(self, resource, query_params=None):
"""
Build the url to resource.
:param resource: Name of the resource that is being called. Options are
`''` (empty string) for root resource, `'parser'`, `'confidence'`.
:param query_params: Data to be passed as query parameters.
"""
resource = '{resource}?token={token}'.format(resource=resource, token=self.token)
if query_params:
resource += "&{}".format(urlencode(query_params))
return self.base_url_template.format(resource)
def get_root(self):
"""
Send a GET request to the root resource of the Parser API.
"""
url = self._generate_url('')
return self.get(url)
def post_article_content(self, content, url, max_pages=25):
"""
POST content to be parsed to the Parser API.
Note: Even when POSTing content, a url must still be provided.
:param content: the content to be parsed
:param url: the url that represents the content
:param max_pages (optional): the maximum number of pages to parse
and combine. Default is 25.
"""
params = {
'doc': content,
'max_pages': max_pages
}
url = self._generate_url('parser', {"url": url})
return self.post(url, post_params=params)
def get_article_status(self, url=None, article_id=None):
"""
Send a HEAD request to the `parser` endpoint to the parser API to
get the articles status.
Returned is a `requests.Response` object. The id and status for the
article can be extracted from the `X-Article-Id` and `X-Article-Status`
headers.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('parser', query_params=query_params)
return self.head(url)
def get_confidence(self, url=None, article_id=None):
"""
Send a GET request to the `confidence` endpoint of the Parser API.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('confidence', query_params=query_params)
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ParserClient.post_article_content | python | def post_article_content(self, content, url, max_pages=25):
params = {
'doc': content,
'max_pages': max_pages
}
url = self._generate_url('parser', {"url": url})
return self.post(url, post_params=params) | POST content to be parsed to the Parser API.
Note: Even when POSTing content, a url must still be provided.
:param content: the content to be parsed
:param url: the url that represents the content
:param max_pages (optional): the maximum number of pages to parse
and combine. Default is 25. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L407-L423 | [
"def post(self, url, post_params=None):\n \"\"\"\n Make an HTTP POST request to the Parser API.\n\n :param url: url to which to make the request\n :param post_params: POST data to send along. Expected to be a dict.\n \"\"\"\n post_params['token'] = self.token\n params = urlencode(post_params)\n... | class ParserClient(object):
"""
Client for interacting with the Readability Parser API.
Docs can be found at `http://www.readability.com/developers/api/parser`.
"""
def __init__(self, base_url_template=DEFAULT_PARSER_URL_TEMPLATE, **xargs):
"""
Initialize client.
:param token: parser API token, otherwise read from READABILITY_PARSER_TOKEN.
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
logger.debug('Initializing ParserClient with base url template %s',
base_url_template)
self.token = xargs.get('token', None) or required_from_env('READABILITY_PARSER_TOKEN')
self.base_url_template = base_url_template
def get(self, url):
"""
Make an HTTP GET request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making GET request to %s', url)
return requests.get(url)
def head(self, url):
"""
Make an HTTP HEAD request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making HEAD request to %s', url)
return requests.head(url)
def post(self, url, post_params=None):
"""
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
"""
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params)
def _generate_url(self, resource, query_params=None):
"""
Build the url to resource.
:param resource: Name of the resource that is being called. Options are
`''` (empty string) for root resource, `'parser'`, `'confidence'`.
:param query_params: Data to be passed as query parameters.
"""
resource = '{resource}?token={token}'.format(resource=resource, token=self.token)
if query_params:
resource += "&{}".format(urlencode(query_params))
return self.base_url_template.format(resource)
def get_root(self):
"""
Send a GET request to the root resource of the Parser API.
"""
url = self._generate_url('')
return self.get(url)
def get_article(self, url=None, article_id=None, max_pages=25):
"""
Send a GET request to the `parser` endpoint of the parser API to get
back the representation of an article.
The article can be identified by either a URL or an id that exists
in Readability.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
:param max_pages: The maximum number of pages to parse and combine.
The default is 25.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
query_params['max_pages'] = max_pages
url = self._generate_url('parser', query_params=query_params)
return self.get(url)
def get_article_status(self, url=None, article_id=None):
"""
Send a HEAD request to the `parser` endpoint to the parser API to
get the articles status.
Returned is a `requests.Response` object. The id and status for the
article can be extracted from the `X-Article-Id` and `X-Article-Status`
headers.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('parser', query_params=query_params)
return self.head(url)
def get_confidence(self, url=None, article_id=None):
"""
Send a GET request to the `confidence` endpoint of the Parser API.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('confidence', query_params=query_params)
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ParserClient.get_article_status | python | def get_article_status(self, url=None, article_id=None):
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('parser', query_params=query_params)
return self.head(url) | Send a HEAD request to the `parser` endpoint to the parser API to
get the articles status.
Returned is a `requests.Response` object. The id and status for the
article can be extracted from the `X-Article-Id` and `X-Article-Status`
headers.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L425-L446 | [
"def head(self, url):\n \"\"\"\n Make an HTTP HEAD request to the Parser API.\n\n :param url: url to which to make the request\n \"\"\"\n logger.debug('Making HEAD request to %s', url)\n return requests.head(url)\n",
"def _generate_url(self, resource, query_params=None):\n \"\"\"\n Build t... | class ParserClient(object):
"""
Client for interacting with the Readability Parser API.
Docs can be found at `http://www.readability.com/developers/api/parser`.
"""
def __init__(self, base_url_template=DEFAULT_PARSER_URL_TEMPLATE, **xargs):
"""
Initialize client.
:param token: parser API token, otherwise read from READABILITY_PARSER_TOKEN.
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
logger.debug('Initializing ParserClient with base url template %s',
base_url_template)
self.token = xargs.get('token', None) or required_from_env('READABILITY_PARSER_TOKEN')
self.base_url_template = base_url_template
def get(self, url):
"""
Make an HTTP GET request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making GET request to %s', url)
return requests.get(url)
def head(self, url):
"""
Make an HTTP HEAD request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making HEAD request to %s', url)
return requests.head(url)
def post(self, url, post_params=None):
"""
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
"""
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params)
def _generate_url(self, resource, query_params=None):
"""
Build the url to resource.
:param resource: Name of the resource that is being called. Options are
`''` (empty string) for root resource, `'parser'`, `'confidence'`.
:param query_params: Data to be passed as query parameters.
"""
resource = '{resource}?token={token}'.format(resource=resource, token=self.token)
if query_params:
resource += "&{}".format(urlencode(query_params))
return self.base_url_template.format(resource)
def get_root(self):
"""
Send a GET request to the root resource of the Parser API.
"""
url = self._generate_url('')
return self.get(url)
def get_article(self, url=None, article_id=None, max_pages=25):
"""
Send a GET request to the `parser` endpoint of the parser API to get
back the representation of an article.
The article can be identified by either a URL or an id that exists
in Readability.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
:param max_pages: The maximum number of pages to parse and combine.
The default is 25.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
query_params['max_pages'] = max_pages
url = self._generate_url('parser', query_params=query_params)
return self.get(url)
def post_article_content(self, content, url, max_pages=25):
"""
POST content to be parsed to the Parser API.
Note: Even when POSTing content, a url must still be provided.
:param content: the content to be parsed
:param url: the url that represents the content
:param max_pages (optional): the maximum number of pages to parse
and combine. Default is 25.
"""
params = {
'doc': content,
'max_pages': max_pages
}
url = self._generate_url('parser', {"url": url})
return self.post(url, post_params=params)
def get_confidence(self, url=None, article_id=None):
"""
Send a GET request to the `confidence` endpoint of the Parser API.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('confidence', query_params=query_params)
return self.get(url)
|
ReadabilityHoldings/python-readability-api | readability/clients.py | ParserClient.get_confidence | python | def get_confidence(self, url=None, article_id=None):
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('confidence', query_params=query_params)
return self.get(url) | Send a GET request to the `confidence` endpoint of the Parser API.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted. | train | https://github.com/ReadabilityHoldings/python-readability-api/blob/4b746166877d5a8dc29222aedccb18c2506a5385/readability/clients.py#L448-L464 | [
"def get(self, url):\n \"\"\"\n Make an HTTP GET request to the Parser API.\n\n :param url: url to which to make the request\n \"\"\"\n logger.debug('Making GET request to %s', url)\n return requests.get(url)\n",
"def _generate_url(self, resource, query_params=None):\n \"\"\"\n Build the u... | class ParserClient(object):
"""
Client for interacting with the Readability Parser API.
Docs can be found at `http://www.readability.com/developers/api/parser`.
"""
def __init__(self, base_url_template=DEFAULT_PARSER_URL_TEMPLATE, **xargs):
"""
Initialize client.
:param token: parser API token, otherwise read from READABILITY_PARSER_TOKEN.
:param base_url_template (optional): Template used to build URL to
which requests will be sent. This shouldn't need to be passed as the
main purpose for it is testing environments that the user probably
doesn't have access to (staging, local dev, etc).
"""
logger.debug('Initializing ParserClient with base url template %s',
base_url_template)
self.token = xargs.get('token', None) or required_from_env('READABILITY_PARSER_TOKEN')
self.base_url_template = base_url_template
def get(self, url):
"""
Make an HTTP GET request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making GET request to %s', url)
return requests.get(url)
def head(self, url):
"""
Make an HTTP HEAD request to the Parser API.
:param url: url to which to make the request
"""
logger.debug('Making HEAD request to %s', url)
return requests.head(url)
def post(self, url, post_params=None):
"""
Make an HTTP POST request to the Parser API.
:param url: url to which to make the request
:param post_params: POST data to send along. Expected to be a dict.
"""
post_params['token'] = self.token
params = urlencode(post_params)
logger.debug('Making POST request to %s with body %s', url, params)
return requests.post(url, data=params)
def _generate_url(self, resource, query_params=None):
"""
Build the url to resource.
:param resource: Name of the resource that is being called. Options are
`''` (empty string) for root resource, `'parser'`, `'confidence'`.
:param query_params: Data to be passed as query parameters.
"""
resource = '{resource}?token={token}'.format(resource=resource, token=self.token)
if query_params:
resource += "&{}".format(urlencode(query_params))
return self.base_url_template.format(resource)
def get_root(self):
"""
Send a GET request to the root resource of the Parser API.
"""
url = self._generate_url('')
return self.get(url)
def get_article(self, url=None, article_id=None, max_pages=25):
"""
Send a GET request to the `parser` endpoint of the parser API to get
back the representation of an article.
The article can be identified by either a URL or an id that exists
in Readability.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
:param max_pages: The maximum number of pages to parse and combine.
The default is 25.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
query_params['max_pages'] = max_pages
url = self._generate_url('parser', query_params=query_params)
return self.get(url)
def post_article_content(self, content, url, max_pages=25):
"""
POST content to be parsed to the Parser API.
Note: Even when POSTing content, a url must still be provided.
:param content: the content to be parsed
:param url: the url that represents the content
:param max_pages (optional): the maximum number of pages to parse
and combine. Default is 25.
"""
params = {
'doc': content,
'max_pages': max_pages
}
url = self._generate_url('parser', {"url": url})
return self.post(url, post_params=params)
def get_article_status(self, url=None, article_id=None):
"""
Send a HEAD request to the `parser` endpoint to the parser API to
get the articles status.
Returned is a `requests.Response` object. The id and status for the
article can be extracted from the `X-Article-Id` and `X-Article-Status`
headers.
Note that either the `url` or `article_id` param should be passed.
:param url (optional): The url of an article whose content is wanted.
:param article_id (optional): The id of an article in the Readability
system whose content is wanted.
"""
query_params = {}
if url is not None:
query_params['url'] = url
if article_id is not None:
query_params['article_id'] = article_id
url = self._generate_url('parser', query_params=query_params)
return self.head(url)
|
PlaidWeb/Pushl | pushl/feeds.py | get_feed | python | async def get_feed(config, url):
LOGGER.debug("++WAIT: cache get feed %s", url)
previous = config.cache.get(
'feed', url, schema_version=SCHEMA_VERSION) if config.cache else None
LOGGER.debug("++DONE: cache get feed %s", url)
headers = previous.caching if previous else None
LOGGER.debug("++WAIT: request get %s", url)
request = await utils.retry_get(config, url, headers=headers)
LOGGER.debug("++DONE: request get %s", url)
if not request or not request.success:
LOGGER.error("Could not get feed %s: %d",
url,
request.status if request else -1)
return None, previous, False
if request.cached:
LOGGER.debug("%s: Reusing cached version", url)
return previous, previous, False
current = Feed(request)
if config.cache:
LOGGER.debug("%s: Saving to cache", url)
LOGGER.debug("++WAIT: cache set feed %s", url)
config.cache.set('feed', url, current)
LOGGER.debug("++DONE: cache set feed %s", url)
LOGGER.debug("%s: Returning new content", url)
return current, previous, (not previous
or current.digest != previous.digest
or current.status != previous.status) | Get a feed
Arguments:
config -- the configuration
url -- The URL of the feed
retval -- a tuple of feed,previous_version,changed | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L95-L137 | [
"async def retry_get(config, url, *args, **kwargs):\n \"\"\" aiohttp wrapper for GET \"\"\"\n return await _retry_do(config.session.get, url, *args,\n **_make_headers(config, kwargs))\n"
] | """ Functionality for handling feeds """
import logging
import collections
import hashlib
import feedparser
from . import caching, utils
LOGGER = logging.getLogger(__name__)
SCHEMA_VERSION = 2
class Feed:
""" Encapsulates stuff on feeds """
def __init__(self, request):
""" Given a request object and retrieved text, parse out the feed """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url)
self.caching = caching.make_headers(request.headers)
self.feed = feedparser.parse(text)
self.status = request.status
self.links = self.feed.feed.links
self.schema = SCHEMA_VERSION
@property
def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
pass
return None
@property
def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')}
@property
def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href)
return ('current' in rels and
('self' not in rels or
rels['self'] != rels['current']))
async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err)
|
PlaidWeb/Pushl | pushl/feeds.py | Feed.archive_namespace | python | def archive_namespace(self):
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
pass
return None | Returns the known namespace of the RFC5005 extension, if any | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L33-L41 | null | class Feed:
""" Encapsulates stuff on feeds """
def __init__(self, request):
""" Given a request object and retrieved text, parse out the feed """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url)
self.caching = caching.make_headers(request.headers)
self.feed = feedparser.parse(text)
self.status = request.status
self.links = self.feed.feed.links
self.schema = SCHEMA_VERSION
@property
@property
def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')}
@property
def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href)
return ('current' in rels and
('self' not in rels or
rels['self'] != rels['current']))
async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err)
|
PlaidWeb/Pushl | pushl/feeds.py | Feed.entry_links | python | def entry_links(self):
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')} | Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions) | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L44-L48 | null | class Feed:
""" Encapsulates stuff on feeds """
def __init__(self, request):
""" Given a request object and retrieved text, parse out the feed """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url)
self.caching = caching.make_headers(request.headers)
self.feed = feedparser.parse(text)
self.status = request.status
self.links = self.feed.feed.links
self.schema = SCHEMA_VERSION
@property
def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
pass
return None
@property
@property
def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href)
return ('current' in rels and
('self' not in rels or
rels['self'] != rels['current']))
async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err)
|
PlaidWeb/Pushl | pushl/feeds.py | Feed.is_archive | python | def is_archive(self):
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href)
return ('current' in rels and
('self' not in rels or
rels['self'] != rels['current'])) | Given a parsed feed, returns True if this is an archive feed | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L51-L70 | null | class Feed:
""" Encapsulates stuff on feeds """
def __init__(self, request):
""" Given a request object and retrieved text, parse out the feed """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url)
self.caching = caching.make_headers(request.headers)
self.feed = feedparser.parse(text)
self.status = request.status
self.links = self.feed.feed.links
self.schema = SCHEMA_VERSION
@property
def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
pass
return None
@property
def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')}
@property
async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err)
|
PlaidWeb/Pushl | pushl/feeds.py | Feed.update_websub | python | async def update_websub(self, config, hub):
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err) | Update WebSub hub to know about this feed | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/feeds.py#L72-L92 | [
"async def retry_post(config, url, *args, **kwargs):\n \"\"\" aiohttp wrapper for POST \"\"\"\n return await _retry_do(config.session.post, url, *args,\n **_make_headers(config, kwargs))\n"
] | class Feed:
""" Encapsulates stuff on feeds """
def __init__(self, request):
""" Given a request object and retrieved text, parse out the feed """
text = request.text
md5 = hashlib.md5(text.encode('utf-8'))
self.digest = md5.digest()
self.url = str(request.url)
self.caching = caching.make_headers(request.headers)
self.feed = feedparser.parse(text)
self.status = request.status
self.links = self.feed.feed.links
self.schema = SCHEMA_VERSION
@property
def archive_namespace(self):
""" Returns the known namespace of the RFC5005 extension, if any """
try:
for ns_prefix, url in self.feed.namespaces.items():
if url == 'http://purl.org/syndication/history/1.0':
return ns_prefix
except AttributeError:
pass
return None
@property
def entry_links(self):
""" Given a parsed feed, return the links to its entries, including ones
which disappeared (as a quick-and-dirty way to support deletions)
"""
return {entry['link'] for entry in self.feed.entries if entry and entry.get('link')}
@property
def is_archive(self):
""" Given a parsed feed, returns True if this is an archive feed """
ns_prefix = self.archive_namespace
if ns_prefix:
if ns_prefix + '_archive' in self.feed.feed:
# This is declared to be an archive view
return True
if ns_prefix + '_current' in self.feed.feed:
# This is declared to be the current view
return False
# Either we don't have the namespace, or the view wasn't declared.
rels = collections.defaultdict(list)
for link in self.feed.feed.links:
rels[link.rel].append(link.href)
return ('current' in rels and
('self' not in rels or
rels['self'] != rels['current']))
|
PlaidWeb/Pushl | pushl/__main__.py | parse_args | python | def parse_args(*args):
parser = argparse.ArgumentParser(
description="Send push notifications for a feed")
parser.add_argument('--version', action='version',
version="%(prog)s " + __version__.__version__)
parser.add_argument('feeds', type=str, nargs='*', metavar='feed_url',
help='A URL for a feed to process')
parser.add_argument('--cache', '-c', type=str, dest='cache_dir',
help='Cache storage directory',
required=False)
parser.add_argument("-v", "--verbosity", action="count",
help="increase output verbosity",
default=0)
parser.add_argument("-e", "--entry", nargs='+',
help='URLs to entries/pages to index directly',
metavar='entry_url',
dest='entries')
parser.add_argument("-s", "--websub-only", nargs='+',
help='URLs/feeds to only send WebSub notifications for',
metavar='feed_url', dest='websub_only')
parser.add_argument('--timeout', '-t', type=int, dest='timeout',
help='Connection timeout, in seconds',
default=120)
parser.add_argument('--max-connections', type=int, dest='max_connections',
help='Maximum number of connections to have open at once',
default=100)
parser.add_argument('--max-per-host', type=int, dest='max_per_host',
help='Maximum number of connections per host',
default=0)
parser.add_argument('--rel-whitelist', '-w', dest='rel_whitelist', type=str,
help="Comma-separated list of link RELs to whitelist"
+ " for sending webmentions")
parser.add_argument('--rel-blacklist', '-b', dest='rel_blacklist', type=str,
help="Comma-separated list of link RELs to blacklist"
+ " from sending webmentions",
default="nofollow")
parser.add_argument('--max-time', '-m', dest='max_time', type=float,
help="Maximum time (in seconds) to spend on this", default=1800)
parser.add_argument('--user-agent', dest='user_agent', type=str,
help="User-agent string to send", default=__version__.USER_AGENT)
feature = parser.add_mutually_exclusive_group(required=False)
feature.add_argument('--keepalive', dest='keepalive', action='store_true',
help="Keep TCP connections alive")
feature.add_argument('--no-keepalive', dest='keepalive', action='store_false',
help="Don't keep TCP connections alive")
feature.set_defaults(keepalive=False)
feature = parser.add_mutually_exclusive_group(required=False)
feature.add_argument('--archive', '-a', dest='archive', action='store_true',
help='Process archive links in the feed per RFC 5005')
feature.add_argument('--no-archive', dest='archive', action='store_false',
help='Do not process archive links in the feed')
feature.set_defaults(archive=False)
feature = parser.add_mutually_exclusive_group(required=False)
feature.add_argument('--recurse', '-r',
help="Recursively check other discovered feeds",
action='store_true', dest='recurse')
feature.add_argument('--no-recurse', dest='recurse',
action='store_false',
help="Do not recurse into other feeds")
feature.set_defaults(recurse=False)
return parser.parse_args(*args) | Parse the arguments for the command | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/__main__.py#L17-L89 | null | """ Pushl - a tool for pushing updates from a content feed to another destination """
import argparse
import logging
import asyncio
import aiohttp
from . import Pushl, __version__
LOG_LEVELS = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
LOGGER = logging.getLogger("pushl.main")
def main():
""" main entry point """
args = parse_args()
logging.basicConfig(level=LOG_LEVELS[min(
args.verbosity, len(LOG_LEVELS) - 1)])
loop = asyncio.get_event_loop()
loop.run_until_complete(_run(args))
async def _run(args):
connector = aiohttp.TCPConnector(
limit=args.max_connections,
limit_per_host=args.max_per_host,
enable_cleanup_closed=True,
force_close=not args.keepalive
)
# Time spent waiting for a connection pool entry to free up counts against
# total and connect, so instead we just set the new connection and the read
# timeout
timeout = aiohttp.ClientTimeout(
total=None,
connect=None,
sock_connect=args.timeout,
sock_read=args.timeout)
async with aiohttp.ClientSession(timeout=timeout,
connector=connector) as session:
worker = Pushl(session, args)
tasks = []
for url in args.feeds or []:
tasks.append(worker.process_feed(url))
for url in args.websub_only or []:
tasks.append(worker.process_feed(url, False))
for url in args.entries or []:
tasks.append(worker.process_entry(url, add_domain=True))
if tasks:
_, timed_out = await asyncio.wait(tasks, timeout=args.max_time)
if timed_out:
LOGGER.info("Done. %d tasks did not complete within %d seconds",
len(timed_out), args.max_time)
else:
LOGGER.info("Completed all tasks")
if __name__ == "__main__":
main()
|
PlaidWeb/Pushl | pushl/__main__.py | main | python | def main():
args = parse_args()
logging.basicConfig(level=LOG_LEVELS[min(
args.verbosity, len(LOG_LEVELS) - 1)])
loop = asyncio.get_event_loop()
loop.run_until_complete(_run(args)) | main entry point | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/__main__.py#L92-L99 | [
"def parse_args(*args):\n \"\"\" Parse the arguments for the command \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Send push notifications for a feed\")\n\n parser.add_argument('--version', action='version',\n version=\"%(prog)s \" + __version__.__version__)\n\n ... | """ Pushl - a tool for pushing updates from a content feed to another destination """
import argparse
import logging
import asyncio
import aiohttp
from . import Pushl, __version__
LOG_LEVELS = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
LOGGER = logging.getLogger("pushl.main")
def parse_args(*args):
""" Parse the arguments for the command """
parser = argparse.ArgumentParser(
description="Send push notifications for a feed")
parser.add_argument('--version', action='version',
version="%(prog)s " + __version__.__version__)
parser.add_argument('feeds', type=str, nargs='*', metavar='feed_url',
help='A URL for a feed to process')
parser.add_argument('--cache', '-c', type=str, dest='cache_dir',
help='Cache storage directory',
required=False)
parser.add_argument("-v", "--verbosity", action="count",
help="increase output verbosity",
default=0)
parser.add_argument("-e", "--entry", nargs='+',
help='URLs to entries/pages to index directly',
metavar='entry_url',
dest='entries')
parser.add_argument("-s", "--websub-only", nargs='+',
help='URLs/feeds to only send WebSub notifications for',
metavar='feed_url', dest='websub_only')
parser.add_argument('--timeout', '-t', type=int, dest='timeout',
help='Connection timeout, in seconds',
default=120)
parser.add_argument('--max-connections', type=int, dest='max_connections',
help='Maximum number of connections to have open at once',
default=100)
parser.add_argument('--max-per-host', type=int, dest='max_per_host',
help='Maximum number of connections per host',
default=0)
parser.add_argument('--rel-whitelist', '-w', dest='rel_whitelist', type=str,
help="Comma-separated list of link RELs to whitelist"
+ " for sending webmentions")
parser.add_argument('--rel-blacklist', '-b', dest='rel_blacklist', type=str,
help="Comma-separated list of link RELs to blacklist"
+ " from sending webmentions",
default="nofollow")
parser.add_argument('--max-time', '-m', dest='max_time', type=float,
help="Maximum time (in seconds) to spend on this", default=1800)
parser.add_argument('--user-agent', dest='user_agent', type=str,
help="User-agent string to send", default=__version__.USER_AGENT)
feature = parser.add_mutually_exclusive_group(required=False)
feature.add_argument('--keepalive', dest='keepalive', action='store_true',
help="Keep TCP connections alive")
feature.add_argument('--no-keepalive', dest='keepalive', action='store_false',
help="Don't keep TCP connections alive")
feature.set_defaults(keepalive=False)
feature = parser.add_mutually_exclusive_group(required=False)
feature.add_argument('--archive', '-a', dest='archive', action='store_true',
help='Process archive links in the feed per RFC 5005')
feature.add_argument('--no-archive', dest='archive', action='store_false',
help='Do not process archive links in the feed')
feature.set_defaults(archive=False)
feature = parser.add_mutually_exclusive_group(required=False)
feature.add_argument('--recurse', '-r',
help="Recursively check other discovered feeds",
action='store_true', dest='recurse')
feature.add_argument('--no-recurse', dest='recurse',
action='store_false',
help="Do not recurse into other feeds")
feature.set_defaults(recurse=False)
return parser.parse_args(*args)
async def _run(args):
connector = aiohttp.TCPConnector(
limit=args.max_connections,
limit_per_host=args.max_per_host,
enable_cleanup_closed=True,
force_close=not args.keepalive
)
# Time spent waiting for a connection pool entry to free up counts against
# total and connect, so instead we just set the new connection and the read
# timeout
timeout = aiohttp.ClientTimeout(
total=None,
connect=None,
sock_connect=args.timeout,
sock_read=args.timeout)
async with aiohttp.ClientSession(timeout=timeout,
connector=connector) as session:
worker = Pushl(session, args)
tasks = []
for url in args.feeds or []:
tasks.append(worker.process_feed(url))
for url in args.websub_only or []:
tasks.append(worker.process_feed(url, False))
for url in args.entries or []:
tasks.append(worker.process_entry(url, add_domain=True))
if tasks:
_, timed_out = await asyncio.wait(tasks, timeout=args.max_time)
if timed_out:
LOGGER.info("Done. %d tasks did not complete within %d seconds",
len(timed_out), args.max_time)
else:
LOGGER.info("Completed all tasks")
if __name__ == "__main__":
main()
|
PlaidWeb/Pushl | pushl/caching.py | make_headers | python | def make_headers(headers):
out = {}
if 'etag' in headers:
out['if-none-match'] = headers['etag']
if 'last-modified' in headers:
out['if-modified-since'] = headers['last-modified']
return out | Make the cache control headers based on a previous request's
response headers | train | https://github.com/PlaidWeb/Pushl/blob/5ea92275c37a6c1989e3d5f53e26c6e0ebfb9a8c/pushl/caching.py#L69-L78 | null | """ Simple caching functions """
import pickle
import logging
import hashlib
import os
import sys
from slugify import slugify
LOGGER = logging.getLogger(__name__)
class Cache:
    """ A very simple file-based object cache.

    Objects are pickled to files under ``cache_dir/<prefix>/``.
    A falsy cache_dir disables caching entirely (get always misses,
    set is a no-op).
    """

    def __init__(self, cache_dir):
        # cache_dir of None/'' disables the cache
        self.cache_dir = cache_dir

    def _get_cache_file(self, prefix, url):
        """ Map a (prefix, url) pair to a filesystem path, or None if
        caching is disabled.

        The filename combines a short md5 digest (uniqueness) with a
        slugified URL fragment (human readability when browsing the dir).
        """
        if not self.cache_dir:
            return None

        md5 = hashlib.md5(url.encode('utf-8'))
        filename = md5.hexdigest()[:8] + '.' + slugify(url)[:24]

        return os.path.join(self.cache_dir, prefix, filename)

    def get(self, prefix, url, schema_version=None):
        """ Get the cached object, or None on a miss.

        If schema_version is given and the stored item's ``schema``
        attribute differs, the entry is considered stale and treated
        as a miss.
        """
        if not self.cache_dir:
            return None

        filename = self._get_cache_file(prefix, url)
        try:
            with open(filename, 'rb') as file:
                item = pickle.load(file)
                if schema_version and schema_version != item.schema:
                    LOGGER.debug("Cache get %s %s: Wanted schema %d, got %d",
                                 prefix, url,
                                 schema_version, item.schema)
                    return None
                return item
        except FileNotFoundError:
            # Never cached: a normal miss
            pass
        except Exception:  # pylint:disable=broad-except
            # A corrupt or unreadable cache entry must never be fatal;
            # log it and treat it as a miss
            _, msg, _ = sys.exc_info()
            LOGGER.warning("Cache get %s %s failed: %s", prefix, url, msg)

        return None

    def set(self, prefix, url, obj):
        """ Add an object into the cache.

        The object must be picklable. NOTE(review): cache files are
        read back with pickle.load, so the cache directory must be
        trusted/local.
        """
        if not self.cache_dir:
            return

        filename = self._get_cache_file(prefix, url)

        # exist_ok replaces the race-prone try/except-OSError idiom
        os.makedirs(os.path.join(self.cache_dir, prefix), exist_ok=True)

        with open(filename, 'wb') as file:
            pickle.dump(obj, file)
|
def get(self, prefix, url, schema_version=None):
    """ Get the cached object, or None on a miss.

    :param prefix: namespace subdirectory within the cache
    :param url: URL the object was keyed on
    :param schema_version: if set, a stored item whose ``schema``
        attribute differs is treated as stale and returned as a miss
    """
    # A falsy cache_dir disables the cache entirely
    if not self.cache_dir:
        return None

    filename = self._get_cache_file(prefix, url)
    try:
        with open(filename, 'rb') as file:
            item = pickle.load(file)
            if schema_version and schema_version != item.schema:
                LOGGER.debug("Cache get %s %s: Wanted schema %d, got %d",
                             prefix, url,
                             schema_version, item.schema)
                return None
            return item
    except FileNotFoundError:
        # Never cached: a normal miss
        pass
    except Exception:  # pylint:disable=broad-except
        # A corrupt or unreadable cache entry must never be fatal;
        # log it and treat it as a miss
        _, msg, _ = sys.exc_info()
        LOGGER.warning("Cache get %s %s failed: %s", prefix, url, msg)

    return None
"def _get_cache_file(self, prefix, url):\n if not self.cache_dir:\n return None\n\n md5 = hashlib.md5(url.encode('utf-8'))\n filename = md5.hexdigest()[:8] + '.' + slugify(url)[:24]\n\n return os.path.join(self.cache_dir, prefix, filename)\n"
] | class Cache:
""" A very simple file-based object cache """
def __init__(self, cache_dir):
self.cache_dir = cache_dir
def _get_cache_file(self, prefix, url):
if not self.cache_dir:
return None
md5 = hashlib.md5(url.encode('utf-8'))
filename = md5.hexdigest()[:8] + '.' + slugify(url)[:24]
return os.path.join(self.cache_dir, prefix, filename)
def set(self, prefix, url, obj):
""" Add an object into the cache """
if not self.cache_dir:
return
filename = self._get_cache_file(prefix, url)
try:
os.makedirs(os.path.join(self.cache_dir, prefix))
except OSError:
pass
with open(filename, 'wb') as file:
pickle.dump(obj, file)
|
def set(self, prefix, url, obj):
    """ Add an object into the cache.

    :param prefix: namespace subdirectory within the cache
    :param url: URL the object is keyed on
    :param obj: picklable object to store
    """
    # A falsy cache_dir disables the cache entirely
    if not self.cache_dir:
        return

    filename = self._get_cache_file(prefix, url)
    try:
        # Ensure the prefix directory exists; "already exists" is fine
        os.makedirs(os.path.join(self.cache_dir, prefix))
    except OSError:
        pass
    with open(filename, 'wb') as file:
        pickle.dump(obj, file)
"def _get_cache_file(self, prefix, url):\n if not self.cache_dir:\n return None\n\n md5 = hashlib.md5(url.encode('utf-8'))\n filename = md5.hexdigest()[:8] + '.' + slugify(url)[:24]\n\n return os.path.join(self.cache_dir, prefix, filename)\n"
] | class Cache:
""" A very simple file-based object cache """
def __init__(self, cache_dir):
self.cache_dir = cache_dir
def _get_cache_file(self, prefix, url):
if not self.cache_dir:
return None
md5 = hashlib.md5(url.encode('utf-8'))
filename = md5.hexdigest()[:8] + '.' + slugify(url)[:24]
return os.path.join(self.cache_dir, prefix, filename)
def get(self, prefix, url, schema_version=None):
""" Get the cached object """
if not self.cache_dir:
return None
filename = self._get_cache_file(prefix, url)
try:
with open(filename, 'rb') as file:
item = pickle.load(file)
if schema_version and schema_version != item.schema:
LOGGER.debug("Cache get %s %s: Wanted schema %d, got %d",
prefix, url,
schema_version, item.schema)
return None
return item
except FileNotFoundError:
pass
except Exception: # pylint:disable=broad-except
_, msg, _ = sys.exc_info()
LOGGER.warning("Cache get %s %s failed: %s", prefix, url, msg)
return None
|
async def process_feed(self, url, send_mentions=True):
    """ process a feed: fetch it, follow archive links, ping WebSub
    hubs on updates, and schedule every entry for processing.

    :param url: the feed URL
    :param send_mentions: whether entries should send webmentions
    """
    self._feed_domains.add(utils.get_domain(url))

    # De-dupe: the same feed can be reached via several entries or
    # RFC5005 archive links
    if url in self._processed_feeds:
        LOGGER.debug("Skipping already processed feed %s", url)
        return
    self._processed_feeds.add(url)

    LOGGER.debug("++WAIT: %s: get feed", url)
    feed, previous, updated = await feeds.get_feed(self, url)
    LOGGER.debug("++DONE: %s: get feed", url)
    if updated:
        LOGGER.info("Feed %s has been updated", url)

    if not feed:
        return

    LOGGER.debug("--- starting process_feed %s %s", url, send_mentions)
    pending = []

    try:
        for link in feed.links:
            href = link['href']
            if not href:
                continue

            # RFC5005 archive links
            if self.args.archive and link.get('rel') in ('prev-archive',
                                                         'next-archive',
                                                         'prev-page',
                                                         'next-page'):
                LOGGER.debug("Found archive link %s", link)
                pending.append(
                    ("process feed " + href, self.process_feed(href, send_mentions)))

            # WebSub notification; archive pages never ping the hub
            if updated and link.get('rel') == 'hub' and not feed.is_archive:
                LOGGER.debug("Found WebSub hub %s", link)
                pending.append(
                    ("update websub " + href, feed.update_websub(self, href)))
    except (AttributeError, KeyError):
        LOGGER.debug("Feed %s has no links", url)

    # Schedule the entries; include entries from the previous version of
    # the feed too, so entries that vanished still get reprocessed
    items = set(feed.entry_links)
    if previous:
        items |= set(previous.entry_links)

    for entry in items:
        pending.append(("process entry " + entry,
                        self.process_entry(entry, send_mentions=send_mentions)))

    LOGGER.debug("--- finish process_feed %s %s", url, send_mentions)

    if pending:
        LOGGER.debug("+++WAIT: process_feed(%s): %d subtasks",
                     url, len(pending))
        LOGGER.debug("%s", [name for (name, _) in pending])
        # asyncio.wait() requires Tasks/Futures -- passing bare coroutines
        # is deprecated since 3.8 and an error on 3.11+ -- so wrap them
        await asyncio.wait([asyncio.ensure_future(task)
                            for (_, task) in pending])
        LOGGER.debug("+++DONE: process_feed(%s): %d subtasks",
                     url, len(pending))
"def get_domain(url):\n \"\"\" Get the domain part of a URL \"\"\"\n return urllib.parse.urlparse(url).netloc.lower()\n",
"async def get_feed(config, url):\n \"\"\" Get a feed\n\n Arguments:\n\n config -- the configuration\n url -- The URL of the feed\n\n retval -- a tuple of feed,previous_ve... | class Pushl:
""" Top-level process controller """
# pylint:disable=too-many-instance-attributes
def __init__(self, session, args):
""" Set up the process worker """
self.args = args
self.cache = caching.Cache(args.cache_dir) if args.cache_dir else None
self.rel_whitelist = args.rel_whitelist.split(
',') if args.rel_whitelist else None
self.rel_blacklist = args.rel_blacklist.split(
',') if args.rel_blacklist else None
self._processed_feeds = set()
self._processed_entries = set()
self._processed_mentions = set()
self._feed_domains = set()
self.session = session
async def process_entry(self, url, add_domain=False, send_mentions=True):
""" process an entry """
if add_domain:
self._feed_domains.add(utils.get_domain(url))
if url in self._processed_entries:
LOGGER.debug("Skipping already processed entry %s", url)
return
self._processed_entries.add(url)
LOGGER.debug("++WAIT: get entry %s", url)
entry, previous, updated = await entries.get_entry(self, url)
LOGGER.debug("++DONE: get entry %s", url)
LOGGER.debug("--- starting process_entry %s", url)
pending = []
if updated:
LOGGER.info("Processing entry: %s", url)
if send_mentions:
# get the webmention targets
links = entry.get_targets(self)
if previous:
# Only bother with links that changed from the last time
links = links ^ previous.get_targets(self)
for link in links:
pending.append(("send webmention {} -> {}".format(url, link),
self.send_webmention(entry, link)))
if self.args.recurse:
for feed in entry.feeds:
if utils.get_domain(feed) in self._feed_domains:
pending.append(("process feed " + feed,
self.process_feed(feed, send_mentions=send_mentions)))
else:
LOGGER.info("Ignoring non-local feed %s", feed)
LOGGER.debug("--- finish process_entry %s", url)
if pending:
LOGGER.debug("+++WAIT: process_entry(%s): %d subtasks",
url, len(pending))
LOGGER.debug("%s", [name for (name, _) in pending])
await asyncio.wait([task for (_, task) in pending])
LOGGER.debug("+++DONE: process_entry(%s): %d subtasks",
url, len(pending))
async def send_webmention(self, entry, url):
""" send a webmention from an entry to a URL """
if (entry.url, url) in self._processed_mentions:
LOGGER.debug(
"Skipping already processed mention %s -> %s", entry.url, url)
self._processed_mentions.add((entry.url, url))
LOGGER.debug("++WAIT: webmentions.get_target %s", url)
target = await webmentions.get_target(self, url)
LOGGER.debug("++DONE: webmentions.get_target %s", url)
if target:
LOGGER.debug("++WAIT: Sending webmention %s -> %s", entry.url, url)
await target.send(self, entry)
LOGGER.debug("++DONE: Sending webmention %s -> %s", entry.url, url)
|
async def process_entry(self, url, add_domain=False, send_mentions=True):
    """ process an entry: fetch it, send webmentions for changed
    outgoing links, and optionally recurse into its local feeds.

    :param url: the entry URL
    :param add_domain: register the entry's domain as a "local" feed
        domain (used when the entry was given directly on the CLI)
    :param send_mentions: whether to send webmentions
    """
    if add_domain:
        self._feed_domains.add(utils.get_domain(url))

    # De-dupe: the same entry can appear in several feeds/pages
    if url in self._processed_entries:
        LOGGER.debug("Skipping already processed entry %s", url)
        return
    self._processed_entries.add(url)

    LOGGER.debug("++WAIT: get entry %s", url)
    entry, previous, updated = await entries.get_entry(self, url)
    LOGGER.debug("++DONE: get entry %s", url)

    LOGGER.debug("--- starting process_entry %s", url)
    pending = []

    if updated:
        LOGGER.info("Processing entry: %s", url)
        if send_mentions:
            # get the webmention targets
            links = entry.get_targets(self)
            if previous:
                # Only bother with links that changed from the last time
                # (symmetric difference: added OR removed links)
                links = links ^ previous.get_targets(self)

            for link in links:
                pending.append(("send webmention {} -> {}".format(url, link),
                                self.send_webmention(entry, link)))

        if self.args.recurse:
            for feed in entry.feeds:
                if utils.get_domain(feed) in self._feed_domains:
                    pending.append(("process feed " + feed,
                                    self.process_feed(feed, send_mentions=send_mentions)))
                else:
                    LOGGER.info("Ignoring non-local feed %s", feed)

    LOGGER.debug("--- finish process_entry %s", url)

    if pending:
        LOGGER.debug("+++WAIT: process_entry(%s): %d subtasks",
                     url, len(pending))
        LOGGER.debug("%s", [name for (name, _) in pending])
        # asyncio.wait() requires Tasks/Futures -- passing bare coroutines
        # is deprecated since 3.8 and an error on 3.11+ -- so wrap them
        await asyncio.wait([asyncio.ensure_future(task)
                            for (_, task) in pending])
        LOGGER.debug("+++DONE: process_entry(%s): %d subtasks",
                     url, len(pending))
"def get_domain(url):\n \"\"\" Get the domain part of a URL \"\"\"\n return urllib.parse.urlparse(url).netloc.lower()\n",
"async def get_entry(config, url):\n \"\"\" Given an entry URL, return the entry\n\n Arguments:\n\n config -- the configuration\n url -- the URL of the entry\n\n Returns: ... | class Pushl:
""" Top-level process controller """
# pylint:disable=too-many-instance-attributes
def __init__(self, session, args):
""" Set up the process worker """
self.args = args
self.cache = caching.Cache(args.cache_dir) if args.cache_dir else None
self.rel_whitelist = args.rel_whitelist.split(
',') if args.rel_whitelist else None
self.rel_blacklist = args.rel_blacklist.split(
',') if args.rel_blacklist else None
self._processed_feeds = set()
self._processed_entries = set()
self._processed_mentions = set()
self._feed_domains = set()
self.session = session
async def process_feed(self, url, send_mentions=True):
""" process a feed """
self._feed_domains.add(utils.get_domain(url))
if url in self._processed_feeds:
LOGGER.debug("Skipping already processed feed %s", url)
return
self._processed_feeds.add(url)
LOGGER.debug("++WAIT: %s: get feed", url)
feed, previous, updated = await feeds.get_feed(self, url)
LOGGER.debug("++DONE: %s: get feed", url)
if updated:
LOGGER.info("Feed %s has been updated", url)
if not feed:
return
LOGGER.debug("--- starting process_feed %s %s", url, send_mentions)
pending = []
try:
for link in feed.links:
href = link['href']
if not href:
continue
# RFC5005 archive links
if self.args.archive and link.get('rel') in ('prev-archive',
'next-archive',
'prev-page',
'next-page'):
LOGGER.debug("Found archive link %s", link)
pending.append(
("process feed " + href, self.process_feed(href, send_mentions)))
# WebSub notification
if updated and link.get('rel') == 'hub' and not feed.is_archive:
LOGGER.debug("Found WebSub hub %s", link)
pending.append(
("update websub " + href, feed.update_websub(self, href)))
except (AttributeError, KeyError):
LOGGER.debug("Feed %s has no links", url)
# Schedule the entries
items = set(feed.entry_links)
if previous:
items |= set(previous.entry_links)
for entry in items:
pending.append(("process entry " + entry,
self.process_entry(entry, send_mentions=send_mentions)))
LOGGER.debug("--- finish process_feed %s %s", url, send_mentions)
if pending:
LOGGER.debug("+++WAIT: process_feed(%s): %d subtasks",
url, len(pending))
LOGGER.debug("%s", [name for (name, _) in pending])
await asyncio.wait([task for (_, task) in pending])
LOGGER.debug("+++DONE: process_feed(%s): %d subtasks",
url, len(pending))
async def send_webmention(self, entry, url):
""" send a webmention from an entry to a URL """
if (entry.url, url) in self._processed_mentions:
LOGGER.debug(
"Skipping already processed mention %s -> %s", entry.url, url)
self._processed_mentions.add((entry.url, url))
LOGGER.debug("++WAIT: webmentions.get_target %s", url)
target = await webmentions.get_target(self, url)
LOGGER.debug("++DONE: webmentions.get_target %s", url)
if target:
LOGGER.debug("++WAIT: Sending webmention %s -> %s", entry.url, url)
await target.send(self, entry)
LOGGER.debug("++DONE: Sending webmention %s -> %s", entry.url, url)
|
async def send_webmention(self, entry, url):
    """ send a webmention from an entry to a URL, at most once per
    (entry, target) pair per run """
    if (entry.url, url) in self._processed_mentions:
        LOGGER.debug(
            "Skipping already processed mention %s -> %s", entry.url, url)
        # BUGFIX: the original fell through and sent the mention anyway;
        # the guard must bail out here as the log message says
        return
    self._processed_mentions.add((entry.url, url))

    LOGGER.debug("++WAIT: webmentions.get_target %s", url)
    target = await webmentions.get_target(self, url)
    LOGGER.debug("++DONE: webmentions.get_target %s", url)

    if target:
        LOGGER.debug("++WAIT: Sending webmention %s -> %s", entry.url, url)
        await target.send(self, entry)
        LOGGER.debug("++DONE: Sending webmention %s -> %s", entry.url, url)
""" Top-level process controller """
# pylint:disable=too-many-instance-attributes
def __init__(self, session, args):
""" Set up the process worker """
self.args = args
self.cache = caching.Cache(args.cache_dir) if args.cache_dir else None
self.rel_whitelist = args.rel_whitelist.split(
',') if args.rel_whitelist else None
self.rel_blacklist = args.rel_blacklist.split(
',') if args.rel_blacklist else None
self._processed_feeds = set()
self._processed_entries = set()
self._processed_mentions = set()
self._feed_domains = set()
self.session = session
async def process_feed(self, url, send_mentions=True):
""" process a feed """
self._feed_domains.add(utils.get_domain(url))
if url in self._processed_feeds:
LOGGER.debug("Skipping already processed feed %s", url)
return
self._processed_feeds.add(url)
LOGGER.debug("++WAIT: %s: get feed", url)
feed, previous, updated = await feeds.get_feed(self, url)
LOGGER.debug("++DONE: %s: get feed", url)
if updated:
LOGGER.info("Feed %s has been updated", url)
if not feed:
return
LOGGER.debug("--- starting process_feed %s %s", url, send_mentions)
pending = []
try:
for link in feed.links:
href = link['href']
if not href:
continue
# RFC5005 archive links
if self.args.archive and link.get('rel') in ('prev-archive',
'next-archive',
'prev-page',
'next-page'):
LOGGER.debug("Found archive link %s", link)
pending.append(
("process feed " + href, self.process_feed(href, send_mentions)))
# WebSub notification
if updated and link.get('rel') == 'hub' and not feed.is_archive:
LOGGER.debug("Found WebSub hub %s", link)
pending.append(
("update websub " + href, feed.update_websub(self, href)))
except (AttributeError, KeyError):
LOGGER.debug("Feed %s has no links", url)
# Schedule the entries
items = set(feed.entry_links)
if previous:
items |= set(previous.entry_links)
for entry in items:
pending.append(("process entry " + entry,
self.process_entry(entry, send_mentions=send_mentions)))
LOGGER.debug("--- finish process_feed %s %s", url, send_mentions)
if pending:
LOGGER.debug("+++WAIT: process_feed(%s): %d subtasks",
url, len(pending))
LOGGER.debug("%s", [name for (name, _) in pending])
await asyncio.wait([task for (_, task) in pending])
LOGGER.debug("+++DONE: process_feed(%s): %d subtasks",
url, len(pending))
async def process_entry(self, url, add_domain=False, send_mentions=True):
""" process an entry """
if add_domain:
self._feed_domains.add(utils.get_domain(url))
if url in self._processed_entries:
LOGGER.debug("Skipping already processed entry %s", url)
return
self._processed_entries.add(url)
LOGGER.debug("++WAIT: get entry %s", url)
entry, previous, updated = await entries.get_entry(self, url)
LOGGER.debug("++DONE: get entry %s", url)
LOGGER.debug("--- starting process_entry %s", url)
pending = []
if updated:
LOGGER.info("Processing entry: %s", url)
if send_mentions:
# get the webmention targets
links = entry.get_targets(self)
if previous:
# Only bother with links that changed from the last time
links = links ^ previous.get_targets(self)
for link in links:
pending.append(("send webmention {} -> {}".format(url, link),
self.send_webmention(entry, link)))
if self.args.recurse:
for feed in entry.feeds:
if utils.get_domain(feed) in self._feed_domains:
pending.append(("process feed " + feed,
self.process_feed(feed, send_mentions=send_mentions)))
else:
LOGGER.info("Ignoring non-local feed %s", feed)
LOGGER.debug("--- finish process_entry %s", url)
if pending:
LOGGER.debug("+++WAIT: process_entry(%s): %d subtasks",
url, len(pending))
LOGGER.debug("%s", [name for (name, _) in pending])
await asyncio.wait([task for (_, task) in pending])
LOGGER.debug("+++DONE: process_entry(%s): %d subtasks",
url, len(pending))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.