repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
astropy/pyregion
|
pyregion/core.py
|
open
|
python
|
def open(fname):
with _builtin_open(fname) as fh:
region_string = fh.read()
return parse(region_string)
|
Open, read and parse DS9 region file.
Parameters
----------
fname : str
Filename
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L245-L260
|
[
"def parse(region_string):\n \"\"\"Parse DS9 region string into a ShapeList.\n\n Parameters\n ----------\n region_string : str\n Region string\n\n Returns\n -------\n shapes : `ShapeList`\n List of `~pyregion.Shape`\n \"\"\"\n rp = RegionParser()\n ss = rp.parse(region_string)\n sss1 = rp.convert_attr(ss)\n sss2 = _check_wcs(sss1)\n\n shape_list, comment_list = rp.filter_shape2(sss2)\n return ShapeList(shape_list, comment_list=comment_list)\n"
] |
from itertools import cycle
from .ds9_region_parser import RegionParser
from .wcs_converter import check_wcs as _check_wcs
_builtin_open = open
class ShapeList(list):
"""A list of `~pyregion.Shape` objects.
Parameters
----------
shape_list : list
List of `pyregion.Shape` objects
comment_list : list, None
List of comment strings for each argument
"""
def __init__(self, shape_list, comment_list=None):
if comment_list is not None:
if len(comment_list) != len(shape_list):
err = "Ambiguous number of comments {} for number of shapes {}"
raise ValueError(err.format(len(comment_list),
len(shape_list)))
self._comment_list = comment_list
list.__init__(self, shape_list)
def __getitem__(self, key):
if isinstance(key, slice):
return ShapeList(list.__getitem__(self, key))
else:
return list.__getitem__(self, key)
def __getslice__(self, i, j):
return self[max(0, i):max(0, j):]
def check_imagecoord(self):
"""Are all shapes in image coordinates?
Returns ``True`` if yes, and ``False`` if not.
"""
if [s for s in self if s.coord_format != "image"]:
return False
else:
return True
def as_imagecoord(self, header):
"""New shape list in image coordinates.
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shape_list : `ShapeList`
New shape list, with coordinates of the each shape
converted to the image coordinate using the given header
information.
"""
comment_list = self._comment_list
if comment_list is None:
comment_list = cycle([None])
r = RegionParser.sky_to_image(zip(self, comment_list),
header)
shape_list, comment_list = zip(*list(r))
return ShapeList(shape_list, comment_list=comment_list)
def get_mpl_patches_texts(self, properties_func=None,
text_offset=5.0,
origin=1):
"""
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
"""
from .mpl_helper import as_mpl_artists
patches, txts = as_mpl_artists(self, properties_func,
text_offset,
origin=origin)
return patches, txts
def get_filter(self, header=None, origin=1):
"""Get filter.
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
Parameters
----------
header : `astropy.io.fits.Header`
FITS header
origin : {0, 1}
Pixel coordinate origin
Returns
-------
filter : TODO
Filter object
"""
from .region_to_filter import as_region_filter
if header is None:
if not self.check_imagecoord():
raise RuntimeError("the region has non-image coordinate. header is required.")
reg_in_imagecoord = self
else:
reg_in_imagecoord = self.as_imagecoord(header)
region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
return region_filter
def get_mask(self, hdu=None, header=None, shape=None):
"""Create a 2-d mask.
Parameters
----------
hdu : `astropy.io.fits.ImageHDU`
FITS image HDU
header : `~astropy.io.fits.Header`
FITS header
shape : tuple
Image shape
Returns
-------
mask : `numpy.array`
Boolean mask
Examples
--------
get_mask(hdu=f[0])
get_mask(shape=(10,10))
get_mask(header=f[0].header, shape=(10,10))
"""
if hdu and header is None:
header = hdu.header
if hdu and shape is None:
shape = hdu.data.shape
region_filter = self.get_filter(header=header)
mask = region_filter.mask(shape)
return mask
def write(self, outfile):
"""Write this shape list to a region file.
Parameters
----------
outfile : str
File name
"""
if len(self) < 1:
print("WARNING: The region list is empty. The region file "
"'{:s}' will be empty.".format(outfile))
try:
outf = _builtin_open(outfile, 'w')
outf.close()
return
except IOError as e:
cmsg = "Unable to create region file '{:s}'.".format(outfile)
if e.args:
e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
else:
e.args = (cmsg,)
raise e
prev_cs = self[0].coord_format
outf = None
try:
outf = _builtin_open(outfile, 'w')
attr0 = self[0].attr[1]
defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
for a in attr0 if a != 'text'])
# first line is globals
outf.write("global {0}\n".format(defaultline))
# second line must be a coordinate format
outf.write("{0}\n".format(prev_cs))
for shape in self:
shape_attr = '' if prev_cs == shape.coord_format \
else shape.coord_format + "; "
shape_excl = '-' if shape.exclude else ''
text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
shape_coords = "(" + ",".join(text_coordlist) + ")"
shape_comment = " # " + shape.comment if shape.comment else ''
shape_str = (shape_attr + shape_excl + shape.name +
shape_coords + shape_comment)
outf.write("{0}\n".format(shape_str))
except IOError as e:
cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
if e.args:
e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
else:
e.args = (cmsg,)
raise e
finally:
if outf:
outf.close()
def parse(region_string):
"""Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(region_string)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list, comment_list = rp.filter_shape2(sss2)
return ShapeList(shape_list, comment_list=comment_list)
def read_region(s):
"""Read region.
Parameters
----------
s : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list = rp.filter_shape(sss2)
return ShapeList(shape_list)
def read_region_as_imagecoord(s, header):
"""Read region as image coordinates.
Parameters
----------
s : str
Region string
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shapes : `~pyregion.ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
sss3 = rp.sky_to_image(sss2, header)
shape_list = rp.filter_shape(sss3)
return ShapeList(shape_list)
def get_mask(region, hdu, origin=1):
"""Get mask.
Parameters
----------
region : `~pyregion.ShapeList`
List of `~pyregion.Shape`
hdu : `~astropy.io.fits.ImageHDU`
FITS image HDU
origin : float
TODO: document me
Returns
-------
mask : `~numpy.array`
Boolean mask
Examples
--------
>>> from astropy.io import fits
>>> from pyregion import read_region_as_imagecoord, get_mask
>>> hdu = fits.open("test.fits")[0]
>>> region = "test01.reg"
>>> reg = read_region_as_imagecoord(open(region), f[0].header)
>>> mask = get_mask(reg, hdu)
"""
from pyregion.region_to_filter import as_region_filter
data = hdu.data
region_filter = as_region_filter(region, origin=origin)
mask = region_filter.mask(data)
return mask
|
astropy/pyregion
|
pyregion/core.py
|
read_region
|
python
|
def read_region(s):
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list = rp.filter_shape(sss2)
return ShapeList(shape_list)
|
Read region.
Parameters
----------
s : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L263-L282
|
[
"def check_wcs(l):\n default_coord = \"physical\"\n\n for l1, c1 in l:\n if isinstance(l1, CoordCommand):\n default_coord = l1.text.lower()\n continue\n if isinstance(l1, Shape):\n if default_coord == \"galactic\":\n is_wcs, coord_list = check_wcs_and_convert(l1.params,\n all_dms=True)\n else:\n is_wcs, coord_list = check_wcs_and_convert(l1.params)\n\n if is_wcs and (default_coord == \"physical\"): # ciao format\n coord_format = \"fk5\"\n else:\n coord_format = default_coord\n\n l1n = copy.copy(l1)\n\n l1n.coord_list = coord_list\n l1n.coord_format = coord_format\n\n yield l1n, c1\n else:\n yield l1, c1\n",
"def parse(self, s):\n\n for l in s.split(\"\\n\"):\n try:\n s, c, continued = self.parseLine(l)\n except ParseException:\n warnings.warn(\"Failed to parse : \" + l)\n self.flush()\n continue\n\n if len(s) > 1:\n for s1 in s[:-1]:\n yield s1, None\n\n s[-1].comment = c\n s[-1].continued = continued\n yield s[-1], c\n elif len(s) == 1:\n s[-1].comment = c\n s[-1].continued = continued\n yield s[-1], c\n elif c:\n yield None, c\n\n self.flush()\n",
"def convert_attr(self, l):\n global_attr = [], {}\n\n parser = Ds9AttrParser()\n\n for l1, c1 in l:\n if isinstance(l1, Global):\n for kv in parser.parse_default(l1.text):\n if len(kv) == 1:\n global_attr[0].append(kv[0])\n elif len(kv) == 2:\n if kv[0] == 'tag':\n global_attr[1].setdefault(kv[0], set()).add(kv[1])\n else:\n global_attr[1][kv[0]] = kv[1]\n\n elif isinstance(l1, Shape):\n if c1:\n attr_list = parser.parse_default(c1)\n attr0, attr1 = get_attr(attr_list, global_attr)\n else:\n attr0, attr1 = global_attr\n l1n = copy.copy(l1)\n l1n.attr = attr0, attr1\n yield l1n, c1\n\n elif not l1 and c1:\n shape, attr_list = parser.parse_check_shape(c1)\n if shape:\n shape.attr = get_attr(attr_list, global_attr)\n yield shape, c1\n else:\n yield l1, c1\n",
"def filter_shape(self, sss):\n return [s1[0] for s1 in sss if isinstance(s1[0], Shape)]\n"
] |
from itertools import cycle
from .ds9_region_parser import RegionParser
from .wcs_converter import check_wcs as _check_wcs
_builtin_open = open
class ShapeList(list):
"""A list of `~pyregion.Shape` objects.
Parameters
----------
shape_list : list
List of `pyregion.Shape` objects
comment_list : list, None
List of comment strings for each argument
"""
def __init__(self, shape_list, comment_list=None):
if comment_list is not None:
if len(comment_list) != len(shape_list):
err = "Ambiguous number of comments {} for number of shapes {}"
raise ValueError(err.format(len(comment_list),
len(shape_list)))
self._comment_list = comment_list
list.__init__(self, shape_list)
def __getitem__(self, key):
if isinstance(key, slice):
return ShapeList(list.__getitem__(self, key))
else:
return list.__getitem__(self, key)
def __getslice__(self, i, j):
return self[max(0, i):max(0, j):]
def check_imagecoord(self):
"""Are all shapes in image coordinates?
Returns ``True`` if yes, and ``False`` if not.
"""
if [s for s in self if s.coord_format != "image"]:
return False
else:
return True
def as_imagecoord(self, header):
"""New shape list in image coordinates.
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shape_list : `ShapeList`
New shape list, with coordinates of the each shape
converted to the image coordinate using the given header
information.
"""
comment_list = self._comment_list
if comment_list is None:
comment_list = cycle([None])
r = RegionParser.sky_to_image(zip(self, comment_list),
header)
shape_list, comment_list = zip(*list(r))
return ShapeList(shape_list, comment_list=comment_list)
def get_mpl_patches_texts(self, properties_func=None,
text_offset=5.0,
origin=1):
"""
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
"""
from .mpl_helper import as_mpl_artists
patches, txts = as_mpl_artists(self, properties_func,
text_offset,
origin=origin)
return patches, txts
def get_filter(self, header=None, origin=1):
"""Get filter.
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
Parameters
----------
header : `astropy.io.fits.Header`
FITS header
origin : {0, 1}
Pixel coordinate origin
Returns
-------
filter : TODO
Filter object
"""
from .region_to_filter import as_region_filter
if header is None:
if not self.check_imagecoord():
raise RuntimeError("the region has non-image coordinate. header is required.")
reg_in_imagecoord = self
else:
reg_in_imagecoord = self.as_imagecoord(header)
region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
return region_filter
def get_mask(self, hdu=None, header=None, shape=None):
"""Create a 2-d mask.
Parameters
----------
hdu : `astropy.io.fits.ImageHDU`
FITS image HDU
header : `~astropy.io.fits.Header`
FITS header
shape : tuple
Image shape
Returns
-------
mask : `numpy.array`
Boolean mask
Examples
--------
get_mask(hdu=f[0])
get_mask(shape=(10,10))
get_mask(header=f[0].header, shape=(10,10))
"""
if hdu and header is None:
header = hdu.header
if hdu and shape is None:
shape = hdu.data.shape
region_filter = self.get_filter(header=header)
mask = region_filter.mask(shape)
return mask
def write(self, outfile):
"""Write this shape list to a region file.
Parameters
----------
outfile : str
File name
"""
if len(self) < 1:
print("WARNING: The region list is empty. The region file "
"'{:s}' will be empty.".format(outfile))
try:
outf = _builtin_open(outfile, 'w')
outf.close()
return
except IOError as e:
cmsg = "Unable to create region file '{:s}'.".format(outfile)
if e.args:
e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
else:
e.args = (cmsg,)
raise e
prev_cs = self[0].coord_format
outf = None
try:
outf = _builtin_open(outfile, 'w')
attr0 = self[0].attr[1]
defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
for a in attr0 if a != 'text'])
# first line is globals
outf.write("global {0}\n".format(defaultline))
# second line must be a coordinate format
outf.write("{0}\n".format(prev_cs))
for shape in self:
shape_attr = '' if prev_cs == shape.coord_format \
else shape.coord_format + "; "
shape_excl = '-' if shape.exclude else ''
text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
shape_coords = "(" + ",".join(text_coordlist) + ")"
shape_comment = " # " + shape.comment if shape.comment else ''
shape_str = (shape_attr + shape_excl + shape.name +
shape_coords + shape_comment)
outf.write("{0}\n".format(shape_str))
except IOError as e:
cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
if e.args:
e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
else:
e.args = (cmsg,)
raise e
finally:
if outf:
outf.close()
def parse(region_string):
"""Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(region_string)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list, comment_list = rp.filter_shape2(sss2)
return ShapeList(shape_list, comment_list=comment_list)
def open(fname):
"""Open, read and parse DS9 region file.
Parameters
----------
fname : str
Filename
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
with _builtin_open(fname) as fh:
region_string = fh.read()
return parse(region_string)
def read_region_as_imagecoord(s, header):
"""Read region as image coordinates.
Parameters
----------
s : str
Region string
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shapes : `~pyregion.ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
sss3 = rp.sky_to_image(sss2, header)
shape_list = rp.filter_shape(sss3)
return ShapeList(shape_list)
def get_mask(region, hdu, origin=1):
"""Get mask.
Parameters
----------
region : `~pyregion.ShapeList`
List of `~pyregion.Shape`
hdu : `~astropy.io.fits.ImageHDU`
FITS image HDU
origin : float
TODO: document me
Returns
-------
mask : `~numpy.array`
Boolean mask
Examples
--------
>>> from astropy.io import fits
>>> from pyregion import read_region_as_imagecoord, get_mask
>>> hdu = fits.open("test.fits")[0]
>>> region = "test01.reg"
>>> reg = read_region_as_imagecoord(open(region), f[0].header)
>>> mask = get_mask(reg, hdu)
"""
from pyregion.region_to_filter import as_region_filter
data = hdu.data
region_filter = as_region_filter(region, origin=origin)
mask = region_filter.mask(data)
return mask
|
astropy/pyregion
|
pyregion/core.py
|
read_region_as_imagecoord
|
python
|
def read_region_as_imagecoord(s, header):
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
sss3 = rp.sky_to_image(sss2, header)
shape_list = rp.filter_shape(sss3)
return ShapeList(shape_list)
|
Read region as image coordinates.
Parameters
----------
s : str
Region string
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shapes : `~pyregion.ShapeList`
List of `~pyregion.Shape`
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L285-L307
|
[
"def check_wcs(l):\n default_coord = \"physical\"\n\n for l1, c1 in l:\n if isinstance(l1, CoordCommand):\n default_coord = l1.text.lower()\n continue\n if isinstance(l1, Shape):\n if default_coord == \"galactic\":\n is_wcs, coord_list = check_wcs_and_convert(l1.params,\n all_dms=True)\n else:\n is_wcs, coord_list = check_wcs_and_convert(l1.params)\n\n if is_wcs and (default_coord == \"physical\"): # ciao format\n coord_format = \"fk5\"\n else:\n coord_format = default_coord\n\n l1n = copy.copy(l1)\n\n l1n.coord_list = coord_list\n l1n.coord_format = coord_format\n\n yield l1n, c1\n else:\n yield l1, c1\n",
"def parse(self, s):\n\n for l in s.split(\"\\n\"):\n try:\n s, c, continued = self.parseLine(l)\n except ParseException:\n warnings.warn(\"Failed to parse : \" + l)\n self.flush()\n continue\n\n if len(s) > 1:\n for s1 in s[:-1]:\n yield s1, None\n\n s[-1].comment = c\n s[-1].continued = continued\n yield s[-1], c\n elif len(s) == 1:\n s[-1].comment = c\n s[-1].continued = continued\n yield s[-1], c\n elif c:\n yield None, c\n\n self.flush()\n",
"def convert_attr(self, l):\n global_attr = [], {}\n\n parser = Ds9AttrParser()\n\n for l1, c1 in l:\n if isinstance(l1, Global):\n for kv in parser.parse_default(l1.text):\n if len(kv) == 1:\n global_attr[0].append(kv[0])\n elif len(kv) == 2:\n if kv[0] == 'tag':\n global_attr[1].setdefault(kv[0], set()).add(kv[1])\n else:\n global_attr[1][kv[0]] = kv[1]\n\n elif isinstance(l1, Shape):\n if c1:\n attr_list = parser.parse_default(c1)\n attr0, attr1 = get_attr(attr_list, global_attr)\n else:\n attr0, attr1 = global_attr\n l1n = copy.copy(l1)\n l1n.attr = attr0, attr1\n yield l1n, c1\n\n elif not l1 and c1:\n shape, attr_list = parser.parse_check_shape(c1)\n if shape:\n shape.attr = get_attr(attr_list, global_attr)\n yield shape, c1\n else:\n yield l1, c1\n",
"def sky_to_image(shape_list, header):\n \"\"\"Converts a `ShapeList` into shapes with coordinates in image coordinates\n\n Parameters\n ----------\n shape_list : `pyregion.ShapeList`\n The ShapeList to convert\n header : `~astropy.io.fits.Header`\n Specifies what WCS transformations to use.\n\n Yields\n -------\n shape, comment : Shape, str\n Shape with image coordinates and the associated comment\n\n Note\n ----\n The comments in the original `ShapeList` are unaltered\n\n \"\"\"\n\n for shape, comment in shape_list:\n if isinstance(shape, Shape) and \\\n (shape.coord_format not in image_like_coordformats):\n\n new_coords = convert_to_imagecoord(shape, header)\n\n l1n = copy.copy(shape)\n\n l1n.coord_list = new_coords\n l1n.coord_format = \"image\"\n yield l1n, comment\n\n elif isinstance(shape, Shape) and shape.coord_format == \"physical\":\n\n if header is None:\n raise RuntimeError(\"Physical coordinate is not known.\")\n\n new_coordlist = convert_physical_to_imagecoord(shape, header)\n\n l1n = copy.copy(shape)\n\n l1n.coord_list = new_coordlist\n l1n.coord_format = \"image\"\n yield l1n, comment\n\n else:\n yield shape, comment\n",
"def filter_shape(self, sss):\n return [s1[0] for s1 in sss if isinstance(s1[0], Shape)]\n"
] |
from itertools import cycle
from .ds9_region_parser import RegionParser
from .wcs_converter import check_wcs as _check_wcs
_builtin_open = open
class ShapeList(list):
"""A list of `~pyregion.Shape` objects.
Parameters
----------
shape_list : list
List of `pyregion.Shape` objects
comment_list : list, None
List of comment strings for each argument
"""
def __init__(self, shape_list, comment_list=None):
if comment_list is not None:
if len(comment_list) != len(shape_list):
err = "Ambiguous number of comments {} for number of shapes {}"
raise ValueError(err.format(len(comment_list),
len(shape_list)))
self._comment_list = comment_list
list.__init__(self, shape_list)
def __getitem__(self, key):
if isinstance(key, slice):
return ShapeList(list.__getitem__(self, key))
else:
return list.__getitem__(self, key)
def __getslice__(self, i, j):
return self[max(0, i):max(0, j):]
def check_imagecoord(self):
"""Are all shapes in image coordinates?
Returns ``True`` if yes, and ``False`` if not.
"""
if [s for s in self if s.coord_format != "image"]:
return False
else:
return True
def as_imagecoord(self, header):
"""New shape list in image coordinates.
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shape_list : `ShapeList`
New shape list, with coordinates of the each shape
converted to the image coordinate using the given header
information.
"""
comment_list = self._comment_list
if comment_list is None:
comment_list = cycle([None])
r = RegionParser.sky_to_image(zip(self, comment_list),
header)
shape_list, comment_list = zip(*list(r))
return ShapeList(shape_list, comment_list=comment_list)
def get_mpl_patches_texts(self, properties_func=None,
text_offset=5.0,
origin=1):
"""
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
"""
from .mpl_helper import as_mpl_artists
patches, txts = as_mpl_artists(self, properties_func,
text_offset,
origin=origin)
return patches, txts
def get_filter(self, header=None, origin=1):
"""Get filter.
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
Parameters
----------
header : `astropy.io.fits.Header`
FITS header
origin : {0, 1}
Pixel coordinate origin
Returns
-------
filter : TODO
Filter object
"""
from .region_to_filter import as_region_filter
if header is None:
if not self.check_imagecoord():
raise RuntimeError("the region has non-image coordinate. header is required.")
reg_in_imagecoord = self
else:
reg_in_imagecoord = self.as_imagecoord(header)
region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
return region_filter
def get_mask(self, hdu=None, header=None, shape=None):
"""Create a 2-d mask.
Parameters
----------
hdu : `astropy.io.fits.ImageHDU`
FITS image HDU
header : `~astropy.io.fits.Header`
FITS header
shape : tuple
Image shape
Returns
-------
mask : `numpy.array`
Boolean mask
Examples
--------
get_mask(hdu=f[0])
get_mask(shape=(10,10))
get_mask(header=f[0].header, shape=(10,10))
"""
if hdu and header is None:
header = hdu.header
if hdu and shape is None:
shape = hdu.data.shape
region_filter = self.get_filter(header=header)
mask = region_filter.mask(shape)
return mask
def write(self, outfile):
"""Write this shape list to a region file.
Parameters
----------
outfile : str
File name
"""
if len(self) < 1:
print("WARNING: The region list is empty. The region file "
"'{:s}' will be empty.".format(outfile))
try:
outf = _builtin_open(outfile, 'w')
outf.close()
return
except IOError as e:
cmsg = "Unable to create region file '{:s}'.".format(outfile)
if e.args:
e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
else:
e.args = (cmsg,)
raise e
prev_cs = self[0].coord_format
outf = None
try:
outf = _builtin_open(outfile, 'w')
attr0 = self[0].attr[1]
defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
for a in attr0 if a != 'text'])
# first line is globals
outf.write("global {0}\n".format(defaultline))
# second line must be a coordinate format
outf.write("{0}\n".format(prev_cs))
for shape in self:
shape_attr = '' if prev_cs == shape.coord_format \
else shape.coord_format + "; "
shape_excl = '-' if shape.exclude else ''
text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
shape_coords = "(" + ",".join(text_coordlist) + ")"
shape_comment = " # " + shape.comment if shape.comment else ''
shape_str = (shape_attr + shape_excl + shape.name +
shape_coords + shape_comment)
outf.write("{0}\n".format(shape_str))
except IOError as e:
cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
if e.args:
e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
else:
e.args = (cmsg,)
raise e
finally:
if outf:
outf.close()
def parse(region_string):
"""Parse DS9 region string into a ShapeList.
Parameters
----------
region_string : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(region_string)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list, comment_list = rp.filter_shape2(sss2)
return ShapeList(shape_list, comment_list=comment_list)
def open(fname):
"""Open, read and parse DS9 region file.
Parameters
----------
fname : str
Filename
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
with _builtin_open(fname) as fh:
region_string = fh.read()
return parse(region_string)
def read_region(s):
"""Read region.
Parameters
----------
s : str
Region string
Returns
-------
shapes : `ShapeList`
List of `~pyregion.Shape`
"""
rp = RegionParser()
ss = rp.parse(s)
sss1 = rp.convert_attr(ss)
sss2 = _check_wcs(sss1)
shape_list = rp.filter_shape(sss2)
return ShapeList(shape_list)
def get_mask(region, hdu, origin=1):
"""Get mask.
Parameters
----------
region : `~pyregion.ShapeList`
List of `~pyregion.Shape`
hdu : `~astropy.io.fits.ImageHDU`
FITS image HDU
origin : float
TODO: document me
Returns
-------
mask : `~numpy.array`
Boolean mask
Examples
--------
>>> from astropy.io import fits
>>> from pyregion import read_region_as_imagecoord, get_mask
>>> hdu = fits.open("test.fits")[0]
>>> region = "test01.reg"
>>> reg = read_region_as_imagecoord(open(region), f[0].header)
>>> mask = get_mask(reg, hdu)
"""
from pyregion.region_to_filter import as_region_filter
data = hdu.data
region_filter = as_region_filter(region, origin=origin)
mask = region_filter.mask(data)
return mask
|
astropy/pyregion
|
pyregion/core.py
|
get_mask
|
python
|
def get_mask(region, hdu, origin=1):
from pyregion.region_to_filter import as_region_filter
data = hdu.data
region_filter = as_region_filter(region, origin=origin)
mask = region_filter.mask(data)
return mask
|
Get mask.
Parameters
----------
region : `~pyregion.ShapeList`
List of `~pyregion.Shape`
hdu : `~astropy.io.fits.ImageHDU`
FITS image HDU
origin : float
TODO: document me
Returns
-------
mask : `~numpy.array`
Boolean mask
Examples
--------
>>> from astropy.io import fits
>>> from pyregion import read_region_as_imagecoord, get_mask
>>> hdu = fits.open("test.fits")[0]
>>> region = "test01.reg"
>>> reg = read_region_as_imagecoord(open(region), f[0].header)
>>> mask = get_mask(reg, hdu)
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L310-L341
|
[
"def as_region_filter(shape_list, origin=1):\n \"\"\"\n Often, the regions files implicitly assume the lower-left corner\n of the image as a coordinate (1,1). However, the python convetion\n is that the array index starts from 0. By default (origin = 1),\n coordinates of the returned mpl artists have coordinate shifted by\n (1, 1). If you do not want this shift, use origin=0.\n \"\"\"\n\n filter_list = []\n for shape in shape_list:\n\n if shape.name == \"composite\":\n continue\n\n if shape.name == \"polygon\":\n xy = np.array(shape.coord_list) - origin\n f = region_filter.Polygon(xy[::2], xy[1::2])\n\n elif shape.name == \"rotbox\" or shape.name == \"box\":\n xc, yc, w, h, rot = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f = region_filter.Rotated(region_filter.Box(xc, yc, w, h),\n rot, xc, yc)\n\n elif shape.name == \"ellipse\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n angle = shape.coord_list[-1]\n\n maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]\n\n if len(maj_list) > 1:\n w1, h1 = max(maj_list), max(min_list)\n w2, h2 = min(maj_list), min(min_list)\n\n f1 = region_filter.Ellipse(xc, yc, w1, h1) \\\n & ~region_filter.Ellipse(xc, yc, w2, h2)\n f = region_filter.Rotated(f1, angle, xc, yc)\n else:\n w, h = maj_list[0], min_list[0]\n f = region_filter.Rotated(region_filter.Ellipse(xc, yc, w, h),\n angle, xc, yc)\n\n elif shape.name == \"annulus\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n r_list = shape.coord_list[2:]\n\n r1 = max(r_list)\n r2 = min(r_list)\n\n f = region_filter.Circle(xc, yc, r1) & ~region_filter.Circle(xc, yc, r2)\n\n elif shape.name == \"circle\":\n xc, yc, r = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f = region_filter.Circle(xc, yc, r)\n\n elif shape.name == \"panda\":\n xc, yc, a1, a2, an, r1, r2, rn = 
shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)\n f = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n\n elif shape.name == \"pie\":\n xc, yc, r1, r2, a1, a2 = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)\n f = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n\n elif shape.name == \"epanda\":\n xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Ellipse(xc, yc, r21, r22) & ~region_filter.Ellipse(xc, yc, r11, r12)\n f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n f = region_filter.Rotated(f2, angle, xc, yc)\n # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)\n\n elif shape.name == \"bpanda\":\n xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Box(xc, yc, r21, r22) & ~region_filter.Box(xc, yc, r11, r12)\n f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n f = region_filter.Rotated(f2, angle, xc, yc)\n # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)\n\n else:\n warnings.warn(\"'as_region_filter' does not know how to convert {0}\"\n \" to a region filter.\".format(shape.name))\n continue\n\n if shape.exclude:\n filter_list = [region_filter.RegionOrList(*filter_list) & ~f]\n else:\n filter_list.append(f)\n\n return region_filter.RegionOrList(*filter_list)\n"
] |
from itertools import cycle
from .ds9_region_parser import RegionParser
from .wcs_converter import check_wcs as _check_wcs
_builtin_open = open
class ShapeList(list):
    """A list of `~pyregion.Shape` objects.

    Parameters
    ----------
    shape_list : list
        List of `pyregion.Shape` objects
    comment_list : list, None
        List of comment strings for each argument
    """

    def __init__(self, shape_list, comment_list=None):
        # When comments are supplied there must be exactly one per shape.
        if comment_list is not None:
            if len(comment_list) != len(shape_list):
                err = "Ambiguous number of comments {} for number of shapes {}"
                raise ValueError(err.format(len(comment_list),
                                            len(shape_list)))
        self._comment_list = comment_list
        list.__init__(self, shape_list)

    def __getitem__(self, key):
        if isinstance(key, slice):
            # Slice the comment list alongside the shapes so that a sliced
            # ShapeList keeps the comments of the shapes it contains.
            # (Previously the comments were silently dropped by slicing.)
            comments = None
            if self._comment_list is not None:
                comments = list(self._comment_list[key])
            return ShapeList(list.__getitem__(self, key),
                             comment_list=comments)
        else:
            return list.__getitem__(self, key)

    def __getslice__(self, i, j):
        # Python 2 compatibility only; on Python 3 slicing is routed
        # through __getitem__ above.
        return self[max(0, i):max(0, j):]

    def check_imagecoord(self):
        """Are all shapes in image coordinates?

        Returns ``True`` if yes, and ``False`` if not.
        """
        if [s for s in self if s.coord_format != "image"]:
            return False
        else:
            return True

    def as_imagecoord(self, header):
        """New shape list in image coordinates.

        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            FITS header

        Returns
        -------
        shape_list : `ShapeList`
            New shape list, with coordinates of each shape converted
            to the image coordinate using the given header information.
        """
        comment_list = self._comment_list
        if comment_list is None:
            # Pair every shape with a placeholder comment.
            comment_list = cycle([None])

        r = RegionParser.sky_to_image(zip(self, comment_list),
                                      header)
        shape_list, comment_list = zip(*list(r))
        return ShapeList(shape_list, comment_list=comment_list)

    def get_mpl_patches_texts(self, properties_func=None,
                              text_offset=5.0,
                              origin=1):
        """Convert the shapes into matplotlib patches and text artists.

        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        """
        from .mpl_helper import as_mpl_artists
        patches, txts = as_mpl_artists(self, properties_func,
                                       text_offset,
                                       origin=origin)
        return patches, txts

    def get_filter(self, header=None, origin=1):
        """Get filter.

        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.

        Parameters
        ----------
        header : `astropy.io.fits.Header`
            FITS header
        origin : {0, 1}
            Pixel coordinate origin

        Returns
        -------
        filter : TODO
            Filter object
        """
        from .region_to_filter import as_region_filter

        if header is None:
            # Without a header we can only work with shapes that are
            # already in image coordinates.
            if not self.check_imagecoord():
                raise RuntimeError("the region has non-image coordinate. header is required.")
            reg_in_imagecoord = self
        else:
            reg_in_imagecoord = self.as_imagecoord(header)

        region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
        return region_filter

    def get_mask(self, hdu=None, header=None, shape=None):
        """Create a 2-d mask.

        Parameters
        ----------
        hdu : `astropy.io.fits.ImageHDU`
            FITS image HDU
        header : `~astropy.io.fits.Header`
            FITS header
        shape : tuple
            Image shape

        Returns
        -------
        mask : `numpy.array`
            Boolean mask

        Examples
        --------
        get_mask(hdu=f[0])
        get_mask(shape=(10,10))
        get_mask(header=f[0].header, shape=(10,10))
        """
        # The HDU, when given, supplies whichever of header/shape is missing.
        if hdu and header is None:
            header = hdu.header
        if hdu and shape is None:
            shape = hdu.data.shape

        region_filter = self.get_filter(header=header)
        mask = region_filter.mask(shape)
        return mask

    @staticmethod
    def _reraise_with_filename(e, outfile):
        """Append a note naming *outfile* to an IOError and re-raise it.

        Factors out the error-annotation logic that was previously
        duplicated in both ``write`` error paths.
        """
        cmsg = "Unable to create region file '{:s}'.".format(outfile)
        if e.args:
            e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
        else:
            e.args = (cmsg,)
        raise e

    def write(self, outfile):
        """Write this shape list to a region file.

        Parameters
        ----------
        outfile : str
            File name
        """
        if len(self) < 1:
            # An empty list still produces a (empty) region file, but warn
            # the caller since this is rarely intended.
            print("WARNING: The region list is empty. The region file "
                  "'{:s}' will be empty.".format(outfile))
            try:
                outf = _builtin_open(outfile, 'w')
                outf.close()
                return
            except IOError as e:
                self._reraise_with_filename(e, outfile)

        # Coordinate format of the first shape; shapes in a different
        # format get an explicit per-shape prefix below.
        prev_cs = self[0].coord_format

        outf = None
        try:
            outf = _builtin_open(outfile, 'w')

            attr0 = self[0].attr[1]
            defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                    for a in attr0 if a != 'text'])

            # first line is globals
            outf.write("global {0}\n".format(defaultline))
            # second line must be a coordinate format
            outf.write("{0}\n".format(prev_cs))

            for shape in self:
                shape_attr = '' if prev_cs == shape.coord_format \
                    else shape.coord_format + "; "
                shape_excl = '-' if shape.exclude else ''
                text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
                shape_coords = "(" + ",".join(text_coordlist) + ")"
                shape_comment = " # " + shape.comment if shape.comment else ''

                shape_str = (shape_attr + shape_excl + shape.name +
                             shape_coords + shape_comment)
                outf.write("{0}\n".format(shape_str))
        except IOError as e:
            self._reraise_with_filename(e, outfile)
        finally:
            if outf:
                outf.close()
def parse(region_string):
    """Parse DS9 region string into a ShapeList.

    Parameters
    ----------
    region_string : str
        Region string

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`
    """
    parser = RegionParser()
    # Parse, normalize attributes, then validate/resolve WCS information.
    parsed = parser.convert_attr(parser.parse(region_string))
    checked = _check_wcs(parsed)
    shapes, comments = parser.filter_shape2(checked)
    return ShapeList(shapes, comment_list=comments)
def open(fname):
    """Open, read and parse DS9 region file.

    Parameters
    ----------
    fname : str
        Filename

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`
    """
    # ``open`` shadows the builtin in this module, so the saved
    # ``_builtin_open`` alias is used to read the file itself.
    with _builtin_open(fname) as region_file:
        contents = region_file.read()
    return parse(contents)
def read_region(s):
    """Read region.

    Parameters
    ----------
    s : str
        Region string

    Returns
    -------
    shapes : `ShapeList`
        List of `~pyregion.Shape`
    """
    # Same pipeline as ``parse`` but uses ``filter_shape`` and therefore
    # returns a ShapeList without per-shape comments.
    parser = RegionParser()
    with_attrs = parser.convert_attr(parser.parse(s))
    checked = _check_wcs(with_attrs)
    return ShapeList(parser.filter_shape(checked))
def read_region_as_imagecoord(s, header):
    """Read region as image coordinates.

    Parameters
    ----------
    s : str
        Region string
    header : `~astropy.io.fits.Header`
        FITS header

    Returns
    -------
    shapes : `~pyregion.ShapeList`
        List of `~pyregion.Shape`
    """
    parser = RegionParser()
    # Parse and normalize attributes, validate WCS, then convert the
    # sky coordinates to image coordinates with the supplied header.
    with_attrs = parser.convert_attr(parser.parse(s))
    checked = _check_wcs(with_attrs)
    in_image = parser.sky_to_image(checked, header)
    return ShapeList(parser.filter_shape(in_image))
|
astropy/pyregion
|
pyregion/core.py
|
ShapeList.as_imagecoord
|
python
|
def as_imagecoord(self, header):
comment_list = self._comment_list
if comment_list is None:
comment_list = cycle([None])
r = RegionParser.sky_to_image(zip(self, comment_list),
header)
shape_list, comment_list = zip(*list(r))
return ShapeList(shape_list, comment_list=comment_list)
|
New shape list in image coordinates.
Parameters
----------
header : `~astropy.io.fits.Header`
FITS header
Returns
-------
shape_list : `ShapeList`
New shape list, with coordinates of the each shape
converted to the image coordinate using the given header
information.
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L48-L71
|
[
"def sky_to_image(shape_list, header):\n \"\"\"Converts a `ShapeList` into shapes with coordinates in image coordinates\n\n Parameters\n ----------\n shape_list : `pyregion.ShapeList`\n The ShapeList to convert\n header : `~astropy.io.fits.Header`\n Specifies what WCS transformations to use.\n\n Yields\n -------\n shape, comment : Shape, str\n Shape with image coordinates and the associated comment\n\n Note\n ----\n The comments in the original `ShapeList` are unaltered\n\n \"\"\"\n\n for shape, comment in shape_list:\n if isinstance(shape, Shape) and \\\n (shape.coord_format not in image_like_coordformats):\n\n new_coords = convert_to_imagecoord(shape, header)\n\n l1n = copy.copy(shape)\n\n l1n.coord_list = new_coords\n l1n.coord_format = \"image\"\n yield l1n, comment\n\n elif isinstance(shape, Shape) and shape.coord_format == \"physical\":\n\n if header is None:\n raise RuntimeError(\"Physical coordinate is not known.\")\n\n new_coordlist = convert_physical_to_imagecoord(shape, header)\n\n l1n = copy.copy(shape)\n\n l1n.coord_list = new_coordlist\n l1n.coord_format = \"image\"\n yield l1n, comment\n\n else:\n yield shape, comment\n"
] |
class ShapeList(list):
    """A list of `~pyregion.Shape` objects.

    Parameters
    ----------
    shape_list : list
        List of `pyregion.Shape` objects
    comment_list : list, None
        List of comment strings for each argument
    """

    def __init__(self, shape_list, comment_list=None):
        # A comment list, when supplied, must pair one comment per shape.
        if comment_list is not None:
            if len(comment_list) != len(shape_list):
                err = "Ambiguous number of comments {} for number of shapes {}"
                raise ValueError(err.format(len(comment_list),
                                            len(shape_list)))
        self._comment_list = comment_list
        list.__init__(self, shape_list)

    def __getitem__(self, key):
        # Slicing returns a new ShapeList.
        # NOTE(review): the associated comment list is not carried over
        # to the slice.
        if isinstance(key, slice):
            return ShapeList(list.__getitem__(self, key))
        else:
            return list.__getitem__(self, key)

    def __getslice__(self, i, j):
        # Python 2 compatibility only; Python 3 slicing goes through
        # __getitem__ above.
        return self[max(0, i):max(0, j):]

    def check_imagecoord(self):
        """Are all shapes in image coordinates?

        Returns ``True`` if yes, and ``False`` if not.
        """
        if [s for s in self if s.coord_format != "image"]:
            return False
        else:
            return True

    def get_mpl_patches_texts(self, properties_func=None,
                              text_offset=5.0,
                              origin=1):
        """Convert the shapes into matplotlib patches and text artists.

        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        """
        from .mpl_helper import as_mpl_artists
        patches, txts = as_mpl_artists(self, properties_func,
                                       text_offset,
                                       origin=origin)
        return patches, txts

    def get_filter(self, header=None, origin=1):
        """Get filter.

        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.

        Parameters
        ----------
        header : `astropy.io.fits.Header`
            FITS header
        origin : {0, 1}
            Pixel coordinate origin

        Returns
        -------
        filter : TODO
            Filter object
        """
        from .region_to_filter import as_region_filter
        # Without a header we can only handle shapes already in image
        # coordinates.
        if header is None:
            if not self.check_imagecoord():
                raise RuntimeError("the region has non-image coordinate. header is required.")
            reg_in_imagecoord = self
        else:
            reg_in_imagecoord = self.as_imagecoord(header)
        region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
        return region_filter

    def get_mask(self, hdu=None, header=None, shape=None):
        """Create a 2-d mask.

        Parameters
        ----------
        hdu : `astropy.io.fits.ImageHDU`
            FITS image HDU
        header : `~astropy.io.fits.Header`
            FITS header
        shape : tuple
            Image shape

        Returns
        -------
        mask : `numpy.array`
            Boolean mask

        Examples
        --------
        get_mask(hdu=f[0])
        get_mask(shape=(10,10))
        get_mask(header=f[0].header, shape=(10,10))
        """
        # The HDU, when given, supplies whichever of header/shape is missing.
        if hdu and header is None:
            header = hdu.header
        if hdu and shape is None:
            shape = hdu.data.shape
        region_filter = self.get_filter(header=header)
        mask = region_filter.mask(shape)
        return mask

    def write(self, outfile):
        """Write this shape list to a region file.

        Parameters
        ----------
        outfile : str
            File name
        """
        # Empty list: warn, create an empty file, and return.
        if len(self) < 1:
            print("WARNING: The region list is empty. The region file "
                  "'{:s}' will be empty.".format(outfile))
            try:
                outf = _builtin_open(outfile, 'w')
                outf.close()
                return
            except IOError as e:
                # Annotate the error with the target filename before
                # re-raising.
                cmsg = "Unable to create region file '{:s}'.".format(outfile)
                if e.args:
                    e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
                else:
                    e.args = (cmsg,)
                raise e
        # Coordinate format of the first shape; shapes in a different
        # format get an explicit per-shape prefix below.
        prev_cs = self[0].coord_format
        outf = None
        try:
            outf = _builtin_open(outfile, 'w')
            attr0 = self[0].attr[1]
            defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                    for a in attr0 if a != 'text'])
            # first line is globals
            outf.write("global {0}\n".format(defaultline))
            # second line must be a coordinate format
            outf.write("{0}\n".format(prev_cs))
            for shape in self:
                shape_attr = '' if prev_cs == shape.coord_format \
                    else shape.coord_format + "; "
                shape_excl = '-' if shape.exclude else ''
                text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
                shape_coords = "(" + ",".join(text_coordlist) + ")"
                shape_comment = " # " + shape.comment if shape.comment else ''
                shape_str = (shape_attr + shape_excl + shape.name +
                             shape_coords + shape_comment)
                outf.write("{0}\n".format(shape_str))
        except IOError as e:
            # Same filename annotation as the empty-list path above.
            cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
            if e.args:
                e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
            else:
                e.args = (cmsg,)
            raise e
        finally:
            if outf:
                outf.close()
|
astropy/pyregion
|
pyregion/core.py
|
ShapeList.get_mpl_patches_texts
|
python
|
def get_mpl_patches_texts(self, properties_func=None,
text_offset=5.0,
origin=1):
from .mpl_helper import as_mpl_artists
patches, txts = as_mpl_artists(self, properties_func,
text_offset,
origin=origin)
return patches, txts
|
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L73-L89
|
[
"def as_mpl_artists(shape_list,\n properties_func=None,\n text_offset=5.0, origin=1):\n \"\"\"\n Converts a region list to a list of patches and a list of artists.\n\n\n Optional Keywords:\n [ text_offset ] - If there is text associated with the regions, add\n some vertical offset (in pixels) to the text so that it doesn't overlap\n with the regions.\n\n Often, the regions files implicitly assume the lower-left corner\n of the image as a coordinate (1,1). However, the python convetion\n is that the array index starts from 0. By default (origin = 1),\n coordinates of the returned mpl artists have coordinate shifted by\n (1, 1). If you do not want this shift, set origin=0.\n \"\"\"\n\n patch_list = []\n artist_list = []\n\n if properties_func is None:\n properties_func = properties_func_default\n\n # properties for continued(? multiline?) regions\n saved_attrs = None\n\n for shape in shape_list:\n\n patches = []\n\n if saved_attrs is None:\n _attrs = [], {}\n else:\n _attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])\n\n kwargs = properties_func(shape, _attrs)\n\n if shape.name == \"composite\":\n saved_attrs = shape.attr\n continue\n\n if saved_attrs is None and shape.continued:\n saved_attrs = shape.attr\n # elif (shape.name in shape.attr[1]):\n # if (shape.attr[1][shape.name] != \"ignore\"):\n # saved_attrs = shape.attr\n\n if not shape.continued:\n saved_attrs = None\n\n # text associated with the shape\n txt = shape.attr[1].get(\"text\")\n\n if shape.name == \"polygon\":\n xy = np.array(shape.coord_list)\n xy.shape = -1, 2\n\n # -1 for change origin to 0,0\n patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]\n\n elif shape.name == \"rotbox\" or shape.name == \"box\":\n xc, yc, w, h, rot = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n _box = np.array([[-w / 2., -h / 2.],\n [-w / 2., h / 2.],\n [w / 2., h / 2.],\n [w / 2., -h / 2.]])\n box = _box + [xc, yc]\n rotbox = rotated_polygon(box, xc, yc, 
rot)\n patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]\n\n elif shape.name == \"ellipse\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n angle = shape.coord_list[-1]\n\n maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]\n\n patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,\n angle=angle, **kwargs)\n for maj, min in zip(maj_list, min_list)]\n\n elif shape.name == \"annulus\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n r_list = shape.coord_list[2:]\n\n patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]\n\n elif shape.name == \"circle\":\n xc, yc, major = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]\n\n elif shape.name == \"panda\":\n xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,\n theta1=a1, theta2=a2, **kwargs)\n for rr in np.linspace(r1, r2, rn + 1)]\n\n for aa in np.linspace(a1, a2, an + 1):\n xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc\n yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc\n p = Path(np.transpose([xx, yy]))\n patches.append(mpatches.PathPatch(p, **kwargs))\n\n elif shape.name == \"pie\":\n xc, yc, r1, r2, a1, a2 = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,\n theta1=a1, theta2=a2, **kwargs)\n for rr in [r1, r2]]\n\n for aa in [a1, a2]:\n xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc\n yy = np.array([r1, r2]) * np.sin(aa / 180. 
* np.pi) + yc\n p = Path(np.transpose([xx, yy]))\n patches.append(mpatches.PathPatch(p, **kwargs))\n\n elif shape.name == \"epanda\":\n xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n # mpl takes angle a1, a2 as angle as in circle before\n # transformation to ellipse.\n\n x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12\n x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12\n\n a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.\n\n patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,\n angle=angle, theta1=a1, theta2=a2,\n **kwargs)\n for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),\n np.linspace(r12, r22, rn + 1))]\n\n for aa in np.linspace(a1, a2, an + 1):\n xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)\n yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)\n p = Path(np.transpose([xx, yy]))\n tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)\n p2 = tr.transform_path(p)\n patches.append(mpatches.PathPatch(p2, **kwargs))\n\n elif shape.name == \"text\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n if txt:\n _t = _get_text(txt, xc, yc, 0, 0, **kwargs)\n artist_list.append(_t)\n\n elif shape.name == \"point\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n artist_list.append(Line2D([xc], [yc],\n **kwargs))\n\n if txt:\n textshape = copy.copy(shape)\n textshape.name = \"text\"\n textkwargs = properties_func(textshape, _attrs)\n _t = _get_text(txt, xc, yc, 0, text_offset,\n va=\"bottom\",\n **textkwargs)\n artist_list.append(_t)\n\n elif shape.name in [\"line\", \"vector\"]:\n if shape.name == \"line\":\n x1, y1, x2, y2 = shape.coord_list[:4]\n # -1 for change origin to 0,0\n x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin\n\n a1, a2 = shape.attr[1].get(\"line\", \"0 
0\").strip().split()[:2]\n\n arrowstyle = \"-\"\n if int(a1):\n arrowstyle = \"<\" + arrowstyle\n if int(a2):\n arrowstyle = arrowstyle + \">\"\n\n else: # shape.name == \"vector\"\n x1, y1, l, a = shape.coord_list[:4]\n # -1 for change origin to 0,0\n x1, y1 = x1 - origin, y1 - origin\n x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)\n v1 = int(shape.attr[1].get(\"vector\", \"0\").strip())\n\n if v1:\n arrowstyle = \"->\"\n else:\n arrowstyle = \"-\"\n\n patches = [mpatches.FancyArrowPatch(posA=(x1, y1),\n posB=(x2, y2),\n arrowstyle=arrowstyle,\n arrow_transmuter=None,\n connectionstyle=\"arc3\",\n patchA=None, patchB=None,\n shrinkA=0, shrinkB=0,\n connector=None,\n **kwargs)]\n\n else:\n warnings.warn(\"'as_mpl_artists' does not know how to convert {0} \"\n \"to mpl artist\".format(shape.name))\n\n patch_list.extend(patches)\n\n if txt and patches:\n # the text associated with a shape uses different\n # matplotlib keywords than the shape itself for, e.g.,\n # color\n textshape = copy.copy(shape)\n textshape.name = \"text\"\n textkwargs = properties_func(textshape, _attrs)\n\n # calculate the text position\n _bb = [p.get_window_extent() for p in patches]\n\n # this is to work around backward-incompatible change made\n # in matplotlib 1.2. This change is later reverted so only\n # some versions are affected. With affected version of\n # matplotlib, get_window_extent method calls get_transform\n # method which sets the _transformSet to True, which is\n # not desired.\n for p in patches:\n p._transformSet = False\n\n _bbox = Bbox.union(_bb)\n x0, y0, x1, y1 = _bbox.extents\n xc = .5 * (x0 + x1)\n\n _t = _get_text(txt, xc, y1, 0, text_offset,\n va=\"bottom\",\n **textkwargs)\n artist_list.append(_t)\n\n return patch_list, artist_list\n"
] |
class ShapeList(list):
    """A list of `~pyregion.Shape` objects.

    Parameters
    ----------
    shape_list : list
        List of `pyregion.Shape` objects
    comment_list : list, None
        List of comment strings for each argument
    """

    def __init__(self, shape_list, comment_list=None):
        # A comment list, when supplied, must pair one comment per shape.
        if comment_list is not None:
            if len(comment_list) != len(shape_list):
                err = "Ambiguous number of comments {} for number of shapes {}"
                raise ValueError(err.format(len(comment_list),
                                            len(shape_list)))
        self._comment_list = comment_list
        list.__init__(self, shape_list)

    def __getitem__(self, key):
        # Slicing returns a new ShapeList.
        # NOTE(review): the associated comment list is not carried over
        # to the slice.
        if isinstance(key, slice):
            return ShapeList(list.__getitem__(self, key))
        else:
            return list.__getitem__(self, key)

    def __getslice__(self, i, j):
        # Python 2 compatibility only; Python 3 slicing goes through
        # __getitem__ above.
        return self[max(0, i):max(0, j):]

    def check_imagecoord(self):
        """Are all shapes in image coordinates?

        Returns ``True`` if yes, and ``False`` if not.
        """
        if [s for s in self if s.coord_format != "image"]:
            return False
        else:
            return True

    def as_imagecoord(self, header):
        """New shape list in image coordinates.

        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            FITS header

        Returns
        -------
        shape_list : `ShapeList`
            New shape list, with coordinates of each shape
            converted to the image coordinate using the given header
            information.
        """
        comment_list = self._comment_list
        if comment_list is None:
            # Pair every shape with a placeholder comment.
            comment_list = cycle([None])
        r = RegionParser.sky_to_image(zip(self, comment_list),
                                      header)
        shape_list, comment_list = zip(*list(r))
        return ShapeList(shape_list, comment_list=comment_list)

    def get_filter(self, header=None, origin=1):
        """Get filter.

        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.

        Parameters
        ----------
        header : `astropy.io.fits.Header`
            FITS header
        origin : {0, 1}
            Pixel coordinate origin

        Returns
        -------
        filter : TODO
            Filter object
        """
        from .region_to_filter import as_region_filter
        # Without a header we can only handle shapes already in image
        # coordinates.
        if header is None:
            if not self.check_imagecoord():
                raise RuntimeError("the region has non-image coordinate. header is required.")
            reg_in_imagecoord = self
        else:
            reg_in_imagecoord = self.as_imagecoord(header)
        region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
        return region_filter

    def get_mask(self, hdu=None, header=None, shape=None):
        """Create a 2-d mask.

        Parameters
        ----------
        hdu : `astropy.io.fits.ImageHDU`
            FITS image HDU
        header : `~astropy.io.fits.Header`
            FITS header
        shape : tuple
            Image shape

        Returns
        -------
        mask : `numpy.array`
            Boolean mask

        Examples
        --------
        get_mask(hdu=f[0])
        get_mask(shape=(10,10))
        get_mask(header=f[0].header, shape=(10,10))
        """
        # The HDU, when given, supplies whichever of header/shape is missing.
        if hdu and header is None:
            header = hdu.header
        if hdu and shape is None:
            shape = hdu.data.shape
        region_filter = self.get_filter(header=header)
        mask = region_filter.mask(shape)
        return mask

    def write(self, outfile):
        """Write this shape list to a region file.

        Parameters
        ----------
        outfile : str
            File name
        """
        # Empty list: warn, create an empty file, and return.
        if len(self) < 1:
            print("WARNING: The region list is empty. The region file "
                  "'{:s}' will be empty.".format(outfile))
            try:
                outf = _builtin_open(outfile, 'w')
                outf.close()
                return
            except IOError as e:
                # Annotate the error with the target filename before
                # re-raising.
                cmsg = "Unable to create region file '{:s}'.".format(outfile)
                if e.args:
                    e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
                else:
                    e.args = (cmsg,)
                raise e
        # Coordinate format of the first shape; shapes in a different
        # format get an explicit per-shape prefix below.
        prev_cs = self[0].coord_format
        outf = None
        try:
            outf = _builtin_open(outfile, 'w')
            attr0 = self[0].attr[1]
            defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                    for a in attr0 if a != 'text'])
            # first line is globals
            outf.write("global {0}\n".format(defaultline))
            # second line must be a coordinate format
            outf.write("{0}\n".format(prev_cs))
            for shape in self:
                shape_attr = '' if prev_cs == shape.coord_format \
                    else shape.coord_format + "; "
                shape_excl = '-' if shape.exclude else ''
                text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
                shape_coords = "(" + ",".join(text_coordlist) + ")"
                shape_comment = " # " + shape.comment if shape.comment else ''
                shape_str = (shape_attr + shape_excl + shape.name +
                             shape_coords + shape_comment)
                outf.write("{0}\n".format(shape_str))
        except IOError as e:
            # Same filename annotation as the empty-list path above.
            cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
            if e.args:
                e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
            else:
                e.args = (cmsg,)
            raise e
        finally:
            if outf:
                outf.close()
|
astropy/pyregion
|
pyregion/core.py
|
ShapeList.get_filter
|
python
|
def get_filter(self, header=None, origin=1):
from .region_to_filter import as_region_filter
if header is None:
if not self.check_imagecoord():
raise RuntimeError("the region has non-image coordinate. header is required.")
reg_in_imagecoord = self
else:
reg_in_imagecoord = self.as_imagecoord(header)
region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
return region_filter
|
Get filter.
Often, the regions files implicitly assume the lower-left
corner of the image as a coordinate (1,1). However, the python
convetion is that the array index starts from 0. By default
(``origin=1``), coordinates of the returned mpl artists have
coordinate shifted by (1, 1). If you do not want this shift,
use ``origin=0``.
Parameters
----------
header : `astropy.io.fits.Header`
FITS header
origin : {0, 1}
Pixel coordinate origin
Returns
-------
filter : TODO
Filter object
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L91-L124
|
[
"def as_region_filter(shape_list, origin=1):\n \"\"\"\n Often, the regions files implicitly assume the lower-left corner\n of the image as a coordinate (1,1). However, the python convetion\n is that the array index starts from 0. By default (origin = 1),\n coordinates of the returned mpl artists have coordinate shifted by\n (1, 1). If you do not want this shift, use origin=0.\n \"\"\"\n\n filter_list = []\n for shape in shape_list:\n\n if shape.name == \"composite\":\n continue\n\n if shape.name == \"polygon\":\n xy = np.array(shape.coord_list) - origin\n f = region_filter.Polygon(xy[::2], xy[1::2])\n\n elif shape.name == \"rotbox\" or shape.name == \"box\":\n xc, yc, w, h, rot = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f = region_filter.Rotated(region_filter.Box(xc, yc, w, h),\n rot, xc, yc)\n\n elif shape.name == \"ellipse\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n angle = shape.coord_list[-1]\n\n maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]\n\n if len(maj_list) > 1:\n w1, h1 = max(maj_list), max(min_list)\n w2, h2 = min(maj_list), min(min_list)\n\n f1 = region_filter.Ellipse(xc, yc, w1, h1) \\\n & ~region_filter.Ellipse(xc, yc, w2, h2)\n f = region_filter.Rotated(f1, angle, xc, yc)\n else:\n w, h = maj_list[0], min_list[0]\n f = region_filter.Rotated(region_filter.Ellipse(xc, yc, w, h),\n angle, xc, yc)\n\n elif shape.name == \"annulus\":\n xc, yc = shape.coord_list[:2]\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n r_list = shape.coord_list[2:]\n\n r1 = max(r_list)\n r2 = min(r_list)\n\n f = region_filter.Circle(xc, yc, r1) & ~region_filter.Circle(xc, yc, r2)\n\n elif shape.name == \"circle\":\n xc, yc, r = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f = region_filter.Circle(xc, yc, r)\n\n elif shape.name == \"panda\":\n xc, yc, a1, a2, an, r1, r2, rn = 
shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)\n f = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n\n elif shape.name == \"pie\":\n xc, yc, r1, r2, a1, a2 = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Circle(xc, yc, r2) & ~region_filter.Circle(xc, yc, r1)\n f = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n\n elif shape.name == \"epanda\":\n xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Ellipse(xc, yc, r21, r22) & ~region_filter.Ellipse(xc, yc, r11, r12)\n f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n f = region_filter.Rotated(f2, angle, xc, yc)\n # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)\n\n elif shape.name == \"bpanda\":\n xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list\n # -1 for change origin to 0,0\n xc, yc = xc - origin, yc - origin\n\n f1 = region_filter.Box(xc, yc, r21, r22) & ~region_filter.Box(xc, yc, r11, r12)\n f2 = f1 & region_filter.AngleRange(xc, yc, a1, a2)\n f = region_filter.Rotated(f2, angle, xc, yc)\n # f = f2 & region_filter.AngleRange(xc, yc, a1, a2)\n\n else:\n warnings.warn(\"'as_region_filter' does not know how to convert {0}\"\n \" to a region filter.\".format(shape.name))\n continue\n\n if shape.exclude:\n filter_list = [region_filter.RegionOrList(*filter_list) & ~f]\n else:\n filter_list.append(f)\n\n return region_filter.RegionOrList(*filter_list)\n",
"def check_imagecoord(self):\n \"\"\"Are all shapes in image coordinates?\n\n Returns ``True`` if yes, and ``False`` if not.\n \"\"\"\n if [s for s in self if s.coord_format != \"image\"]:\n return False\n else:\n return True\n",
"def as_imagecoord(self, header):\n \"\"\"New shape list in image coordinates.\n\n Parameters\n ----------\n header : `~astropy.io.fits.Header`\n FITS header\n\n Returns\n -------\n shape_list : `ShapeList`\n New shape list, with coordinates of the each shape\n converted to the image coordinate using the given header\n information.\n \"\"\"\n\n comment_list = self._comment_list\n if comment_list is None:\n comment_list = cycle([None])\n\n r = RegionParser.sky_to_image(zip(self, comment_list),\n header)\n shape_list, comment_list = zip(*list(r))\n return ShapeList(shape_list, comment_list=comment_list)\n"
] |
class ShapeList(list):
    """A list of `~pyregion.Shape` objects.
    Parameters
    ----------
    shape_list : list
        List of `pyregion.Shape` objects
    comment_list : list, None
        List of comment strings for each argument
    """
    def __init__(self, shape_list, comment_list=None):
        # When given, the comment list must pair one-to-one with the shapes.
        if comment_list is not None:
            if len(comment_list) != len(shape_list):
                err = "Ambiguous number of comments {} for number of shapes {}"
                raise ValueError(err.format(len(comment_list),
                                            len(shape_list)))
        self._comment_list = comment_list
        list.__init__(self, shape_list)
    def __getitem__(self, key):
        # A slice yields a new ShapeList (comments are not carried over);
        # a plain index yields the bare Shape.
        if isinstance(key, slice):
            return ShapeList(list.__getitem__(self, key))
        else:
            return list.__getitem__(self, key)
    def __getslice__(self, i, j):
        # Python 2 slicing protocol; delegates to __getitem__ above.
        return self[max(0, i):max(0, j):]
    def check_imagecoord(self):
        """Are all shapes in image coordinates?
        Returns ``True`` if yes, and ``False`` if not.
        """
        if [s for s in self if s.coord_format != "image"]:
            return False
        else:
            return True
    def as_imagecoord(self, header):
        """New shape list in image coordinates.
        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            FITS header
        Returns
        -------
        shape_list : `ShapeList`
            New shape list, with coordinates of each shape
            converted to the image coordinate using the given header
            information.
        """
        comment_list = self._comment_list
        if comment_list is None:
            # No stored comments: pair every shape with None.
            comment_list = cycle([None])
        r = RegionParser.sky_to_image(zip(self, comment_list),
                                      header)
        shape_list, comment_list = zip(*list(r))
        return ShapeList(shape_list, comment_list=comment_list)
    def get_mpl_patches_texts(self, properties_func=None,
                              text_offset=5.0,
                              origin=1):
        """
        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        """
        from .mpl_helper import as_mpl_artists
        patches, txts = as_mpl_artists(self, properties_func,
                                       text_offset,
                                       origin=origin)
        return patches, txts
    def get_mask(self, hdu=None, header=None, shape=None):
        """Create a 2-d mask.
        Parameters
        ----------
        hdu : `astropy.io.fits.ImageHDU`
            FITS image HDU
        header : `~astropy.io.fits.Header`
            FITS header
        shape : tuple
            Image shape
        Returns
        -------
        mask : `numpy.array`
            Boolean mask
        Examples
        --------
        get_mask(hdu=f[0])
        get_mask(shape=(10,10))
        get_mask(header=f[0].header, shape=(10,10))
        """
        # Fall back to the HDU's own header and data shape when not given.
        if hdu and header is None:
            header = hdu.header
        if hdu and shape is None:
            shape = hdu.data.shape
        region_filter = self.get_filter(header=header)
        mask = region_filter.mask(shape)
        return mask
    def write(self, outfile):
        """Write this shape list to a region file.
        Parameters
        ----------
        outfile : str
            File name
        """
        if len(self) < 1:
            print("WARNING: The region list is empty. The region file "
                  "'{:s}' will be empty.".format(outfile))
            # Still create (truncate) the file so the caller ends up with
            # a valid, empty region file.
            try:
                outf = _builtin_open(outfile, 'w')
                outf.close()
                return
            except IOError as e:
                cmsg = "Unable to create region file '{:s}'.".format(outfile)
                # Prepend context to the original IOError message.
                if e.args:
                    e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
                else:
                    e.args = (cmsg,)
                raise e
        prev_cs = self[0].coord_format
        outf = None
        try:
            outf = _builtin_open(outfile, 'w')
            attr0 = self[0].attr[1]
            defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                    for a in attr0 if a != 'text'])
            # first line is globals
            outf.write("global {0}\n".format(defaultline))
            # second line must be a coordinate format
            outf.write("{0}\n".format(prev_cs))
            for shape in self:
                # Only emit a coordinate-system prefix when it differs from
                # the file-level format written above.
                shape_attr = '' if prev_cs == shape.coord_format \
                    else shape.coord_format + "; "
                shape_excl = '-' if shape.exclude else ''
                text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
                shape_coords = "(" + ",".join(text_coordlist) + ")"
                shape_comment = " # " + shape.comment if shape.comment else ''
                shape_str = (shape_attr + shape_excl + shape.name +
                             shape_coords + shape_comment)
                outf.write("{0}\n".format(shape_str))
        except IOError as e:
            cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
            if e.args:
                e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
            else:
                e.args = (cmsg,)
            raise e
        finally:
            if outf:
                outf.close()
|
astropy/pyregion
|
pyregion/core.py
|
ShapeList.get_mask
|
python
|
def get_mask(self, hdu=None, header=None, shape=None):
    """Create a 2-d boolean mask selecting pixels covered by the regions.

    Accepts either an ``hdu`` (whose header and data shape supply defaults)
    or an explicit ``header`` plus output ``shape``.
    """
    # Fall back to the HDU's own header and data shape when not given.
    if hdu and header is None:
        header = hdu.header
    if hdu and shape is None:
        shape = hdu.data.shape
    region_filter = self.get_filter(header=header)
    mask = region_filter.mask(shape)
    return mask
|
Create a 2-d mask.
Parameters
----------
hdu : `astropy.io.fits.ImageHDU`
FITS image HDU
header : `~astropy.io.fits.Header`
FITS header
shape : tuple
Image shape
Returns
-------
mask : `numpy.array`
Boolean mask
Examples
--------
get_mask(hdu=f[0])
get_mask(shape=(10,10))
get_mask(header=f[0].header, shape=(10,10))
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L126-L158
|
[
"def get_filter(self, header=None, origin=1):\n \"\"\"Get filter.\n Often, the regions files implicitly assume the lower-left\n corner of the image as a coordinate (1,1). However, the python\n convetion is that the array index starts from 0. By default\n (``origin=1``), coordinates of the returned mpl artists have\n coordinate shifted by (1, 1). If you do not want this shift,\n use ``origin=0``.\n\n Parameters\n ----------\n header : `astropy.io.fits.Header`\n FITS header\n origin : {0, 1}\n Pixel coordinate origin\n\n Returns\n -------\n filter : TODO\n Filter object\n \"\"\"\n\n from .region_to_filter import as_region_filter\n\n if header is None:\n if not self.check_imagecoord():\n raise RuntimeError(\"the region has non-image coordinate. header is required.\")\n reg_in_imagecoord = self\n else:\n reg_in_imagecoord = self.as_imagecoord(header)\n\n region_filter = as_region_filter(reg_in_imagecoord, origin=origin)\n\n return region_filter\n"
] |
class ShapeList(list):
    """A list of `~pyregion.Shape` objects.
    Parameters
    ----------
    shape_list : list
        List of `pyregion.Shape` objects
    comment_list : list, None
        List of comment strings for each argument
    """
    def __init__(self, shape_list, comment_list=None):
        # When given, the comment list must pair one-to-one with the shapes.
        if comment_list is not None:
            if len(comment_list) != len(shape_list):
                err = "Ambiguous number of comments {} for number of shapes {}"
                raise ValueError(err.format(len(comment_list),
                                            len(shape_list)))
        self._comment_list = comment_list
        list.__init__(self, shape_list)
    def __getitem__(self, key):
        # A slice yields a new ShapeList (comments are not carried over);
        # a plain index yields the bare Shape.
        if isinstance(key, slice):
            return ShapeList(list.__getitem__(self, key))
        else:
            return list.__getitem__(self, key)
    def __getslice__(self, i, j):
        # Python 2 slicing protocol; delegates to __getitem__ above.
        return self[max(0, i):max(0, j):]
    def check_imagecoord(self):
        """Are all shapes in image coordinates?
        Returns ``True`` if yes, and ``False`` if not.
        """
        if [s for s in self if s.coord_format != "image"]:
            return False
        else:
            return True
    def as_imagecoord(self, header):
        """New shape list in image coordinates.
        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            FITS header
        Returns
        -------
        shape_list : `ShapeList`
            New shape list, with coordinates of each shape
            converted to the image coordinate using the given header
            information.
        """
        comment_list = self._comment_list
        if comment_list is None:
            # No stored comments: pair every shape with None.
            comment_list = cycle([None])
        r = RegionParser.sky_to_image(zip(self, comment_list),
                                      header)
        shape_list, comment_list = zip(*list(r))
        return ShapeList(shape_list, comment_list=comment_list)
    def get_mpl_patches_texts(self, properties_func=None,
                              text_offset=5.0,
                              origin=1):
        """
        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        """
        from .mpl_helper import as_mpl_artists
        patches, txts = as_mpl_artists(self, properties_func,
                                       text_offset,
                                       origin=origin)
        return patches, txts
    def get_filter(self, header=None, origin=1):
        """Get filter.
        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        Parameters
        ----------
        header : `astropy.io.fits.Header`
            FITS header
        origin : {0, 1}
            Pixel coordinate origin
        Returns
        -------
        filter : RegionOrList
            Combined region filter (output of ``as_region_filter``)
        """
        from .region_to_filter import as_region_filter
        # Without a header we can only work on shapes that are already
        # in image coordinates.
        if header is None:
            if not self.check_imagecoord():
                raise RuntimeError("the region has non-image coordinate. header is required.")
            reg_in_imagecoord = self
        else:
            reg_in_imagecoord = self.as_imagecoord(header)
        region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
        return region_filter
    def write(self, outfile):
        """Write this shape list to a region file.
        Parameters
        ----------
        outfile : str
            File name
        """
        if len(self) < 1:
            print("WARNING: The region list is empty. The region file "
                  "'{:s}' will be empty.".format(outfile))
            # Still create (truncate) the file so the caller ends up with
            # a valid, empty region file.
            try:
                outf = _builtin_open(outfile, 'w')
                outf.close()
                return
            except IOError as e:
                cmsg = "Unable to create region file '{:s}'.".format(outfile)
                # Prepend context to the original IOError message.
                if e.args:
                    e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
                else:
                    e.args = (cmsg,)
                raise e
        prev_cs = self[0].coord_format
        outf = None
        try:
            outf = _builtin_open(outfile, 'w')
            attr0 = self[0].attr[1]
            defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                    for a in attr0 if a != 'text'])
            # first line is globals
            outf.write("global {0}\n".format(defaultline))
            # second line must be a coordinate format
            outf.write("{0}\n".format(prev_cs))
            for shape in self:
                # Only emit a coordinate-system prefix when it differs from
                # the file-level format written above.
                shape_attr = '' if prev_cs == shape.coord_format \
                    else shape.coord_format + "; "
                shape_excl = '-' if shape.exclude else ''
                text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
                shape_coords = "(" + ",".join(text_coordlist) + ")"
                shape_comment = " # " + shape.comment if shape.comment else ''
                shape_str = (shape_attr + shape_excl + shape.name +
                             shape_coords + shape_comment)
                outf.write("{0}\n".format(shape_str))
        except IOError as e:
            cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
            if e.args:
                e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
            else:
                e.args = (cmsg,)
            raise e
        finally:
            if outf:
                outf.close()
|
astropy/pyregion
|
pyregion/core.py
|
ShapeList.write
|
python
|
def write(self, outfile):
    """Write this shape list to a ds9 region file.

    Line layout: the global attributes first, then the coordinate format,
    then one line per shape.
    """
    if len(self) < 1:
        print("WARNING: The region list is empty. The region file "
              "'{:s}' will be empty.".format(outfile))
        # Still create (truncate) the file so the caller ends up with a
        # valid, empty region file.
        try:
            outf = _builtin_open(outfile, 'w')
            outf.close()
            return
        except IOError as e:
            cmsg = "Unable to create region file '{:s}'.".format(outfile)
            # Prepend context to the original IOError message.
            if e.args:
                e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
            else:
                e.args = (cmsg,)
            raise e
    prev_cs = self[0].coord_format
    outf = None
    try:
        outf = _builtin_open(outfile, 'w')
        attr0 = self[0].attr[1]
        defaultline = " ".join(["{:s}={:s}".format(a, attr0[a])
                                for a in attr0 if a != 'text'])
        # first line is globals
        outf.write("global {0}\n".format(defaultline))
        # second line must be a coordinate format
        outf.write("{0}\n".format(prev_cs))
        for shape in self:
            # Only prefix a coordinate system when it differs from the
            # file-level format written above.
            shape_attr = '' if prev_cs == shape.coord_format \
                else shape.coord_format + "; "
            shape_excl = '-' if shape.exclude else ''
            text_coordlist = ["{:f}".format(f) for f in shape.coord_list]
            shape_coords = "(" + ",".join(text_coordlist) + ")"
            shape_comment = " # " + shape.comment if shape.comment else ''
            shape_str = (shape_attr + shape_excl + shape.name +
                         shape_coords + shape_comment)
            outf.write("{0}\n".format(shape_str))
    except IOError as e:
        cmsg = "Unable to create region file \'{:s}\'.".format(outfile)
        if e.args:
            e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:]
        else:
            e.args = (cmsg,)
        raise e
    finally:
        if outf:
            outf.close()
|
Write this shape list to a region file.
Parameters
----------
outfile : str
File name
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/core.py#L160-L220
| null |
class ShapeList(list):
    """A list of `~pyregion.Shape` objects.
    Parameters
    ----------
    shape_list : list
        List of `pyregion.Shape` objects
    comment_list : list, None
        List of comment strings for each argument
    """
    def __init__(self, shape_list, comment_list=None):
        # When given, the comment list must pair one-to-one with the shapes.
        if comment_list is not None:
            if len(comment_list) != len(shape_list):
                err = "Ambiguous number of comments {} for number of shapes {}"
                raise ValueError(err.format(len(comment_list),
                                            len(shape_list)))
        self._comment_list = comment_list
        list.__init__(self, shape_list)
    def __getitem__(self, key):
        # A slice yields a new ShapeList (comments are not carried over);
        # a plain index yields the bare Shape.
        if isinstance(key, slice):
            return ShapeList(list.__getitem__(self, key))
        else:
            return list.__getitem__(self, key)
    def __getslice__(self, i, j):
        # Python 2 slicing protocol; delegates to __getitem__ above.
        return self[max(0, i):max(0, j):]
    def check_imagecoord(self):
        """Are all shapes in image coordinates?
        Returns ``True`` if yes, and ``False`` if not.
        """
        if [s for s in self if s.coord_format != "image"]:
            return False
        else:
            return True
    def as_imagecoord(self, header):
        """New shape list in image coordinates.
        Parameters
        ----------
        header : `~astropy.io.fits.Header`
            FITS header
        Returns
        -------
        shape_list : `ShapeList`
            New shape list, with coordinates of each shape
            converted to the image coordinate using the given header
            information.
        """
        comment_list = self._comment_list
        if comment_list is None:
            # No stored comments: pair every shape with None.
            comment_list = cycle([None])
        r = RegionParser.sky_to_image(zip(self, comment_list),
                                      header)
        shape_list, comment_list = zip(*list(r))
        return ShapeList(shape_list, comment_list=comment_list)
    def get_mpl_patches_texts(self, properties_func=None,
                              text_offset=5.0,
                              origin=1):
        """
        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        """
        from .mpl_helper import as_mpl_artists
        patches, txts = as_mpl_artists(self, properties_func,
                                       text_offset,
                                       origin=origin)
        return patches, txts
    def get_filter(self, header=None, origin=1):
        """Get filter.
        Often, the regions files implicitly assume the lower-left
        corner of the image as a coordinate (1,1). However, the python
        convention is that the array index starts from 0. By default
        (``origin=1``), coordinates of the returned mpl artists have
        coordinate shifted by (1, 1). If you do not want this shift,
        use ``origin=0``.
        Parameters
        ----------
        header : `astropy.io.fits.Header`
            FITS header
        origin : {0, 1}
            Pixel coordinate origin
        Returns
        -------
        filter : RegionOrList
            Combined region filter (output of ``as_region_filter``)
        """
        from .region_to_filter import as_region_filter
        # Without a header we can only work on shapes that are already
        # in image coordinates.
        if header is None:
            if not self.check_imagecoord():
                raise RuntimeError("the region has non-image coordinate. header is required.")
            reg_in_imagecoord = self
        else:
            reg_in_imagecoord = self.as_imagecoord(header)
        region_filter = as_region_filter(reg_in_imagecoord, origin=origin)
        return region_filter
    def get_mask(self, hdu=None, header=None, shape=None):
        """Create a 2-d mask.
        Parameters
        ----------
        hdu : `astropy.io.fits.ImageHDU`
            FITS image HDU
        header : `~astropy.io.fits.Header`
            FITS header
        shape : tuple
            Image shape
        Returns
        -------
        mask : `numpy.array`
            Boolean mask
        Examples
        --------
        get_mask(hdu=f[0])
        get_mask(shape=(10,10))
        get_mask(header=f[0].header, shape=(10,10))
        """
        # Fall back to the HDU's own header and data shape when not given.
        if hdu and header is None:
            header = hdu.header
        if hdu and shape is None:
            shape = hdu.data.shape
        region_filter = self.get_filter(header=header)
        mask = region_filter.mask(shape)
        return mask
|
astropy/pyregion
|
pyregion/ds9_attr_parser.py
|
get_attr
|
python
|
def get_attr(attr_list, global_attrs):
    """Merge parsed local attributes into a copy of the global attributes.

    Parameters
    ----------
    attr_list : list
        A list of (keyword, value) tuple pairs; single-element tuples are
        bare flags, longer tuples carry the value(s).
    global_attrs : tuple(list, dict)
        Global attributes which the local attributes update.

    Returns
    -------
    tuple(list, dict)
        The merged (flags, key/value) attribute pair.
    """
    flags = []
    keyvals = {}
    for kv in attr_list:
        key = kv[0]
        if len(kv) == 1:
            # Bare keyword with no value: a flag.
            flags.append(key)
            continue
        val = kv[1] if len(kv) == 2 else kv[1:]
        if key == 'tag':
            # Multiple tags accumulate into a set.
            keyvals.setdefault('tag', set()).add(val)
        else:
            keyvals[key] = val
    # Shallow-copy the globals so the caller's structures stay untouched.
    merged_flags = copy.copy(global_attrs[0])
    merged_keyvals = copy.copy(global_attrs[1])
    merged_flags.extend(flags)
    merged_keyvals.update(keyvals)
    return merged_flags, merged_keyvals
|
Parameters
----------
attr_list : list
A list of (keyword, value) tuple pairs
global_attrs : tuple(list, dict)
Global attributes which update the local attributes
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/ds9_attr_parser.py#L75-L109
| null |
import copy
from pyparsing import (Literal, CaselessKeyword, Word, Optional, Combine,
ZeroOrMore, nums, alphas, And, Or, quotedString,
QuotedString, White)
from .region_numbers import CoordOdd, CoordEven, Distance, Angle
from .parser_helper import wcs_shape, define_shape_helper, Shape
def get_ds9_attr_parser():
    """Build a pyparsing grammar for ds9 ``key[=value]`` attribute pairs.

    Each match is reduced by the parse action to a tuple: ``(key,)`` for a
    bare flag, or ``(key, value, ...)`` when a right-hand side is present.
    """
    lhs = Word(alphas)
    paren = QuotedString("(", endQuoteChar=")")
    # Accepted right-hand-side forms; ``Or`` picks the longest match.
    rhs = Or([Word(alphas + nums),
              Combine(Literal("#") + Word(alphas + nums)),  # color with '#'
              Combine(Word(alphas) + White() + Word(nums)),  # for point
              quotedString,
              QuotedString("{", endQuoteChar="}"),
              paren + ZeroOrMore(paren),
              Word(nums + " "),
              Word(nums + ".")
              ])
    expr = lhs + Optional(Literal("=").suppress() + rhs)
    expr.setParseAction(lambda s, l, tok: tuple(tok))
    return ZeroOrMore(expr)
# Shapes that may appear inside a ds9 comment section; maps each shape
# name to the coordinate signature (wcs_shape) it takes.
ds9_shape_in_comment_defs = dict(
    text=wcs_shape(CoordOdd, CoordEven),
    vector=wcs_shape(CoordOdd, CoordEven,
                     Distance, Angle),
    composite=wcs_shape(CoordOdd, CoordEven, Angle),
    ruler=wcs_shape(CoordOdd, CoordEven, CoordOdd, CoordEven),
    compass=wcs_shape(CoordOdd, CoordEven, Distance),
    projection=wcs_shape(CoordOdd, CoordEven, CoordOdd, CoordEven, Distance),
    segment=wcs_shape(CoordOdd, CoordEven,
                      repeat=(0, 2))
)
class Ds9AttrParser(object):
    """Parse ds9 attribute text, optionally preceded by a shape definition."""
    def set_continued(self, s, l, tok):
        # pyparsing parse action for "||": flags a composite continuation.
        self.continued = True
    def __init__(self):
        self.continued = False
        ds9_attr_parser = get_ds9_attr_parser()
        regionShape = define_shape_helper(ds9_shape_in_comment_defs)
        regionShape = regionShape.setParseAction(lambda s, l, tok: Shape(tok[0], tok[1:]))
        self.parser_default = ds9_attr_parser
        cont = CaselessKeyword("||").setParseAction(self.set_continued).suppress()
        # Optional leading shape (with optional "||"), then the attributes.
        line = Optional(And([regionShape, Optional(cont)])) + ds9_attr_parser
        self.parser_with_shape = line
    def parse_default(self, s):
        """Parse *s* as a plain attribute list."""
        return self.parser_default.parseString(s)
    def parse_check_shape(self, s):
        """Parse *s*; return ``(shape_or_None, attribute_tokens)``."""
        l = self.parser_with_shape.parseString(s)
        if l and isinstance(l[0], Shape):
            if self.continued:
                l[0].continued = True
            return l[0], l[1:]
        else:
            return None, l
|
astropy/pyregion
|
pyregion/ds9_region_parser.py
|
RegionParser.sky_to_image
|
python
|
def sky_to_image(shape_list, header):
    """Convert ``(shape, comment)`` pairs to image coordinates.

    Shapes already in an image-like format pass through unchanged;
    sky-frame shapes go through the WCS conversion and ``physical``
    shapes through the physical-coordinate mapping. Comments are
    yielded unaltered.
    """
    for shape, comment in shape_list:
        if isinstance(shape, Shape) and \
                (shape.coord_format not in image_like_coordformats):
            new_coords = convert_to_imagecoord(shape, header)
            # Copy so the caller's original shape stays untouched.
            l1n = copy.copy(shape)
            l1n.coord_list = new_coords
            l1n.coord_format = "image"
            yield l1n, comment
        elif isinstance(shape, Shape) and shape.coord_format == "physical":
            if header is None:
                raise RuntimeError("Physical coordinate is not known.")
            new_coordlist = convert_physical_to_imagecoord(shape, header)
            l1n = copy.copy(shape)
            l1n.coord_list = new_coordlist
            l1n.coord_format = "image"
            yield l1n, comment
        else:
            yield shape, comment
|
Converts a `ShapeList` into shapes with coordinates in image coordinates
Parameters
----------
shape_list : `pyregion.ShapeList`
The ShapeList to convert
header : `~astropy.io.fits.Header`
Specifies what WCS transformations to use.
Yields
-------
shape, comment : Shape, str
Shape with image coordinates and the associated comment
Note
----
The comments in the original `ShapeList` are unaltered
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/ds9_region_parser.py#L162-L209
|
[
"def convert_to_imagecoord(shape, header):\n \"\"\"Convert the coordlist of `shape` to image coordinates\n\n Parameters\n ----------\n shape : `pyregion.parser_helper.Shape`\n The `Shape` to convert coordinates\n\n header : `~astropy.io.fits.Header`\n Specifies what WCS transformations to use.\n\n Returns\n -------\n new_coordlist : list\n A list of image coordinates defining the shape.\n\n \"\"\"\n arg_types = _generate_arg_types(len(shape.coord_list), shape.name)\n\n new_coordlist = []\n is_even_distance = True\n coord_list_iter = iter(zip(shape.coord_list, arg_types))\n\n new_wcs = WCS(header)\n pixel_scales = proj_plane_pixel_scales(new_wcs)\n\n for coordinate, coordinate_type in coord_list_iter:\n if coordinate_type == CoordOdd:\n even_coordinate = next(coord_list_iter)[0]\n\n old_coordinate = SkyCoord(coordinate, even_coordinate,\n frame=shape.coord_format, unit='degree',\n obstime='J2000')\n new_coordlist.extend(\n np.asscalar(x)\n for x in old_coordinate.to_pixel(new_wcs, origin=1)\n )\n\n elif coordinate_type == Distance:\n if arg_types[-1] == Angle:\n degree_per_pixel = pixel_scales[0 if is_even_distance else 1]\n\n is_even_distance = not is_even_distance\n else:\n degree_per_pixel = np.sqrt(proj_plane_pixel_area(new_wcs))\n\n new_coordlist.append(coordinate / degree_per_pixel)\n\n elif coordinate_type == Angle:\n new_angle = _estimate_angle(coordinate,\n shape.coord_format,\n header)\n new_coordlist.append(new_angle)\n\n else:\n new_coordlist.append(coordinate)\n\n return new_coordlist\n",
"def convert_physical_to_imagecoord(shape, header):\n arg_types = _generate_arg_types(len(shape.coord_list), shape.name)\n\n new_coordlist = []\n coord_list_iter = iter(zip(shape.coord_list, arg_types))\n\n from .physical_coordinate import PhysicalCoordinate\n pc = PhysicalCoordinate(header)\n\n for coordinate, coordinate_type in coord_list_iter:\n if coordinate_type == CoordOdd:\n even_coordinate = next(coord_list_iter)[0]\n\n xy0 = pc.to_image(coordinate, even_coordinate)\n new_coordlist.extend(xy0)\n elif coordinate_type == Distance:\n new_coordlist.append(pc.to_image_distance(coordinate))\n else:\n new_coordlist.append(coordinate)\n\n return new_coordlist\n"
] |
class RegionParser(RegionPusher):
    """Parser for ds9 region files.

    Builds a pyparsing grammar for ds9 region lines (shapes, coordinate
    system commands, ``global`` attribute lines and ``#`` comments) and
    converts parsed lines into `Shape` instances.
    """
    def __init__(self):
        RegionPusher.__init__(self)
        self.shape_definition = ds9_shape_defs
        regionShape = define_shape_helper(self.shape_definition)
        regionShape = regionShape.setParseAction(lambda s, l, tok: Shape(tok[0], tok[1:]))
        regionExpr = define_expr(regionShape,
                                 negate_func=lambda s, l, tok: tok[-1].set_exclude(),
                                 )
        coord_command_keys = ['PHYSICAL', 'IMAGE', 'FK4', 'B1950', 'FK5',
                              'J2000', 'GALACTIC', 'ECLIPTIC', 'ICRS',
                              'LINEAR', 'AMPLIFIER', 'DETECTOR']
        coordCommandLiterals = define_simple_literals(coord_command_keys)
        coordCommandWCS = Combine(CaselessLiteral("WCS") + Optional(Word(alphas)))
        coordCommand = (coordCommandLiterals | coordCommandWCS)
        coordCommand.setParseAction(lambda s, l, tok: CoordCommand(tok[-1]))
        regionGlobal = comment_shell_like(CaselessKeyword("global"),
                                          lambda s, l, tok: Global(tok[-1]))
        regionAtom = (regionExpr | coordCommand | regionGlobal)
        regionAtom = regionAtom.setParseAction(self.pushAtom)
        regionComment = comment_shell_like(Literal("#"),
                                           parseAction=self.pushComment)
        line_simple = define_line(atom=regionAtom,
                                  separator=Literal(";"),
                                  comment=regionComment
                                  )
        # A trailing "||" marks the atom as continued (composite region).
        line_w_composite = And([regionAtom,
                                CaselessKeyword("||").setParseAction(self.set_continued)
                                ]) \
            + Optional(regionComment)
        line = Or([line_simple, line_w_composite])
        self.parser = Optional(line) + StringEnd()
    def parseLine(self, l):
        """Parse one region line; return ``(stack, comment, continued)``."""
        self.parser.parseString(l)
        s, c, continued = self.stack, self.comment, self.continued
        self.flush()
        return s, c, continued
    def parse(self, s):
        """Parse a whole region string, yielding ``(shape_or_None, comment)``."""
        for l in s.split("\n"):
            try:
                s, c, continued = self.parseLine(l)
            except ParseException:
                warnings.warn("Failed to parse : " + l)
                self.flush()
                continue
            if len(s) > 1:
                # Several atoms on one line: the comment belongs to the last.
                for s1 in s[:-1]:
                    yield s1, None
                s[-1].comment = c
                s[-1].continued = continued
                yield s[-1], c
            elif len(s) == 1:
                s[-1].comment = c
                s[-1].continued = continued
                yield s[-1], c
            elif c:
                # Comment-only line.
                yield None, c
            self.flush()
    def convert_attr(self, l):
        """Attach merged global/local ds9 attributes to each parsed shape."""
        global_attr = [], {}
        parser = Ds9AttrParser()
        for l1, c1 in l:
            if isinstance(l1, Global):
                for kv in parser.parse_default(l1.text):
                    if len(kv) == 1:
                        global_attr[0].append(kv[0])
                    elif len(kv) == 2:
                        if kv[0] == 'tag':
                            # Multiple tags accumulate into a set.
                            global_attr[1].setdefault(kv[0], set()).add(kv[1])
                        else:
                            global_attr[1][kv[0]] = kv[1]
            elif isinstance(l1, Shape):
                if c1:
                    attr_list = parser.parse_default(c1)
                    attr0, attr1 = get_attr(attr_list, global_attr)
                else:
                    attr0, attr1 = global_attr
                l1n = copy.copy(l1)
                l1n.attr = attr0, attr1
                yield l1n, c1
            elif not l1 and c1:
                # A comment-only line may still carry a shape definition.
                shape, attr_list = parser.parse_check_shape(c1)
                if shape:
                    shape.attr = get_attr(attr_list, global_attr)
                    yield shape, c1
            else:
                yield l1, c1
    @staticmethod
    def filter_shape(sss):
        """Return only the `Shape` objects from ``(shape, comment)`` pairs.

        Bug fix: this is a ``@staticmethod`` but previously declared a
        stray ``self`` parameter, so every single-argument call raised
        ``TypeError``.
        """
        return [s1[0] for s1 in sss if isinstance(s1[0], Shape)]
    @staticmethod
    def filter_shape2(sss):
        """Like `filter_shape`, but keep the paired comments as well."""
        r = [s1 for s1 in sss if isinstance(s1[0], Shape)]
        return zip(*r)
|
astropy/pyregion
|
pyregion/wcs_helper.py
|
_estimate_angle
|
python
|
def _estimate_angle(angle, reg_coordinate_frame, header):
    """Return *angle* re-expressed in the pixel frame of *header*.

    ``angle`` is measured from the Y axis in ``reg_coordinate_frame``;
    the result is measured from the Y axis of the WCS that ``header``
    describes.
    """
    correction = _calculate_rotation_angle(reg_coordinate_frame, header)
    return angle - correction
|
Transform an angle into a different frame
Parameters
----------
angle : float, int
The number of degrees, measured from the Y axis in origin's frame
reg_coordinate_frame : str
Coordinate frame in which ``angle`` is defined
header : `~astropy.io.fits.Header` instance
Header describing the image
Returns
-------
angle : float
    The angle, measured from the Y axis in the WCS defined by ``header``
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/wcs_helper.py#L7-L27
|
[
"def _calculate_rotation_angle(reg_coordinate_frame, header):\n \"\"\"Calculates the rotation angle from the region to the header's frame\n\n This attempts to be compatible with the implementation used by SAOImage\n DS9. In particular, this measures the rotation of the north axis as\n measured at the center of the image, and therefore requires a\n `~astropy.io.fits.Header` object with defined 'NAXIS1' and 'NAXIS2'\n keywords.\n\n Parameters\n ----------\n reg_coordinate_frame : str\n Coordinate frame used by the region file\n\n header : `~astropy.io.fits.Header` instance\n Header describing the image\n\n Returns\n -------\n y_axis_rot : float\n Degrees by which the north axis in the region's frame is rotated when\n transformed to pixel coordinates\n \"\"\"\n new_wcs = WCS(header)\n region_frame = SkyCoord(\n '0d 0d',\n frame=reg_coordinate_frame,\n obstime='J2000')\n region_frame = SkyCoord(\n '0d 0d',\n frame=reg_coordinate_frame,\n obstime='J2000',\n equinox=region_frame.equinox)\n\n origin = SkyCoord.from_pixel(\n header['NAXIS1'] / 2,\n header['NAXIS2'] / 2,\n wcs=new_wcs,\n origin=1).transform_to(region_frame)\n\n offset = proj_plane_pixel_scales(new_wcs)[1]\n\n origin_x, origin_y = origin.to_pixel(new_wcs, origin=1)\n origin_lon = origin.data.lon.degree\n origin_lat = origin.data.lat.degree\n\n offset_point = SkyCoord(\n origin_lon, origin_lat + offset, unit='degree',\n frame=origin.frame.name, obstime='J2000')\n offset_x, offset_y = offset_point.to_pixel(new_wcs, origin=1)\n\n north_rot = np.arctan2(\n offset_y - origin_y,\n offset_x - origin_x) / np.pi * 180.\n\n cdelt = new_wcs.wcs.get_cdelt()\n if (cdelt > 0).all() or (cdelt < 0).all():\n return north_rot - 90\n else:\n return -(north_rot - 90)\n"
] |
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
def _calculate_rotation_angle(reg_coordinate_frame, header):
    """Calculates the rotation angle from the region to the header's frame
    This attempts to be compatible with the implementation used by SAOImage
    DS9. In particular, this measures the rotation of the north axis as
    measured at the center of the image, and therefore requires a
    `~astropy.io.fits.Header` object with defined 'NAXIS1' and 'NAXIS2'
    keywords.
    Parameters
    ----------
    reg_coordinate_frame : str
        Coordinate frame used by the region file
    header : `~astropy.io.fits.Header` instance
        Header describing the image
    Returns
    -------
    y_axis_rot : float
        Degrees by which the north axis in the region's frame is rotated when
        transformed to pixel coordinates
    """
    new_wcs = WCS(header)
    # Build the frame once to learn its default equinox, then rebuild it
    # pinned explicitly to that equinox.
    region_frame = SkyCoord(
        '0d 0d',
        frame=reg_coordinate_frame,
        obstime='J2000')
    region_frame = SkyCoord(
        '0d 0d',
        frame=reg_coordinate_frame,
        obstime='J2000',
        equinox=region_frame.equinox)
    # Reference point: the image center, expressed in the region's frame.
    origin = SkyCoord.from_pixel(
        header['NAXIS1'] / 2,
        header['NAXIS2'] / 2,
        wcs=new_wcs,
        origin=1).transform_to(region_frame)
    # Step one pixel-scale unit north (in latitude) of the reference point...
    offset = proj_plane_pixel_scales(new_wcs)[1]
    origin_x, origin_y = origin.to_pixel(new_wcs, origin=1)
    origin_lon = origin.data.lon.degree
    origin_lat = origin.data.lat.degree
    offset_point = SkyCoord(
        origin_lon, origin_lat + offset, unit='degree',
        frame=origin.frame.name, obstime='J2000')
    offset_x, offset_y = offset_point.to_pixel(new_wcs, origin=1)
    # ...and measure the pixel-space direction of that northward step.
    north_rot = np.arctan2(
        offset_y - origin_y,
        offset_x - origin_x) / np.pi * 180.
    # Mixed-sign CDELT means a mirrored pixel grid: flip the sign.
    cdelt = new_wcs.wcs.get_cdelt()
    if (cdelt > 0).all() or (cdelt < 0).all():
        return north_rot - 90
    else:
        return -(north_rot - 90)
|
astropy/pyregion
|
pyregion/wcs_helper.py
|
_calculate_rotation_angle
|
python
|
def _calculate_rotation_angle(reg_coordinate_frame, header):
    """Rotation (degrees) of the region frame's north axis, measured at the
    image center, when transformed to the pixel grid of *header*."""
    new_wcs = WCS(header)
    # Build the frame once to learn its default equinox, then rebuild it
    # pinned explicitly to that equinox.
    region_frame = SkyCoord(
        '0d 0d',
        frame=reg_coordinate_frame,
        obstime='J2000')
    region_frame = SkyCoord(
        '0d 0d',
        frame=reg_coordinate_frame,
        obstime='J2000',
        equinox=region_frame.equinox)
    # Reference point: the image center, expressed in the region's frame.
    origin = SkyCoord.from_pixel(
        header['NAXIS1'] / 2,
        header['NAXIS2'] / 2,
        wcs=new_wcs,
        origin=1).transform_to(region_frame)
    # Step one pixel-scale unit north of the reference point...
    offset = proj_plane_pixel_scales(new_wcs)[1]
    origin_x, origin_y = origin.to_pixel(new_wcs, origin=1)
    origin_lon = origin.data.lon.degree
    origin_lat = origin.data.lat.degree
    offset_point = SkyCoord(
        origin_lon, origin_lat + offset, unit='degree',
        frame=origin.frame.name, obstime='J2000')
    offset_x, offset_y = offset_point.to_pixel(new_wcs, origin=1)
    # ...and measure the pixel-space direction of that northward step.
    north_rot = np.arctan2(
        offset_y - origin_y,
        offset_x - origin_x) / np.pi * 180.
    # Mixed-sign CDELT means a mirrored pixel grid: flip the sign.
    cdelt = new_wcs.wcs.get_cdelt()
    if (cdelt > 0).all() or (cdelt < 0).all():
        return north_rot - 90
    else:
        return -(north_rot - 90)
|
Calculates the rotation angle from the region to the header's frame
This attempts to be compatible with the implementation used by SAOImage
DS9. In particular, this measures the rotation of the north axis as
measured at the center of the image, and therefore requires a
`~astropy.io.fits.Header` object with defined 'NAXIS1' and 'NAXIS2'
keywords.
Parameters
----------
reg_coordinate_frame : str
Coordinate frame used by the region file
header : `~astropy.io.fits.Header` instance
Header describing the image
Returns
-------
y_axis_rot : float
Degrees by which the north axis in the region's frame is rotated when
transformed to pixel coordinates
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/wcs_helper.py#L30-L89
| null |
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
def _estimate_angle(angle, reg_coordinate_frame, header):
    """Transform an angle into a different frame

    Parameters
    ----------
    angle : float, int
        The number of degrees, measured from the Y axis in origin's frame
    reg_coordinate_frame : str
        Coordinate frame in which ``angle`` is defined
    header : `~astropy.io.fits.Header` instance
        Header describing the image

    Returns
    -------
    angle : float
        The angle, measured from the Y axis in the WCS defined by ``header``
    """
    rotation = _calculate_rotation_angle(reg_coordinate_frame, header)
    return angle - rotation
|
astropy/pyregion
|
pyregion/mpl_helper.py
|
as_mpl_artists
|
python
|
def as_mpl_artists(shape_list,
                   properties_func=None,
                   text_offset=5.0, origin=1):
    """Convert a region list to a list of patches and a list of artists.

    Parameters
    ----------
    shape_list : iterable of shapes
        Region shapes to convert; each shape provides ``name``, ``attr``,
        ``coord_list`` and ``continued``.
    properties_func : callable, optional
        Maps (shape, saved_attrs) to matplotlib keyword arguments;
        defaults to ``properties_func_default``.
    text_offset : float
        Vertical offset (in pixels) for text associated with regions, so
        that it does not overlap the region itself.
    origin : int
        Region files often implicitly use (1, 1) as the lower-left pixel,
        while Python arrays start at 0.  With the default ``origin=1`` the
        returned artists' coordinates are shifted by (-1, -1); pass
        ``origin=0`` to disable the shift.

    Returns
    -------
    (patch_list, artist_list) : tuple of lists
        matplotlib patches and non-patch artists (text, markers).
    """
    patch_list = []
    artist_list = []
    if properties_func is None:
        properties_func = properties_func_default
    # properties for continued(? multiline?) regions
    saved_attrs = None
    for shape in shape_list:
        patches = []
        if saved_attrs is None:
            _attrs = [], {}
        else:
            _attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])
        kwargs = properties_func(shape, _attrs)
        if shape.name == "composite":
            # composite carries attributes for the shapes that follow it
            saved_attrs = shape.attr
            continue
        if saved_attrs is None and shape.continued:
            saved_attrs = shape.attr
        # elif (shape.name in shape.attr[1]):
        #     if (shape.attr[1][shape.name] != "ignore"):
        #         saved_attrs = shape.attr
        if not shape.continued:
            saved_attrs = None
        # text associated with the shape
        txt = shape.attr[1].get("text")
        if shape.name == "polygon":
            xy = np.array(shape.coord_list)
            xy.shape = -1, 2
            # -1 for change origin to 0,0
            patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]
        elif shape.name == "rotbox" or shape.name == "box":
            xc, yc, w, h, rot = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            _box = np.array([[-w / 2., -h / 2.],
                             [-w / 2., h / 2.],
                             [w / 2., h / 2.],
                             [w / 2., -h / 2.]])
            box = _box + [xc, yc]
            rotbox = rotated_polygon(box, xc, yc, rot)
            patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
        elif shape.name == "ellipse":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            angle = shape.coord_list[-1]
            # alternating semi-major/semi-minor pairs between center and angle
            maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
            patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,
                                        angle=angle, **kwargs)
                       for maj, min in zip(maj_list, min_list)]
        elif shape.name == "annulus":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            r_list = shape.coord_list[2:]
            patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]
        elif shape.name == "circle":
            xc, yc, major = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]
        elif shape.name == "panda":
            xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            # arcs for the radial divisions ...
            patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
                                    theta1=a1, theta2=a2, **kwargs)
                       for rr in np.linspace(r1, r2, rn + 1)]
            # ... plus radial spokes for the angular divisions
            for aa in np.linspace(a1, a2, an + 1):
                xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
                yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
                p = Path(np.transpose([xx, yy]))
                patches.append(mpatches.PathPatch(p, **kwargs))
        elif shape.name == "pie":
            xc, yc, r1, r2, a1, a2 = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
                                    theta1=a1, theta2=a2, **kwargs)
                       for rr in [r1, r2]]
            for aa in [a1, a2]:
                xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
                yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
                p = Path(np.transpose([xx, yy]))
                patches.append(mpatches.PathPatch(p, **kwargs))
        elif shape.name == "epanda":
            xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            # mpl takes angle a1, a2 as angle as in circle before
            # transformation to ellipse.
            x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12
            x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12
            a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.
            patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,
                                    angle=angle, theta1=a1, theta2=a2,
                                    **kwargs)
                       for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),
                                           np.linspace(r12, r22, rn + 1))]
            for aa in np.linspace(a1, a2, an + 1):
                xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)
                yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)
                p = Path(np.transpose([xx, yy]))
                # spokes are drawn for a circle, then scaled/rotated into place
                tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)
                p2 = tr.transform_path(p)
                patches.append(mpatches.PathPatch(p2, **kwargs))
        elif shape.name == "text":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            if txt:
                _t = _get_text(txt, xc, yc, 0, 0, **kwargs)
                artist_list.append(_t)
        elif shape.name == "point":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            artist_list.append(Line2D([xc], [yc],
                                      **kwargs))
            if txt:
                textshape = copy.copy(shape)
                textshape.name = "text"
                textkwargs = properties_func(textshape, _attrs)
                _t = _get_text(txt, xc, yc, 0, text_offset,
                               va="bottom",
                               **textkwargs)
                artist_list.append(_t)
        elif shape.name in ["line", "vector"]:
            if shape.name == "line":
                x1, y1, x2, y2 = shape.coord_list[:4]
                # -1 for change origin to 0,0
                x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin
                a1, a2 = shape.attr[1].get("line", "0 0").strip().split()[:2]
                arrowstyle = "-"
                if int(a1):
                    arrowstyle = "<" + arrowstyle
                if int(a2):
                    arrowstyle = arrowstyle + ">"
            else:  # shape.name == "vector"
                x1, y1, l, a = shape.coord_list[:4]
                # -1 for change origin to 0,0
                x1, y1 = x1 - origin, y1 - origin
                x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)
                v1 = int(shape.attr[1].get("vector", "0").strip())
                if v1:
                    arrowstyle = "->"
                else:
                    arrowstyle = "-"
            patches = [mpatches.FancyArrowPatch(posA=(x1, y1),
                                                posB=(x2, y2),
                                                arrowstyle=arrowstyle,
                                                arrow_transmuter=None,
                                                connectionstyle="arc3",
                                                patchA=None, patchB=None,
                                                shrinkA=0, shrinkB=0,
                                                connector=None,
                                                **kwargs)]
        else:
            warnings.warn("'as_mpl_artists' does not know how to convert {0} "
                          "to mpl artist".format(shape.name))
        patch_list.extend(patches)
        if txt and patches:
            # the text associated with a shape uses different
            # matplotlib keywords than the shape itself for, e.g.,
            # color
            textshape = copy.copy(shape)
            textshape.name = "text"
            textkwargs = properties_func(textshape, _attrs)
            # calculate the text position
            _bb = [p.get_window_extent() for p in patches]
            # this is to work around backward-incompatible change made
            # in matplotlib 1.2. This change is later reverted so only
            # some versions are affected. With affected version of
            # matplotlib, get_window_extent method calls get_transform
            # method which sets the _transformSet to True, which is
            # not desired.
            for p in patches:
                p._transformSet = False
            _bbox = Bbox.union(_bb)
            x0, y0, x1, y1 = _bbox.extents
            xc = .5 * (x0 + x1)
            _t = _get_text(txt, xc, y1, 0, text_offset,
                           va="bottom",
                           **textkwargs)
            artist_list.append(_t)
    return patch_list, artist_list
|
Converts a region list to a list of patches and a list of artists.
Optional Keywords:
[ text_offset ] - If there is text associated with the regions, add
some vertical offset (in pixels) to the text so that it doesn't overlap
with the regions.
Often, the regions files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the Python convention
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, set origin=0.
|
train
|
https://github.com/astropy/pyregion/blob/913af7ea4917855cb2e43d5086d1c8dd99c31363/pyregion/mpl_helper.py#L136-L388
|
[
"def rotated_polygon(xy, ox, oy, angle):\n # angle in degree\n theta = angle / 180. * pi\n\n st = sin(theta)\n ct = cos(theta)\n\n xy = np.asarray(xy, dtype=\"d\")\n x, y = xy[:, 0], xy[:, 1]\n x1 = x - ox\n y1 = y - oy\n\n x2 = ct * x1 + -st * y1\n y2 = st * x1 + ct * y1\n\n xp = x2 + ox\n yp = y2 + oy\n\n return np.hstack((xp.reshape((-1, 1)), yp.reshape((-1, 1))))\n",
"def properties_func_default(shape, saved_attrs):\n attr_list = copy.copy(shape.attr[0])\n attr_dict = copy.copy(shape.attr[1])\n\n attr_list.extend(saved_attrs[0])\n attr_dict.update(saved_attrs[1])\n\n color = attr_dict.get(\"color\", None)\n color = _ds9_to_mpl_colormap.get(color, color)\n\n if shape.name == \"text\":\n kwargs = dict(color=color,\n rotation=attr_dict.get(\"textangle\", 0),\n )\n font = attr_dict.get(\"font\")\n if font:\n a = font.split()\n if len(a) >= 3:\n fontsize = float(a[1])\n kwargs[\"fontsize\"] = fontsize\n elif shape.name == \"point\":\n point_attrs = attr_dict.get(\"point\", \"boxcircle\").split()\n if len(point_attrs) == 1:\n point_type = point_attrs[0]\n point_size = 11\n elif len(point_attrs) > 1:\n point_type = point_attrs[0]\n point_size = int(point_attrs[1])\n\n marker = _point_type_dict.get(point_type, \"o\")\n kwargs = dict(markeredgecolor=color,\n markerfacecolor=\"none\",\n marker=marker,\n markeredgewidth=int(attr_dict.get(\"width\", 1)),\n markersize=point_size\n )\n elif shape.name in [\"line\", \"vector\"]:\n fontsize = 10 # default font size\n\n font = attr_dict.get(\"font\")\n if font:\n a = font.split()\n if len(a) >= 3:\n fontsize = float(a[1])\n\n kwargs = dict(color=color,\n linewidth=int(attr_dict.get(\"width\", 1)),\n mutation_scale=fontsize,\n )\n if int(attr_dict.get(\"dash\", \"0\")):\n kwargs[\"linestyle\"] = \"dashed\"\n\n else:\n kwargs = dict(edgecolor=color,\n linewidth=int(attr_dict.get(\"width\", 1)),\n facecolor=\"none\"\n )\n\n if \"background\" in attr_list:\n kwargs[\"linestyle\"] = \"dashed\"\n\n if int(attr_dict.get(\"dash\", \"0\")):\n kwargs[\"linestyle\"] = \"dashed\"\n if shape.exclude:\n kwargs[\"hatch\"] = \"/\"\n\n return kwargs\n",
"def _get_text(txt, x, y, dx, dy, ha=\"center\", va=\"center\", **kwargs):\n if \"color\" in kwargs:\n textcolor = kwargs[\"color\"]\n del kwargs[\"color\"]\n elif \"markeredgecolor\" in kwargs:\n textcolor = kwargs[\"markeredgecolor\"]\n else:\n import matplotlib as mpl\n textcolor = mpl.rcParams['text.color']\n ann = Annotation(txt, (x, y), xytext=(dx, dy),\n xycoords='data',\n textcoords=\"offset points\",\n color=textcolor,\n ha=ha, va=va,\n **kwargs)\n ann.set_transform(IdentityTransform())\n\n return ann\n"
] |
import copy
import numpy as np
from math import cos, sin, pi, atan2
import warnings
import matplotlib.patches as mpatches
from matplotlib.path import Path
from matplotlib.lines import Line2D
from matplotlib.transforms import Affine2D, Bbox, IdentityTransform
from matplotlib.text import Annotation
def rotated_polygon(xy, ox, oy, angle):
    """Rotate polygon vertices *xy* by *angle* degrees about (ox, oy).

    Returns an (N, 2) float array of the rotated vertices.
    """
    theta = angle / 180. * pi
    st, ct = sin(theta), cos(theta)

    pts = np.asarray(xy, dtype="d")
    # translate to the pivot, rotate, translate back
    dx = pts[:, 0] - ox
    dy = pts[:, 1] - oy
    rx = ct * dx - st * dy + ox
    ry = st * dx + ct * dy + oy

    return np.hstack((rx.reshape((-1, 1)), ry.reshape((-1, 1))))
# sss3 = [s1[0] for s1 in sss2 if isinstance(s1[0], parser_ds9.Shape)]
# ds9 point symbol name -> matplotlib marker character
_point_type_dict = dict(circle="o",
                        box="s",
                        diamond="D",
                        x="x",
                        cross="+",
                        arrow="^",
                        boxcircle="*")
# ds9 color names whose matplotlib equivalent has a different name
_ds9_to_mpl_colormap = dict(green="lime",
                            )
def properties_func_default(shape, saved_attrs):
    """Map a region shape's ds9 attributes onto matplotlib keyword arguments.

    ``saved_attrs`` is an (attr_list, attr_dict) pair carried over from a
    preceding composite/continued region; it is merged on top of the shape's
    own attributes.  The returned kwargs dict is tailored to the artist kind
    ("text", "point", "line"/"vector", or a generic patch).
    """
    attr_list = copy.copy(shape.attr[0])
    attr_dict = copy.copy(shape.attr[1])

    attr_list.extend(saved_attrs[0])
    attr_dict.update(saved_attrs[1])

    color = attr_dict.get("color", None)
    # translate ds9 color names that differ in matplotlib (e.g. green -> lime)
    color = _ds9_to_mpl_colormap.get(color, color)

    if shape.name == "text":
        kwargs = dict(color=color,
                      rotation=attr_dict.get("textangle", 0),
                      )
        font = attr_dict.get("font")
        if font:
            # ds9 font attribute: "<family> <size> <weight> ..." -- size is
            # the second token
            a = font.split()
            if len(a) >= 3:
                fontsize = float(a[1])
                kwargs["fontsize"] = fontsize
    elif shape.name == "point":
        point_attrs = attr_dict.get("point", "boxcircle").split()
        if len(point_attrs) == 1:
            point_type = point_attrs[0]
            point_size = 11  # default marker size when none given
        elif len(point_attrs) > 1:
            point_type = point_attrs[0]
            point_size = int(point_attrs[1])

        marker = _point_type_dict.get(point_type, "o")
        kwargs = dict(markeredgecolor=color,
                      markerfacecolor="none",
                      marker=marker,
                      markeredgewidth=int(attr_dict.get("width", 1)),
                      markersize=point_size
                      )
    elif shape.name in ["line", "vector"]:
        fontsize = 10  # default font size

        font = attr_dict.get("font")
        if font:
            a = font.split()
            if len(a) >= 3:
                fontsize = float(a[1])

        # mutation_scale sizes FancyArrowPatch arrow heads from the font size
        kwargs = dict(color=color,
                      linewidth=int(attr_dict.get("width", 1)),
                      mutation_scale=fontsize,
                      )
        if int(attr_dict.get("dash", "0")):
            kwargs["linestyle"] = "dashed"

    else:
        # generic patch (circle, box, polygon, ...): outline only, no fill
        kwargs = dict(edgecolor=color,
                      linewidth=int(attr_dict.get("width", 1)),
                      facecolor="none"
                      )

        if "background" in attr_list:
            kwargs["linestyle"] = "dashed"

        if int(attr_dict.get("dash", "0")):
            kwargs["linestyle"] = "dashed"
        if shape.exclude:
            # excluded regions are drawn hatched
            kwargs["hatch"] = "/"

    return kwargs
def _get_text(txt, x, y, dx, dy, ha="center", va="center", **kwargs):
    """Build an Annotation for *txt* at data coords (x, y), offset by
    (dx, dy) points.

    The text color falls back from kwargs' "color" to "markeredgecolor" to
    matplotlib's rcParams default.
    """
    if "color" in kwargs:
        textcolor = kwargs["color"]
        # remove so it is not passed twice to Annotation
        del kwargs["color"]
    elif "markeredgecolor" in kwargs:
        textcolor = kwargs["markeredgecolor"]
    else:
        import matplotlib as mpl
        textcolor = mpl.rcParams['text.color']
    ann = Annotation(txt, (x, y), xytext=(dx, dy),
                     xycoords='data',
                     textcoords="offset points",
                     color=textcolor,
                     ha=ha, va=va,
                     **kwargs)
    ann.set_transform(IdentityTransform())

    return ann
|
biocore-ntnu/epic
|
epic/run/run_epic.py
|
multiple_files_count_reads_in_windows
|
python
|
def multiple_files_count_reads_in_windows(bed_files, args):
    # type: (Iterable[str], Namespace) -> OrderedDict[str, List[pd.DataFrame]]
    """Use count_reads on multiple files and store the result in a dict
    keyed by file name.

    Untested since it does the same thing as count reads.
    """
    bed_windows = OrderedDict()  # type: OrderedDict[str, List[pd.DataFrame]]
    for bed_file in bed_files:
        logging.info("Binning " + bed_file)
        if ".bedpe" in bed_file:
            # paired-end input uses its own binning routine
            chromosome_dfs = count_reads_in_windows_paired_end(bed_file, args)
        else:
            chromosome_dfs = count_reads_in_windows(bed_file, args)
        bed_windows[bed_file] = chromosome_dfs
    return bed_windows
|
Use count_reads on multiple files and store result in dict.
Untested since does the same thing as count reads.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/run/run_epic.py#L129-L144
|
[
"def count_reads_in_windows(bed_file, args):\n # type: (str, Namespace) -> List[pd.DataFrame]\n\n chromosome_size_dict = args.chromosome_sizes\n chromosomes = natsorted(list(chromosome_size_dict.keys()))\n\n parallel_count_reads = partial(_count_reads_in_windows, bed_file, args)\n\n info(\"Binning chromosomes {}\".format(\", \".join([c.replace(\"chr\", \"\")\n for c in chromosomes])))\n\n chromosome_dfs = Parallel(n_jobs=args.number_cores)(\n delayed(parallel_count_reads)(chromosome_size_dict[chromosome],\n chromosome, strand)\n for chromosome, strand in product(chromosomes, [\"+\", \"-\"]))\n\n info(\"Merging the bins on both strands per chromosome.\")\n both_chromosome_strand_dfs = [df_pair\n for df_pair in _pairwise(chromosome_dfs)]\n merged_chromosome_dfs = Parallel(\n n_jobs=args.number_cores)(delayed(merge_chromosome_dfs)(df_pair)\n for df_pair in both_chromosome_strand_dfs)\n\n return merged_chromosome_dfs\n",
"def count_reads_in_windows_paired_end(bed_file, args):\n # type: (str, Namespace) -> List[pd.DataFrame]\n\n chromosome_size_dict = args.chromosome_sizes\n chromosomes = natsorted(list(chromosome_size_dict.keys()))\n\n parallel_count_reads = partial(_count_reads_in_windows_paired_end,\n bed_file, args)\n\n info(\"Binning chromosomes {}\".format(\", \".join([c.replace(\"chr\", \"\")\n for c in chromosomes])))\n chromosome_dfs = Parallel(n_jobs=args.number_cores)(\n delayed(parallel_count_reads)(chromosome_size_dict[chromosome],\n chromosome)\n for chromosome in chromosomes)\n\n return chromosome_dfs\n"
] |
from __future__ import print_function
"""Run whole epic pipeline."""
__author__ = "Endre Bakken Stovner https://github.com/endrebak/"
__license__ = "MIT"
from os.path import dirname, join, basename
from sys import stdout, argv, stderr
from itertools import chain
from collections import OrderedDict
from subprocess import call
import logging
from argparse import Namespace
import pandas as pd
from numpy import log2
from typing import Iterable
from natsort import natsorted
from joblib import Parallel, delayed
from epic.windows.count.count_reads_in_windows import (
count_reads_in_windows, count_reads_in_windows_paired_end)
from epic.statistics.compute_background_probabilites import compute_background_probabilities
from epic.statistics.count_to_pvalue import count_to_pvalue
from epic.statistics.fdr import compute_fdr
from epic.utils.helper_functions import merge_chip_and_input, get_total_number_of_reads, merge_same_files
from epic.windows.cluster.find_islands import find_islands
from epic.matrixes.matrixes import write_matrix_files
def run_epic(args):
    # type: (Namespace) -> pd.DataFrame
    """Run the whole epic pipeline.

    Bins treatment/control reads, merges counts, computes background
    probabilities and p-values, finds islands, computes FDR, and writes the
    results (CSV to args.outfile or stdout; optional BED/matrix outputs).
    """
    print(args.effective_genome_fraction)
    chip_windows = multiple_files_count_reads_in_windows(args.treatment, args)
    input_windows = multiple_files_count_reads_in_windows(args.control, args)
    # print("merging chip" * 10, file=stderr)
    chip_merged = _merge_files(chip_windows.values(), args.number_cores)
    # print("merging input" * 10, file=stderr)
    input_merged = _merge_files(input_windows.values(), args.number_cores)
    chip_merged_sum = sum_columns(chip_merged)
    input_merged_sum = sum_columns(input_merged)
    nb_chip_reads = get_total_number_of_reads(chip_merged_sum)
    print(nb_chip_reads)
    nb_input_reads = get_total_number_of_reads(input_merged_sum)
    merged_dfs = merge_chip_and_input(chip_merged_sum, input_merged_sum,
                                      args.number_cores)
    score_threshold, island_enriched_threshold, average_window_readcount = \
        compute_background_probabilities(nb_chip_reads, args)
    dfs = []  # type: Iterable[pd.DataFrame]
    dfs = count_to_pvalue(merged_dfs, island_enriched_threshold,
                          average_window_readcount, args.number_cores)
    dfs = find_islands(dfs, score_threshold, args)
    logging.info("Done finding islands.")
    logging.info("Concating dfs.")
    df = pd.concat([df for df in dfs if not df.empty])
    logging.info("Labeling island bins.")
    logging.info("Computing FDR.")
    df = compute_fdr(df, nb_chip_reads, nb_input_reads, args)
    # Just in case some ints got promoted to float somewhere
    df[["Start", "End", "ChIP", "Input"]] = df[["Start", "End", "ChIP", "Input"
                                                ]].astype(int)
    # redundancy in below code
    outfile = args.outfile if args.outfile else stdout
    if args.outfile:
        # write the command-line header first, then append the data below
        with open(outfile, "w+") as h:
            print("# epic " + " ".join(argv[1:]), file=h)
    else:
        print("# epic " + " ".join(argv[1:]), file=stdout)
    df.to_csv(outfile, index=False, sep=" ", na_rep="NA", mode="a")
    if args.bed:
        df_to_bed(df).to_csv(args.bed, header=False, index=False, sep="\t")
    if (args.store_matrix or args.bigwig or args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig or args.individual_log2fc_bigwigs):
        write_matrix_files(chip_merged, input_merged, df, args)
    return df.reset_index(drop=True)  # only returns a value to simplify integration tests
def df_to_bed(df):
    # type: (pd.DataFrame) -> pd.DataFrame
    """Turn an epic result dataframe into a BED6-like dataframe.

    Columns: Chromosome, Start, End, FDR (name field), Score, Strand.
    Score is 100 * log2(ChIP/Input), clamped to the BED score range [0, 1000];
    Strand is always ".".
    """
    bed = df[["Chromosome", "Start", "End", "FDR"]].copy()
    bed.insert(4, "Score", log2(df.ChIP / df.Input) * 100)
    bed.loc[bed.Score > 1000, "Score"] = 1000
    bed.loc[bed.Score < 0, "Score"] = 0
    bed.insert(5, "Strand", ".")
    return bed
def sum_columns(dfs):
    # type: (Iterable[pd.DataFrame]) -> List[pd.DataFrame]
    """Collapse every count column of each dataframe into a single 'Count'
    column, keyed by (Chromosome, Bin)."""
    summed = []
    for frame in dfs:
        counts = frame.set_index(["Chromosome", "Bin"]).sum(axis=1)
        counts.name = "Count"
        summed.append(counts.reset_index())
    return summed
def _merge_files(windows, nb_cpu):
    # type: (Iterable[pd.DataFrame], int) -> pd.DataFrame
    """Merge lists of chromosome bin df chromosome-wise.

    windows is an OrderedDict where the keys are files, the values are lists of
    dfs, one per chromosome.

    Returns a list of dataframes, one per chromosome, with the collective count
    per bin for all files.

    TODO: is it faster to merge all in one command?
    """
    # windows is a list of chromosome dfs per file
    windows = iter(windows)  # can iterate over because it is odict_values
    merged = next(windows)

    # if there is only one file, the merging is skipped since the windows is used up
    for chromosome_dfs in windows:
        # merge_same_files merges the chromosome files in parallel
        merged = merge_same_files(merged, chromosome_dfs, nb_cpu)

    return merged
|
biocore-ntnu/epic
|
epic/run/run_epic.py
|
_merge_files
|
python
|
def _merge_files(windows, nb_cpu):
    # type: (Iterable[pd.DataFrame], int) -> pd.DataFrame
    """Merge the per-chromosome dataframe lists of several files into one
    list with the collective count per bin (reduces pairwise via
    merge_same_files)."""
    # windows is a list of chromosome dfs per file
    windows = iter(windows)  # can iterate over because it is odict_values
    merged = next(windows)

    # if there is only one file, the merging is skipped since the windows is used up
    for chromosome_dfs in windows:
        # merge_same_files merges the chromosome files in parallel
        merged = merge_same_files(merged, chromosome_dfs, nb_cpu)

    return merged
|
Merge lists of chromosome bin df chromosome-wise.
windows is an OrderedDict where the keys are files, the values are lists of
dfs, one per chromosome.
Returns a list of dataframes, one per chromosome, with the collective count
per bin for all files.
TODO: is it faster to merge all in one command?
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/run/run_epic.py#L147-L169
|
[
"def merge_same_files(sample1_dfs, sample2_dfs, nb_cpu):\n # type: (List[pd.DataFrame], List[pd.DataFrame], int) -> List[pd.DataFrame]\n\n # if one list is missing a chromosome, we might pair up the wrong dataframes\n # therefore creating dicts beforehand to ensure they are paired up properly\n d1, d2 = ensure_same_chromosomes_in_list(sample1_dfs,\n sample2_dfs)\n\n assert len(d1) == len(d2)\n\n logging.info(\"Merging same class data.\")\n merged_chromosome_dfs = Parallel(n_jobs=nb_cpu)(delayed(_merge_same_files)(\n d1[chromosome],\n d2[chromosome]) for chromosome in d1.keys())\n\n return merged_chromosome_dfs\n"
] |
from __future__ import print_function
"""Run whole epic pipeline."""
__author__ = "Endre Bakken Stovner https://github.com/endrebak/"
__license__ = "MIT"
from os.path import dirname, join, basename
from sys import stdout, argv, stderr
from itertools import chain
from collections import OrderedDict
from subprocess import call
import logging
from argparse import Namespace
import pandas as pd
from numpy import log2
from typing import Iterable
from natsort import natsorted
from joblib import Parallel, delayed
from epic.windows.count.count_reads_in_windows import (
count_reads_in_windows, count_reads_in_windows_paired_end)
from epic.statistics.compute_background_probabilites import compute_background_probabilities
from epic.statistics.count_to_pvalue import count_to_pvalue
from epic.statistics.fdr import compute_fdr
from epic.utils.helper_functions import merge_chip_and_input, get_total_number_of_reads, merge_same_files
from epic.windows.cluster.find_islands import find_islands
from epic.matrixes.matrixes import write_matrix_files
def run_epic(args):
    # type: (Namespace) -> pd.DataFrame
    """Run the whole epic pipeline.

    Bins treatment/control reads, merges counts, computes background
    probabilities and p-values, finds islands, computes FDR, and writes the
    results (CSV to args.outfile or stdout; optional BED/matrix outputs).
    """
    print(args.effective_genome_fraction)
    chip_windows = multiple_files_count_reads_in_windows(args.treatment, args)
    input_windows = multiple_files_count_reads_in_windows(args.control, args)
    # print("merging chip" * 10, file=stderr)
    chip_merged = _merge_files(chip_windows.values(), args.number_cores)
    # print("merging input" * 10, file=stderr)
    input_merged = _merge_files(input_windows.values(), args.number_cores)
    chip_merged_sum = sum_columns(chip_merged)
    input_merged_sum = sum_columns(input_merged)
    nb_chip_reads = get_total_number_of_reads(chip_merged_sum)
    print(nb_chip_reads)
    nb_input_reads = get_total_number_of_reads(input_merged_sum)
    merged_dfs = merge_chip_and_input(chip_merged_sum, input_merged_sum,
                                      args.number_cores)
    score_threshold, island_enriched_threshold, average_window_readcount = \
        compute_background_probabilities(nb_chip_reads, args)
    dfs = []  # type: Iterable[pd.DataFrame]
    dfs = count_to_pvalue(merged_dfs, island_enriched_threshold,
                          average_window_readcount, args.number_cores)
    dfs = find_islands(dfs, score_threshold, args)
    logging.info("Done finding islands.")
    logging.info("Concating dfs.")
    df = pd.concat([df for df in dfs if not df.empty])
    logging.info("Labeling island bins.")
    logging.info("Computing FDR.")
    df = compute_fdr(df, nb_chip_reads, nb_input_reads, args)
    # Just in case some ints got promoted to float somewhere
    df[["Start", "End", "ChIP", "Input"]] = df[["Start", "End", "ChIP", "Input"
                                                ]].astype(int)
    # redundancy in below code
    outfile = args.outfile if args.outfile else stdout
    if args.outfile:
        # write the command-line header first, then append the data below
        with open(outfile, "w+") as h:
            print("# epic " + " ".join(argv[1:]), file=h)
    else:
        print("# epic " + " ".join(argv[1:]), file=stdout)
    df.to_csv(outfile, index=False, sep=" ", na_rep="NA", mode="a")
    if args.bed:
        df_to_bed(df).to_csv(args.bed, header=False, index=False, sep="\t")
    if (args.store_matrix or args.bigwig or args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig or args.individual_log2fc_bigwigs):
        write_matrix_files(chip_merged, input_merged, df, args)
    return df.reset_index(drop=True)  # only returns a value to simplify integration tests
def df_to_bed(df):
    # type: (pd.DataFrame) -> pd.DataFrame
    """Turn an epic result dataframe into a BED6-like dataframe.

    Columns: Chromosome, Start, End, FDR (name field), Score, Strand.
    Score is 100 * log2(ChIP/Input), clamped to the BED score range [0, 1000];
    Strand is always ".".
    """
    bed = df[["Chromosome", "Start", "End", "FDR"]].copy()
    bed.insert(4, "Score", log2(df.ChIP / df.Input) * 100)
    bed.loc[bed.Score > 1000, "Score"] = 1000
    bed.loc[bed.Score < 0, "Score"] = 0
    bed.insert(5, "Strand", ".")
    return bed
def sum_columns(dfs):
    # type: (Iterable[pd.DataFrame]) -> List[pd.DataFrame]
    """Collapse every count column of each dataframe into a single 'Count'
    column, keyed by (Chromosome, Bin)."""
    summed = []
    for frame in dfs:
        counts = frame.set_index(["Chromosome", "Bin"]).sum(axis=1)
        counts.name = "Count"
        summed.append(counts.reset_index())
    return summed
def multiple_files_count_reads_in_windows(bed_files, args):
    # type: (Iterable[str], Namespace) -> OrderedDict[str, List[pd.DataFrame]]
    """Use count_reads on multiple files and store result in dict.

    Untested since does the same thing as count reads."""
    bed_windows = OrderedDict()  # type: OrderedDict[str, List[pd.DataFrame]]
    for bed_file in bed_files:
        logging.info("Binning " + bed_file)
        if ".bedpe" in bed_file:
            # paired-end input uses its own binning routine
            chromosome_dfs = count_reads_in_windows_paired_end(bed_file, args)
        else:
            chromosome_dfs = count_reads_in_windows(bed_file, args)
        bed_windows[bed_file] = chromosome_dfs
    return bed_windows
|
biocore-ntnu/epic
|
epic/statistics/generate_cumulative_distribution.py
|
generate_cumulative_dist
|
python
|
def generate_cumulative_dist(island_expectations_d, total_length):
    # type: (Dict[int, float], int) -> float
    """Densify the sparse island-expectation dict, accumulate it from the
    tail into a cumulative distribution, and return the first score
    (index * BIN_SIZE) whose cumulative expectation is <= E_VALUE.
    """
    cumulative = [0.0] * (total_length + 1)
    partial_sum = 0.0

    # densify: one entry per score bin, 0 where the dict has no value
    island_expectations = []
    for i in range(len(cumulative)):
        if i in island_expectations_d:
            island_expectations.append(island_expectations_d[i])
        else:
            island_expectations.append(0)

    # accumulate from the tail so cumulative[i] holds the sum of
    # expectations at index i and above
    for index in range(1, len(island_expectations) + 1):
        complimentary = len(island_expectations) - index
        partial_sum += island_expectations[complimentary]
        cumulative[complimentary] = partial_sum

    # move to function call
    # NOTE(review): if no cumulative value ever drops to E_VALUE,
    # score_threshold is never bound and the return raises NameError --
    # confirm callers guarantee the expectations decay below E_VALUE.
    for index in range(len(cumulative)):
        if cumulative[index] <= E_VALUE:
            score_threshold = index * BIN_SIZE
            break
    return score_threshold
|
Generate the cumulative distribution of island expectations and return the score threshold at which the cumulative expectation drops to the E-value.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/statistics/generate_cumulative_distribution.py#L5-L32
| null |
from epic.config.constants import E_VALUE, BIN_SIZE
from typing import Sequence, List
|
biocore-ntnu/epic
|
epic/statistics/compute_values_needed_for_recurrence.py
|
compute_enriched_threshold
|
python
|
def compute_enriched_threshold(average_window_readcount):
    # type: (float) -> int
    """Compute the minimum number of tags required in a window for an
    island to be enriched.

    Walks up the Poisson distribution with mean *average_window_readcount*
    until the remaining tail probability is <= WINDOW_P_VALUE.
    """
    current_threshold, survival_function = 0, 1
    for current_threshold in count(start=0, step=1):
        # subtract pmf terms; survival_function = P(X > current_threshold)
        survival_function -= poisson.pmf(current_threshold,
                                         average_window_readcount)
        if survival_function <= WINDOW_P_VALUE:
            break
    island_enriched_threshold = current_threshold + 1

    return island_enriched_threshold
|
Computes the minimum number of tags required in window for an island to be enriched.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/statistics/compute_values_needed_for_recurrence.py#L7-L22
| null |
from itertools import count
from scipy.stats import poisson
from epic.config.constants import WINDOW_P_VALUE
def compute_gap_factor(island_enriched_threshold,
                       gap_intervals_allowed, poisson_distribution_parameter):
    # type: (int, int, float) -> float
    """Scaling factor for islands that may contain up to
    *gap_intervals_allowed* gap windows: 1 (no gap) plus the powers of the
    single-gap probability up to that count."""
    gap_probability = single_gap_factor(island_enriched_threshold,
                                        poisson_distribution_parameter)
    series = sum(gap_probability ** power
                 for power in range(1, gap_intervals_allowed + 1))
    return 1.0 + series
def single_gap_factor(island_enriched_threshold,
                      poisson_distribution_parameter):
    # type: (int, float) -> float
    """Probability that a window holds fewer than
    *island_enriched_threshold* tags (sum of Poisson pmf terms below the
    threshold)."""
    return sum(poisson.pmf(i, poisson_distribution_parameter)
               for i in range(island_enriched_threshold))
def compute_boundary(island_enriched_threshold, gap_intervals_allowed,
                     average):
    # type: (int, int, float) -> float
    """Boundary score for an island: the single-gap probability raised to
    (gap_intervals_allowed + 1), squared for the start and end boundaries."""
    one_boundary = single_gap_factor(island_enriched_threshold,
                                     average) ** (gap_intervals_allowed + 1)
    return one_boundary * one_boundary
|
biocore-ntnu/epic
|
epic/statistics/compute_poisson.py
|
_factln
|
python
|
def _factln(num):
# type: (int) -> float
if num < 20:
log_factorial = log(factorial(num))
else:
log_factorial = num * log(num) - num + log(num * (1 + 4 * num * (
1 + 2 * num))) / 6.0 + log(pi) / 2
return log_factorial
|
Computes the log-factorial exactly for tractable numbers, and uses Ramanujan's approximation otherwise.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/statistics/compute_poisson.py#L6-L18
| null |
from math import log, factorial, pi, exp
from epic.utils.helper_functions import lru_cache
@lru_cache()
def _poisson(i, average):
    # type: (int, float) -> float
    """Poisson pmf P(X = i) for mean *average*, computed in log space via
    _factln to avoid overflowing the factorial for large i.

    Fix: the decorator was applied twice (@lru_cache() stacked on itself),
    which double-wraps the function for no benefit; a single cache layer is
    equivalent.
    """
    exponent = -average + i * log(average) - _factln(i)
    return exp(exponent)
|
biocore-ntnu/epic
|
epic/merge/merge_helpers.py
|
add_new_enriched_bins_matrixes
|
python
|
def add_new_enriched_bins_matrixes(region_files, dfs, bin_size):
    """Add enriched bins based on bed files.

    There is no way to find the correspondence between region file and matrix
    file, but it does not matter.
    """
    # drop any previously-added Enriched_* columns before re-adding
    dfs = _remove_epic_enriched(dfs)

    names = ["Enriched_" + os.path.basename(r) for r in region_files]
    regions = region_files_to_bins(region_files, names, bin_size)

    new_dfs = OrderedDict()

    assert len(regions.columns) == len(dfs)
    # pair the i-th region column with the i-th matrix (order-based match)
    for region, (n, df) in zip(regions, dfs.items()):
        region_col = regions[region]
        # outer join so bins present only in the region file are kept
        df = df.join(region_col, how="outer").fillna(0)
        new_dfs[n] = df

    return new_dfs
|
Add enriched bins based on bed files.
There is no way to find the correspondence between region file and matrix
file, but it does not matter.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/merge/merge_helpers.py#L51-L75
|
[
"def _remove_epic_enriched(dfs):\n\n new_dfs = OrderedDict()\n for n, df in dfs.items():\n bad_cols = [c for c in df.columns if \"Enriched_\" in c]\n df = df.drop(bad_cols, axis=1)\n new_dfs[n] = df\n\n return new_dfs\n",
"def region_files_to_bins(region_files, names, bin_size):\n\n region_files = [(name, pd.read_table(r, sep=\"\\s+\", usecols=[0, 1, 2], header=None,\n names=[\"Chromosome\", \"Start\", \"End\"])) for name, r in zip(names, region_files)]\n bins = [compute_bins(df, bin_size, name) for name, df in region_files]\n regions = merge_bed_bins(bins)\n\n return regions\n"
] |
import os
from functools import reduce
import logging
from collections import OrderedDict
try: # py3
from math import gcd
except:
from fractions import gcd
import pandas as pd
from epic.merge.compute_bed_bins import compute_bins, merge_bed_bins
def compute_bin_size(dfs):
    """Infer the common bin size of the matrixes as the gcd of the first
    100000 'Bin' index values of each dataframe; assert all files agree."""
    per_file_sizes = [
        reduce(gcd, df.head(100000).index.get_level_values("Bin").astype(int))
        for df in dfs.values()
    ]
    assert len(set(per_file_sizes)) == 1, \
        "Matrixes have different bin sizes: " + str(per_file_sizes)
    bin_size = per_file_sizes.pop()
    logging.info("Bin size: " + str(bin_size))
    return bin_size
def _remove_epic_enriched(dfs):
new_dfs = OrderedDict()
for n, df in dfs.items():
bad_cols = [c for c in df.columns if "Enriched_" in c]
df = df.drop(bad_cols, axis=1)
new_dfs[n] = df
return new_dfs
def region_files_to_bins(region_files, names, bin_size):
region_files = [(name, pd.read_table(r, sep="\s+", usecols=[0, 1, 2], header=None,
names=["Chromosome", "Start", "End"])) for name, r in zip(names, region_files)]
bins = [compute_bins(df, bin_size, name) for name, df in region_files]
regions = merge_bed_bins(bins)
return regions
|
biocore-ntnu/epic
|
epic/windows/count/merge_chromosome_dfs.py
|
merge_chromosome_dfs
|
python
|
def merge_chromosome_dfs(df_tuple):
# type: (Tuple[pd.DataFrame, pd.DataFrame]) -> pd.DataFrame
plus_df, minus_df = df_tuple
index_cols = "Chromosome Bin".split()
count_column = plus_df.columns[0]
if plus_df.empty:
return return_other(minus_df, count_column, index_cols)
if minus_df.empty:
return return_other(plus_df, count_column, index_cols)
# sum duplicate bins
# TODO: why are there duplicate bins here in the first place?
plus_df = plus_df.groupby(index_cols).sum()
minus_df = minus_df.groupby(index_cols).sum()
# first sum the two bins from each strand
df = pd.concat([plus_df, minus_df], axis=1).fillna(0).sum(axis=1)
df = df.reset_index().sort_values(by="Bin")
df.columns = ["Chromosome", "Bin", count_column]
df = df.sort_values(["Chromosome", "Bin"])
df[["Bin", count_column]] = df[["Bin", count_column]].astype(int32)
df = df[[count_column, "Chromosome", "Bin"]]
return df.reset_index(drop=True)
|
Merges data from the two strands into strand-agnostic counts.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/windows/count/merge_chromosome_dfs.py#L5-L32
|
[
"def return_other(df, count_column, index_cols):\n # type: (pd.DataFrame, Any, Sequence[Any]) -> pd.DataFrame\n\n df[[count_column, \"Bin\"]] = df[[count_column, \"Bin\"]].astype(int32)\n df = df.groupby(index_cols).sum().reset_index()\n return df[[count_column, \"Chromosome\", \"Bin\"]]\n"
] |
import pandas as pd
from numpy import int32
from typing import Any, Tuple, Sequence
def return_other(df, count_column, index_cols):
# type: (pd.DataFrame, Any, Sequence[Any]) -> pd.DataFrame
df[[count_column, "Bin"]] = df[[count_column, "Bin"]].astype(int32)
df = df.groupby(index_cols).sum().reset_index()
return df[[count_column, "Chromosome", "Bin"]]
# multiprocessing.pool.RemoteTraceback:
# """
# Traceback (most recent call last):
# File "/local/home/endrebak/anaconda3/lib/python3.5/site-packages/joblib-0.9.4-py3.5.egg/joblib/parallel.py", line 130, in __call__
# return self.func(*args, **kwargs)
# File "/local/home/endrebak/anaconda3/lib/python3.5/site-packages/joblib-0.9.4-py3.5.egg/joblib/parallel.py", line 72, in __call__
# return [func(*args, **kwargs) for func, args, kwargs in self.items]
# File "/local/home/endrebak/anaconda3/lib/python3.5/site-packages/joblib-0.9.4-py3.5.egg/joblib/parallel.py", line 72, in <listcomp>
# return [func(*args, **kwargs) for func, args, kwargs in self.items]
# File "/local/home/endrebak/anaconda3/lib/python3.5/site-packages/bioepic-0.0.9-py3.5.egg/epic/windows/count/merge_chromosome_dfs.py", line 21, in merge_chromosome_dfs
# df = pd.concat([plus_df, minus_df], axis=1).fillna(0).sum(axis=1)
# File "/local/home/endrebak/anaconda3/lib/python3.5/site-packages/pandas/tools/merge.py", line 846, in concat
# return op.get_result()
# File "/local/home/endrebak/anaconda3/lib/python3.5/site-packages/pandas/tools/merge.py", line 1031, in get_result
# indexers[ax] = obj_labels.reindex(new_labels)[1]
# File "/local/home/endrebak/anaconda3/lib/python3.5/site-packages/pandas/indexes/multi.py", line 1422, in reindex
# raise Exception("cannot handle a non-unique multi-index!")
# Exception: cannot handle a non-unique multi-index!
|
biocore-ntnu/epic
|
epic/windows/count/remove_out_of_bounds_bins.py
|
remove_out_of_bounds_bins
|
python
|
def remove_out_of_bounds_bins(df, chromosome_size):
# type: (pd.DataFrame, int) -> pd.DataFrame
# The dataframe is empty and contains no bins out of bounds
if "Bin" not in df:
return df
df = df.drop(df[df.Bin > chromosome_size].index)
return df.drop(df[df.Bin < 0].index)
|
Remove all reads that were shifted outside of the genome endpoints.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/windows/count/remove_out_of_bounds_bins.py#L3-L13
| null |
import pandas as pd
def remove_bins_with_ends_out_of_bounds(df, chromosome_size,
window_size):
# type: (pd.DataFrame, int, int) -> pd.DataFrame
"""Remove all reads that were shifted outside of the genome endpoints."""
# The dataframe is empty and contains no bins out of bounds
# print(df.head(2))
# print(chromosome_size)
# print(window_size)
out_of_bounds = df[df.index.get_level_values("Bin") + window_size >
chromosome_size].index
# print(len(out_of_bounds))
df = df.drop(out_of_bounds)
return df
# dfms = Parallel(n_jobs=args.number_cores)(
# delayed(_create_matrixes)(chromosome, chip, input, islands)
# for chromosome in all_chromosomes)
|
biocore-ntnu/epic
|
epic/windows/count/remove_out_of_bounds_bins.py
|
remove_bins_with_ends_out_of_bounds
|
python
|
def remove_bins_with_ends_out_of_bounds(df, chromosome_size,
window_size):
# type: (pd.DataFrame, int, int) -> pd.DataFrame
# The dataframe is empty and contains no bins out of bounds
# print(df.head(2))
# print(chromosome_size)
# print(window_size)
out_of_bounds = df[df.index.get_level_values("Bin") + window_size >
chromosome_size].index
# print(len(out_of_bounds))
df = df.drop(out_of_bounds)
return df
|
Remove all reads that were shifted outside of the genome endpoints.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/windows/count/remove_out_of_bounds_bins.py#L16-L31
| null |
import pandas as pd
def remove_out_of_bounds_bins(df, chromosome_size):
# type: (pd.DataFrame, int) -> pd.DataFrame
"""Remove all reads that were shifted outside of the genome endpoints."""
# The dataframe is empty and contains no bins out of bounds
if "Bin" not in df:
return df
df = df.drop(df[df.Bin > chromosome_size].index)
return df.drop(df[df.Bin < 0].index)
# dfms = Parallel(n_jobs=args.number_cores)(
# delayed(_create_matrixes)(chromosome, chip, input, islands)
# for chromosome in all_chromosomes)
|
biocore-ntnu/epic
|
epic/bigwig/create_bigwigs.py
|
create_log2fc_bigwigs
|
python
|
def create_log2fc_bigwigs(matrix, outdir, args):
# type: (pd.DataFrame, str, Namespace) -> None
call("mkdir -p {}".format(outdir), shell=True)
genome_size_dict = args.chromosome_sizes
outpaths = []
for bed_file in matrix[args.treatment]:
outpath = join(outdir, splitext(basename(bed_file))[0] + "_log2fc.bw")
outpaths.append(outpath)
data = create_log2fc_data(matrix, args)
Parallel(n_jobs=args.number_cores)(delayed(_create_bigwig)(bed_column, outpath, genome_size_dict) for outpath, bed_column in zip(outpaths, data))
|
Create bigwigs from matrix.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/bigwig/create_bigwigs.py#L33-L47
|
[
"def create_log2fc_data(matrix, args):\n\n input_columns = matrix[args.control]\n input_rpkm_sum = (1e6 * input_columns / input_columns.sum()).sum(axis=1) / len(args.control)\n input_rpkm_sum[input_rpkm_sum == 0] = 1\n\n data = []\n for bed_file in matrix[args.treatment]:\n bed_column = matrix[bed_file]\n bed_column = 1e6 * bed_column / bed_column.sum()\n divided = bed_column / input_rpkm_sum\n divided.loc[divided == 0] = 0.01\n log2_fc_column = np.log2(divided)\n data.append(log2_fc_column)\n\n return data\n"
] |
import logging
import numpy as np
from os.path import join, basename, splitext, dirname
from subprocess import call
from argparse import Namespace
import pandas as pd
from typing import Any, Dict, Iterable, List
import pyBigWig
from joblib import Parallel, delayed
def create_log2fc_data(matrix, args):
input_columns = matrix[args.control]
input_rpkm_sum = (1e6 * input_columns / input_columns.sum()).sum(axis=1) / len(args.control)
input_rpkm_sum[input_rpkm_sum == 0] = 1
data = []
for bed_file in matrix[args.treatment]:
bed_column = matrix[bed_file]
bed_column = 1e6 * bed_column / bed_column.sum()
divided = bed_column / input_rpkm_sum
divided.loc[divided == 0] = 0.01
log2_fc_column = np.log2(divided)
data.append(log2_fc_column)
return data
def create_bigwigs(matrix, outdir, args):
# type: (pd.DataFrame, str, Namespace) -> None
"""Create bigwigs from matrix."""
call("mkdir -p {}".format(outdir), shell=True)
genome_size_dict = args.chromosome_sizes
outpaths, data = [], []
for bed_file in matrix:
outpath = join(outdir, splitext(basename(bed_file))[0] + ".bw")
outpaths.append(outpath)
bed_column = matrix[bed_file]
bed_column = 1e6 * bed_column / bed_column.sum()
data.append(bed_column)
Parallel(n_jobs=args.number_cores)(delayed(_create_bigwig)(bed_column, outpath, genome_size_dict) for outpath, bed_column in zip(outpaths, data))
def _to_int(l):
# type: (Iterable[Any]) -> List[int]
return [int(i) for i in l]
def _create_bigwig(bed_column, outpath, genome_size_dict):
# type: (pd.Series, str, Dict[str, int]) -> None
logging.info("Creating biwgwig " + outpath)
bed_column = bed_column.reset_index()
values = [float(f) for _, _, _, f in bed_column.values]
unique_chromosomes = list(bed_column.Chromosome.drop_duplicates())
chromosomes = list(bed_column.Chromosome)
starts = _to_int(list(bed_column.Bin))
ends = _to_int(list(bed_column.End + 1))
header = [(c, int(genome_size_dict[c])) for c in unique_chromosomes]
bw = pyBigWig.open(outpath, "w")
bw.addHeader(header)
bw.addEntries(chromosomes, starts, ends=ends, values=values)
bw.close()
def create_sum_bigwigs(matrix, args):
print(matrix.head().to_csv(sep=" "))
rpkm_matrix = 1e6 * matrix / matrix.sum()
number_chip_files = len(args.treatment)
number_input_files = len(args.control)
chip = rpkm_matrix[args.treatment].sum(axis=1) / number_chip_files
input = rpkm_matrix[args.control].sum(axis=1) / number_input_files
input_pseudo = input.copy()
input_pseudo.loc[input_pseudo == 0] = 1
chip_pseudo = chip.copy()
chip_pseudo.loc[chip_pseudo == 0] = 1
log2fc = np.log2(chip_pseudo / input_pseudo.values)
bigwigs_to_create = []
if args.chip_bigwig:
folder = dirname(args.chip_bigwig)
if folder:
call("mkdir -p {}".format(folder), shell=True)
bigwigs_to_create.append([args.chip_bigwig, chip])
if args.input_bigwig:
folder = dirname(args.input_bigwig)
if folder:
call("mkdir -p {}".format(folder), shell=True)
bigwigs_to_create.append([args.input_bigwig, input])
if args.log2fc_bigwig:
folder = dirname(args.log2fc_bigwig)
if folder:
call("mkdir -p {}".format(folder), shell=True)
bigwigs_to_create.append([args.log2fc_bigwig, log2fc])
Parallel(n_jobs=args.number_cores)(delayed(_create_bigwig)(bed_column, outpath, args.chromosome_sizes) for outpath, bed_column in bigwigs_to_create)
|
biocore-ntnu/epic
|
epic/statistics/add_to_island_expectations.py
|
add_to_island_expectations_dict
|
python
|
def add_to_island_expectations_dict(average_window_readcount,
current_max_scaled_score,
island_eligibility_threshold,
island_expectations, gap_contribution):
# type: ( float, int, float, Dict[int, float], float) -> Dict[int, float]
scaled_score = current_max_scaled_score + E_VALUE
for index in range(current_max_scaled_score + 1, scaled_score + 1):
island_expectation = 0.0
i = island_eligibility_threshold #i is the number of tags in the added window
current_island = int(round(index - compute_window_score(
i, average_window_readcount) / BIN_SIZE))
while (current_island >= 0):
if current_island in island_expectations:
island_expectation += _poisson(
i, average_window_readcount) * island_expectations[
current_island]
i += 1
current_island = int(round(index - compute_window_score(
i, average_window_readcount) / BIN_SIZE))
island_expectation *= gap_contribution
if island_expectation:
island_expectations[index] = island_expectation
return island_expectations
|
Can probably be heavily optimized.
Time required to run can be seen from logging info.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/statistics/add_to_island_expectations.py#L12-L41
| null |
WINDOW_P_VALUE = 0.20
BIN_SIZE = 0.001
E_VALUE = 1000
E_VALUE_THRESHOLD = E_VALUE * .0000001
from typing import Sequence
from epic.statistics.compute_window_score import compute_window_score
from epic.statistics.compute_poisson import _poisson
|
biocore-ntnu/epic
|
epic/scripts/effective_genome_size.py
|
effective_genome_size
|
python
|
def effective_genome_size(fasta, read_length, nb_cores, tmpdir="/tmp"):
# type: (str, int, int, str) -> None
idx = Fasta(fasta)
genome_length = sum([len(c) for c in idx])
logging.info("Temporary directory: " + tmpdir)
logging.info("File analyzed: " + fasta)
logging.info("Genome length: " + str(genome_length))
print("File analyzed: ", fasta)
print("Genome length: ", genome_length)
chromosomes = ", ".join([c.name for c in idx])
if "_" in chromosomes:
print("Warning. The following chromosomes are part of your genome:\n",
chromosomes.replace(">", "") + "\n",
file=sys.stderr)
print(
"You probably want to remove all chromosomes in your fasta containing '_' for the effective genome size computation to be accurate.",
file=sys.stderr)
# if tmpdir is None:
# try:
# tmpdir = os.environ['TMPDIR']
# except KeyError:
# tmpdir = '/tmp'
output_file = os.path.join(tmpdir, '{1}.jf'.format(read_length,
basename(fasta)))
atexit.register(
lambda: call("rm {output_file}".format(output_file=output_file), shell=True))
call(
"jellyfish count -t {nb_cores} -m {read_length} -s {genome_length} -L 1 -U 1 --out-counter-len 1 --counter-len 1 {fasta} -o {output_file}".format(
**vars()),
shell=True)
stats = check_output("jellyfish stats {output_file}".format(
output_file=output_file),
shell=True)
unique_kmers = int(stats.split()[1])
effective_genome_size = unique_kmers / genome_length
logging.info("Number unique {read_length}-mers: ".format(
read_length=read_length) + str(unique_kmers))
logging.info("Effective genome size: " + str(effective_genome_size))
print("Number unique {read_length}-mers: ".format(read_length=read_length),
unique_kmers)
print("Effective genome size: ", effective_genome_size)
assert effective_genome_size < 1, "Something wrong happened, effective genome size over 1!"
|
Compute effective genome size for genome.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/scripts/effective_genome_size.py#L15-L68
| null |
from __future__ import print_function, division
import sys
import atexit
from subprocess import call, check_output
import os
from os.path import basename
import logging
from pyfaidx import Fasta
from epic.config import logging_settings
|
biocore-ntnu/epic
|
epic/matrixes/matrixes.py
|
create_matrixes
|
python
|
def create_matrixes(chip, input, df, args):
# type: (Iterable[pd.DataFrame], Iterable[pd.DataFrame], pd.DataFrame, Namespace) -> List[pd.DataFrame]
"Creates matrixes which can be written to file as is (matrix) or as bedGraph."
genome = args.chromosome_sizes
chip = put_dfs_in_chromosome_dict(chip)
input = put_dfs_in_chromosome_dict(input)
all_chromosomes = natsorted(set(list(chip.keys()) + list(input.keys())))
# print("df1\n", df, file=sys.stderr)
islands = enriched_bins(df, args)
# print("islands1\n", islands, file=sys.stderr)
logging.info("Creating matrixes from count data.")
dfms = Parallel(n_jobs=args.number_cores)(delayed(_create_matrixes)(
chromosome, chip, input, islands, genome[chromosome],
args.window_size) for chromosome in all_chromosomes)
return dfms
|
Creates matrixes which can be written to file as is (matrix) or as bedGraph.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/matrixes/matrixes.py#L148-L168
|
[
"def put_dfs_in_chromosome_dict(dfs):\n # type: (Iterable[pd.DataFrame]) -> Dict[str, pd.DataFrame]\n\n chromosome_dict = {} # type: Dict[str, pd.DataFrame]\n for df in dfs:\n\n if df.empty:\n continue\n\n chromosome = df.head(1).Chromosome.values[0]\n chromosome_dict[chromosome] = df\n\n return chromosome_dict\n",
"def enriched_bins(df, args):\n # type: (pd.DataFrame, Namespace) -> pd.DataFrame\n\n df = df.loc[df.FDR < args.false_discovery_rate_cutoff]\n\n idx_rowdicts = []\n for _, row in df.iterrows():\n for bin in range(\n int(row.Start), int(row.End), int(args.window_size)):\n idx_rowdicts.append({\"Chromosome\": row.Chromosome,\n \"Bin\": bin,\n \"Enriched\": 1})\n islands = pd.DataFrame.from_dict(idx_rowdicts)\n islands.loc[:, \"Chromosome\"].astype(\"category\")\n islands.loc[:, \"Bin\"].astype(int)\n\n return islands.set_index(\"Chromosome Bin\".split())\n"
] |
import sys
import logging
from os.path import dirname, join, basename
from subprocess import call
from itertools import chain
from typing import Iterable, Sequence, Tuple
from argparse import Namespace
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from natsort import natsorted
from epic.windows.count.remove_out_of_bounds_bins import remove_bins_with_ends_out_of_bounds
from epic.config.genomes import get_genome_size_file
def write_matrix_files(chip_merged, input_merged, df, args):
# type: (Dict[str, pd.DataFrame], Dict[str, pd.DataFrame], pd.DataFrame, Namespace) -> None
matrixes = create_matrixes(chip_merged, input_merged, df, args)
matrix = pd.concat(matrixes, axis=0, sort=False)
matrix = matrix.dropna()
matrix = matrix.set_index("Chromosome Bin".split())
if args.store_matrix:
print_matrixes(matrix, args)
if args.bigwig or args.individual_log2fc_bigwigs or args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig:
matrix = matrix.astype(np.float64)
matrix = matrix.drop("Enriched", axis=1)
ends = pd.Series(matrix.index.get_level_values("Bin"), index=matrix.index) + (int(args.window_size) - 1)
matrix.insert(0, "End", ends)
matrix = matrix.set_index("End", append=True)
matrix = matrix.sort_index(level="Chromosome")
# TODO: remove out of bounds bins
if args.bigwig:
# defer initialization so not run during travis
from epic.bigwig.create_bigwigs import create_bigwigs
create_bigwigs(matrix, args.bigwig, args)
if args.individual_log2fc_bigwigs:
# defer initialization so not run during travis
from epic.bigwig.create_bigwigs import create_log2fc_bigwigs
create_log2fc_bigwigs(matrix, args.individual_log2fc_bigwigs, args)
if args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig:
# defer initialization so not run during travis
from epic.bigwig.create_bigwigs import create_sum_bigwigs
create_sum_bigwigs(matrix, args)
def _create_matrixes(chromosome, chip, input, islands,
chromosome_size, window_size):
# type: (str, Dict[str, pd.DataFrame], Dict[str, pd.DataFrame], pd.DataFrame, int, int) -> pd.DataFrame
# print("islands2\n" + islands.head(10).to_csv(sep=" "), file=sys.stderr)
chip_df = get_chromosome_df(chromosome, chip)
input_df = get_chromosome_df(chromosome, input)
try:
chromo_islands = islands.xs(chromosome, drop_level=False)
except KeyError:
return pd.DataFrame(index="Chromosome Bin".split())
chip_df["Chromosome"] = chip_df["Chromosome"].astype("category")
# START workaround
# Should ideally have been just one line: chip_df["Bin"] = chip_df["Bin"].astype(int)
# Workaround for the following error:
# ValueError: assignment destination is read-only
bins = chip_df["Bin"].astype(int)
chip_df = chip_df.drop("Bin", axis=1)
chip_df.insert(0, "Bin", bins)
# print("chip_df1\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# END workaround
chip_df = chip_df.set_index("Chromosome Bin".split())
# print("chilp_df", chip_df.head())
# removing duplicates to avoid joining problems
chip_df = chip_df[~chip_df.index.duplicated(keep='first')]
chromo_islands = chromo_islands[~chromo_islands.index.duplicated(keep='first')]
# chromo_islands.to_csv("chromo_islands.csv", sep=" ")
# chip_df.to_csv("chip_df.csv", sep=" ")
# print(chromo_islands.head(20).to_csv(sep=" "), file=sys.stderr)
# print(chromosome)
# print("chip_df2\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# print(chromo_islands.head(10).to_csv(sep=" "), file=sys.stderr)
chip_df = chromo_islands.join(chip_df, how="outer").fillna(0)
# print("chip_df3\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# print("chip_df", chip_df.tail().to_csv(sep=" "), file=sys.stderr)
chip_df = chip_df[~chip_df.index.duplicated(keep='first')]
# print("chip_df4\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# print("chip_df", chip_df.tail().to_csv(sep=" "), file=sys.stderr)
input_df["Chromosome"] = input_df["Chromosome"].astype("category")
# START workaround
# Should ideally have been just one line: input_df["Bin"] = input_df["Bin"].astype(int)
# Workaround for the following error:
# ValueError: assignment destination is read-only
bins = input_df["Bin"].astype(int)
input_df = input_df.drop("Bin", axis=1)
input_df.insert(0, "Bin", bins)
input_df = input_df.set_index("Chromosome Bin".split())
# END workaround
input_df = input_df[~input_df.index.duplicated(keep='first')]
dfm = chip_df.join(input_df, how="outer", sort=False).fillna(0)
# print("dfm1\n", dfm.head(10).to_csv(sep=" "), file=sys.stderr)
dfm = remove_bins_with_ends_out_of_bounds(dfm, chromosome_size,
window_size)
dfm = dfm[~dfm.index.duplicated(keep='first')]
# print("dfm2\n", dfm.head(10).to_csv(sep=" "), file=sys.stderr)
# print(dfm.tail().to_csv(sep=" "), file=sys.stderr)
# print(dfm.head(), file=sys.stderr)
dfm.reset_index(inplace=True)
return dfm
def print_matrixes(matrix, args):
# type: (Iterable[pd.DataFrame], Namespace) -> None
outpath = args.store_matrix
dir = dirname(outpath)
if dir:
call("mkdir -p {}".format(dir), shell=True)
logging.info("Writing data matrix to file: " + outpath)
matrix.to_csv(outpath, sep=" ", header=True, compression="gzip")
def get_island_bins(df, window_size, genome, args):
# type: (pd.DataFrame, int, str, Namespace) -> Dict[str, Set[int]]
"""Finds the enriched bins in a df."""
# need these chromos because the df might not have islands in all chromos
chromosomes = natsorted(list(args.chromosome_sizes))
chromosome_island_bins = {} # type: Dict[str, Set[int]]
df_copy = df.reset_index(drop=False)
for chromosome in chromosomes:
cdf = df_copy.loc[df_copy.Chromosome == chromosome]
if cdf.empty:
chromosome_island_bins[chromosome] = set()
else:
island_starts_ends = zip(cdf.Start.values.tolist(),
cdf.End.values.tolist())
island_bins = chain(*[range(
int(start), int(end), window_size)
for start, end in island_starts_ends])
chromosome_island_bins[chromosome] = set(island_bins)
return chromosome_island_bins
def put_dfs_in_dict(dfs):
# type: (Iterable[pd.DataFrame]) -> Dict[str, pd.DataFrame]
sample_dict = {}
for df in dfs:
if df.empty:
continue
chromosome = df.head(1).Chromosome.values[0]
sample_dict[chromosome] = df
return sample_dict
def put_dfs_in_chromosome_dict(dfs):
# type: (Iterable[pd.DataFrame]) -> Dict[str, pd.DataFrame]
chromosome_dict = {} # type: Dict[str, pd.DataFrame]
for df in dfs:
if df.empty:
continue
chromosome = df.head(1).Chromosome.values[0]
chromosome_dict[chromosome] = df
return chromosome_dict
def get_chromosome_df(chromosome, df_dict):
# type: (str, Dict[str, pd.DataFrame]) -> pd.DataFrame
if chromosome in df_dict:
df = df_dict[chromosome]
else:
df = pd.DataFrame(columns="Chromosome Bin".split())
# print(chromosome, file=sys.stderr)
# print(df, file=sys.stderr)
return df
def enriched_bins(df, args):
# type: (pd.DataFrame, Namespace) -> pd.DataFrame
df = df.loc[df.FDR < args.false_discovery_rate_cutoff]
idx_rowdicts = []
for _, row in df.iterrows():
for bin in range(
int(row.Start), int(row.End), int(args.window_size)):
idx_rowdicts.append({"Chromosome": row.Chromosome,
"Bin": bin,
"Enriched": 1})
islands = pd.DataFrame.from_dict(idx_rowdicts)
islands.loc[:, "Chromosome"].astype("category")
islands.loc[:, "Bin"].astype(int)
return islands.set_index("Chromosome Bin".split())
|
biocore-ntnu/epic
|
epic/matrixes/matrixes.py
|
get_island_bins
|
python
|
def get_island_bins(df, window_size, genome, args):
# type: (pd.DataFrame, int, str, Namespace) -> Dict[str, Set[int]]
# need these chromos because the df might not have islands in all chromos
chromosomes = natsorted(list(args.chromosome_sizes))
chromosome_island_bins = {} # type: Dict[str, Set[int]]
df_copy = df.reset_index(drop=False)
for chromosome in chromosomes:
cdf = df_copy.loc[df_copy.Chromosome == chromosome]
if cdf.empty:
chromosome_island_bins[chromosome] = set()
else:
island_starts_ends = zip(cdf.Start.values.tolist(),
cdf.End.values.tolist())
island_bins = chain(*[range(
int(start), int(end), window_size)
for start, end in island_starts_ends])
chromosome_island_bins[chromosome] = set(island_bins)
return chromosome_island_bins
|
Finds the enriched bins in a df.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/matrixes/matrixes.py#L184-L205
| null |
import sys
import logging
from os.path import dirname, join, basename
from subprocess import call
from itertools import chain
from typing import Iterable, Sequence, Tuple
from argparse import Namespace
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from natsort import natsorted
from epic.windows.count.remove_out_of_bounds_bins import remove_bins_with_ends_out_of_bounds
from epic.config.genomes import get_genome_size_file
def write_matrix_files(chip_merged, input_merged, df, args):
# type: (Dict[str, pd.DataFrame], Dict[str, pd.DataFrame], pd.DataFrame, Namespace) -> None
matrixes = create_matrixes(chip_merged, input_merged, df, args)
matrix = pd.concat(matrixes, axis=0, sort=False)
matrix = matrix.dropna()
matrix = matrix.set_index("Chromosome Bin".split())
if args.store_matrix:
print_matrixes(matrix, args)
if args.bigwig or args.individual_log2fc_bigwigs or args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig:
matrix = matrix.astype(np.float64)
matrix = matrix.drop("Enriched", axis=1)
ends = pd.Series(matrix.index.get_level_values("Bin"), index=matrix.index) + (int(args.window_size) - 1)
matrix.insert(0, "End", ends)
matrix = matrix.set_index("End", append=True)
matrix = matrix.sort_index(level="Chromosome")
# TODO: remove out of bounds bins
if args.bigwig:
# defer initialization so not run during travis
from epic.bigwig.create_bigwigs import create_bigwigs
create_bigwigs(matrix, args.bigwig, args)
if args.individual_log2fc_bigwigs:
# defer initialization so not run during travis
from epic.bigwig.create_bigwigs import create_log2fc_bigwigs
create_log2fc_bigwigs(matrix, args.individual_log2fc_bigwigs, args)
if args.chip_bigwig or args.input_bigwig or args.log2fc_bigwig:
# defer initialization so not run during travis
from epic.bigwig.create_bigwigs import create_sum_bigwigs
create_sum_bigwigs(matrix, args)
def _create_matrixes(chromosome, chip, input, islands,
chromosome_size, window_size):
# type: (str, Dict[str, pd.DataFrame], Dict[str, pd.DataFrame], pd.DataFrame, int, int) -> pd.DataFrame
# print("islands2\n" + islands.head(10).to_csv(sep=" "), file=sys.stderr)
chip_df = get_chromosome_df(chromosome, chip)
input_df = get_chromosome_df(chromosome, input)
try:
chromo_islands = islands.xs(chromosome, drop_level=False)
except KeyError:
return pd.DataFrame(index="Chromosome Bin".split())
chip_df["Chromosome"] = chip_df["Chromosome"].astype("category")
# START workaround
# Should ideally have been just one line: chip_df["Bin"] = chip_df["Bin"].astype(int)
# Workaround for the following error:
# ValueError: assignment destination is read-only
bins = chip_df["Bin"].astype(int)
chip_df = chip_df.drop("Bin", axis=1)
chip_df.insert(0, "Bin", bins)
# print("chip_df1\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# END workaround
chip_df = chip_df.set_index("Chromosome Bin".split())
# print("chilp_df", chip_df.head())
# removing duplicates to avoid joining problems
chip_df = chip_df[~chip_df.index.duplicated(keep='first')]
chromo_islands = chromo_islands[~chromo_islands.index.duplicated(keep='first')]
# chromo_islands.to_csv("chromo_islands.csv", sep=" ")
# chip_df.to_csv("chip_df.csv", sep=" ")
# print(chromo_islands.head(20).to_csv(sep=" "), file=sys.stderr)
# print(chromosome)
# print("chip_df2\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# print(chromo_islands.head(10).to_csv(sep=" "), file=sys.stderr)
chip_df = chromo_islands.join(chip_df, how="outer").fillna(0)
# print("chip_df3\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# print("chip_df", chip_df.tail().to_csv(sep=" "), file=sys.stderr)
chip_df = chip_df[~chip_df.index.duplicated(keep='first')]
# print("chip_df4\n", chip_df.head(10).to_csv(sep=" "), file=sys.stderr)
# print("chip_df", chip_df.tail().to_csv(sep=" "), file=sys.stderr)
input_df["Chromosome"] = input_df["Chromosome"].astype("category")
# START workaround
# Should ideally have been just one line: input_df["Bin"] = input_df["Bin"].astype(int)
# Workaround for the following error:
# ValueError: assignment destination is read-only
bins = input_df["Bin"].astype(int)
input_df = input_df.drop("Bin", axis=1)
input_df.insert(0, "Bin", bins)
input_df = input_df.set_index("Chromosome Bin".split())
# END workaround
input_df = input_df[~input_df.index.duplicated(keep='first')]
dfm = chip_df.join(input_df, how="outer", sort=False).fillna(0)
# print("dfm1\n", dfm.head(10).to_csv(sep=" "), file=sys.stderr)
dfm = remove_bins_with_ends_out_of_bounds(dfm, chromosome_size,
window_size)
dfm = dfm[~dfm.index.duplicated(keep='first')]
# print("dfm2\n", dfm.head(10).to_csv(sep=" "), file=sys.stderr)
# print(dfm.tail().to_csv(sep=" "), file=sys.stderr)
# print(dfm.head(), file=sys.stderr)
dfm.reset_index(inplace=True)
return dfm
def create_matrixes(chip, input, df, args):
# type: (Iterable[pd.DataFrame], Iterable[pd.DataFrame], pd.DataFrame, Namespace) -> List[pd.DataFrame]
"Creates matrixes which can be written to file as is (matrix) or as bedGraph."
genome = args.chromosome_sizes
chip = put_dfs_in_chromosome_dict(chip)
input = put_dfs_in_chromosome_dict(input)
all_chromosomes = natsorted(set(list(chip.keys()) + list(input.keys())))
# print("df1\n", df, file=sys.stderr)
islands = enriched_bins(df, args)
# print("islands1\n", islands, file=sys.stderr)
logging.info("Creating matrixes from count data.")
dfms = Parallel(n_jobs=args.number_cores)(delayed(_create_matrixes)(
chromosome, chip, input, islands, genome[chromosome],
args.window_size) for chromosome in all_chromosomes)
return dfms
def print_matrixes(matrix, args):
# type: (Iterable[pd.DataFrame], Namespace) -> None
outpath = args.store_matrix
dir = dirname(outpath)
if dir:
call("mkdir -p {}".format(dir), shell=True)
logging.info("Writing data matrix to file: " + outpath)
matrix.to_csv(outpath, sep=" ", header=True, compression="gzip")
def put_dfs_in_dict(dfs):
# type: (Iterable[pd.DataFrame]) -> Dict[str, pd.DataFrame]
sample_dict = {}
for df in dfs:
if df.empty:
continue
chromosome = df.head(1).Chromosome.values[0]
sample_dict[chromosome] = df
return sample_dict
def put_dfs_in_chromosome_dict(dfs):
# type: (Iterable[pd.DataFrame]) -> Dict[str, pd.DataFrame]
chromosome_dict = {} # type: Dict[str, pd.DataFrame]
for df in dfs:
if df.empty:
continue
chromosome = df.head(1).Chromosome.values[0]
chromosome_dict[chromosome] = df
return chromosome_dict
def get_chromosome_df(chromosome, df_dict):
# type: (str, Dict[str, pd.DataFrame]) -> pd.DataFrame
if chromosome in df_dict:
df = df_dict[chromosome]
else:
df = pd.DataFrame(columns="Chromosome Bin".split())
# print(chromosome, file=sys.stderr)
# print(df, file=sys.stderr)
return df
def enriched_bins(df, args):
# type: (pd.DataFrame, Namespace) -> pd.DataFrame
df = df.loc[df.FDR < args.false_discovery_rate_cutoff]
idx_rowdicts = []
for _, row in df.iterrows():
for bin in range(
int(row.Start), int(row.End), int(args.window_size)):
idx_rowdicts.append({"Chromosome": row.Chromosome,
"Bin": bin,
"Enriched": 1})
islands = pd.DataFrame.from_dict(idx_rowdicts)
islands.loc[:, "Chromosome"].astype("category")
islands.loc[:, "Bin"].astype(int)
return islands.set_index("Chromosome Bin".split())
|
biocore-ntnu/epic
|
epic/config/genomes.py
|
create_genome_size_dict
|
python
|
def create_genome_size_dict(genome):
# type: (str) -> Dict[str,int]
size_file = get_genome_size_file(genome)
size_lines = open(size_file).readlines()
size_dict = {}
for line in size_lines:
genome, length = line.split()
size_dict[genome] = int(length)
return size_dict
|
Creates genome size dict from string containing data.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/config/genomes.py#L29-L41
|
[
"def get_genome_size_file(genome):\n # type: (str) -> str\n\n genome_names = pkg_resources.resource_listdir(\"epic\", \"scripts/chromsizes\")\n name_dict = {n.lower().replace(\".chromsizes\", \"\"): n for n in genome_names}\n\n # No try/except here, because get_egs would already have failed if genome\n # did not exist\n genome_exact = name_dict[genome.lower()]\n\n return pkg_resources.resource_filename(\n \"epic\", \"scripts/chromsizes/{}\".format(genome_exact))\n"
] |
from natsort import natsorted
from collections import OrderedDict
import pkg_resources
import logging
from typing import Dict
from epic.config import logging_settings
from epic.utils.find_readlength import (find_readlength,
get_closest_readlength)
__author__ = "Endre Bakken Stovner https://github.com/endrebak/"
__license__ = "MIT"
def get_genome_size_file(genome):
# type: (str) -> str
genome_names = pkg_resources.resource_listdir("epic", "scripts/chromsizes")
name_dict = {n.lower().replace(".chromsizes", ""): n for n in genome_names}
# No try/except here, because get_egs would already have failed if genome
# did not exist
genome_exact = name_dict[genome.lower()]
return pkg_resources.resource_filename(
"epic", "scripts/chromsizes/{}".format(genome_exact))
def create_genome_size_dict_custom_genome(chromsizes):
# type: (str) -> OrderedDict[str, int]
chromosome_lengths = [l.split() for l in open(chromsizes).readlines()]
od = OrderedDict() # type: OrderedDict[str, int]
for c, l in natsorted(chromosome_lengths):
od[c] = int(l)
return od
def get_effective_genome_length(genome, read_length):
# type: (str, int) -> float
genome_names = pkg_resources.resource_listdir("epic",
"scripts/effective_sizes")
name_dict = {n.split("_")[0]: "".join(n.split("_")[:-1])
for n in genome_names}
try:
genome_exact = name_dict[genome.lower()]
egf = pkg_resources.resource_string( # type: ignore
"epic", "scripts/effective_sizes/{}_{}.txt".format(
genome_exact, read_length)).split()[-1].decode()
except KeyError:
genome_list = "\n".join(list(name_dict.keys()))
logging.error(
"Genome " + genome +
" not found.\n These are the available genomes: " + genome_list +
"\nIf yours is not there, please request it at github.com/endrebak/epic .")
genome_length = sum(create_genome_size_dict(genome).values())
logging.info("Using an effective genome fraction of {}.".format(egf))
assert float(
egf) < 1, "Something wrong happened, effective genome fraction over 1!"
egs = float(egf) * genome_length
return egs
|
biocore-ntnu/epic
|
epic/statistics/compute_score_threshold.py
|
compute_score_threshold
|
python
|
def compute_score_threshold(average_window_readcount,
island_enriched_threshold,
gap_contribution, boundary_contribution,
genome_length_in_bins):
# type: (float, int, float, float, float) -> float
required_p_value = poisson.pmf(island_enriched_threshold,
average_window_readcount)
prob = boundary_contribution * required_p_value
score = -log(required_p_value)
current_scaled_score = int(round(score / BIN_SIZE))
island_expectations_d = {} # type: Dict[int, float]
island_expectations_d[current_scaled_score] = prob * genome_length_in_bins
island_expectations_d[
0] = boundary_contribution * genome_length_in_bins / gap_contribution
current_max_scaled_score = current_scaled_score
interval = int(1 / BIN_SIZE)
partial_cumu = 0.0
logging.info("Finding the score required to consider an island enriched.")
while (partial_cumu > E_VALUE_THRESHOLD or partial_cumu < 1e-100):
current_scaled_score += interval
current_max_scaled_score = current_scaled_score - interval
# logging.debug(island_expectations_d)
if current_scaled_score > current_max_scaled_score:
# logging.debug(island_expectations_d)
island_expectations_d = add_to_island_expectations_dict(
average_window_readcount, current_max_scaled_score,
island_enriched_threshold, island_expectations_d,
gap_contribution)
partial_cumu = 0.0001
current_max_scaled_score += 1000
if max(island_expectations_d) > interval:
partial_cumu = sum(
[val
for idx, val in island_expectations_d.items()
if idx > current_max_scaled_score - interval])
else:
partial_cumu = sum(island_expectations_d.values())
logging.debug("Computing cumulative distribution.")
score_threshold = generate_cumulative_dist(island_expectations_d,
current_max_scaled_score + 1)
logging.info("Enriched score threshold for islands: " + str(
score_threshold))
return score_threshold
|
What does island_expectations do?
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/statistics/compute_score_threshold.py#L10-L67
|
[
"def add_to_island_expectations_dict(average_window_readcount,\n current_max_scaled_score,\n island_eligibility_threshold,\n island_expectations, gap_contribution):\n # type: ( float, int, float, Dict[int, float], float) -> Dict[int, float]\n \"\"\"Can probably be heavily optimized.\n Time required to run can be seen from logging info.\"\"\"\n\n scaled_score = current_max_scaled_score + E_VALUE\n for index in range(current_max_scaled_score + 1, scaled_score + 1):\n island_expectation = 0.0\n i = island_eligibility_threshold #i is the number of tags in the added window\n\n current_island = int(round(index - compute_window_score(\n i, average_window_readcount) / BIN_SIZE))\n\n while (current_island >= 0):\n\n if current_island in island_expectations:\n island_expectation += _poisson(\n i, average_window_readcount) * island_expectations[\n current_island]\n i += 1\n current_island = int(round(index - compute_window_score(\n i, average_window_readcount) / BIN_SIZE))\n island_expectation *= gap_contribution\n if island_expectation:\n island_expectations[index] = island_expectation\n\n return island_expectations\n",
"def generate_cumulative_dist(island_expectations_d, total_length):\n # type: (Dict[int, float], int) -> float\n \"\"\"\n Generate cumulative distribution: a list of tuples (bins, hist).\n \"\"\"\n\n cumulative = [0.0] * (total_length + 1)\n partial_sum = 0.0\n\n island_expectations = []\n for i in range(len(cumulative)):\n if i in island_expectations_d:\n island_expectations.append(island_expectations_d[i])\n else:\n island_expectations.append(0)\n\n for index in range(1, len(island_expectations) + 1):\n complimentary = len(island_expectations) - index\n partial_sum += island_expectations[complimentary]\n cumulative[complimentary] = partial_sum\n\n # move to function call\n for index in range(len(cumulative)):\n if cumulative[index] <= E_VALUE:\n score_threshold = index * BIN_SIZE\n break\n\n return score_threshold\n"
] |
import logging
from scipy.stats import poisson
from numpy import log
from epic.config.constants import BIN_SIZE, E_VALUE_THRESHOLD
from epic.statistics.generate_cumulative_distribution import generate_cumulative_dist
from epic.statistics.add_to_island_expectations import add_to_island_expectations_dict
|
biocore-ntnu/epic
|
epic/utils/find_readlength.py
|
find_readlength
|
python
|
def find_readlength(args):
# type: (Namespace) -> int
try:
bed_file = args.treatment[0]
except AttributeError:
bed_file = args.infiles[0]
filereader = "cat "
if bed_file.endswith(".gz") and search("linux", platform, IGNORECASE):
filereader = "zcat "
elif bed_file.endswith(".gz") and search("darwin", platform, IGNORECASE):
filereader = "gzcat "
elif bed_file.endswith(".bz2"):
filereader = "bzgrep "
command = filereader + "{} | head -10000".format(bed_file)
output = check_output(command, shell=True)
df = pd.read_table(
BytesIO(output),
header=None,
usecols=[1, 2],
sep="\t",
names=["Start", "End"])
readlengths = df.End - df.Start
mean_readlength = readlengths.mean()
median_readlength = readlengths.median()
max_readlength = readlengths.max()
min_readlength = readlengths.min()
logging.info((
"Used first 10000 reads of {} to estimate a median read length of {}\n"
"Mean readlength: {}, max readlength: {}, min readlength: {}.").format(
bed_file, median_readlength, mean_readlength, max_readlength,
min_readlength))
return median_readlength
|
Estimate length of reads based on 10000 first.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/utils/find_readlength.py#L16-L55
| null |
import logging
from sys import platform
from re import search, IGNORECASE
from io import BytesIO
from subprocess import check_output
from argparse import Namespace
import pandas as pd
from epic.config import logging_settings
__author__ = "Endre Bakken Stovner https://github.com/endrebak/"
__license__ = "MIT"
def get_closest_readlength(estimated_readlength):
# type: (int) -> int
"""Find the predefined readlength closest to the estimated readlength.
In the case of a tie, choose the shortest readlength."""
readlengths = [36, 50, 75, 100]
differences = [abs(r - estimated_readlength) for r in readlengths]
min_difference = min(differences)
index_of_min_difference = [i
for i, d in enumerate(differences)
if d == min_difference][0]
return readlengths[index_of_min_difference]
|
biocore-ntnu/epic
|
epic/utils/find_readlength.py
|
get_closest_readlength
|
python
|
def get_closest_readlength(estimated_readlength):
# type: (int) -> int
readlengths = [36, 50, 75, 100]
differences = [abs(r - estimated_readlength) for r in readlengths]
min_difference = min(differences)
index_of_min_difference = [i
for i, d in enumerate(differences)
if d == min_difference][0]
return readlengths[index_of_min_difference]
|
Find the predefined readlength closest to the estimated readlength.
In the case of a tie, choose the shortest readlength.
|
train
|
https://github.com/biocore-ntnu/epic/blob/ed0024939ec6182a0a39d59d845ff14a4889a6ef/epic/utils/find_readlength.py#L58-L71
| null |
import logging
from sys import platform
from re import search, IGNORECASE
from io import BytesIO
from subprocess import check_output
from argparse import Namespace
import pandas as pd
from epic.config import logging_settings
__author__ = "Endre Bakken Stovner https://github.com/endrebak/"
__license__ = "MIT"
def find_readlength(args):
# type: (Namespace) -> int
"""Estimate length of reads based on 10000 first."""
try:
bed_file = args.treatment[0]
except AttributeError:
bed_file = args.infiles[0]
filereader = "cat "
if bed_file.endswith(".gz") and search("linux", platform, IGNORECASE):
filereader = "zcat "
elif bed_file.endswith(".gz") and search("darwin", platform, IGNORECASE):
filereader = "gzcat "
elif bed_file.endswith(".bz2"):
filereader = "bzgrep "
command = filereader + "{} | head -10000".format(bed_file)
output = check_output(command, shell=True)
df = pd.read_table(
BytesIO(output),
header=None,
usecols=[1, 2],
sep="\t",
names=["Start", "End"])
readlengths = df.End - df.Start
mean_readlength = readlengths.mean()
median_readlength = readlengths.median()
max_readlength = readlengths.max()
min_readlength = readlengths.min()
logging.info((
"Used first 10000 reads of {} to estimate a median read length of {}\n"
"Mean readlength: {}, max readlength: {}, min readlength: {}.").format(
bed_file, median_readlength, mean_readlength, max_readlength,
min_readlength))
return median_readlength
|
hthiery/python-fritzhome
|
pyfritzhome/cli.py
|
list_all
|
python
|
def list_all(fritz, args):
devices = fritz.get_devices()
for device in devices:
print('#' * 30)
print('name=%s' % device.name)
print(' ain=%s' % device.ain)
print(' id=%s' % device.identifier)
print(' productname=%s' % device.productname)
print(' manufacturer=%s' % device.manufacturer)
print(" present=%s" % device.present)
print(" lock=%s" % device.lock)
print(" devicelock=%s" % device.device_lock)
if device.present is False:
continue
if device.has_switch:
print(" Switch:")
print(" switch_state=%s" % device.switch_state)
if device.has_switch:
print(" Powermeter:")
print(" power=%s" % device.power)
print(" energy=%s" % device.energy)
print(" voltage=%s" % device.voltage)
if device.has_temperature_sensor:
print(" Temperature:")
print(" temperature=%s" % device.temperature)
print(" offset=%s" % device.offset)
if device.has_thermostat:
print(" Thermostat:")
print(" battery_low=%s" % device.battery_low)
print(" battery_level=%s" % device.battery_level)
print(" actual=%s" % device.actual_temperature)
print(" target=%s" % device.target_temperature)
print(" comfort=%s" % device.comfort_temperature)
print(" eco=%s" % device.eco_temperature)
print(" window=%s" % device.window_open)
print(" summer=%s" % device.summer_active)
print(" holiday=%s" % device.holiday_active)
if device.has_alarm:
print(" Alert:")
print(" alert=%s" % device.alert_state)
|
Command that prints all device information.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/cli.py#L18-L61
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import argparse
try:
from version import __version__
except ImportError:
__version__ = 'dev'
from pyfritzhome import Fritzhome
_LOGGER = logging.getLogger(__name__)
def device_name(fritz, args):
"""Command that prints the device name."""
print(fritz.get_device_name(args.ain))
def device_presence(fritz, args):
"""Command that prints the device presence."""
print(int(fritz.get_device_present(args.ain)))
def device_statistics(fritz, args):
"""Command that prints the device statistics."""
stats = fritz.get_device_statistics(args.ain)
print(stats)
def switch_get(fritz, args):
"""Command that get the device switch state."""
print(fritz.get_switch_state(args.ain))
def switch_on(fritz, args):
"""Command that set the device switch state to on."""
fritz.set_switch_state_on(args.ain)
def switch_off(fritz, args):
"""Command that set the device switch state to off."""
fritz.set_switch_state_off(args.ain)
def switch_toggle(fritz, args):
"""Command that toggles the device switch state."""
fritz.set_switch_state_toggle(args.ain)
def main(args=None):
"""The main function."""
parser = argparse.ArgumentParser(
description='Fritz!Box Smarthome CLI tool.')
parser.add_argument('-v', action='store_true', dest='verbose',
help='be more verbose')
parser.add_argument('-f', '--fritzbox', type=str, dest='host',
help='Fritz!Box IP address', default='fritz.box')
parser.add_argument('-u', '--user', type=str, dest='user',
help='Username')
parser.add_argument('-p', '--password', type=str, dest='password',
help='Username')
parser.add_argument('-a', '--ain', type=str, dest='ain',
help='Actor Identification', default=None)
parser.add_argument('-V', '--version', action='version',
version='{version}'.format(version=__version__),
help='Print version')
_sub = parser.add_subparsers(title='Commands')
# list all devices
subparser = _sub.add_parser('list', help='List all available devices')
subparser.set_defaults(func=list_all)
# device
subparser = _sub.add_parser('device', help='Device/Actor commands')
_sub_switch = subparser.add_subparsers()
# device name
subparser = _sub_switch.add_parser('name', help='get the device name')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_name)
# device presence
subparser = _sub_switch.add_parser('present',
help='get the device presence')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_presence)
# device stats
subparser = _sub_switch.add_parser('stats',
help='get the device statistics')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_statistics)
# switch
subparser = _sub.add_parser('switch', help='Switch commands')
_sub_switch = subparser.add_subparsers()
# switch get
subparser = _sub_switch.add_parser('get', help='get state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_get)
# switch on
subparser = _sub_switch.add_parser('on', help='set on state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_on)
# switch off
subparser = _sub_switch.add_parser('off', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_off)
# switch toggle
subparser = _sub_switch.add_parser('toggle', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_toggle)
args = parser.parse_args(args)
logging.basicConfig()
if args.verbose:
logging.getLogger('pyfritzhome').setLevel(logging.DEBUG)
fritzbox = None
try:
fritzbox = Fritzhome(host=args.host, user=args.user,
password=args.password)
fritzbox.login()
args.func(fritzbox, args)
finally:
if fritzbox is not None:
fritzbox.logout()
if __name__ == '__main__':
main()
|
hthiery/python-fritzhome
|
pyfritzhome/cli.py
|
device_statistics
|
python
|
def device_statistics(fritz, args):
stats = fritz.get_device_statistics(args.ain)
print(stats)
|
Command that prints the device statistics.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/cli.py#L74-L77
| null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import argparse
try:
from version import __version__
except ImportError:
__version__ = 'dev'
from pyfritzhome import Fritzhome
_LOGGER = logging.getLogger(__name__)
def list_all(fritz, args):
"""Command that prints all device information."""
devices = fritz.get_devices()
for device in devices:
print('#' * 30)
print('name=%s' % device.name)
print(' ain=%s' % device.ain)
print(' id=%s' % device.identifier)
print(' productname=%s' % device.productname)
print(' manufacturer=%s' % device.manufacturer)
print(" present=%s" % device.present)
print(" lock=%s" % device.lock)
print(" devicelock=%s" % device.device_lock)
if device.present is False:
continue
if device.has_switch:
print(" Switch:")
print(" switch_state=%s" % device.switch_state)
if device.has_switch:
print(" Powermeter:")
print(" power=%s" % device.power)
print(" energy=%s" % device.energy)
print(" voltage=%s" % device.voltage)
if device.has_temperature_sensor:
print(" Temperature:")
print(" temperature=%s" % device.temperature)
print(" offset=%s" % device.offset)
if device.has_thermostat:
print(" Thermostat:")
print(" battery_low=%s" % device.battery_low)
print(" battery_level=%s" % device.battery_level)
print(" actual=%s" % device.actual_temperature)
print(" target=%s" % device.target_temperature)
print(" comfort=%s" % device.comfort_temperature)
print(" eco=%s" % device.eco_temperature)
print(" window=%s" % device.window_open)
print(" summer=%s" % device.summer_active)
print(" holiday=%s" % device.holiday_active)
if device.has_alarm:
print(" Alert:")
print(" alert=%s" % device.alert_state)
def device_name(fritz, args):
"""Command that prints the device name."""
print(fritz.get_device_name(args.ain))
def device_presence(fritz, args):
"""Command that prints the device presence."""
print(int(fritz.get_device_present(args.ain)))
def switch_get(fritz, args):
"""Command that get the device switch state."""
print(fritz.get_switch_state(args.ain))
def switch_on(fritz, args):
"""Command that set the device switch state to on."""
fritz.set_switch_state_on(args.ain)
def switch_off(fritz, args):
"""Command that set the device switch state to off."""
fritz.set_switch_state_off(args.ain)
def switch_toggle(fritz, args):
"""Command that toggles the device switch state."""
fritz.set_switch_state_toggle(args.ain)
def main(args=None):
"""The main function."""
parser = argparse.ArgumentParser(
description='Fritz!Box Smarthome CLI tool.')
parser.add_argument('-v', action='store_true', dest='verbose',
help='be more verbose')
parser.add_argument('-f', '--fritzbox', type=str, dest='host',
help='Fritz!Box IP address', default='fritz.box')
parser.add_argument('-u', '--user', type=str, dest='user',
help='Username')
parser.add_argument('-p', '--password', type=str, dest='password',
help='Username')
parser.add_argument('-a', '--ain', type=str, dest='ain',
help='Actor Identification', default=None)
parser.add_argument('-V', '--version', action='version',
version='{version}'.format(version=__version__),
help='Print version')
_sub = parser.add_subparsers(title='Commands')
# list all devices
subparser = _sub.add_parser('list', help='List all available devices')
subparser.set_defaults(func=list_all)
# device
subparser = _sub.add_parser('device', help='Device/Actor commands')
_sub_switch = subparser.add_subparsers()
# device name
subparser = _sub_switch.add_parser('name', help='get the device name')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_name)
# device presence
subparser = _sub_switch.add_parser('present',
help='get the device presence')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_presence)
# device stats
subparser = _sub_switch.add_parser('stats',
help='get the device statistics')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_statistics)
# switch
subparser = _sub.add_parser('switch', help='Switch commands')
_sub_switch = subparser.add_subparsers()
# switch get
subparser = _sub_switch.add_parser('get', help='get state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_get)
# switch on
subparser = _sub_switch.add_parser('on', help='set on state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_on)
# switch off
subparser = _sub_switch.add_parser('off', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_off)
# switch toggle
subparser = _sub_switch.add_parser('toggle', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_toggle)
args = parser.parse_args(args)
logging.basicConfig()
if args.verbose:
logging.getLogger('pyfritzhome').setLevel(logging.DEBUG)
fritzbox = None
try:
fritzbox = Fritzhome(host=args.host, user=args.user,
password=args.password)
fritzbox.login()
args.func(fritzbox, args)
finally:
if fritzbox is not None:
fritzbox.logout()
if __name__ == '__main__':
main()
|
hthiery/python-fritzhome
|
pyfritzhome/cli.py
|
main
|
python
|
def main(args=None):
parser = argparse.ArgumentParser(
description='Fritz!Box Smarthome CLI tool.')
parser.add_argument('-v', action='store_true', dest='verbose',
help='be more verbose')
parser.add_argument('-f', '--fritzbox', type=str, dest='host',
help='Fritz!Box IP address', default='fritz.box')
parser.add_argument('-u', '--user', type=str, dest='user',
help='Username')
parser.add_argument('-p', '--password', type=str, dest='password',
help='Username')
parser.add_argument('-a', '--ain', type=str, dest='ain',
help='Actor Identification', default=None)
parser.add_argument('-V', '--version', action='version',
version='{version}'.format(version=__version__),
help='Print version')
_sub = parser.add_subparsers(title='Commands')
# list all devices
subparser = _sub.add_parser('list', help='List all available devices')
subparser.set_defaults(func=list_all)
# device
subparser = _sub.add_parser('device', help='Device/Actor commands')
_sub_switch = subparser.add_subparsers()
# device name
subparser = _sub_switch.add_parser('name', help='get the device name')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_name)
# device presence
subparser = _sub_switch.add_parser('present',
help='get the device presence')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_presence)
# device stats
subparser = _sub_switch.add_parser('stats',
help='get the device statistics')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=device_statistics)
# switch
subparser = _sub.add_parser('switch', help='Switch commands')
_sub_switch = subparser.add_subparsers()
# switch get
subparser = _sub_switch.add_parser('get', help='get state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_get)
# switch on
subparser = _sub_switch.add_parser('on', help='set on state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_on)
# switch off
subparser = _sub_switch.add_parser('off', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_off)
# switch toggle
subparser = _sub_switch.add_parser('toggle', help='set off state')
subparser.add_argument('ain', type=str, metavar="AIN",
help='Actor Identification')
subparser.set_defaults(func=switch_toggle)
args = parser.parse_args(args)
logging.basicConfig()
if args.verbose:
logging.getLogger('pyfritzhome').setLevel(logging.DEBUG)
fritzbox = None
try:
fritzbox = Fritzhome(host=args.host, user=args.user,
password=args.password)
fritzbox.login()
args.func(fritzbox, args)
finally:
if fritzbox is not None:
fritzbox.logout()
|
The main function.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/cli.py#L100-L190
|
[
"def login(self):\n \"\"\"Login and get a valid session ID.\"\"\"\n try:\n (sid, challenge) = self._login_request()\n if sid == '0000000000000000':\n secret = self._create_login_secret(challenge, self._password)\n (sid2, challenge) = self._login_request(username=self._user,\n secret=secret)\n if sid2 == '0000000000000000':\n _LOGGER.warning(\"login failed %s\", sid2)\n raise LoginError(self._user)\n self._sid = sid2\n except xml.parsers.expat.ExpatError:\n raise LoginError(self._user)\n",
"def logout(self):\n \"\"\"Logout.\"\"\"\n self._logout_request()\n self._sid = None\n"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import logging
import argparse
try:
from version import __version__
except ImportError:
__version__ = 'dev'
from pyfritzhome import Fritzhome
_LOGGER = logging.getLogger(__name__)
def list_all(fritz, args):
"""Command that prints all device information."""
devices = fritz.get_devices()
for device in devices:
print('#' * 30)
print('name=%s' % device.name)
print(' ain=%s' % device.ain)
print(' id=%s' % device.identifier)
print(' productname=%s' % device.productname)
print(' manufacturer=%s' % device.manufacturer)
print(" present=%s" % device.present)
print(" lock=%s" % device.lock)
print(" devicelock=%s" % device.device_lock)
if device.present is False:
continue
if device.has_switch:
print(" Switch:")
print(" switch_state=%s" % device.switch_state)
if device.has_switch:
print(" Powermeter:")
print(" power=%s" % device.power)
print(" energy=%s" % device.energy)
print(" voltage=%s" % device.voltage)
if device.has_temperature_sensor:
print(" Temperature:")
print(" temperature=%s" % device.temperature)
print(" offset=%s" % device.offset)
if device.has_thermostat:
print(" Thermostat:")
print(" battery_low=%s" % device.battery_low)
print(" battery_level=%s" % device.battery_level)
print(" actual=%s" % device.actual_temperature)
print(" target=%s" % device.target_temperature)
print(" comfort=%s" % device.comfort_temperature)
print(" eco=%s" % device.eco_temperature)
print(" window=%s" % device.window_open)
print(" summer=%s" % device.summer_active)
print(" holiday=%s" % device.holiday_active)
if device.has_alarm:
print(" Alert:")
print(" alert=%s" % device.alert_state)
def device_name(fritz, args):
"""Command that prints the device name."""
print(fritz.get_device_name(args.ain))
def device_presence(fritz, args):
"""Command that prints the device presence."""
print(int(fritz.get_device_present(args.ain)))
def device_statistics(fritz, args):
"""Command that prints the device statistics."""
stats = fritz.get_device_statistics(args.ain)
print(stats)
def switch_get(fritz, args):
"""Command that get the device switch state."""
print(fritz.get_switch_state(args.ain))
def switch_on(fritz, args):
"""Command that set the device switch state to on."""
fritz.set_switch_state_on(args.ain)
def switch_off(fritz, args):
"""Command that set the device switch state to off."""
fritz.set_switch_state_off(args.ain)
def switch_toggle(fritz, args):
"""Command that toggles the device switch state."""
fritz.set_switch_state_toggle(args.ain)
if __name__ == '__main__':
main()
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
get_text
|
python
|
def get_text(nodelist):
value = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
value.append(node.data)
return ''.join(value)
|
Get the value from a text node.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L14-L20
| null |
# -*- coding: utf-8 -*-
from __future__ import print_function
import hashlib
import logging
import xml.dom.minidom
from requests import Session
from .errors import (InvalidError, LoginError)
_LOGGER = logging.getLogger(__name__)
def get_node_value(node, name):
"""Get the value from a node."""
return get_text(node.getElementsByTagName(name)[0].childNodes)
def bits(value):
while value:
bit = value & (~value+1)
yield bit
value ^= bit
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
class FritzhomeDevice(object):
"""The Fritzhome Device class."""
ALARM_MASK = 0x010
UNKNOWN_MASK = 0x020
THERMOSTAT_MASK = 0x040
POWER_METER_MASK = 0x080
TEMPERATURE_MASK = 0x100
SWITCH_MASK = 0x200
DECT_REPEATER_MASK = 0x400
MICROPHONE_UNIT = 0x800
HANFUN_UNIT = 0x2000
ain = None
identifier = None
manufacturer = None
productname = None
actual_temperature = None
target_temperature = None
eco_temperature = None
comfort_temperature = None
battery_level = None
window_open = None
summer_active = None
holiday_active = None
lock = None
device_lock = None
error_code = None
battery_low = None
switch_state = None
switch_mode = None
power = None
energy = None
voltage = None
offset = None
temperature = None
alert_state = None
def __init__(self, fritz=None, node=None):
if fritz is not None:
self._fritz = fritz
if node is not None:
self._update_from_node(node)
@staticmethod
def _get_temp_from_node(val, name):
return float(get_node_value(val, name)) / 2
def _update_from_node(self, node):
_LOGGER.debug(node.toprettyxml())
self.ain = node.getAttribute("identifier")
self.identifier = node.getAttribute("id")
self._functionsbitmask = int(node.getAttribute("functionbitmask"))
self.fw_version = node.getAttribute("fwversion")
self.manufacturer = node.getAttribute("manufacturer")
self.productname = node.getAttribute("productname")
self.name = get_node_value(node, 'name')
self.present = bool(int(get_node_value(node, 'present')))
if self.present is False:
return
for bit in bits(self._functionsbitmask):
try:
fct = {
self.ALARM_MASK: self._update_alarm_from_node,
self.POWER_METER_MASK: self._update_powermeter_from_node,
self.SWITCH_MASK: self._update_switch_from_node,
self.TEMPERATURE_MASK: self._update_temperature_from_node,
self.THERMOSTAT_MASK: self._update_hkr_from_node,
}[bit]
fct(node)
except KeyError:
pass
def _update_hkr_from_node(self, node):
val = node.getElementsByTagName('hkr')[0]
try:
self.actual_temperature = self._get_temp_from_node(val, 'tist')
except ValueError:
pass
self.target_temperature = self._get_temp_from_node(val, 'tsoll')
self.eco_temperature = self._get_temp_from_node(val, 'absenk')
self.comfort_temperature = self._get_temp_from_node(val, 'komfort')
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
try:
self.lock = bool(int(get_node_value(val, 'lock')))
except IndexError:
pass
try:
self.error_code = int(get_node_value(val, 'errorcode'))
except IndexError:
pass
try:
self.battery_low = bool(int(get_node_value(val, 'batterylow')))
except IndexError:
pass
try:
self.battery_level = int(int(get_node_value(val, 'battery')))
except IndexError:
pass
try:
self.window_open = bool(int(get_node_value(val,
'windowopenactiv')))
except IndexError:
pass
try:
self.summer_active = bool(int(get_node_value(val, 'summeractive')))
except IndexError:
pass
try:
self.holiday_active = bool(int(get_node_value(val,
'holidayactive')))
except IndexError:
pass
def _update_switch_from_node(self, node):
val = node.getElementsByTagName('switch')[0]
self.switch_state = bool(int(get_node_value(val, 'state')))
self.switch_mode = get_node_value(val, 'mode')
self.lock = bool(get_node_value(val, 'lock'))
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
def _update_powermeter_from_node(self, node):
val = node.getElementsByTagName('powermeter')[0]
self.power = int(get_node_value(val, 'power'))
self.energy = int(get_node_value(val, 'energy'))
try:
self.voltage = float(int(get_node_value(val, 'voltage')) / 1000)
except IndexError:
pass
def _update_temperature_from_node(self, node):
val = node.getElementsByTagName('temperature')[0]
try:
self.offset = int(get_node_value(val, 'offset')) / 10
except ValueError:
pass
try:
self.temperature = int(get_node_value(val, 'celsius')) / 10
except ValueError:
pass
def _update_alarm_from_node(self, node):
val = node.getElementsByTagName('alert')[0]
try:
self.alert_state = bool(int(get_node_value(val, 'state')))
except IndexError:
pass
def __repr__(self):
"""Return a string."""
return '{ain} {identifier} {manuf} {prod} {name}'.format(
ain=self.ain,
identifier=self.identifier,
manuf=self.manufacturer,
prod=self.productname,
name=self.name)
def update(self):
"""Update the device values."""
node = self._fritz.get_device_element(self.ain)
self._update_from_node(node)
@property
def has_alarm(self):
"""Check if the device has alarm function."""
return bool(self._functionsbitmask & self.ALARM_MASK)
@property
def has_thermostat(self):
"""Check if the device has thermostat function."""
return bool(self._functionsbitmask & self.THERMOSTAT_MASK)
@property
def has_powermeter(self):
"""Check if the device has powermeter function."""
return bool(self._functionsbitmask & self.POWER_METER_MASK)
@property
def has_temperature_sensor(self):
"""Check if the device has temperature function."""
return bool(self._functionsbitmask & self.TEMPERATURE_MASK)
@property
def has_switch(self):
"""Check if the device has switch function."""
return bool(self._functionsbitmask & self.SWITCH_MASK)
@property
def has_repeater(self):
"""Check if the device has repeater function."""
return bool(self._functionsbitmask & self.DECT_REPEATER_MASK)
def get_present(self):
"""Check if the device is present."""
return self._fritz.get_device_present(self.ain)
def get_switch_state(self):
"""Get the switch state."""
return self._fritz.get_switch_state(self.ain)
def set_switch_state_on(self):
"""Set the switch state to on."""
return self._fritz.set_switch_state_on(self.ain)
def set_switch_state_off(self):
"""Set the switch state to off."""
return self._fritz.set_switch_state_off(self.ain)
def set_switch_state_toggle(self):
"""Toggle the switch state."""
return self._fritz.set_switch_state_toggle(self.ain)
def get_switch_power(self):
""" the switch state."""
return self._fritz.get_switch_power(self.ain)
def get_switch_energy(self):
"""Get the switch energy."""
return self._fritz.get_switch_energy(self.ain)
def get_temperature(self):
"""Get the device temperature value."""
return self._fritz.get_temperature(self.ain)
def get_target_temperature(self):
"""Get the thermostate target temperature."""
return self._fritz.get_target_temperature(self.ain)
def set_target_temperature(self, temperature):
"""Set the thermostate target temperature."""
return self._fritz.set_target_temperature(self.ain, temperature)
def get_comfort_temperature(self):
"""Get the thermostate comfort temperature."""
return self._fritz.get_comfort_temperature(self.ain)
def get_eco_temperature(self):
"""Get the thermostate eco temperature."""
return self._fritz.get_eco_temperature(self.ain)
def get_hkr_state(self):
"""Get the thermostate state."""
self.update()
try:
return {
126.5: 'off',
127.0: 'on',
self.eco_temperature: 'eco',
self.comfort_temperature: 'comfort'
}[self.target_temperature]
except KeyError:
return 'manual'
def set_hkr_state(self, state):
"""Set the state of the thermostat.
Possible values for state are: 'on', 'off', 'comfort', 'eco'.
"""
try:
value = {
'off': 0,
'on': 100,
'eco': self.eco_temperature,
'comfort': self.comfort_temperature
}[state]
except KeyError:
return
self.set_target_temperature(value)
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome._request
|
python
|
def _request(self, url, params=None, timeout=10):
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
|
Send a request with parameters.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L47-L51
| null |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome._login_request
|
python
|
def _login_request(self, username=None, secret=None):
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
|
Send a login request with parameters.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L53-L68
|
[
"def get_text(nodelist):\n \"\"\"Get the value from a text node.\"\"\"\n value = []\n for node in nodelist:\n if node.nodeType == node.TEXT_NODE:\n value.append(node.data)\n return ''.join(value)\n",
"def _request(self, url, params=None, timeout=10):\n \"\"\"Send a request with parameters.\"\"\"\n rsp = self._session.get(url, params=params, timeout=timeout)\n rsp.raise_for_status()\n return rsp.text.strip()\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome._logout_request
|
python
|
def _logout_request(self):
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
|
Send a logout request.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L70-L79
|
[
"def _request(self, url, params=None, timeout=10):\n \"\"\"Send a request with parameters.\"\"\"\n rsp = self._session.get(url, params=params, timeout=timeout)\n rsp.raise_for_status()\n return rsp.text.strip()\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome._create_login_secret
|
python
|
def _create_login_secret(challenge, password):
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
|
Create a login secret.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L82-L86
| null |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome._aha_request
|
python
|
def _aha_request(self, cmd, ain=None, param=None, rf=str):
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
|
Send an AHA request.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L88-L106
|
[
"def _request(self, url, params=None, timeout=10):\n \"\"\"Send a request with parameters.\"\"\"\n rsp = self._session.get(url, params=params, timeout=timeout)\n rsp.raise_for_status()\n return rsp.text.strip()\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome.login
|
python
|
def login(self):
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
|
Login and get a valid session ID.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L108-L121
|
[
"def _login_request(self, username=None, secret=None):\n \"\"\"Send a login request with paramerters.\"\"\"\n url = 'http://' + self._host + '/login_sid.lua'\n params = {}\n if username:\n params['username'] = username\n if secret:\n params['response'] = secret\n\n plain = self._request(url, params)\n dom = xml.dom.minidom.parseString(plain)\n sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)\n challenge = get_text(\n dom.getElementsByTagName('Challenge')[0].childNodes)\n\n return (sid, challenge)\n",
"def _create_login_secret(challenge, password):\n \"\"\"Create a login secret.\"\"\"\n to_hash = (challenge + '-' + password).encode('UTF-16LE')\n hashed = hashlib.md5(to_hash).hexdigest()\n return '{0}-{1}'.format(challenge, hashed)\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome.get_device_elements
|
python
|
def get_device_elements(self):
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
|
Get the DOM elements for the device list.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L128-L133
|
[
"def _aha_request(self, cmd, ain=None, param=None, rf=str):\n \"\"\"Send an AHA request.\"\"\"\n url = 'http://' + self._host + '/webservices/homeautoswitch.lua'\n params = {\n 'switchcmd': cmd,\n 'sid': self._sid\n }\n if param:\n params['param'] = param\n if ain:\n params['ain'] = ain\n\n plain = self._request(url, params)\n if plain == 'inval':\n raise InvalidError\n\n if rf == bool:\n return bool(int(plain))\n return rf(plain)\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome.get_device_element
|
python
|
def get_device_element(self, ain):
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
|
Get the DOM element for the specified device.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L135-L141
|
[
"def get_device_elements(self):\n \"\"\"Get the DOM elements for the device list.\"\"\"\n plain = self._aha_request('getdevicelistinfos')\n dom = xml.dom.minidom.parseString(plain)\n _LOGGER.debug(dom)\n return dom.getElementsByTagName(\"device\")\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome.get_devices
|
python
|
def get_devices(self):
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
|
Get the list of all known devices.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L143-L149
|
[
"def get_device_elements(self):\n \"\"\"Get the DOM elements for the device list.\"\"\"\n plain = self._aha_request('getdevicelistinfos')\n dom = xml.dom.minidom.parseString(plain)\n _LOGGER.debug(dom)\n return dom.getElementsByTagName(\"device\")\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome.get_device_by_ain
|
python
|
def get_device_by_ain(self, ain):
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
|
Returns a device specified by the AIN.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L151-L156
|
[
"def get_devices(self):\n \"\"\"Get the list of all known devices.\"\"\"\n devices = []\n for element in self.get_device_elements():\n device = FritzhomeDevice(self, node=element)\n devices.append(device)\n return devices\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def set_target_temperature(self, ain, temperature):
"""Set the thermostate target temperature."""
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
Fritzhome.set_target_temperature
|
python
|
def set_target_temperature(self, ain, temperature):
param = 16 + ((float(temperature) - 8) * 2)
if param < min(range(16, 56)):
param = 253
elif param > max(range(16, 56)):
param = 254
self._aha_request('sethkrtsoll', ain=ain, param=int(param))
|
Set the thermostate target temperature.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L202-L210
|
[
"def _aha_request(self, cmd, ain=None, param=None, rf=str):\n \"\"\"Send an AHA request.\"\"\"\n url = 'http://' + self._host + '/webservices/homeautoswitch.lua'\n params = {\n 'switchcmd': cmd,\n 'sid': self._sid\n }\n if param:\n params['param'] = param\n if ain:\n params['ain'] = ain\n\n plain = self._request(url, params)\n if plain == 'inval':\n raise InvalidError\n\n if rf == bool:\n return bool(int(plain))\n return rf(plain)\n"
] |
class Fritzhome(object):
"""Fritzhome object to communicate with the device."""
_sid = None
_session = None
def __init__(self, host, user, password):
self._host = host
self._user = user
self._password = password
self._session = Session()
def _request(self, url, params=None, timeout=10):
"""Send a request with parameters."""
rsp = self._session.get(url, params=params, timeout=timeout)
rsp.raise_for_status()
return rsp.text.strip()
def _login_request(self, username=None, secret=None):
"""Send a login request with paramerters."""
url = 'http://' + self._host + '/login_sid.lua'
params = {}
if username:
params['username'] = username
if secret:
params['response'] = secret
plain = self._request(url, params)
dom = xml.dom.minidom.parseString(plain)
sid = get_text(dom.getElementsByTagName('SID')[0].childNodes)
challenge = get_text(
dom.getElementsByTagName('Challenge')[0].childNodes)
return (sid, challenge)
def _logout_request(self):
"""Send a logout request."""
_LOGGER.debug('logout')
url = 'http://' + self._host + '/login_sid.lua'
params = {
'security:command/logout': '1',
'sid': self._sid
}
self._request(url, params)
@staticmethod
def _create_login_secret(challenge, password):
"""Create a login secret."""
to_hash = (challenge + '-' + password).encode('UTF-16LE')
hashed = hashlib.md5(to_hash).hexdigest()
return '{0}-{1}'.format(challenge, hashed)
def _aha_request(self, cmd, ain=None, param=None, rf=str):
"""Send an AHA request."""
url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
params = {
'switchcmd': cmd,
'sid': self._sid
}
if param:
params['param'] = param
if ain:
params['ain'] = ain
plain = self._request(url, params)
if plain == 'inval':
raise InvalidError
if rf == bool:
return bool(int(plain))
return rf(plain)
def login(self):
"""Login and get a valid session ID."""
try:
(sid, challenge) = self._login_request()
if sid == '0000000000000000':
secret = self._create_login_secret(challenge, self._password)
(sid2, challenge) = self._login_request(username=self._user,
secret=secret)
if sid2 == '0000000000000000':
_LOGGER.warning("login failed %s", sid2)
raise LoginError(self._user)
self._sid = sid2
except xml.parsers.expat.ExpatError:
raise LoginError(self._user)
def logout(self):
"""Logout."""
self._logout_request()
self._sid = None
def get_device_elements(self):
"""Get the DOM elements for the device list."""
plain = self._aha_request('getdevicelistinfos')
dom = xml.dom.minidom.parseString(plain)
_LOGGER.debug(dom)
return dom.getElementsByTagName("device")
def get_device_element(self, ain):
"""Get the DOM element for the specified device."""
elements = self.get_device_elements()
for element in elements:
if element.getAttribute('identifier') == ain:
return element
return None
def get_devices(self):
"""Get the list of all known devices."""
devices = []
for element in self.get_device_elements():
device = FritzhomeDevice(self, node=element)
devices.append(device)
return devices
def get_device_by_ain(self, ain):
"""Returns a device specified by the AIN."""
devices = self.get_devices()
for device in devices:
if device.ain == ain:
return device
def get_device_present(self, ain):
"""Get the device presence."""
return self._aha_request('getswitchpresent', ain=ain, rf=bool)
def get_device_name(self, ain):
"""Get the device name."""
return self._aha_request('getswitchname', ain=ain)
def get_switch_state(self, ain):
"""Get the switch state."""
return self._aha_request('getswitchstate', ain=ain, rf=bool)
def set_switch_state_on(self, ain):
"""Set the switch to on state."""
return self._aha_request('setswitchon', ain=ain, rf=bool)
def set_switch_state_off(self, ain):
"""Set the switch to off state."""
return self._aha_request('setswitchoff', ain=ain, rf=bool)
def set_switch_state_toggle(self, ain):
"""Toggle the switch state."""
return self._aha_request('setswitchtoggle', ain=ain, rf=bool)
def get_switch_power(self, ain):
"""Get the switch power consumption."""
return self._aha_request('getswitchpower', ain=ain, rf=int)
def get_switch_energy(self, ain):
"""Get the switch energy."""
return self._aha_request('getswitchenergy', ain=ain, rf=int)
def get_temperature(self, ain):
"""Get the device temperature sensor value."""
return self._aha_request('gettemperature', ain=ain, rf=float) / 10.0
def _get_temperature(self, ain, name):
plain = self._aha_request(name, ain=ain, rf=float)
return (plain - 16) / 2 + 8
def get_target_temperature(self, ain):
"""Get the thermostate target temperature."""
return self._get_temperature(ain, 'gethkrtsoll')
def get_comfort_temperature(self, ain):
"""Get the thermostate comfort temperature."""
return self._get_temperature(ain, 'gethkrkomfort')
def get_eco_temperature(self, ain):
"""Get the thermostate eco temperature."""
return self._get_temperature(ain, 'gethkrabsenk')
def get_alert_state(self, ain):
"""Get the alert state."""
device = self.get_device_by_ain(ain)
return device.alert_state
def get_device_statistics(self, ain):
"""Get device statistics."""
plain = self._aha_request('getbasicdevicestats', ain=ain)
return plain
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
FritzhomeDevice.update
|
python
|
def update(self):
node = self._fritz.get_device_element(self.ain)
self._update_from_node(node)
|
Update the device values.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L409-L412
|
[
"def _update_from_node(self, node):\n _LOGGER.debug(node.toprettyxml())\n self.ain = node.getAttribute(\"identifier\")\n self.identifier = node.getAttribute(\"id\")\n self._functionsbitmask = int(node.getAttribute(\"functionbitmask\"))\n self.fw_version = node.getAttribute(\"fwversion\")\n self.manufacturer = node.getAttribute(\"manufacturer\")\n self.productname = node.getAttribute(\"productname\")\n\n self.name = get_node_value(node, 'name')\n self.present = bool(int(get_node_value(node, 'present')))\n\n if self.present is False:\n return\n\n for bit in bits(self._functionsbitmask):\n try:\n fct = {\n self.ALARM_MASK: self._update_alarm_from_node,\n self.POWER_METER_MASK: self._update_powermeter_from_node,\n self.SWITCH_MASK: self._update_switch_from_node,\n self.TEMPERATURE_MASK: self._update_temperature_from_node,\n self.THERMOSTAT_MASK: self._update_hkr_from_node,\n }[bit]\n fct(node)\n except KeyError:\n pass\n"
] |
class FritzhomeDevice(object):
"""The Fritzhome Device class."""
ALARM_MASK = 0x010
UNKNOWN_MASK = 0x020
THERMOSTAT_MASK = 0x040
POWER_METER_MASK = 0x080
TEMPERATURE_MASK = 0x100
SWITCH_MASK = 0x200
DECT_REPEATER_MASK = 0x400
MICROPHONE_UNIT = 0x800
HANFUN_UNIT = 0x2000
ain = None
identifier = None
manufacturer = None
productname = None
actual_temperature = None
target_temperature = None
eco_temperature = None
comfort_temperature = None
battery_level = None
window_open = None
summer_active = None
holiday_active = None
lock = None
device_lock = None
error_code = None
battery_low = None
switch_state = None
switch_mode = None
power = None
energy = None
voltage = None
offset = None
temperature = None
alert_state = None
def __init__(self, fritz=None, node=None):
if fritz is not None:
self._fritz = fritz
if node is not None:
self._update_from_node(node)
@staticmethod
def _get_temp_from_node(val, name):
return float(get_node_value(val, name)) / 2
def _update_from_node(self, node):
_LOGGER.debug(node.toprettyxml())
self.ain = node.getAttribute("identifier")
self.identifier = node.getAttribute("id")
self._functionsbitmask = int(node.getAttribute("functionbitmask"))
self.fw_version = node.getAttribute("fwversion")
self.manufacturer = node.getAttribute("manufacturer")
self.productname = node.getAttribute("productname")
self.name = get_node_value(node, 'name')
self.present = bool(int(get_node_value(node, 'present')))
if self.present is False:
return
for bit in bits(self._functionsbitmask):
try:
fct = {
self.ALARM_MASK: self._update_alarm_from_node,
self.POWER_METER_MASK: self._update_powermeter_from_node,
self.SWITCH_MASK: self._update_switch_from_node,
self.TEMPERATURE_MASK: self._update_temperature_from_node,
self.THERMOSTAT_MASK: self._update_hkr_from_node,
}[bit]
fct(node)
except KeyError:
pass
def _update_hkr_from_node(self, node):
val = node.getElementsByTagName('hkr')[0]
try:
self.actual_temperature = self._get_temp_from_node(val, 'tist')
except ValueError:
pass
self.target_temperature = self._get_temp_from_node(val, 'tsoll')
self.eco_temperature = self._get_temp_from_node(val, 'absenk')
self.comfort_temperature = self._get_temp_from_node(val, 'komfort')
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
try:
self.lock = bool(int(get_node_value(val, 'lock')))
except IndexError:
pass
try:
self.error_code = int(get_node_value(val, 'errorcode'))
except IndexError:
pass
try:
self.battery_low = bool(int(get_node_value(val, 'batterylow')))
except IndexError:
pass
try:
self.battery_level = int(int(get_node_value(val, 'battery')))
except IndexError:
pass
try:
self.window_open = bool(int(get_node_value(val,
'windowopenactiv')))
except IndexError:
pass
try:
self.summer_active = bool(int(get_node_value(val, 'summeractive')))
except IndexError:
pass
try:
self.holiday_active = bool(int(get_node_value(val,
'holidayactive')))
except IndexError:
pass
def _update_switch_from_node(self, node):
val = node.getElementsByTagName('switch')[0]
self.switch_state = bool(int(get_node_value(val, 'state')))
self.switch_mode = get_node_value(val, 'mode')
self.lock = bool(get_node_value(val, 'lock'))
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
def _update_powermeter_from_node(self, node):
val = node.getElementsByTagName('powermeter')[0]
self.power = int(get_node_value(val, 'power'))
self.energy = int(get_node_value(val, 'energy'))
try:
self.voltage = float(int(get_node_value(val, 'voltage')) / 1000)
except IndexError:
pass
def _update_temperature_from_node(self, node):
val = node.getElementsByTagName('temperature')[0]
try:
self.offset = int(get_node_value(val, 'offset')) / 10
except ValueError:
pass
try:
self.temperature = int(get_node_value(val, 'celsius')) / 10
except ValueError:
pass
def _update_alarm_from_node(self, node):
val = node.getElementsByTagName('alert')[0]
try:
self.alert_state = bool(int(get_node_value(val, 'state')))
except IndexError:
pass
def __repr__(self):
"""Return a string."""
return '{ain} {identifier} {manuf} {prod} {name}'.format(
ain=self.ain,
identifier=self.identifier,
manuf=self.manufacturer,
prod=self.productname,
name=self.name)
@property
def has_alarm(self):
"""Check if the device has alarm function."""
return bool(self._functionsbitmask & self.ALARM_MASK)
@property
def has_thermostat(self):
"""Check if the device has thermostat function."""
return bool(self._functionsbitmask & self.THERMOSTAT_MASK)
@property
def has_powermeter(self):
"""Check if the device has powermeter function."""
return bool(self._functionsbitmask & self.POWER_METER_MASK)
@property
def has_temperature_sensor(self):
"""Check if the device has temperature function."""
return bool(self._functionsbitmask & self.TEMPERATURE_MASK)
@property
def has_switch(self):
"""Check if the device has switch function."""
return bool(self._functionsbitmask & self.SWITCH_MASK)
@property
def has_repeater(self):
"""Check if the device has repeater function."""
return bool(self._functionsbitmask & self.DECT_REPEATER_MASK)
def get_present(self):
"""Check if the device is present."""
return self._fritz.get_device_present(self.ain)
def get_switch_state(self):
"""Get the switch state."""
return self._fritz.get_switch_state(self.ain)
def set_switch_state_on(self):
"""Set the switch state to on."""
return self._fritz.set_switch_state_on(self.ain)
def set_switch_state_off(self):
"""Set the switch state to off."""
return self._fritz.set_switch_state_off(self.ain)
def set_switch_state_toggle(self):
"""Toggle the switch state."""
return self._fritz.set_switch_state_toggle(self.ain)
def get_switch_power(self):
""" the switch state."""
return self._fritz.get_switch_power(self.ain)
def get_switch_energy(self):
"""Get the switch energy."""
return self._fritz.get_switch_energy(self.ain)
def get_temperature(self):
"""Get the device temperature value."""
return self._fritz.get_temperature(self.ain)
def get_target_temperature(self):
"""Get the thermostate target temperature."""
return self._fritz.get_target_temperature(self.ain)
def set_target_temperature(self, temperature):
"""Set the thermostate target temperature."""
return self._fritz.set_target_temperature(self.ain, temperature)
def get_comfort_temperature(self):
"""Get the thermostate comfort temperature."""
return self._fritz.get_comfort_temperature(self.ain)
def get_eco_temperature(self):
"""Get the thermostate eco temperature."""
return self._fritz.get_eco_temperature(self.ain)
def get_hkr_state(self):
"""Get the thermostate state."""
self.update()
try:
return {
126.5: 'off',
127.0: 'on',
self.eco_temperature: 'eco',
self.comfort_temperature: 'comfort'
}[self.target_temperature]
except KeyError:
return 'manual'
def set_hkr_state(self, state):
"""Set the state of the thermostat.
Possible values for state are: 'on', 'off', 'comfort', 'eco'.
"""
try:
value = {
'off': 0,
'on': 100,
'eco': self.eco_temperature,
'comfort': self.comfort_temperature
}[state]
except KeyError:
return
self.set_target_temperature(value)
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
FritzhomeDevice.get_hkr_state
|
python
|
def get_hkr_state(self):
self.update()
try:
return {
126.5: 'off',
127.0: 'on',
self.eco_temperature: 'eco',
self.comfort_temperature: 'comfort'
}[self.target_temperature]
except KeyError:
return 'manual'
|
Get the thermostate state.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L492-L503
|
[
"def update(self):\n \"\"\"Update the device values.\"\"\"\n node = self._fritz.get_device_element(self.ain)\n self._update_from_node(node)\n"
] |
class FritzhomeDevice(object):
"""The Fritzhome Device class."""
ALARM_MASK = 0x010
UNKNOWN_MASK = 0x020
THERMOSTAT_MASK = 0x040
POWER_METER_MASK = 0x080
TEMPERATURE_MASK = 0x100
SWITCH_MASK = 0x200
DECT_REPEATER_MASK = 0x400
MICROPHONE_UNIT = 0x800
HANFUN_UNIT = 0x2000
ain = None
identifier = None
manufacturer = None
productname = None
actual_temperature = None
target_temperature = None
eco_temperature = None
comfort_temperature = None
battery_level = None
window_open = None
summer_active = None
holiday_active = None
lock = None
device_lock = None
error_code = None
battery_low = None
switch_state = None
switch_mode = None
power = None
energy = None
voltage = None
offset = None
temperature = None
alert_state = None
def __init__(self, fritz=None, node=None):
if fritz is not None:
self._fritz = fritz
if node is not None:
self._update_from_node(node)
@staticmethod
def _get_temp_from_node(val, name):
return float(get_node_value(val, name)) / 2
def _update_from_node(self, node):
_LOGGER.debug(node.toprettyxml())
self.ain = node.getAttribute("identifier")
self.identifier = node.getAttribute("id")
self._functionsbitmask = int(node.getAttribute("functionbitmask"))
self.fw_version = node.getAttribute("fwversion")
self.manufacturer = node.getAttribute("manufacturer")
self.productname = node.getAttribute("productname")
self.name = get_node_value(node, 'name')
self.present = bool(int(get_node_value(node, 'present')))
if self.present is False:
return
for bit in bits(self._functionsbitmask):
try:
fct = {
self.ALARM_MASK: self._update_alarm_from_node,
self.POWER_METER_MASK: self._update_powermeter_from_node,
self.SWITCH_MASK: self._update_switch_from_node,
self.TEMPERATURE_MASK: self._update_temperature_from_node,
self.THERMOSTAT_MASK: self._update_hkr_from_node,
}[bit]
fct(node)
except KeyError:
pass
def _update_hkr_from_node(self, node):
val = node.getElementsByTagName('hkr')[0]
try:
self.actual_temperature = self._get_temp_from_node(val, 'tist')
except ValueError:
pass
self.target_temperature = self._get_temp_from_node(val, 'tsoll')
self.eco_temperature = self._get_temp_from_node(val, 'absenk')
self.comfort_temperature = self._get_temp_from_node(val, 'komfort')
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
try:
self.lock = bool(int(get_node_value(val, 'lock')))
except IndexError:
pass
try:
self.error_code = int(get_node_value(val, 'errorcode'))
except IndexError:
pass
try:
self.battery_low = bool(int(get_node_value(val, 'batterylow')))
except IndexError:
pass
try:
self.battery_level = int(int(get_node_value(val, 'battery')))
except IndexError:
pass
try:
self.window_open = bool(int(get_node_value(val,
'windowopenactiv')))
except IndexError:
pass
try:
self.summer_active = bool(int(get_node_value(val, 'summeractive')))
except IndexError:
pass
try:
self.holiday_active = bool(int(get_node_value(val,
'holidayactive')))
except IndexError:
pass
def _update_switch_from_node(self, node):
val = node.getElementsByTagName('switch')[0]
self.switch_state = bool(int(get_node_value(val, 'state')))
self.switch_mode = get_node_value(val, 'mode')
self.lock = bool(get_node_value(val, 'lock'))
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
def _update_powermeter_from_node(self, node):
val = node.getElementsByTagName('powermeter')[0]
self.power = int(get_node_value(val, 'power'))
self.energy = int(get_node_value(val, 'energy'))
try:
self.voltage = float(int(get_node_value(val, 'voltage')) / 1000)
except IndexError:
pass
def _update_temperature_from_node(self, node):
val = node.getElementsByTagName('temperature')[0]
try:
self.offset = int(get_node_value(val, 'offset')) / 10
except ValueError:
pass
try:
self.temperature = int(get_node_value(val, 'celsius')) / 10
except ValueError:
pass
def _update_alarm_from_node(self, node):
val = node.getElementsByTagName('alert')[0]
try:
self.alert_state = bool(int(get_node_value(val, 'state')))
except IndexError:
pass
def __repr__(self):
"""Return a string."""
return '{ain} {identifier} {manuf} {prod} {name}'.format(
ain=self.ain,
identifier=self.identifier,
manuf=self.manufacturer,
prod=self.productname,
name=self.name)
def update(self):
"""Update the device values."""
node = self._fritz.get_device_element(self.ain)
self._update_from_node(node)
@property
def has_alarm(self):
"""Check if the device has alarm function."""
return bool(self._functionsbitmask & self.ALARM_MASK)
@property
def has_thermostat(self):
"""Check if the device has thermostat function."""
return bool(self._functionsbitmask & self.THERMOSTAT_MASK)
@property
def has_powermeter(self):
"""Check if the device has powermeter function."""
return bool(self._functionsbitmask & self.POWER_METER_MASK)
@property
def has_temperature_sensor(self):
"""Check if the device has temperature function."""
return bool(self._functionsbitmask & self.TEMPERATURE_MASK)
@property
def has_switch(self):
"""Check if the device has switch function."""
return bool(self._functionsbitmask & self.SWITCH_MASK)
@property
def has_repeater(self):
"""Check if the device has repeater function."""
return bool(self._functionsbitmask & self.DECT_REPEATER_MASK)
def get_present(self):
"""Check if the device is present."""
return self._fritz.get_device_present(self.ain)
def get_switch_state(self):
"""Get the switch state."""
return self._fritz.get_switch_state(self.ain)
def set_switch_state_on(self):
"""Set the switch state to on."""
return self._fritz.set_switch_state_on(self.ain)
def set_switch_state_off(self):
"""Set the switch state to off."""
return self._fritz.set_switch_state_off(self.ain)
def set_switch_state_toggle(self):
"""Toggle the switch state."""
return self._fritz.set_switch_state_toggle(self.ain)
def get_switch_power(self):
""" the switch state."""
return self._fritz.get_switch_power(self.ain)
def get_switch_energy(self):
"""Get the switch energy."""
return self._fritz.get_switch_energy(self.ain)
def get_temperature(self):
"""Get the device temperature value."""
return self._fritz.get_temperature(self.ain)
def get_target_temperature(self):
"""Get the thermostate target temperature."""
return self._fritz.get_target_temperature(self.ain)
def set_target_temperature(self, temperature):
"""Set the thermostate target temperature."""
return self._fritz.set_target_temperature(self.ain, temperature)
def get_comfort_temperature(self):
"""Get the thermostate comfort temperature."""
return self._fritz.get_comfort_temperature(self.ain)
def get_eco_temperature(self):
"""Get the thermostate eco temperature."""
return self._fritz.get_eco_temperature(self.ain)
def set_hkr_state(self, state):
"""Set the state of the thermostat.
Possible values for state are: 'on', 'off', 'comfort', 'eco'.
"""
try:
value = {
'off': 0,
'on': 100,
'eco': self.eco_temperature,
'comfort': self.comfort_temperature
}[state]
except KeyError:
return
self.set_target_temperature(value)
|
hthiery/python-fritzhome
|
pyfritzhome/fritzhome.py
|
FritzhomeDevice.set_hkr_state
|
python
|
def set_hkr_state(self, state):
try:
value = {
'off': 0,
'on': 100,
'eco': self.eco_temperature,
'comfort': self.comfort_temperature
}[state]
except KeyError:
return
self.set_target_temperature(value)
|
Set the state of the thermostat.
Possible values for state are: 'on', 'off', 'comfort', 'eco'.
|
train
|
https://github.com/hthiery/python-fritzhome/blob/c74bd178d08a305028f316f7da35202da3526f61/pyfritzhome/fritzhome.py#L505-L520
|
[
"def set_target_temperature(self, temperature):\n \"\"\"Set the thermostate target temperature.\"\"\"\n return self._fritz.set_target_temperature(self.ain, temperature)\n"
] |
class FritzhomeDevice(object):
"""The Fritzhome Device class."""
ALARM_MASK = 0x010
UNKNOWN_MASK = 0x020
THERMOSTAT_MASK = 0x040
POWER_METER_MASK = 0x080
TEMPERATURE_MASK = 0x100
SWITCH_MASK = 0x200
DECT_REPEATER_MASK = 0x400
MICROPHONE_UNIT = 0x800
HANFUN_UNIT = 0x2000
ain = None
identifier = None
manufacturer = None
productname = None
actual_temperature = None
target_temperature = None
eco_temperature = None
comfort_temperature = None
battery_level = None
window_open = None
summer_active = None
holiday_active = None
lock = None
device_lock = None
error_code = None
battery_low = None
switch_state = None
switch_mode = None
power = None
energy = None
voltage = None
offset = None
temperature = None
alert_state = None
def __init__(self, fritz=None, node=None):
if fritz is not None:
self._fritz = fritz
if node is not None:
self._update_from_node(node)
@staticmethod
def _get_temp_from_node(val, name):
return float(get_node_value(val, name)) / 2
def _update_from_node(self, node):
_LOGGER.debug(node.toprettyxml())
self.ain = node.getAttribute("identifier")
self.identifier = node.getAttribute("id")
self._functionsbitmask = int(node.getAttribute("functionbitmask"))
self.fw_version = node.getAttribute("fwversion")
self.manufacturer = node.getAttribute("manufacturer")
self.productname = node.getAttribute("productname")
self.name = get_node_value(node, 'name')
self.present = bool(int(get_node_value(node, 'present')))
if self.present is False:
return
for bit in bits(self._functionsbitmask):
try:
fct = {
self.ALARM_MASK: self._update_alarm_from_node,
self.POWER_METER_MASK: self._update_powermeter_from_node,
self.SWITCH_MASK: self._update_switch_from_node,
self.TEMPERATURE_MASK: self._update_temperature_from_node,
self.THERMOSTAT_MASK: self._update_hkr_from_node,
}[bit]
fct(node)
except KeyError:
pass
def _update_hkr_from_node(self, node):
val = node.getElementsByTagName('hkr')[0]
try:
self.actual_temperature = self._get_temp_from_node(val, 'tist')
except ValueError:
pass
self.target_temperature = self._get_temp_from_node(val, 'tsoll')
self.eco_temperature = self._get_temp_from_node(val, 'absenk')
self.comfort_temperature = self._get_temp_from_node(val, 'komfort')
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
try:
self.lock = bool(int(get_node_value(val, 'lock')))
except IndexError:
pass
try:
self.error_code = int(get_node_value(val, 'errorcode'))
except IndexError:
pass
try:
self.battery_low = bool(int(get_node_value(val, 'batterylow')))
except IndexError:
pass
try:
self.battery_level = int(int(get_node_value(val, 'battery')))
except IndexError:
pass
try:
self.window_open = bool(int(get_node_value(val,
'windowopenactiv')))
except IndexError:
pass
try:
self.summer_active = bool(int(get_node_value(val, 'summeractive')))
except IndexError:
pass
try:
self.holiday_active = bool(int(get_node_value(val,
'holidayactive')))
except IndexError:
pass
def _update_switch_from_node(self, node):
val = node.getElementsByTagName('switch')[0]
self.switch_state = bool(int(get_node_value(val, 'state')))
self.switch_mode = get_node_value(val, 'mode')
self.lock = bool(get_node_value(val, 'lock'))
# optional value
try:
self.device_lock = bool(int(get_node_value(val, 'devicelock')))
except IndexError:
pass
def _update_powermeter_from_node(self, node):
    """Read power/energy/voltage readings from a <powermeter> element."""
    val = node.getElementsByTagName('powermeter')[0]
    self.power = int(get_node_value(val, 'power'))
    self.energy = int(get_node_value(val, 'energy'))
    try:
        # Divided by 1000 — presumably reported in millivolts; the element
        # is optional, hence the IndexError guard.
        self.voltage = float(int(get_node_value(val, 'voltage')) / 1000)
    except IndexError:
        pass

def _update_temperature_from_node(self, node):
    """Read offset and celsius readings from a <temperature> element."""
    val = node.getElementsByTagName('temperature')[0]
    try:
        # Values are in tenths of a degree.  NOTE(review): only ValueError
        # is caught here (non-numeric payloads); a missing element would
        # raise an uncaught IndexError — confirm that is intended.
        self.offset = int(get_node_value(val, 'offset')) / 10
    except ValueError:
        pass
    try:
        self.temperature = int(get_node_value(val, 'celsius')) / 10
    except ValueError:
        pass

def _update_alarm_from_node(self, node):
    """Read the alert state from an <alert> element."""
    val = node.getElementsByTagName('alert')[0]
    try:
        self.alert_state = bool(int(get_node_value(val, 'state')))
    except IndexError:
        pass

def __repr__(self):
    """Return a short human-readable summary of the device."""
    return '{ain} {identifier} {manuf} {prod} {name}'.format(
        ain=self.ain,
        identifier=self.identifier,
        manuf=self.manufacturer,
        prod=self.productname,
        name=self.name)

def update(self):
    """Re-fetch the device element from the box and refresh all values."""
    node = self._fritz.get_device_element(self.ain)
    self._update_from_node(node)

@property
def has_alarm(self):
    """Check if the device has alarm function."""
    return bool(self._functionsbitmask & self.ALARM_MASK)

@property
def has_thermostat(self):
    """Check if the device has thermostat function."""
    return bool(self._functionsbitmask & self.THERMOSTAT_MASK)

@property
def has_powermeter(self):
    """Check if the device has powermeter function."""
    return bool(self._functionsbitmask & self.POWER_METER_MASK)

@property
def has_temperature_sensor(self):
    """Check if the device has temperature function."""
    return bool(self._functionsbitmask & self.TEMPERATURE_MASK)

@property
def has_switch(self):
    """Check if the device has switch function."""
    return bool(self._functionsbitmask & self.SWITCH_MASK)

@property
def has_repeater(self):
    """Check if the device has repeater function."""
    return bool(self._functionsbitmask & self.DECT_REPEATER_MASK)
def get_present(self):
    """Check if the device is present (queries the box)."""
    return self._fritz.get_device_present(self.ain)

def get_switch_state(self):
    """Get the switch state."""
    return self._fritz.get_switch_state(self.ain)

def set_switch_state_on(self):
    """Set the switch state to on."""
    return self._fritz.set_switch_state_on(self.ain)

def set_switch_state_off(self):
    """Set the switch state to off."""
    return self._fritz.set_switch_state_off(self.ain)

def set_switch_state_toggle(self):
    """Toggle the switch state."""
    return self._fritz.set_switch_state_toggle(self.ain)

def get_switch_power(self):
    """Get the switch power."""
    return self._fritz.get_switch_power(self.ain)

def get_switch_energy(self):
    """Get the switch energy."""
    return self._fritz.get_switch_energy(self.ain)

def get_temperature(self):
    """Get the device temperature value."""
    return self._fritz.get_temperature(self.ain)

def get_target_temperature(self):
    """Get the thermostate target temperature."""
    return self._fritz.get_target_temperature(self.ain)

def set_target_temperature(self, temperature):
    """Set the thermostate target temperature."""
    return self._fritz.set_target_temperature(self.ain, temperature)

def get_comfort_temperature(self):
    """Get the thermostate comfort temperature."""
    return self._fritz.get_comfort_temperature(self.ain)

def get_eco_temperature(self):
    """Get the thermostate eco temperature."""
    return self._fritz.get_eco_temperature(self.ain)

def get_hkr_state(self):
    """Get the thermostate state ('off'/'on'/'eco'/'comfort'/'manual')."""
    self.update()
    try:
        # Map the current target temperature onto a symbolic state; 126.5
        # and 127.0 are the protocol's encodings for "off" and "on".
        # NOTE(review): if eco/comfort equal one of the other keys, later
        # dict entries silently win — confirm this is acceptable.
        return {
            126.5: 'off',
            127.0: 'on',
            self.eco_temperature: 'eco',
            self.comfort_temperature: 'comfort'
        }[self.target_temperature]
    except KeyError:
        # Any other target temperature means the user set it manually.
        return 'manual'
|
ixc/python-edtf
|
edtf/fields.py
|
EDTFField.pre_save
|
python
|
def pre_save(self, instance, add):
    """Derive the EDTF value from the natural-text field before saving.

    Re-parses the companion natural-language field into an EDTF object,
    stores it on the instance, and populates (or clears) any configured
    derived date fields as `date`s or Julian-day floats.
    """
    if not self.natural_text_field or self.attname not in instance.__dict__:
        return
    edtf = getattr(instance, self.attname)
    # Update EDTF field based on latest natural text value, if any
    natural_text = getattr(instance, self.natural_text_field)
    if natural_text:
        edtf = text_to_edtf(natural_text)
    else:
        edtf = None
    # TODO If `natural_text_field` becomes cleared the derived EDTF field
    # value should also be cleared, rather than left at original value?
    # TODO Handle case where EDTF field is set to a string directly, not
    # via `natural_text_field` (this is a slightly unexpected use-case, but
    # is a very efficient way to set EDTF values in situations like for API
    # imports so we probably want to continue to support it?)
    if edtf and not isinstance(edtf, EDTFObject):
        edtf = parse_edtf(edtf, fail_silently=True)
    setattr(instance, self.attname, edtf)
    # set or clear related date fields on the instance
    for attr in DATE_ATTRS:
        field_attr = "%s_field" % attr
        g = getattr(self, field_attr, None)
        if g:
            if edtf:
                try:
                    target_field = instance._meta.get_field(g)
                except FieldDoesNotExist:
                    # Misconfigured derived field name: skip silently.
                    continue
                value = getattr(edtf, attr)()  # struct_time
                # Store as Julian Day (float) or calendar date depending
                # on the declared type of the derived field.
                if isinstance(target_field, models.FloatField):
                    value = struct_time_to_jd(value)
                elif isinstance(target_field, models.DateField):
                    value = struct_time_to_date(value)
                else:
                    raise NotImplementedError(
                        u"EDTFField does not support %s as a derived data"
                        u" field, only FloatField or DateField"
                        % type(target_field))
                setattr(instance, g, value)
            else:
                # No EDTF value: clear the derived field.
                setattr(instance, g, None)
    return edtf
|
Updates the edtf value from the value of the display_field.
If there's a valid edtf, then set the date values.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/fields.py#L87-L138
|
[
"def parse_edtf(str, parseAll=True, fail_silently=False):\n try:\n if not str:\n raise ParseException(\"You must supply some input text\")\n p = edtfParser.parseString(str.strip(), parseAll)\n if p:\n return p[0]\n except ParseException as e:\n if fail_silently:\n return None\n raise EDTFParseException(e)\n",
"def text_to_edtf(text):\n \"\"\"\n Generate EDTF string equivalent of a given natural language date string.\n \"\"\"\n if not text:\n return\n\n t = text.lower()\n\n # try parsing the whole thing\n result = text_to_edtf_date(t)\n\n if not result:\n # split by list delims and move fwd with the first thing that returns a non-empty string.\n # TODO: assemble multiple dates into a {} or [] structure.\n for split in [\",\", \";\", \"or\"]:\n for list_item in t.split(split):\n\n # try parsing as an interval - split by '-'\n toks = list_item.split(\"-\")\n if len(toks) == 2:\n d1 = toks[0].strip()\n d2 = toks[1].strip()\n\n # match looks from the beginning of the string, search\n # looks anywhere.\n\n if re.match(r'\\d\\D\\b', d2): # 1-digit year partial e.g. 1868-9\n if re.search(r'\\b\\d\\d\\d\\d$', d1): # TODO: evaluate it and see if it's a year\n d2 = d1[-4:-1] + d2\n elif re.match(r'\\d\\d\\b', d2): # 2-digit year partial e.g. 1809-10\n if re.search(r'\\b\\d\\d\\d\\d$', d1):\n d2 = d1[-4:-2] + d2\n else:\n century_range_match = re.search(r'\\b(\\d\\d)(th|st|nd|rd|)-(\\d\\d)(th|st|nd|rd) [cC]', \"%s-%s\" % (d1,d2))\n if century_range_match:\n g = century_range_match.groups()\n d1 = \"%sC\" % g[0]\n d2 = \"%sC\" % g[2]\n\n r1 = text_to_edtf_date(d1)\n r2 = text_to_edtf_date(d2)\n\n if r1 and r2:\n result = r1 + \"/\" + r2\n return result\n\n # is it an either/or year \"1838/1862\" - that has a different\n # representation in EDTF. If it's 'both', then we use {}. If\n # it's 'or' then we use []. 
Assuming the latter for now.\n # This whole section could be more friendly.\n\n else:\n int_match = re.search(r\"(\\d\\d\\d\\d)\\/(\\d\\d\\d\\d)\", list_item)\n if int_match:\n return \"[%s, %s]\" % (int_match.group(1), int_match.group(2))\n\n result = text_to_edtf_date(list_item)\n if result:\n break\n if result:\n break\n\n is_before = re.findall(r'\\bbefore\\b', t)\n is_before = is_before or re.findall(r'\\bearlier\\b', t)\n\n is_after = re.findall(r'\\bafter\\b', t)\n is_after = is_after or re.findall(r'\\bsince\\b', t)\n is_after = is_after or re.findall(r'\\blater\\b', t)\n\n if is_before:\n result = u\"unknown/%s\" % result\n elif is_after:\n result = u\"%s/unknown\" % result\n\n return result\n",
"def struct_time_to_jd(st):\n \"\"\"\n Return a float number representing the Julian Date for the given\n `struct_time`.\n\n NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.\n \"\"\"\n year, month, day = st[:3]\n hours, minutes, seconds = st[3:6]\n\n # Convert time of day to fraction of day\n day += jdutil.hmsm_to_days(hours, minutes, seconds)\n\n return jdutil.date_to_jd(year, month, day)\n"
] |
class EDTFField(models.CharField):
    """Django model field storing a complex/fuzzy EDTF date specification.

    The database value is either a pickled ``EDTFObject`` or a raw EDTF
    string; both forms are converted back to an ``EDTFObject`` on load.
    """

    def __init__(
        self,
        verbose_name=None, name=None,
        natural_text_field=None,
        lower_strict_field=None,
        upper_strict_field=None,
        lower_fuzzy_field=None,
        upper_fuzzy_field=None,
        **kwargs
    ):
        # max_length is fixed: pickled EDTF objects can be long.
        kwargs['max_length'] = 2000
        self.natural_text_field, self.lower_strict_field, \
            self.upper_strict_field, self.lower_fuzzy_field, \
            self.upper_fuzzy_field = natural_text_field, lower_strict_field, \
            upper_strict_field, lower_fuzzy_field, upper_fuzzy_field
        super(EDTFField, self).__init__(verbose_name, name, **kwargs)

    description = "A field for storing complex/fuzzy date specifications in EDTF format."

    def deconstruct(self):
        """Return (name, path, args, kwargs) for migration serialisation."""
        name, path, args, kwargs = super(EDTFField, self).deconstruct()
        if self.natural_text_field:
            kwargs['natural_text_field'] = self.natural_text_field
        for attr in DATE_ATTRS:
            field = "%s_field" % attr
            f = getattr(self, field, None)
            if f:
                kwargs[field] = f
        # max_length is hard-coded in __init__, so don't serialise it.
        del kwargs["max_length"]
        return name, path, args, kwargs

    def from_db_value(self, value, expression, connection, context):
        """Convert a database value to a Python ``EDTFObject`` (or None)."""
        if not value:
            return None
        # Best-effort unpickle of legacy values, falling back to parsing
        # the raw EDTF string.  Narrowed from a bare `except:` so that
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        # NOTE(review): unpickling database content is only safe while the
        # database itself is trusted.
        try:
            return pickle.loads(str(value))
        except Exception:
            pass
        return parse_edtf(value, fail_silently=True)

    def to_python(self, value):
        """Coerce ``value`` to an ``EDTFObject`` (idempotent)."""
        if isinstance(value, EDTFObject):
            return value
        if value is None:
            return value
        return parse_edtf(value, fail_silently=True)

    def get_db_prep_save(self, value, connection):
        """Pickle truthy values for storage; defer otherwise to CharField."""
        if value:
            return pickle.dumps(value)
        return super(EDTFField, self).get_db_prep_save(value, connection)

    def get_prep_value(self, value):
        # convert python objects to query values
        value = super(EDTFField, self).get_prep_value(value)
        if isinstance(value, EDTFObject):
            return pickle.dumps(value)
        return value
|
ixc/python-edtf
|
edtf/parser/parser_classes.py
|
apply_delta
|
python
|
def apply_delta(op, time_struct, delta):
if not delta:
return time_struct # No work to do
try:
dt_result = op(datetime(*time_struct[:6]), delta)
return dt_to_struct_time(dt_result)
except (OverflowError, ValueError):
# Year is not within supported 1 to 9999 AD range
pass
# Here we fake the year to one in the acceptable range to avoid having to
# write our own date rolling logic
# Adjust the year to be close to the 2000 millenium in 1,000 year
# increments to try and retain accurate relative leap years
actual_year = time_struct.tm_year
millenium = int(float(actual_year) / 1000)
millenium_diff = (2 - millenium) * 1000
adjusted_year = actual_year + millenium_diff
# Apply delta to the date/time with adjusted year
dt = datetime(*(adjusted_year,) + time_struct[1:6])
dt_result = op(dt, delta)
# Convert result year back to its original millenium
final_year = dt_result.year - millenium_diff
return struct_time(
(final_year,) + dt_result.timetuple()[1:6] + tuple(TIME_EMPTY_EXTRAS))
|
Apply a `relativedelta` to a `struct_time` data structure.
`op` is an operator function, probably always `add` or `sub`tract to
correspond to `a_date + a_delta` and `a_date - a_delta`.
This function is required because we cannot use standard `datetime` module
objects for conversion when the date/time is, or will become, outside the
boundary years 1 AD to 9999 AD.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/parser/parser_classes.py#L47-L83
|
[
"def dt_to_struct_time(dt):\n \"\"\"\n Convert a `datetime.date` or `datetime.datetime` to a `struct_time`\n representation *with zero values* for data fields that we cannot always\n rely on for ancient or far-future dates: tm_wday, tm_yday, tm_isdst\n\n NOTE: If it wasn't for the requirement that the extra fields are unset\n we could use the `timetuple()` method instead of this function.\n \"\"\"\n if isinstance(dt, datetime):\n return struct_time(\n [dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second] +\n TIME_EMPTY_EXTRAS\n )\n elif isinstance(dt, date):\n return struct_time(\n [dt.year, dt.month, dt.day] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS\n )\n else:\n raise NotImplementedError(\n \"Cannot convert %s to `struct_time`\" % type(dt))\n"
] |
import calendar
import re
from time import struct_time
from datetime import date, datetime
from operator import add, sub
from dateutil.relativedelta import relativedelta
from edtf import appsettings
from edtf.convert import dt_to_struct_time, trim_struct_time, \
TIME_EMPTY_TIME, TIME_EMPTY_EXTRAS
# Which end of a fuzzy/partial date to lean toward when collapsing it to a
# single concrete struct_time.
EARLIEST = 'earliest'
LATEST = 'latest'

# Precision labels, from coarsest to finest.
PRECISION_MILLENIUM = "millenium"
PRECISION_CENTURY = "century"
PRECISION_DECADE = "decade"
PRECISION_YEAR = "year"
PRECISION_MONTH = "month"
PRECISION_SEASON = "season"
PRECISION_DAY = "day"
def days_in_month(year, month):
    """
    Return the number of days in ``month`` (1=January .. 12=December) of
    ``year``, with February adjusted for leap years per `calendar.isleap`.
    """
    if month == 2:
        # February is the only month whose length depends on the year.
        return 29 if calendar.isleap(year) else 28
    return {
        1: 31,
        3: 31,
        4: 30,
        5: 31,
        6: 30,
        7: 31,
        8: 31,
        9: 30,
        10: 31,
        11: 30,
        12: 31,
    }[month]
class EDTFObject(object):
    """
    Base class for all parsed EDTF constructs.  An instance is attached to
    a pyparsing parser and instantiated when that parser completes.
    """
    parser = None  # set per-subclass via set_parser()

    @classmethod
    def set_parser(cls, p):
        """Bind pyparsing element `p` so matches construct this class."""
        cls.parser = p
        p.addParseAction(cls.parse_action)

    @classmethod
    def parse_action(cls, toks):
        kwargs = toks.asDict()
        try:
            return cls(**kwargs)  # replace the token list with the class
        except Exception as e:
            # Surface which constructor failed before re-raising.
            print("trying to %s.__init__(**%s)" % (cls.__name__, kwargs))
            raise e

    @classmethod
    def parse(cls, s):
        """Parse string `s` and return the first resulting EDTF object."""
        return cls.parser.parseString(s)[0]

    def __repr__(self):
        return "%s: '%s'" % (type(self).__name__, str(self))

    def __init__(self, *args, **kwargs):
        # Subclasses must implement __init__; note this deliberately
        # shadows the builtin `str` locally for the error message only.
        str = "%s.__init__(*%s, **%s)" % (
            type(self).__name__,
            args, kwargs,
        )
        raise NotImplementedError("%s is not implemented." % str)

    def __str__(self):
        raise NotImplementedError

    def _strict_date(self, lean):
        # Subclasses return a struct_time leaning to EARLIEST or LATEST.
        raise NotImplementedError

    def lower_strict(self):
        """Earliest strict struct_time reading of this object."""
        return self._strict_date(lean=EARLIEST)

    def upper_strict(self):
        """Latest strict struct_time reading of this object."""
        return self._strict_date(lean=LATEST)

    def _get_fuzzy_padding(self, lean):
        """
        Subclasses should override this to pad based on how precise they are.
        """
        return relativedelta(0)

    def get_is_approximate(self):
        return getattr(self, '_is_approximate', False)

    def set_is_approximate(self, val):
        self._is_approximate = val
    is_approximate = property(get_is_approximate, set_is_approximate)

    def get_is_uncertain(self):
        return getattr(self, '_is_uncertain', False)

    def set_is_uncertain(self, val):
        self._is_uncertain = val
    is_uncertain = property(get_is_uncertain, set_is_uncertain)

    def lower_fuzzy(self):
        """Earliest reading widened by this object's fuzzy padding."""
        strict_val = self.lower_strict()
        return apply_delta(sub, strict_val, self._get_fuzzy_padding(EARLIEST))

    def upper_fuzzy(self):
        """Latest reading widened by this object's fuzzy padding."""
        strict_val = self.upper_strict()
        return apply_delta(add, strict_val, self._get_fuzzy_padding(LATEST))

    # Equality compares string forms; ordering compares lower_strict().
    # NOTE(review): the struct_time branches call `self._strict_date()`
    # without the required `lean` argument and would raise TypeError if
    # exercised — confirm intended behaviour.
    def __eq__(self, other):
        if isinstance(other, EDTFObject):
            return str(self) == str(other)
        elif isinstance(other, date):
            return str(self) == other.isoformat()
        elif isinstance(other, struct_time):
            return self._strict_date() == trim_struct_time(other)
        return False

    def __ne__(self, other):
        if isinstance(other, EDTFObject):
            return str(self) != str(other)
        elif isinstance(other, date):
            return str(self) != other.isoformat()
        elif isinstance(other, struct_time):
            return self._strict_date() != trim_struct_time(other)
        return True

    def __gt__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() > other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() > dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() > trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))

    def __ge__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() >= other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() >= dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() >= trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))

    def __lt__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() < other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() < dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() < trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))

    def __le__(self, other):
        if isinstance(other, EDTFObject):
            return self.lower_strict() <= other.lower_strict()
        elif isinstance(other, date):
            return self.lower_strict() <= dt_to_struct_time(other)
        elif isinstance(other, struct_time):
            return self.lower_strict() <= trim_struct_time(other)
        raise TypeError("can't compare %s with %s" % (type(self).__name__, type(other).__name__))
# (* ************************** Level 0 *************************** *)
class Date(EDTFObject):
    """A (possibly partial) EDTF date: a year with optional month and day.

    Digits may contain the unspecified markers 'u'/'x', which are resolved
    to the earliest/latest possible value in the `_precise_*` helpers.
    """

    def set_year(self, y):
        # A Date without a year is meaningless.
        if y is None:
            raise AttributeError("Year must not be None")
        self._year = y

    def get_year(self):
        return self._year
    year = property(get_year, set_year)

    def set_month(self, m):
        self._month = m
        # A day without a month is meaningless, so clear it too.
        if m is None:  # was `m == None`; identity test is the correct idiom
            self.day = None

    def get_month(self):
        return self._month
    month = property(get_month, set_month)

    def __init__(self, year=None, month=None, day=None, **kwargs):
        # The parser sometimes wraps the values in a 'date'/'lower'/'upper'
        # dict; unwrap and re-dispatch.
        for param in ('date', 'lower', 'upper'):
            if param in kwargs:
                self.__init__(**kwargs[param])
                return
        self.year = year  # Year is required, but sometimes passed in as a 'date' dict.
        self.month = month
        self.day = day

    def __str__(self):
        r = self.year
        if self.month:
            r += "-%s" % self.month
        if self.day:
            r += "-%s" % self.day
        return r

    def isoformat(self, default=date.max):
        """ISO string, filling missing month/day from ``default``."""
        return "%s-%02d-%02d" % (
            self.year,
            int(self.month or default.month),
            int(self.day or default.day),
        )

    def _precise_year(self, lean):
        # Replace any ambiguous characters in the year string with 0s or 9s
        if lean == EARLIEST:
            return int(re.sub(r'[xu]', r'0', self.year))
        else:
            return int(re.sub(r'[xu]', r'9', self.year))

    def _precise_month(self, lean):
        if self.month and self.month != "uu":
            try:
                return int(self.month)
            except ValueError:
                raise ValueError("Couldn't convert %s to int (in %s)" % (self.month, self))
        else:
            return 1 if lean == EARLIEST else 12

    def _precise_day(self, lean):
        if not self.day or self.day == 'uu':
            if lean == EARLIEST:
                return 1
            else:
                # Last day of the latest possible month/year.
                return days_in_month(
                    self._precise_year(LATEST), self._precise_month(LATEST)
                )
        else:
            return int(self.day)

    def _strict_date(self, lean):
        """
        Return a `time.struct_time` representation of the date.
        """
        return struct_time(
            (
                self._precise_year(lean),
                self._precise_month(lean),
                self._precise_day(lean),
            ) + tuple(TIME_EMPTY_TIME) + tuple(TIME_EMPTY_EXTRAS)
        )

    @property
    def precision(self):
        """Finest populated component: day, month or year."""
        if self.day:
            return PRECISION_DAY
        if self.month:
            return PRECISION_MONTH
        return PRECISION_YEAR
class DateAndTime(EDTFObject):
    """A date with a time component, rendered as ISO 8601 "<date>T<time>"."""

    def __init__(self, date, time):
        self.date = date  # a Date object
        self.time = time  # the time portion, kept as a string
    def __str__(self):
        return self.isoformat()
    def isoformat(self):
        return self.date.isoformat() + "T" + self.time
    def _strict_date(self, lean):
        # Time-of-day is ignored for strict date calculations.
        return self.date._strict_date(lean)
    def __eq__(self, other):
        if isinstance(other, datetime):
            return self.isoformat() == other.isoformat()
        elif isinstance(other, struct_time):
            # NOTE(review): `_strict_date()` requires a `lean` argument, so
            # this branch would raise TypeError if exercised — confirm.
            return self._strict_date() == trim_struct_time(other)
        return super(DateAndTime, self).__eq__(other)
    def __ne__(self, other):
        if isinstance(other, datetime):
            return self.isoformat() != other.isoformat()
        elif isinstance(other, struct_time):
            return self._strict_date() != trim_struct_time(other)
        return super(DateAndTime, self).__ne__(other)

class Interval(EDTFObject):
    """An EDTF interval "<lower>/<upper>"; endpoints may be 'unknown'/'open'."""

    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper
    def __str__(self):
        return "%s/%s" % (self.lower, self.upper)
    def _strict_date(self, lean):
        if lean == EARLIEST:
            try:
                r = self.lower._strict_date(lean)
                if r is None:
                    raise AttributeError
                return r
            except AttributeError: # it's a string, or no date. Result depends on the upper date
                upper = self.upper._strict_date(LATEST)
                return apply_delta(sub, upper, appsettings.DELTA_IF_UNKNOWN)
        else:
            try:
                r = self.upper._strict_date(lean)
                if r is None:
                    raise AttributeError
                return r
            except AttributeError: # an 'unknown' or 'open' string - depends on the lower date
                if self.upper and (self.upper == "open" or self.upper.date == "open"):
                    return dt_to_struct_time(date.today()) # it's still happening
                else:
                    lower = self.lower._strict_date(EARLIEST)
                    return apply_delta(add, lower, appsettings.DELTA_IF_UNKNOWN)
# (* ************************** Level 1 *************************** *)
class UA(EDTFObject):
    """An uncertain ("?") and/or approximate ("~") qualifier."""

    @classmethod
    def parse_action(cls, toks):
        args = toks.asList()
        return cls(*args)
    def __init__(self, *args):
        assert len(args)==1
        ua = args[0]
        self.is_uncertain = "?" in ua
        self.is_approximate = "~" in ua
    def __str__(self):
        d = ""
        if self.is_uncertain:
            d += "?"
        if self.is_approximate:
            d += "~"
        return d
    def _get_multiplier(self):
        # NOTE(review): implicitly returns None when neither flag is set;
        # callers appear to invoke this only on qualified dates — confirm.
        if self.is_uncertain and self.is_approximate:
            return appsettings.MULTIPLIER_IF_BOTH
        elif self.is_uncertain:
            return appsettings.MULTIPLIER_IF_UNCERTAIN
        elif self.is_approximate:
            return appsettings.MULTIPLIER_IF_APPROXIMATE

class UncertainOrApproximate(EDTFObject):
    """A date (or the strings 'open'/'unknown') with an optional UA qualifier."""

    def __init__(self, date, ua):
        self.date = date
        self.ua = ua
    def __str__(self):
        if self.ua:
            return "%s%s" % (self.date, self.ua)
        else:
            return str(self.date)
    def _strict_date(self, lean):
        if self.date == "open":
            return dt_to_struct_time(date.today())  # still ongoing
        if self.date == "unknown":
            return None # depends on the other date
        return self.date._strict_date(lean)
    def _get_fuzzy_padding(self, lean):
        if not self.ua:
            return relativedelta(0)
        multiplier = self.ua._get_multiplier()
        # Pad proportionally to how precise the underlying date is.
        if self.date.precision == PRECISION_DAY:
            return multiplier * appsettings.PADDING_DAY_PRECISION
        elif self.date.precision == PRECISION_MONTH:
            return multiplier * appsettings.PADDING_MONTH_PRECISION
        elif self.date.precision == PRECISION_YEAR:
            return multiplier * appsettings.PADDING_YEAR_PRECISION

class Unspecified(Date):
    # Unspecified digits ("199u", "19uu") reuse Date's u/x substitution.
    pass

class Level1Interval(Interval):
    """An interval whose endpoints may carry UA qualifiers."""

    def __init__(self, lower, upper):
        self.lower = UncertainOrApproximate(**lower)
        self.upper = UncertainOrApproximate(**upper)
    def _get_fuzzy_padding(self, lean):
        # Padding comes from whichever endpoint we are leaning toward.
        if lean == EARLIEST:
            return self.lower._get_fuzzy_padding(lean)
        elif lean == LATEST:
            return self.upper._get_fuzzy_padding(lean)

class LongYear(EDTFObject):
    """A year outside the four-digit range, written "y<digits>"."""

    def __init__(self, year):
        self.year = year
    def __str__(self):
        return "y%s" % self.year
    def _precise_year(self):
        return int(self.year)
    def _strict_date(self, lean):
        py = self._precise_year()
        if lean == EARLIEST:
            return struct_time(
                [py, 1, 1] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
        else:
            return struct_time(
                [py, 12, 31] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)

class Season(Date):
    """A year qualified by a season code, mapped to a month range via
    ``appsettings.SEASON_MONTHS_RANGE``."""

    def __init__(self, year, season, **kwargs):
        self.year = year
        self.season = season # use season to look up month
        # day isn't part of the 'season' spec, but it helps the inherited
        # `Date` methods do their thing.
        self.day = None
    def __str__(self):
        return "%s-%s" % (self.year, self.season)
    def _precise_month(self, lean):
        rng = appsettings.SEASON_MONTHS_RANGE[int(self.season)]
        if lean == EARLIEST:
            return rng[0]
        else:
            return rng[1]
# (* ************************** Level 2 *************************** *)
class PartialUncertainOrApproximate(Date):
    """A date where individual components (year, month, day, season, or
    groups thereof) carry their own uncertain/approximate qualifiers."""

    def set_year(self, y):  # Year can be None.
        self._year = y
    year = property(Date.get_year, set_year)

    def __init__(
        self, year=None, month=None, day=None,
        year_ua=False, month_ua=False, day_ua=False,
        year_month_ua=False, month_day_ua=False,
        ssn=None, season_ua=False, all_ua=False
    ):
        self.year = year
        self.month = month
        self.day = day
        # Per-component qualifiers (UA objects or False).
        self.year_ua = year_ua
        self.month_ua = month_ua
        self.day_ua = day_ua
        # Grouped qualifiers covering two components at once.
        self.year_month_ua = year_month_ua
        self.month_day_ua = month_day_ua
        self.season = ssn
        self.season_ua = season_ua
        # Qualifier applying to the whole expression, e.g. "(...)~".
        self.all_ua = all_ua

    def __str__(self):
        # Reassemble the EDTF string, bracketing qualified components.
        if self.season_ua:
            return "%s%s" % (self.season, self.season_ua)
        if self.year_ua:
            y = "%s%s" % (self.year, self.year_ua)
        else:
            y = str(self.year)
        if self.month_ua:
            m = "(%s)%s" % (self.month, self.month_ua)
        else:
            m = str(self.month)
        if self.day:
            if self.day_ua:
                d = "(%s)%s" % (self.day, self.day_ua)
            else:
                d = str(self.day)
        else:
            d = None
        if self.year_month_ua: # year/month approximate. No brackets needed.
            ym = "%s-%s%s" % (y, m, self.year_month_ua)
            if d:
                result = "%s-%s" % (ym, d)
            else:
                result = ym
        elif self.month_day_ua:
            if self.year_ua: # we don't need the brackets round month and day
                result = "%s-%s-%s%s" % (y, m, d, self.month_day_ua)
            else:
                result = "%s-(%s-%s)%s" % (y, m, d, self.month_day_ua)
        else:
            if d:
                result = "%s-%s-%s" % (y, m, d)
            else:
                result = "%s-%s" % (y, m)
        if self.all_ua:
            result = "(%s)%s" % (result, self.all_ua)
        return result

    def _precise_year(self, lean):
        # Season dates delegate component resolution to the Season object.
        if self.season:
            return self.season._precise_year(lean)
        return super(PartialUncertainOrApproximate, self)._precise_year(lean)

    def _precise_month(self, lean):
        if self.season:
            return self.season._precise_month(lean)
        return super(PartialUncertainOrApproximate, self)._precise_month(lean)

    def _precise_day(self, lean):
        if self.season:
            return self.season._precise_day(lean)
        return super(PartialUncertainOrApproximate, self)._precise_day(lean)

    def _get_fuzzy_padding(self, lean):
        """
        This is not a perfect interpretation as fuzziness is introduced for
        redundant uncertainly modifiers e.g. (2006~)~ will get two sets of
        fuzziness.
        """
        result = relativedelta(0)
        if self.year_ua:
            result += appsettings.PADDING_YEAR_PRECISION * self.year_ua._get_multiplier()
        if self.month_ua:
            result += appsettings.PADDING_MONTH_PRECISION * self.month_ua._get_multiplier()
        if self.day_ua:
            result += appsettings.PADDING_DAY_PRECISION * self.day_ua._get_multiplier()
        # Grouped qualifiers pad both components they cover.
        if self.year_month_ua:
            result += appsettings.PADDING_YEAR_PRECISION * self.year_month_ua._get_multiplier()
            result += appsettings.PADDING_MONTH_PRECISION * self.year_month_ua._get_multiplier()
        if self.month_day_ua:
            result += appsettings.PADDING_DAY_PRECISION * self.month_day_ua._get_multiplier()
            result += appsettings.PADDING_MONTH_PRECISION * self.month_day_ua._get_multiplier()
        if self.season_ua:
            result += appsettings.PADDING_SEASON_PRECISION * self.season_ua._get_multiplier()
        if self.all_ua:
            # A whole-expression qualifier pads every populated component.
            multiplier = self.all_ua._get_multiplier()
            if self.precision == PRECISION_DAY:
                result += multiplier * appsettings.PADDING_DAY_PRECISION
                result += multiplier * appsettings.PADDING_MONTH_PRECISION
                result += multiplier * appsettings.PADDING_YEAR_PRECISION
            elif self.precision == PRECISION_MONTH:
                result += multiplier * appsettings.PADDING_MONTH_PRECISION
                result += multiplier * appsettings.PADDING_YEAR_PRECISION
            elif self.precision == PRECISION_YEAR:
                result += multiplier * appsettings.PADDING_YEAR_PRECISION
        return result
class PartialUnspecified(Unspecified):
    # Level 2 partially-unspecified dates behave exactly like Unspecified.
    pass

class Consecutives(Interval):
    # Treating Consecutive ranges as intervals where one bound is optional
    def __init__(self, lower=None, upper=None):
        # Bounds may arrive as raw strings; parse them into Date objects.
        if lower and not isinstance(lower, EDTFObject):
            self.lower = Date.parse(lower)
        else:
            self.lower = lower
        if upper and not isinstance(upper, EDTFObject):
            self.upper = Date.parse(upper)
        else:
            self.upper = upper
    def __str__(self):
        # A missing bound renders as an empty side, e.g. "..2006" / "2004..".
        return "%s..%s" % (self.lower or '', self.upper or '')

class EarlierConsecutives(Consecutives):
    # Open at the earlier end (only `upper` is set).
    pass

class LaterConsecutives(Consecutives):
    # Open at the later end (only `lower` is set).
    pass

class OneOfASet(EDTFObject):
    """One date chosen from several alternatives: "[d1, d2, ...]"."""

    @classmethod
    def parse_action(cls, toks):
        # Keep only parsed EDTF objects, dropping punctuation tokens.
        args = [t for t in toks.asList() if isinstance(t, EDTFObject)]
        return cls(*args)
    def __init__(self, *args):
        self.objects = args
    def __str__(self):
        return "[%s]" % (", ".join([str(o) for o in self.objects]))
    def _strict_date(self, lean):
        # The set's bounds are the extreme bounds of its members.
        if lean == LATEST:
            return max([x._strict_date(lean) for x in self.objects])
        else:
            return min([x._strict_date(lean) for x in self.objects])

class MultipleDates(EDTFObject):
    """All of several dates: "{d1, d2, ...}"."""

    @classmethod
    def parse_action(cls, toks):
        args = [t for t in toks.asList() if isinstance(t, EDTFObject)]
        return cls(*args)
    def __init__(self, *args):
        self.objects = args
    def __str__(self):
        return "{%s}" % (", ".join([str(o) for o in self.objects]))
    def _strict_date(self, lean):
        if lean == LATEST:
            return max([x._strict_date(lean) for x in self.objects])
        else:
            return min([x._strict_date(lean) for x in self.objects])

class MaskedPrecision(Date):
    # e.g. "196x" — handled entirely by Date's u/x digit substitution.
    pass

class Level2Interval(Level1Interval):
    """Like Level1Interval, but endpoints arrive as already-parsed objects."""

    def __init__(self, lower, upper):
        self.lower = lower
        self.upper = upper

class ExponentialYear(LongYear):
    """A year expressed as base * 10**exponent, optionally with a
    significant-digits precision ("...e...p...")."""

    def __init__(self, base, exponent, precision=None):
        self.base = base
        self.exponent = exponent
        self.precision = precision
    def _precise_year(self):
        return int(self.base) * 10 ** int(self.exponent)
    def get_year(self):
        # Render back to the textual "e"/"p" notation, not the numeric year.
        if self.precision:
            return '%se%sp%s' % (self.base, self.exponent, self.precision)
        else:
            return '%se%s' % (self.base, self.exponent)
    year = property(get_year)
|
ixc/python-edtf
|
edtf/parser/parser_classes.py
|
Date._strict_date
|
python
|
def _strict_date(self, lean):
return struct_time(
(
self._precise_year(lean),
self._precise_month(lean),
self._precise_day(lean),
) + tuple(TIME_EMPTY_TIME) + tuple(TIME_EMPTY_EXTRAS)
)
|
Return a `time.struct_time` representation of the date.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/parser/parser_classes.py#L290-L300
| null |
class Date(EDTFObject):
def set_year(self, y):
if y is None:
raise AttributeError("Year must not be None")
self._year = y
def get_year(self):
return self._year
year = property(get_year, set_year)
def set_month(self, m):
self._month = m
if m == None:
self.day = None
def get_month(self):
return self._month
month = property(get_month, set_month)
def __init__(self, year=None, month=None, day=None, **kwargs):
for param in ('date', 'lower', 'upper'):
if param in kwargs:
self.__init__(**kwargs[param])
return
self.year = year # Year is required, but sometimes passed in as a 'date' dict.
self.month = month
self.day = day
def __str__(self):
r = self.year
if self.month:
r += "-%s" % self.month
if self.day:
r += "-%s" % self.day
return r
def isoformat(self, default=date.max):
return "%s-%02d-%02d" % (
self.year,
int(self.month or default.month),
int(self.day or default.day),
)
def _precise_year(self, lean):
# Replace any ambiguous characters in the year string with 0s or 9s
if lean == EARLIEST:
return int(re.sub(r'[xu]', r'0', self.year))
else:
return int(re.sub(r'[xu]', r'9', self.year))
def _precise_month(self, lean):
if self.month and self.month != "uu":
try:
return int(self.month)
except ValueError as e:
raise ValueError("Couldn't convert %s to int (in %s)" % (self.month, self))
else:
return 1 if lean == EARLIEST else 12
def _precise_day(self, lean):
if not self.day or self.day == 'uu':
if lean == EARLIEST:
return 1
else:
return days_in_month(
self._precise_year(LATEST), self._precise_month(LATEST)
)
else:
return int(self.day)
@property
def precision(self):
if self.day:
return PRECISION_DAY
if self.month:
return PRECISION_MONTH
return PRECISION_YEAR
|
ixc/python-edtf
|
edtf/parser/parser_classes.py
|
PartialUncertainOrApproximate._get_fuzzy_padding
|
python
|
def _get_fuzzy_padding(self, lean):
result = relativedelta(0)
if self.year_ua:
result += appsettings.PADDING_YEAR_PRECISION * self.year_ua._get_multiplier()
if self.month_ua:
result += appsettings.PADDING_MONTH_PRECISION * self.month_ua._get_multiplier()
if self.day_ua:
result += appsettings.PADDING_DAY_PRECISION * self.day_ua._get_multiplier()
if self.year_month_ua:
result += appsettings.PADDING_YEAR_PRECISION * self.year_month_ua._get_multiplier()
result += appsettings.PADDING_MONTH_PRECISION * self.year_month_ua._get_multiplier()
if self.month_day_ua:
result += appsettings.PADDING_DAY_PRECISION * self.month_day_ua._get_multiplier()
result += appsettings.PADDING_MONTH_PRECISION * self.month_day_ua._get_multiplier()
if self.season_ua:
result += appsettings.PADDING_SEASON_PRECISION * self.season_ua._get_multiplier()
if self.all_ua:
multiplier = self.all_ua._get_multiplier()
if self.precision == PRECISION_DAY:
result += multiplier * appsettings.PADDING_DAY_PRECISION
result += multiplier * appsettings.PADDING_MONTH_PRECISION
result += multiplier * appsettings.PADDING_YEAR_PRECISION
elif self.precision == PRECISION_MONTH:
result += multiplier * appsettings.PADDING_MONTH_PRECISION
result += multiplier * appsettings.PADDING_YEAR_PRECISION
elif self.precision == PRECISION_YEAR:
result += multiplier * appsettings.PADDING_YEAR_PRECISION
return result
|
This is not a perfect interpretation as fuzziness is introduced for
redundant uncertainly modifiers e.g. (2006~)~ will get two sets of
fuzziness.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/parser/parser_classes.py#L582-L620
| null |
class PartialUncertainOrApproximate(Date):
def set_year(self, y): # Year can be None.
self._year = y
year = property(Date.get_year, set_year)
def __init__(
self, year=None, month=None, day=None,
year_ua=False, month_ua = False, day_ua = False,
year_month_ua = False, month_day_ua = False,
ssn=None, season_ua=False, all_ua=False
):
self.year = year
self.month = month
self.day = day
self.year_ua = year_ua
self.month_ua = month_ua
self.day_ua = day_ua
self.year_month_ua = year_month_ua
self.month_day_ua = month_day_ua
self.season = ssn
self.season_ua = season_ua
self.all_ua = all_ua
def __str__(self):
if self.season_ua:
return "%s%s" % (self.season, self.season_ua)
if self.year_ua:
y = "%s%s" % (self.year, self.year_ua)
else:
y = str(self.year)
if self.month_ua:
m = "(%s)%s" % (self.month, self.month_ua)
else:
m = str(self.month)
if self.day:
if self.day_ua:
d = "(%s)%s" % (self.day, self.day_ua)
else:
d = str(self.day)
else:
d = None
if self.year_month_ua: # year/month approximate. No brackets needed.
ym = "%s-%s%s" % (y, m, self.year_month_ua)
if d:
result = "%s-%s" % (ym, d)
else:
result = ym
elif self.month_day_ua:
if self.year_ua: # we don't need the brackets round month and day
result = "%s-%s-%s%s" % (y, m, d, self.month_day_ua)
else:
result = "%s-(%s-%s)%s" % (y, m, d, self.month_day_ua)
else:
if d:
result = "%s-%s-%s" % (y, m, d)
else:
result = "%s-%s" % (y, m)
if self.all_ua:
result = "(%s)%s" % (result, self.all_ua)
return result
def _precise_year(self, lean):
if self.season:
return self.season._precise_year(lean)
return super(PartialUncertainOrApproximate, self)._precise_year(lean)
def _precise_month(self, lean):
if self.season:
return self.season._precise_month(lean)
return super(PartialUncertainOrApproximate, self)._precise_month(lean)
def _precise_day(self, lean):
if self.season:
return self.season._precise_day(lean)
return super(PartialUncertainOrApproximate, self)._precise_day(lean)
|
ixc/python-edtf
|
edtf/jdutil.py
|
date_to_jd
|
python
|
def date_to_jd(year,month,day):
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd
|
Convert a date to Julian Day.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Returns
-------
jd : float
Julian Day
Examples
--------
Convert 6 a.m., February 17, 1985 to Julian Day
>>> date_to_jd(1985,2,17.25)
2446113.75
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L57-L117
| null |
# Source: https://gist.github.com/jiffyclub/1294443
"""
Functions for converting dates to/from JD and MJD. Assumes dates are historical
dates, including the transition from the Julian calendar to the Gregorian
calendar in 1582. No support for proleptic Gregorian/Julian calendars.
:Author: Matt Davis
:Website: http://github.com/jiffyclub
"""
import math
import datetime as dt
# Note: The Python datetime module assumes an infinitely valid Gregorian calendar.
# The Gregorian calendar took effect after 10-15-1582 and the dates 10-05 through
# 10-14-1582 never occurred. Python datetime objects will produce incorrect
# time deltas if one date is from before 10-15-1582.
def mjd_to_jd(mjd):
"""
Convert Modified Julian Day to Julian Day.
Parameters
----------
mjd : float
Modified Julian Day
Returns
-------
jd : float
Julian Day
"""
return mjd + 2400000.5
def jd_to_mjd(jd):
"""
Convert Julian Day to Modified Julian Day
Parameters
----------
jd : float
Julian Day
Returns
-------
mjd : float
Modified Julian Day
"""
return jd - 2400000.5
def jd_to_date(jd):
"""
Convert Julian Day to date.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
jd : float
Julian Day
Returns
-------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Examples
--------
Convert Julian Day 2446113.75 to year, month, and day.
>>> jd_to_date(2446113.75)
(1985, 2, 17.25)
"""
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
"""
Convert hours, minutes, seconds, and microseconds to fractional days.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25
"""
days = sec + (micro / 1.e6)
days = min + (days / 60.)
days = hour + (days / 60.)
return days / 24.
def days_to_hmsm(days):
"""
Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0)
"""
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro)
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
def jd_to_datetime(jd):
"""
Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0)
"""
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro)
def timedelta_to_days(td):
"""
Convert a `datetime.timedelta` object to a total number of days.
Parameters
----------
td : `datetime.timedelta` instance
Returns
-------
days : float
Total number of days in the `datetime.timedelta` object.
Examples
--------
>>> td = datetime.timedelta(4.5)
>>> td
datetime.timedelta(4, 43200)
>>> timedelta_to_days(td)
4.5
"""
seconds_in_day = 24. * 3600.
days = td.days + (td.seconds + (td.microseconds * 10.e6)) / seconds_in_day
return days
class datetime(dt.datetime):
"""
A subclass of `datetime.datetime` that performs math operations by first
converting to Julian Day, then back to a `jdutil.datetime` object.
Addition works with `datetime.timedelta` objects, subtraction works with
`datetime.timedelta`, `datetime.datetime`, and `jdutil.datetime` objects.
Not all combinations work in all directions, e.g.
`timedelta - datetime` is meaningless.
See Also
--------
datetime.datetime : Parent class.
"""
def __add__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __radd__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __sub__(self,other):
if isinstance(other,dt.timedelta):
days = timedelta_to_days(other)
combined = datetime_to_jd(self) - days
return jd_to_datetime(combined)
elif isinstance(other, (datetime,dt.datetime)):
diff = datetime_to_jd(self) - datetime_to_jd(other)
return dt.timedelta(diff)
else:
s = "jdutil.datetime supports '-' with: "
s += "datetime.timedelta, jdutil.datetime and datetime.datetime"
raise TypeError(s)
def __rsub__(self,other):
if not isinstance(other, (datetime,dt.datetime)):
s = "jdutil.datetime supports '-' with: "
s += "jdutil.datetime and datetime.datetime"
raise TypeError(s)
diff = datetime_to_jd(other) - datetime_to_jd(self)
return dt.timedelta(diff)
def to_jd(self):
"""
Return the date converted to Julian Day.
"""
return datetime_to_jd(self)
def to_mjd(self):
"""
Return the date converted to Modified Julian Day.
"""
return jd_to_mjd(self.to_jd())
|
ixc/python-edtf
|
edtf/jdutil.py
|
jd_to_date
|
python
|
def jd_to_date(jd):
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day
|
Convert Julian Day to date.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
jd : float
Julian Day
Returns
-------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Examples
--------
Convert Julian Day 2446113.75 to year, month, and day.
>>> jd_to_date(2446113.75)
(1985, 2, 17.25)
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L120-L184
| null |
# Source: https://gist.github.com/jiffyclub/1294443
"""
Functions for converting dates to/from JD and MJD. Assumes dates are historical
dates, including the transition from the Julian calendar to the Gregorian
calendar in 1582. No support for proleptic Gregorian/Julian calendars.
:Author: Matt Davis
:Website: http://github.com/jiffyclub
"""
import math
import datetime as dt
# Note: The Python datetime module assumes an infinitely valid Gregorian calendar.
# The Gregorian calendar took effect after 10-15-1582 and the dates 10-05 through
# 10-14-1582 never occurred. Python datetime objects will produce incorrect
# time deltas if one date is from before 10-15-1582.
def mjd_to_jd(mjd):
"""
Convert Modified Julian Day to Julian Day.
Parameters
----------
mjd : float
Modified Julian Day
Returns
-------
jd : float
Julian Day
"""
return mjd + 2400000.5
def jd_to_mjd(jd):
"""
Convert Julian Day to Modified Julian Day
Parameters
----------
jd : float
Julian Day
Returns
-------
mjd : float
Modified Julian Day
"""
return jd - 2400000.5
def date_to_jd(year,month,day):
"""
Convert a date to Julian Day.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Returns
-------
jd : float
Julian Day
Examples
--------
Convert 6 a.m., February 17, 1985 to Julian Day
>>> date_to_jd(1985,2,17.25)
2446113.75
"""
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
"""
Convert hours, minutes, seconds, and microseconds to fractional days.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25
"""
days = sec + (micro / 1.e6)
days = min + (days / 60.)
days = hour + (days / 60.)
return days / 24.
def days_to_hmsm(days):
"""
Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0)
"""
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro)
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
def jd_to_datetime(jd):
"""
Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0)
"""
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro)
def timedelta_to_days(td):
"""
Convert a `datetime.timedelta` object to a total number of days.
Parameters
----------
td : `datetime.timedelta` instance
Returns
-------
days : float
Total number of days in the `datetime.timedelta` object.
Examples
--------
>>> td = datetime.timedelta(4.5)
>>> td
datetime.timedelta(4, 43200)
>>> timedelta_to_days(td)
4.5
"""
seconds_in_day = 24. * 3600.
days = td.days + (td.seconds + (td.microseconds * 10.e6)) / seconds_in_day
return days
class datetime(dt.datetime):
"""
A subclass of `datetime.datetime` that performs math operations by first
converting to Julian Day, then back to a `jdutil.datetime` object.
Addition works with `datetime.timedelta` objects, subtraction works with
`datetime.timedelta`, `datetime.datetime`, and `jdutil.datetime` objects.
Not all combinations work in all directions, e.g.
`timedelta - datetime` is meaningless.
See Also
--------
datetime.datetime : Parent class.
"""
def __add__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __radd__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __sub__(self,other):
if isinstance(other,dt.timedelta):
days = timedelta_to_days(other)
combined = datetime_to_jd(self) - days
return jd_to_datetime(combined)
elif isinstance(other, (datetime,dt.datetime)):
diff = datetime_to_jd(self) - datetime_to_jd(other)
return dt.timedelta(diff)
else:
s = "jdutil.datetime supports '-' with: "
s += "datetime.timedelta, jdutil.datetime and datetime.datetime"
raise TypeError(s)
def __rsub__(self,other):
if not isinstance(other, (datetime,dt.datetime)):
s = "jdutil.datetime supports '-' with: "
s += "jdutil.datetime and datetime.datetime"
raise TypeError(s)
diff = datetime_to_jd(other) - datetime_to_jd(self)
return dt.timedelta(diff)
def to_jd(self):
"""
Return the date converted to Julian Day.
"""
return datetime_to_jd(self)
def to_mjd(self):
"""
Return the date converted to Modified Julian Day.
"""
return jd_to_mjd(self.to_jd())
|
ixc/python-edtf
|
edtf/jdutil.py
|
hmsm_to_days
|
python
|
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
days = sec + (micro / 1.e6)
days = min + (days / 60.)
days = hour + (days / 60.)
return days / 24.
|
Convert hours, minutes, seconds, and microseconds to fractional days.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L187-L222
| null |
# Source: https://gist.github.com/jiffyclub/1294443
"""
Functions for converting dates to/from JD and MJD. Assumes dates are historical
dates, including the transition from the Julian calendar to the Gregorian
calendar in 1582. No support for proleptic Gregorian/Julian calendars.
:Author: Matt Davis
:Website: http://github.com/jiffyclub
"""
import math
import datetime as dt
# Note: The Python datetime module assumes an infinitely valid Gregorian calendar.
# The Gregorian calendar took effect after 10-15-1582 and the dates 10-05 through
# 10-14-1582 never occurred. Python datetime objects will produce incorrect
# time deltas if one date is from before 10-15-1582.
def mjd_to_jd(mjd):
"""
Convert Modified Julian Day to Julian Day.
Parameters
----------
mjd : float
Modified Julian Day
Returns
-------
jd : float
Julian Day
"""
return mjd + 2400000.5
def jd_to_mjd(jd):
"""
Convert Julian Day to Modified Julian Day
Parameters
----------
jd : float
Julian Day
Returns
-------
mjd : float
Modified Julian Day
"""
return jd - 2400000.5
def date_to_jd(year,month,day):
"""
Convert a date to Julian Day.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Returns
-------
jd : float
Julian Day
Examples
--------
Convert 6 a.m., February 17, 1985 to Julian Day
>>> date_to_jd(1985,2,17.25)
2446113.75
"""
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd
def jd_to_date(jd):
"""
Convert Julian Day to date.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
jd : float
Julian Day
Returns
-------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Examples
--------
Convert Julian Day 2446113.75 to year, month, and day.
>>> jd_to_date(2446113.75)
(1985, 2, 17.25)
"""
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day
def days_to_hmsm(days):
"""
Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0)
"""
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro)
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
def jd_to_datetime(jd):
"""
Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0)
"""
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro)
def timedelta_to_days(td):
"""
Convert a `datetime.timedelta` object to a total number of days.
Parameters
----------
td : `datetime.timedelta` instance
Returns
-------
days : float
Total number of days in the `datetime.timedelta` object.
Examples
--------
>>> td = datetime.timedelta(4.5)
>>> td
datetime.timedelta(4, 43200)
>>> timedelta_to_days(td)
4.5
"""
seconds_in_day = 24. * 3600.
days = td.days + (td.seconds + (td.microseconds * 10.e6)) / seconds_in_day
return days
class datetime(dt.datetime):
"""
A subclass of `datetime.datetime` that performs math operations by first
converting to Julian Day, then back to a `jdutil.datetime` object.
Addition works with `datetime.timedelta` objects, subtraction works with
`datetime.timedelta`, `datetime.datetime`, and `jdutil.datetime` objects.
Not all combinations work in all directions, e.g.
`timedelta - datetime` is meaningless.
See Also
--------
datetime.datetime : Parent class.
"""
def __add__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __radd__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __sub__(self,other):
if isinstance(other,dt.timedelta):
days = timedelta_to_days(other)
combined = datetime_to_jd(self) - days
return jd_to_datetime(combined)
elif isinstance(other, (datetime,dt.datetime)):
diff = datetime_to_jd(self) - datetime_to_jd(other)
return dt.timedelta(diff)
else:
s = "jdutil.datetime supports '-' with: "
s += "datetime.timedelta, jdutil.datetime and datetime.datetime"
raise TypeError(s)
def __rsub__(self,other):
if not isinstance(other, (datetime,dt.datetime)):
s = "jdutil.datetime supports '-' with: "
s += "jdutil.datetime and datetime.datetime"
raise TypeError(s)
diff = datetime_to_jd(other) - datetime_to_jd(self)
return dt.timedelta(diff)
def to_jd(self):
"""
Return the date converted to Julian Day.
"""
return datetime_to_jd(self)
def to_mjd(self):
"""
Return the date converted to Modified Julian Day.
"""
return jd_to_mjd(self.to_jd())
|
ixc/python-edtf
|
edtf/jdutil.py
|
days_to_hmsm
|
python
|
def days_to_hmsm(days):
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro)
|
Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0)
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L225-L271
| null |
# Source: https://gist.github.com/jiffyclub/1294443
"""
Functions for converting dates to/from JD and MJD. Assumes dates are historical
dates, including the transition from the Julian calendar to the Gregorian
calendar in 1582. No support for proleptic Gregorian/Julian calendars.
:Author: Matt Davis
:Website: http://github.com/jiffyclub
"""
import math
import datetime as dt
# Note: The Python datetime module assumes an infinitely valid Gregorian calendar.
# The Gregorian calendar took effect after 10-15-1582 and the dates 10-05 through
# 10-14-1582 never occurred. Python datetime objects will produce incorrect
# time deltas if one date is from before 10-15-1582.
def mjd_to_jd(mjd):
"""
Convert Modified Julian Day to Julian Day.
Parameters
----------
mjd : float
Modified Julian Day
Returns
-------
jd : float
Julian Day
"""
return mjd + 2400000.5
def jd_to_mjd(jd):
"""
Convert Julian Day to Modified Julian Day
Parameters
----------
jd : float
Julian Day
Returns
-------
mjd : float
Modified Julian Day
"""
return jd - 2400000.5
def date_to_jd(year,month,day):
"""
Convert a date to Julian Day.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Returns
-------
jd : float
Julian Day
Examples
--------
Convert 6 a.m., February 17, 1985 to Julian Day
>>> date_to_jd(1985,2,17.25)
2446113.75
"""
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd
def jd_to_date(jd):
"""
Convert Julian Day to date.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
jd : float
Julian Day
Returns
-------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Examples
--------
Convert Julian Day 2446113.75 to year, month, and day.
>>> jd_to_date(2446113.75)
(1985, 2, 17.25)
"""
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
"""
Convert hours, minutes, seconds, and microseconds to fractional days.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25
"""
days = sec + (micro / 1.e6)
days = min + (days / 60.)
days = hour + (days / 60.)
return days / 24.
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
def jd_to_datetime(jd):
"""
Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0)
"""
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro)
def timedelta_to_days(td):
"""
Convert a `datetime.timedelta` object to a total number of days.
Parameters
----------
td : `datetime.timedelta` instance
Returns
-------
days : float
Total number of days in the `datetime.timedelta` object.
Examples
--------
>>> td = datetime.timedelta(4.5)
>>> td
datetime.timedelta(4, 43200)
>>> timedelta_to_days(td)
4.5
"""
seconds_in_day = 24. * 3600.
days = td.days + (td.seconds + (td.microseconds * 10.e6)) / seconds_in_day
return days
class datetime(dt.datetime):
"""
A subclass of `datetime.datetime` that performs math operations by first
converting to Julian Day, then back to a `jdutil.datetime` object.
Addition works with `datetime.timedelta` objects, subtraction works with
`datetime.timedelta`, `datetime.datetime`, and `jdutil.datetime` objects.
Not all combinations work in all directions, e.g.
`timedelta - datetime` is meaningless.
See Also
--------
datetime.datetime : Parent class.
"""
def __add__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __radd__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __sub__(self,other):
if isinstance(other,dt.timedelta):
days = timedelta_to_days(other)
combined = datetime_to_jd(self) - days
return jd_to_datetime(combined)
elif isinstance(other, (datetime,dt.datetime)):
diff = datetime_to_jd(self) - datetime_to_jd(other)
return dt.timedelta(diff)
else:
s = "jdutil.datetime supports '-' with: "
s += "datetime.timedelta, jdutil.datetime and datetime.datetime"
raise TypeError(s)
def __rsub__(self,other):
if not isinstance(other, (datetime,dt.datetime)):
s = "jdutil.datetime supports '-' with: "
s += "jdutil.datetime and datetime.datetime"
raise TypeError(s)
diff = datetime_to_jd(other) - datetime_to_jd(self)
return dt.timedelta(diff)
def to_jd(self):
"""
Return the date converted to Julian Day.
"""
return datetime_to_jd(self)
def to_mjd(self):
"""
Return the date converted to Modified Julian Day.
"""
return jd_to_mjd(self.to_jd())
|
ixc/python-edtf
|
edtf/jdutil.py
|
datetime_to_jd
|
python
|
def datetime_to_jd(date):
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
|
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L274-L298
|
[
"def hmsm_to_days(hour=0,min=0,sec=0,micro=0):\n \"\"\"\n Convert hours, minutes, seconds, and microseconds to fractional days.\n\n Parameters\n ----------\n hour : int, optional\n Hour number. Defaults to 0.\n\n min : int, optional\n Minute number. Defaults to 0.\n\n sec : int, optional\n Second number. Defaults to 0.\n\n micro : int, optional\n Microsecond number. Defaults to 0.\n\n Returns\n -------\n days : float\n Fractional days.\n\n Examples\n --------\n >>> hmsm_to_days(hour=6)\n 0.25\n\n \"\"\"\n days = sec + (micro / 1.e6)\n\n days = min + (days / 60.)\n\n days = hour + (days / 60.)\n\n return days / 24.\n",
"def date_to_jd(year,month,day):\n \"\"\"\n Convert a date to Julian Day.\n\n Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', \n 4th ed., Duffet-Smith and Zwart, 2011.\n\n Parameters\n ----------\n year : int\n Year as integer. Years preceding 1 A.D. should be 0 or negative.\n The year before 1 A.D. is 0, 10 B.C. is year -9.\n\n month : int\n Month as integer, Jan = 1, Feb. = 2, etc.\n\n day : float\n Day, may contain fractional part.\n\n Returns\n -------\n jd : float\n Julian Day\n\n Examples\n --------\n Convert 6 a.m., February 17, 1985 to Julian Day\n\n >>> date_to_jd(1985,2,17.25)\n 2446113.75\n\n \"\"\"\n if month == 1 or month == 2:\n yearp = year - 1\n monthp = month + 12\n else:\n yearp = year\n monthp = month\n\n # this checks where we are in relation to October 15, 1582, the beginning\n # of the Gregorian calendar.\n if ((year < 1582) or\n (year == 1582 and month < 10) or\n (year == 1582 and month == 10 and day < 15)):\n # before start of Gregorian calendar\n B = 0\n else:\n # after start of Gregorian calendar\n A = math.trunc(yearp / 100.)\n B = 2 - A + math.trunc(A / 4.)\n\n if yearp < 0:\n C = math.trunc((365.25 * yearp) - 0.75)\n else:\n C = math.trunc(365.25 * yearp)\n\n D = math.trunc(30.6001 * (monthp + 1))\n\n jd = B + C + D + day + 1720994.5\n\n return jd\n"
] |
# Source: https://gist.github.com/jiffyclub/1294443
"""
Functions for converting dates to/from JD and MJD. Assumes dates are historical
dates, including the transition from the Julian calendar to the Gregorian
calendar in 1582. No support for proleptic Gregorian/Julian calendars.
:Author: Matt Davis
:Website: http://github.com/jiffyclub
"""
import math
import datetime as dt
# Note: The Python datetime module assumes an infinitely valid Gregorian calendar.
# The Gregorian calendar took effect after 10-15-1582 and the dates 10-05 through
# 10-14-1582 never occurred. Python datetime objects will produce incorrect
# time deltas if one date is from before 10-15-1582.
def mjd_to_jd(mjd):
    """Convert a Modified Julian Day number to a Julian Day number.

    Parameters
    ----------
    mjd : float
        Modified Julian Day.

    Returns
    -------
    jd : float
        Julian Day.
    """
    # MJD is defined as JD - 2400000.5, so simply undo the offset.
    return 2400000.5 + mjd
def jd_to_mjd(jd):
    """Convert a Julian Day number to a Modified Julian Day number.

    Parameters
    ----------
    jd : float
        Julian Day.

    Returns
    -------
    mjd : float
        Modified Julian Day.
    """
    # MJD epoch (1858-11-17T00:00) corresponds to JD 2400000.5.
    mjd_offset = 2400000.5
    return jd - mjd_offset
def date_to_jd(year,month,day):
    """Convert a calendar date to a Julian Day number.

    Algorithm from 'Practical Astronomy with your Calculator or
    Spreadsheet', 4th ed., Duffet-Smith and Zwart, 2011.  Dates on or
    before 1582-10-14 are interpreted in the Julian calendar, later
    dates in the Gregorian calendar.

    Parameters
    ----------
    year : int
        Year as integer. Years preceding 1 A.D. should be 0 or negative.
        The year before 1 A.D. is 0, 10 B.C. is year -9.

    month : int
        Month as integer, Jan = 1, Feb. = 2, etc.

    day : float
        Day, may contain fractional part.

    Returns
    -------
    jd : float
        Julian Day

    Examples
    --------
    Convert 6 a.m., February 17, 1985 to Julian Day

    >>> date_to_jd(1985,2,17.25)
    2446113.75
    """
    # Treat January and February as months 13/14 of the previous year.
    if month in (1, 2):
        adj_year, adj_month = year - 1, month + 12
    else:
        adj_year, adj_month = year, month

    # The Gregorian leap-century correction applies only to dates on or
    # after October 15, 1582, the first day of the Gregorian calendar.
    if (year, month, day) >= (1582, 10, 15):
        century = math.trunc(adj_year / 100.)
        correction = 2 - century + math.trunc(century / 4.)
    else:
        correction = 0

    # Whole days contributed by elapsed years (extra -0.75 for BC years).
    if adj_year < 0:
        year_days = math.trunc((365.25 * adj_year) - 0.75)
    else:
        year_days = math.trunc(365.25 * adj_year)

    # Whole days contributed by elapsed months.
    month_days = math.trunc(30.6001 * (adj_month + 1))

    return correction + year_days + month_days + day + 1720994.5
def jd_to_date(jd):
    """Convert a Julian Day number to a calendar date.

    Algorithm from 'Practical Astronomy with your Calculator or
    Spreadsheet', 4th ed., Duffet-Smith and Zwart, 2011.

    Parameters
    ----------
    jd : float
        Julian Day

    Returns
    -------
    year : int
        Year as integer. Years preceding 1 A.D. should be 0 or negative.
        The year before 1 A.D. is 0, 10 B.C. is year -9.

    month : int
        Month as integer, Jan = 1, Feb. = 2, etc.

    day : float
        Day, may contain fractional part.

    Examples
    --------
    Convert Julian Day 2446113.75 to year, month, and day.

    >>> jd_to_date(2446113.75)
    (1985, 2, 17.25)
    """
    # Shift by half a day so day boundaries fall at midnight, not noon.
    frac, whole = math.modf(jd + 0.5)
    whole = int(whole)

    alpha = math.trunc((whole - 1867216.25) / 36524.25)
    # Gregorian leap-century correction for dates after 1582-10-15.
    if whole > 2299160:
        b = whole + 1 + alpha - math.trunc(alpha / 4.)
    else:
        b = whole

    c = b + 1524
    d = math.trunc((c - 122.1) / 365.25)
    e = math.trunc(365.25 * d)
    g = math.trunc((c - e) / 30.6001)

    day = c - e + frac - math.trunc(30.6001 * g)
    month = g - 1 if g < 13.5 else g - 13
    year = d - 4716 if month > 2.5 else d - 4715
    return year, month, day
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
    """Convert hours, minutes, seconds, and microseconds to fractional days.

    Parameters
    ----------
    hour : int, optional
        Hour number. Defaults to 0.

    min : int, optional
        Minute number. Defaults to 0.

    sec : int, optional
        Second number. Defaults to 0.

    micro : int, optional
        Microsecond number. Defaults to 0.

    Returns
    -------
    days : float
        Fractional days.

    Examples
    --------
    >>> hmsm_to_days(hour=6)
    0.25
    """
    # Fold each unit into the next larger one, then scale to days.
    total_seconds = sec + (micro / 1.e6)
    total_minutes = min + (total_seconds / 60.)
    total_hours = hour + (total_minutes / 60.)
    return total_hours / 24.
def days_to_hmsm(days):
    """Convert fractional days to hours, minutes, seconds, and microseconds.

    Precision beyond microseconds is rounded to the nearest microsecond.
    `days` is expected to be a fraction of a single day (less than 1).

    Parameters
    ----------
    days : float
        A fractional number of days. Must be less than 1.

    Returns
    -------
    hour : int
        Hour number.

    min : int
        Minute number.

    sec : int
        Second number.

    micro : int
        Microsecond number.

    Examples
    --------
    >>> days_to_hmsm(0.1)
    (2, 24, 0, 0)
    """
    # Peel off whole units one at a time, carrying the fraction down.
    frac_hours, whole_hours = math.modf(days * 24.)
    frac_mins, whole_mins = math.modf(frac_hours * 60.)
    frac_secs, whole_secs = math.modf(frac_mins * 60.)
    micro = round(frac_secs * 1.e6)
    return int(whole_hours), int(whole_mins), int(whole_secs), int(micro)
def jd_to_datetime(jd):
    """Convert a Julian Day number to a `jdutil.datetime` object.

    Parameters
    ----------
    jd : float
        Julian day.

    Returns
    -------
    dt : `jdutil.datetime` object
        `jdutil.datetime` equivalent of Julian day.

    Examples
    --------
    >>> jd_to_datetime(2446113.75)
    datetime(1985, 2, 17, 6, 0)
    """
    year, month, day = jd_to_date(jd)
    # Split the fractional day off and express it as a time of day.
    day_frac, whole_day = math.modf(day)
    hour, minute, second, micro = days_to_hmsm(day_frac)
    return datetime(year, month, int(whole_day), hour, minute, second, micro)
def timedelta_to_days(td):
    """
    Convert a `datetime.timedelta` object to a total number of days.

    Parameters
    ----------
    td : `datetime.timedelta` instance

    Returns
    -------
    days : float
        Total number of days in the `datetime.timedelta` object.

    Examples
    --------
    >>> td = datetime.timedelta(4.5)
    >>> td
    datetime.timedelta(4, 43200)
    >>> timedelta_to_days(td)
    4.5

    """
    seconds_in_day = 24. * 3600.
    # Microseconds convert to seconds by dividing by 1e6; the previous
    # `* 10.e6` inflated them by a factor of 1e13.
    days = td.days + (td.seconds + (td.microseconds / 1.e6)) / seconds_in_day
    return days
class datetime(dt.datetime):
    """
    A subclass of `datetime.datetime` that performs math operations by first
    converting to Julian Day, then back to a `jdutil.datetime` object.

    Addition works with `datetime.timedelta` objects, subtraction works with
    `datetime.timedelta`, `datetime.datetime`, and `jdutil.datetime` objects.
    Not all combinations work in all directions, e.g.
    `timedelta - datetime` is meaningless.

    See Also
    --------
    datetime.datetime : Parent class.
    """

    def __add__(self,other):
        # A point in time only supports addition of an offset (timedelta).
        if not isinstance(other,dt.timedelta):
            s = "jdutil.datetime supports '+' only with datetime.timedelta"
            raise TypeError(s)
        # Do the arithmetic in Julian-Day space, then convert back.
        days = timedelta_to_days(other)
        combined = datetime_to_jd(self) + days
        return jd_to_datetime(combined)

    def __radd__(self,other):
        # timedelta + datetime: same semantics as __add__.
        if not isinstance(other,dt.timedelta):
            s = "jdutil.datetime supports '+' only with datetime.timedelta"
            raise TypeError(s)
        days = timedelta_to_days(other)
        combined = datetime_to_jd(self) + days
        return jd_to_datetime(combined)

    def __sub__(self,other):
        if isinstance(other,dt.timedelta):
            # datetime - timedelta -> earlier jdutil.datetime.
            days = timedelta_to_days(other)
            combined = datetime_to_jd(self) - days
            return jd_to_datetime(combined)
        elif isinstance(other, (datetime,dt.datetime)):
            # datetime - datetime -> elapsed time. `diff` is in days,
            # which is exactly timedelta's first positional argument.
            diff = datetime_to_jd(self) - datetime_to_jd(other)
            return dt.timedelta(diff)
        else:
            s = "jdutil.datetime supports '-' with: "
            s += "datetime.timedelta, jdutil.datetime and datetime.datetime"
            raise TypeError(s)

    def __rsub__(self,other):
        # other - self, where `other` must itself be a datetime-like value.
        if not isinstance(other, (datetime,dt.datetime)):
            s = "jdutil.datetime supports '-' with: "
            s += "jdutil.datetime and datetime.datetime"
            raise TypeError(s)
        diff = datetime_to_jd(other) - datetime_to_jd(self)
        return dt.timedelta(diff)

    def to_jd(self):
        """
        Return the date converted to Julian Day.
        """
        return datetime_to_jd(self)

    def to_mjd(self):
        """
        Return the date converted to Modified Julian Day.
        """
        return jd_to_mjd(self.to_jd())
|
ixc/python-edtf
|
edtf/jdutil.py
|
jd_to_datetime
|
python
|
def jd_to_datetime(jd):
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro)
|
Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0)
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L301-L328
|
[
"def jd_to_date(jd):\n \"\"\"\n Convert Julian Day to date.\n\n Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', \n 4th ed., Duffet-Smith and Zwart, 2011.\n\n Parameters\n ----------\n jd : float\n Julian Day\n\n Returns\n -------\n year : int\n Year as integer. Years preceding 1 A.D. should be 0 or negative.\n The year before 1 A.D. is 0, 10 B.C. is year -9.\n\n month : int\n Month as integer, Jan = 1, Feb. = 2, etc.\n\n day : float\n Day, may contain fractional part.\n\n Examples\n --------\n Convert Julian Day 2446113.75 to year, month, and day.\n\n >>> jd_to_date(2446113.75)\n (1985, 2, 17.25)\n\n \"\"\"\n jd = jd + 0.5\n\n F, I = math.modf(jd)\n I = int(I)\n\n A = math.trunc((I - 1867216.25)/36524.25)\n\n if I > 2299160:\n B = I + 1 + A - math.trunc(A / 4.)\n else:\n B = I\n\n C = B + 1524\n\n D = math.trunc((C - 122.1) / 365.25)\n\n E = math.trunc(365.25 * D)\n\n G = math.trunc((C - E) / 30.6001)\n\n day = C - E + F - math.trunc(30.6001 * G)\n\n if G < 13.5:\n month = G - 1\n else:\n month = G - 13\n\n if month > 2.5:\n year = D - 4716\n else:\n year = D - 4715\n\n return year, month, day\n",
"def days_to_hmsm(days):\n \"\"\"\n Convert fractional days to hours, minutes, seconds, and microseconds.\n Precision beyond microseconds is rounded to the nearest microsecond.\n\n Parameters\n ----------\n days : float\n A fractional number of days. Must be less than 1.\n\n Returns\n -------\n hour : int\n Hour number.\n\n min : int\n Minute number.\n\n sec : int\n Second number.\n\n micro : int\n Microsecond number.\n\n Raises\n ------\n ValueError\n If `days` is >= 1.\n\n Examples\n --------\n >>> days_to_hmsm(0.1)\n (2, 24, 0, 0)\n\n \"\"\"\n hours = days * 24.\n hours, hour = math.modf(hours)\n\n mins = hours * 60.\n mins, min = math.modf(mins)\n\n secs = mins * 60.\n secs, sec = math.modf(secs)\n\n micro = round(secs * 1.e6)\n\n return int(hour), int(min), int(sec), int(micro)\n"
] |
# Source: https://gist.github.com/jiffyclub/1294443
"""
Functions for converting dates to/from JD and MJD. Assumes dates are historical
dates, including the transition from the Julian calendar to the Gregorian
calendar in 1582. No support for proleptic Gregorian/Julian calendars.
:Author: Matt Davis
:Website: http://github.com/jiffyclub
"""
import math
import datetime as dt
# Note: The Python datetime module assumes an infinitely valid Gregorian calendar.
# The Gregorian calendar took effect after 10-15-1582 and the dates 10-05 through
# 10-14-1582 never occurred. Python datetime objects will produce incorrect
# time deltas if one date is from before 10-15-1582.
def mjd_to_jd(mjd):
"""
Convert Modified Julian Day to Julian Day.
Parameters
----------
mjd : float
Modified Julian Day
Returns
-------
jd : float
Julian Day
"""
return mjd + 2400000.5
def jd_to_mjd(jd):
"""
Convert Julian Day to Modified Julian Day
Parameters
----------
jd : float
Julian Day
Returns
-------
mjd : float
Modified Julian Day
"""
return jd - 2400000.5
def date_to_jd(year,month,day):
"""
Convert a date to Julian Day.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Returns
-------
jd : float
Julian Day
Examples
--------
Convert 6 a.m., February 17, 1985 to Julian Day
>>> date_to_jd(1985,2,17.25)
2446113.75
"""
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd
def jd_to_date(jd):
"""
Convert Julian Day to date.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
jd : float
Julian Day
Returns
-------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Examples
--------
Convert Julian Day 2446113.75 to year, month, and day.
>>> jd_to_date(2446113.75)
(1985, 2, 17.25)
"""
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
"""
Convert hours, minutes, seconds, and microseconds to fractional days.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25
"""
days = sec + (micro / 1.e6)
days = min + (days / 60.)
days = hour + (days / 60.)
return days / 24.
def days_to_hmsm(days):
"""
Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0)
"""
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro)
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
def timedelta_to_days(td):
    """
    Convert a `datetime.timedelta` object to a total number of days.

    Parameters
    ----------
    td : `datetime.timedelta` instance

    Returns
    -------
    days : float
        Total number of days in the `datetime.timedelta` object.

    Examples
    --------
    >>> td = datetime.timedelta(4.5)
    >>> td
    datetime.timedelta(4, 43200)
    >>> timedelta_to_days(td)
    4.5

    """
    seconds_in_day = 24. * 3600.
    # Microseconds convert to seconds by dividing by 1e6; the previous
    # `* 10.e6` inflated them by a factor of 1e13.
    days = td.days + (td.seconds + (td.microseconds / 1.e6)) / seconds_in_day
    return days
class datetime(dt.datetime):
"""
A subclass of `datetime.datetime` that performs math operations by first
converting to Julian Day, then back to a `jdutil.datetime` object.
Addition works with `datetime.timedelta` objects, subtraction works with
`datetime.timedelta`, `datetime.datetime`, and `jdutil.datetime` objects.
Not all combinations work in all directions, e.g.
`timedelta - datetime` is meaningless.
See Also
--------
datetime.datetime : Parent class.
"""
def __add__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __radd__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __sub__(self,other):
if isinstance(other,dt.timedelta):
days = timedelta_to_days(other)
combined = datetime_to_jd(self) - days
return jd_to_datetime(combined)
elif isinstance(other, (datetime,dt.datetime)):
diff = datetime_to_jd(self) - datetime_to_jd(other)
return dt.timedelta(diff)
else:
s = "jdutil.datetime supports '-' with: "
s += "datetime.timedelta, jdutil.datetime and datetime.datetime"
raise TypeError(s)
def __rsub__(self,other):
if not isinstance(other, (datetime,dt.datetime)):
s = "jdutil.datetime supports '-' with: "
s += "jdutil.datetime and datetime.datetime"
raise TypeError(s)
diff = datetime_to_jd(other) - datetime_to_jd(self)
return dt.timedelta(diff)
def to_jd(self):
"""
Return the date converted to Julian Day.
"""
return datetime_to_jd(self)
def to_mjd(self):
"""
Return the date converted to Modified Julian Day.
"""
return jd_to_mjd(self.to_jd())
|
ixc/python-edtf
|
edtf/jdutil.py
|
timedelta_to_days
|
python
|
def timedelta_to_days(td):
    seconds_in_day = 24. * 3600.
    # Microseconds convert to seconds by dividing by 1e6, not `* 10.e6`.
    days = td.days + (td.seconds + (td.microseconds / 1.e6)) / seconds_in_day
    return days
|
Convert a `datetime.timedelta` object to a total number of days.
Parameters
----------
td : `datetime.timedelta` instance
Returns
-------
days : float
Total number of days in the `datetime.timedelta` object.
Examples
--------
>>> td = datetime.timedelta(4.5)
>>> td
datetime.timedelta(4, 43200)
>>> timedelta_to_days(td)
4.5
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/jdutil.py#L331-L357
| null |
# Source: https://gist.github.com/jiffyclub/1294443
"""
Functions for converting dates to/from JD and MJD. Assumes dates are historical
dates, including the transition from the Julian calendar to the Gregorian
calendar in 1582. No support for proleptic Gregorian/Julian calendars.
:Author: Matt Davis
:Website: http://github.com/jiffyclub
"""
import math
import datetime as dt
# Note: The Python datetime module assumes an infinitely valid Gregorian calendar.
# The Gregorian calendar took effect after 10-15-1582 and the dates 10-05 through
# 10-14-1582 never occurred. Python datetime objects will produce incorrect
# time deltas if one date is from before 10-15-1582.
def mjd_to_jd(mjd):
"""
Convert Modified Julian Day to Julian Day.
Parameters
----------
mjd : float
Modified Julian Day
Returns
-------
jd : float
Julian Day
"""
return mjd + 2400000.5
def jd_to_mjd(jd):
"""
Convert Julian Day to Modified Julian Day
Parameters
----------
jd : float
Julian Day
Returns
-------
mjd : float
Modified Julian Day
"""
return jd - 2400000.5
def date_to_jd(year,month,day):
"""
Convert a date to Julian Day.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Returns
-------
jd : float
Julian Day
Examples
--------
Convert 6 a.m., February 17, 1985 to Julian Day
>>> date_to_jd(1985,2,17.25)
2446113.75
"""
if month == 1 or month == 2:
yearp = year - 1
monthp = month + 12
else:
yearp = year
monthp = month
# this checks where we are in relation to October 15, 1582, the beginning
# of the Gregorian calendar.
if ((year < 1582) or
(year == 1582 and month < 10) or
(year == 1582 and month == 10 and day < 15)):
# before start of Gregorian calendar
B = 0
else:
# after start of Gregorian calendar
A = math.trunc(yearp / 100.)
B = 2 - A + math.trunc(A / 4.)
if yearp < 0:
C = math.trunc((365.25 * yearp) - 0.75)
else:
C = math.trunc(365.25 * yearp)
D = math.trunc(30.6001 * (monthp + 1))
jd = B + C + D + day + 1720994.5
return jd
def jd_to_date(jd):
"""
Convert Julian Day to date.
Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet',
4th ed., Duffet-Smith and Zwart, 2011.
Parameters
----------
jd : float
Julian Day
Returns
-------
year : int
Year as integer. Years preceding 1 A.D. should be 0 or negative.
The year before 1 A.D. is 0, 10 B.C. is year -9.
month : int
Month as integer, Jan = 1, Feb. = 2, etc.
day : float
Day, may contain fractional part.
Examples
--------
Convert Julian Day 2446113.75 to year, month, and day.
>>> jd_to_date(2446113.75)
(1985, 2, 17.25)
"""
jd = jd + 0.5
F, I = math.modf(jd)
I = int(I)
A = math.trunc((I - 1867216.25)/36524.25)
if I > 2299160:
B = I + 1 + A - math.trunc(A / 4.)
else:
B = I
C = B + 1524
D = math.trunc((C - 122.1) / 365.25)
E = math.trunc(365.25 * D)
G = math.trunc((C - E) / 30.6001)
day = C - E + F - math.trunc(30.6001 * G)
if G < 13.5:
month = G - 1
else:
month = G - 13
if month > 2.5:
year = D - 4716
else:
year = D - 4715
return year, month, day
def hmsm_to_days(hour=0,min=0,sec=0,micro=0):
"""
Convert hours, minutes, seconds, and microseconds to fractional days.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25
"""
days = sec + (micro / 1.e6)
days = min + (days / 60.)
days = hour + (days / 60.)
return days / 24.
def days_to_hmsm(days):
"""
Convert fractional days to hours, minutes, seconds, and microseconds.
Precision beyond microseconds is rounded to the nearest microsecond.
Parameters
----------
days : float
A fractional number of days. Must be less than 1.
Returns
-------
hour : int
Hour number.
min : int
Minute number.
sec : int
Second number.
micro : int
Microsecond number.
Raises
------
ValueError
If `days` is >= 1.
Examples
--------
>>> days_to_hmsm(0.1)
(2, 24, 0, 0)
"""
hours = days * 24.
hours, hour = math.modf(hours)
mins = hours * 60.
mins, min = math.modf(mins)
secs = mins * 60.
secs, sec = math.modf(secs)
micro = round(secs * 1.e6)
return int(hour), int(min), int(sec), int(micro)
def datetime_to_jd(date):
"""
Convert a `datetime.datetime` object to Julian Day.
Parameters
----------
date : `datetime.datetime` instance
Returns
-------
jd : float
Julian day.
Examples
--------
>>> d = datetime.datetime(1985,2,17,6)
>>> d
datetime.datetime(1985, 2, 17, 6, 0)
>>> jdutil.datetime_to_jd(d)
2446113.75
"""
days = date.day + hmsm_to_days(date.hour,date.minute,date.second,date.microsecond)
return date_to_jd(date.year,date.month,days)
def jd_to_datetime(jd):
"""
Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0)
"""
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro)
class datetime(dt.datetime):
"""
A subclass of `datetime.datetime` that performs math operations by first
converting to Julian Day, then back to a `jdutil.datetime` object.
Addition works with `datetime.timedelta` objects, subtraction works with
`datetime.timedelta`, `datetime.datetime`, and `jdutil.datetime` objects.
Not all combinations work in all directions, e.g.
`timedelta - datetime` is meaningless.
See Also
--------
datetime.datetime : Parent class.
"""
def __add__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __radd__(self,other):
if not isinstance(other,dt.timedelta):
s = "jdutil.datetime supports '+' only with datetime.timedelta"
raise TypeError(s)
days = timedelta_to_days(other)
combined = datetime_to_jd(self) + days
return jd_to_datetime(combined)
def __sub__(self,other):
if isinstance(other,dt.timedelta):
days = timedelta_to_days(other)
combined = datetime_to_jd(self) - days
return jd_to_datetime(combined)
elif isinstance(other, (datetime,dt.datetime)):
diff = datetime_to_jd(self) - datetime_to_jd(other)
return dt.timedelta(diff)
else:
s = "jdutil.datetime supports '-' with: "
s += "datetime.timedelta, jdutil.datetime and datetime.datetime"
raise TypeError(s)
def __rsub__(self,other):
if not isinstance(other, (datetime,dt.datetime)):
s = "jdutil.datetime supports '-' with: "
s += "jdutil.datetime and datetime.datetime"
raise TypeError(s)
diff = datetime_to_jd(other) - datetime_to_jd(self)
return dt.timedelta(diff)
def to_jd(self):
"""
Return the date converted to Julian Day.
"""
return datetime_to_jd(self)
def to_mjd(self):
"""
Return the date converted to Modified Julian Day.
"""
return jd_to_mjd(self.to_jd())
|
ixc/python-edtf
|
edtf/convert.py
|
dt_to_struct_time
|
python
|
def dt_to_struct_time(dt):
if isinstance(dt, datetime):
return struct_time(
[dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second] +
TIME_EMPTY_EXTRAS
)
elif isinstance(dt, date):
return struct_time(
[dt.year, dt.month, dt.day] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS
)
else:
raise NotImplementedError(
"Cannot convert %s to `struct_time`" % type(dt))
|
Convert a `datetime.date` or `datetime.datetime` to a `struct_time`
representation *with zero values* for data fields that we cannot always
rely on for ancient or far-future dates: tm_wday, tm_yday, tm_isdst
NOTE: If it wasn't for the requirement that the extra fields are unset
we could use the `timetuple()` method instead of this function.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/convert.py#L11-L31
| null |
from time import struct_time
from datetime import date, datetime
from edtf import jdutil
TIME_EMPTY_TIME = [0, 0, 0] # tm_hour, tm_min, tm_sec
TIME_EMPTY_EXTRAS = [0, 0, -1] # tm_wday, tm_yday, tm_isdst
def struct_time_to_date(st):
    """
    Return a `datetime.date` representing the provided `struct_time`.
    WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
    """
    year, month, day = st[:3]
    return date(year, month, day)
def struct_time_to_datetime(st):
    """
    Return a `datetime.datetime` representing the provided `struct_time`.
    WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
    """
    year, month, day, hour, minute, second = st[:6]
    return datetime(year, month, day, hour, minute, second)
def trim_struct_time(st, strip_time=False):
    """
    Return a `struct_time` based on the one provided but with the extra fields
    `tm_wday`, `tm_yday`, and `tm_isdst` reset to default values.
    If `strip_time` is set to true the time values are also set to zero:
    `tm_hour`, `tm_min`, and `tm_sec`.
    """
    if strip_time:
        # Keep only the date fields and zero the time of day.
        fields = list(st[:3]) + TIME_EMPTY_TIME
    else:
        fields = list(st[:6])
    return struct_time(fields + TIME_EMPTY_EXTRAS)
def struct_time_to_jd(st):
    """
    Return a float number representing the Julian Date for the given
    `struct_time`.
    NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
    """
    year, month, day, hours, minutes, seconds = st[:6]
    # Fold the time of day into the fractional part of the day number.
    fractional_day = day + jdutil.hmsm_to_days(hours, minutes, seconds)
    return jdutil.date_to_jd(year, month, fractional_day)
def jd_to_struct_time(jd):
    """
    Return a `struct_time` converted from a Julian Date float number.
    WARNING: Conversion to then from Julian Date value to `struct_time` can be
    inaccurate and lose or gain time, especially for BC (negative) years.
    NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default
    values, not real ones. Sub-second precision is discarded.
    """
    year, month, day = jdutil.jd_to_date(jd)
    whole_day = int(day)
    # Turn the fractional day back into a time of day (microseconds dropped).
    hour, minute, second, _ms = jdutil.days_to_hmsm(day - whole_day)
    # jd_to_date can produce negative month/day/hour/minute/second values
    # for BC years; roll them back into sane positive ranges.
    year, month, day, hour, minute, second = _roll_negative_time_fields(
        year, month, whole_day, hour, minute, second)
    return struct_time(
        [year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS
    )
def _roll_negative_time_fields(year, month, day, hour, minute, second):
"""
Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long.
"""
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
day -= 1 # Subtract 1 for negative minutes
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
month -= 1 # Subtract 1 for negative minutes
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
year -= 1 # Subtract 1 for negative minutes
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second)
|
ixc/python-edtf
|
edtf/convert.py
|
trim_struct_time
|
python
|
def trim_struct_time(st, strip_time=False):
if strip_time:
return struct_time(list(st[:3]) + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
else:
return struct_time(list(st[:6]) + TIME_EMPTY_EXTRAS)
|
Return a `struct_time` based on the one provided but with the extra fields
`tm_wday`, `tm_yday`, and `tm_isdst` reset to default values.
If `strip_time` is set to true the time value are also set to zero:
`tm_hour`, `tm_min`, and `tm_sec`.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/convert.py#L52-L63
| null |
from time import struct_time
from datetime import date, datetime
from edtf import jdutil
TIME_EMPTY_TIME = [0, 0, 0] # tm_hour, tm_min, tm_sec
TIME_EMPTY_EXTRAS = [0, 0, -1] # tm_wday, tm_yday, tm_isdst
def dt_to_struct_time(dt):
"""
Convert a `datetime.date` or `datetime.datetime` to a `struct_time`
representation *with zero values* for data fields that we cannot always
rely on for ancient or far-future dates: tm_wday, tm_yday, tm_isdst
NOTE: If it wasn't for the requirement that the extra fields are unset
we could use the `timetuple()` method instead of this function.
"""
if isinstance(dt, datetime):
return struct_time(
[dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second] +
TIME_EMPTY_EXTRAS
)
elif isinstance(dt, date):
return struct_time(
[dt.year, dt.month, dt.day] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS
)
else:
raise NotImplementedError(
"Cannot convert %s to `struct_time`" % type(dt))
def struct_time_to_date(st):
"""
Return a `datetime.date` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return date(*st[:3])
def struct_time_to_datetime(st):
"""
Return a `datetime.datetime` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return datetime(*st[:6])
def struct_time_to_jd(st):
"""
Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
"""
year, month, day = st[:3]
hours, minutes, seconds = st[3:6]
# Convert time of day to fraction of day
day += jdutil.hmsm_to_days(hours, minutes, seconds)
return jdutil.date_to_jd(year, month, day)
def jd_to_struct_time(jd):
"""
Return a `struct_time` converted from a Julian Date float number.
WARNING: Conversion to then from Julian Date value to `struct_time` can be
inaccurate and lose or gain time, especially for BC (negative) years.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default
values, not real ones.
"""
year, month, day = jdutil.jd_to_date(jd)
# Convert time of day from fraction of day
day_fraction = day - int(day)
hour, minute, second, ms = jdutil.days_to_hmsm(day_fraction)
day = int(day)
# This conversion can return negative values for items we do not want to be
# negative: month, day, hour, minute, second.
year, month, day, hour, minute, second = _roll_negative_time_fields(
year, month, day, hour, minute, second)
return struct_time(
[year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS
)
def _roll_negative_time_fields(year, month, day, hour, minute, second):
"""
Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long.
"""
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
day -= 1 # Subtract 1 for negative minutes
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
month -= 1 # Subtract 1 for negative minutes
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
year -= 1 # Subtract 1 for negative minutes
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second)
|
ixc/python-edtf
|
edtf/convert.py
|
struct_time_to_jd
|
python
|
def struct_time_to_jd(st):
year, month, day = st[:3]
hours, minutes, seconds = st[3:6]
# Convert time of day to fraction of day
day += jdutil.hmsm_to_days(hours, minutes, seconds)
return jdutil.date_to_jd(year, month, day)
|
Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/convert.py#L66-L79
|
[
"def hmsm_to_days(hour=0,min=0,sec=0,micro=0):\n \"\"\"\n Convert hours, minutes, seconds, and microseconds to fractional days.\n\n Parameters\n ----------\n hour : int, optional\n Hour number. Defaults to 0.\n\n min : int, optional\n Minute number. Defaults to 0.\n\n sec : int, optional\n Second number. Defaults to 0.\n\n micro : int, optional\n Microsecond number. Defaults to 0.\n\n Returns\n -------\n days : float\n Fractional days.\n\n Examples\n --------\n >>> hmsm_to_days(hour=6)\n 0.25\n\n \"\"\"\n days = sec + (micro / 1.e6)\n\n days = min + (days / 60.)\n\n days = hour + (days / 60.)\n\n return days / 24.\n",
"def date_to_jd(year,month,day):\n \"\"\"\n Convert a date to Julian Day.\n\n Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', \n 4th ed., Duffet-Smith and Zwart, 2011.\n\n Parameters\n ----------\n year : int\n Year as integer. Years preceding 1 A.D. should be 0 or negative.\n The year before 1 A.D. is 0, 10 B.C. is year -9.\n\n month : int\n Month as integer, Jan = 1, Feb. = 2, etc.\n\n day : float\n Day, may contain fractional part.\n\n Returns\n -------\n jd : float\n Julian Day\n\n Examples\n --------\n Convert 6 a.m., February 17, 1985 to Julian Day\n\n >>> date_to_jd(1985,2,17.25)\n 2446113.75\n\n \"\"\"\n if month == 1 or month == 2:\n yearp = year - 1\n monthp = month + 12\n else:\n yearp = year\n monthp = month\n\n # this checks where we are in relation to October 15, 1582, the beginning\n # of the Gregorian calendar.\n if ((year < 1582) or\n (year == 1582 and month < 10) or\n (year == 1582 and month == 10 and day < 15)):\n # before start of Gregorian calendar\n B = 0\n else:\n # after start of Gregorian calendar\n A = math.trunc(yearp / 100.)\n B = 2 - A + math.trunc(A / 4.)\n\n if yearp < 0:\n C = math.trunc((365.25 * yearp) - 0.75)\n else:\n C = math.trunc(365.25 * yearp)\n\n D = math.trunc(30.6001 * (monthp + 1))\n\n jd = B + C + D + day + 1720994.5\n\n return jd\n"
] |
from time import struct_time
from datetime import date, datetime
from edtf import jdutil
TIME_EMPTY_TIME = [0, 0, 0] # tm_hour, tm_min, tm_sec
TIME_EMPTY_EXTRAS = [0, 0, -1] # tm_wday, tm_yday, tm_isdst
def dt_to_struct_time(dt):
"""
Convert a `datetime.date` or `datetime.datetime` to a `struct_time`
representation *with zero values* for data fields that we cannot always
rely on for ancient or far-future dates: tm_wday, tm_yday, tm_isdst
NOTE: If it wasn't for the requirement that the extra fields are unset
we could use the `timetuple()` method instead of this function.
"""
if isinstance(dt, datetime):
return struct_time(
[dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second] +
TIME_EMPTY_EXTRAS
)
elif isinstance(dt, date):
return struct_time(
[dt.year, dt.month, dt.day] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS
)
else:
raise NotImplementedError(
"Cannot convert %s to `struct_time`" % type(dt))
def struct_time_to_date(st):
"""
Return a `datetime.date` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return date(*st[:3])
def struct_time_to_datetime(st):
"""
Return a `datetime.datetime` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return datetime(*st[:6])
def trim_struct_time(st, strip_time=False):
"""
Return a `struct_time` based on the one provided but with the extra fields
`tm_wday`, `tm_yday`, and `tm_isdst` reset to default values.
If `strip_time` is set to true the time value are also set to zero:
`tm_hour`, `tm_min`, and `tm_sec`.
"""
if strip_time:
return struct_time(list(st[:3]) + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
else:
return struct_time(list(st[:6]) + TIME_EMPTY_EXTRAS)
def jd_to_struct_time(jd):
"""
Return a `struct_time` converted from a Julian Date float number.
WARNING: Conversion to then from Julian Date value to `struct_time` can be
inaccurate and lose or gain time, especially for BC (negative) years.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default
values, not real ones.
"""
year, month, day = jdutil.jd_to_date(jd)
# Convert time of day from fraction of day
day_fraction = day - int(day)
hour, minute, second, ms = jdutil.days_to_hmsm(day_fraction)
day = int(day)
# This conversion can return negative values for items we do not want to be
# negative: month, day, hour, minute, second.
year, month, day, hour, minute, second = _roll_negative_time_fields(
year, month, day, hour, minute, second)
return struct_time(
[year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS
)
def _roll_negative_time_fields(year, month, day, hour, minute, second):
"""
Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long.
"""
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
day -= 1 # Subtract 1 for negative minutes
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
month -= 1 # Subtract 1 for negative minutes
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
year -= 1 # Subtract 1 for negative minutes
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second)
|
ixc/python-edtf
|
edtf/convert.py
|
jd_to_struct_time
|
python
|
def jd_to_struct_time(jd):
year, month, day = jdutil.jd_to_date(jd)
# Convert time of day from fraction of day
day_fraction = day - int(day)
hour, minute, second, ms = jdutil.days_to_hmsm(day_fraction)
day = int(day)
# This conversion can return negative values for items we do not want to be
# negative: month, day, hour, minute, second.
year, month, day, hour, minute, second = _roll_negative_time_fields(
year, month, day, hour, minute, second)
return struct_time(
[year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS
)
|
Return a `struct_time` converted from a Julian Date float number.
WARNING: Conversion to then from Julian Date value to `struct_time` can be
inaccurate and lose or gain time, especially for BC (negative) years.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default
values, not real ones.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/convert.py#L82-L106
|
[
"def jd_to_date(jd):\n \"\"\"\n Convert Julian Day to date.\n\n Algorithm from 'Practical Astronomy with your Calculator or Spreadsheet', \n 4th ed., Duffet-Smith and Zwart, 2011.\n\n Parameters\n ----------\n jd : float\n Julian Day\n\n Returns\n -------\n year : int\n Year as integer. Years preceding 1 A.D. should be 0 or negative.\n The year before 1 A.D. is 0, 10 B.C. is year -9.\n\n month : int\n Month as integer, Jan = 1, Feb. = 2, etc.\n\n day : float\n Day, may contain fractional part.\n\n Examples\n --------\n Convert Julian Day 2446113.75 to year, month, and day.\n\n >>> jd_to_date(2446113.75)\n (1985, 2, 17.25)\n\n \"\"\"\n jd = jd + 0.5\n\n F, I = math.modf(jd)\n I = int(I)\n\n A = math.trunc((I - 1867216.25)/36524.25)\n\n if I > 2299160:\n B = I + 1 + A - math.trunc(A / 4.)\n else:\n B = I\n\n C = B + 1524\n\n D = math.trunc((C - 122.1) / 365.25)\n\n E = math.trunc(365.25 * D)\n\n G = math.trunc((C - E) / 30.6001)\n\n day = C - E + F - math.trunc(30.6001 * G)\n\n if G < 13.5:\n month = G - 1\n else:\n month = G - 13\n\n if month > 2.5:\n year = D - 4716\n else:\n year = D - 4715\n\n return year, month, day\n",
"def days_to_hmsm(days):\n \"\"\"\n Convert fractional days to hours, minutes, seconds, and microseconds.\n Precision beyond microseconds is rounded to the nearest microsecond.\n\n Parameters\n ----------\n days : float\n A fractional number of days. Must be less than 1.\n\n Returns\n -------\n hour : int\n Hour number.\n\n min : int\n Minute number.\n\n sec : int\n Second number.\n\n micro : int\n Microsecond number.\n\n Raises\n ------\n ValueError\n If `days` is >= 1.\n\n Examples\n --------\n >>> days_to_hmsm(0.1)\n (2, 24, 0, 0)\n\n \"\"\"\n hours = days * 24.\n hours, hour = math.modf(hours)\n\n mins = hours * 60.\n mins, min = math.modf(mins)\n\n secs = mins * 60.\n secs, sec = math.modf(secs)\n\n micro = round(secs * 1.e6)\n\n return int(hour), int(min), int(sec), int(micro)\n",
"def _roll_negative_time_fields(year, month, day, hour, minute, second):\n \"\"\"\n Fix date/time fields which have nonsense negative values for any field\n except for year by rolling the overall date/time value backwards, treating\n negative values as relative offsets of the next higher unit.\n\n For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes\n less 63 seconds)\n\n This is very unsophisticated handling of negative values which we would\n ideally do with `dateutil.relativedelta` but cannot because that class does\n not support arbitrary dates, especially not negative years which is the\n only case where these nonsense values are likely to occur anyway.\n\n NOTE: To greatly simplify the logic we assume all months are 30 days long.\n \"\"\"\n if second < 0:\n minute += int(second / 60.0) # Adjust by whole minute in secs\n minute -= 1 # Subtract 1 for negative second\n second %= 60 # Convert negative second to positive remainder\n if minute < 0:\n hour += int(minute / 60.0) # Adjust by whole hour in minutes\n hour -= 1 # Subtract 1 for negative minutes\n minute %= 60 # Convert negative minute to positive remainder\n if hour < 0:\n day += int(hour / 24.0) # Adjust by whole day in hours\n day -= 1 # Subtract 1 for negative minutes\n hour %= 24 # Convert negative hour to positive remainder\n if day < 0:\n month += int(day / 30.0) # Adjust by whole month in days (assume 30)\n month -= 1 # Subtract 1 for negative minutes\n day %= 30 # Convert negative day to positive remainder\n if month < 0:\n year += int(month / 12.0) # Adjust by whole year in months\n year -= 1 # Subtract 1 for negative minutes\n month %= 12 # Convert negative month to positive remainder\n return (year, month, day, hour, minute, second)\n"
] |
from time import struct_time
from datetime import date, datetime
from edtf import jdutil
TIME_EMPTY_TIME = [0, 0, 0] # tm_hour, tm_min, tm_sec
TIME_EMPTY_EXTRAS = [0, 0, -1] # tm_wday, tm_yday, tm_isdst
def dt_to_struct_time(dt):
"""
Convert a `datetime.date` or `datetime.datetime` to a `struct_time`
representation *with zero values* for data fields that we cannot always
rely on for ancient or far-future dates: tm_wday, tm_yday, tm_isdst
NOTE: If it wasn't for the requirement that the extra fields are unset
we could use the `timetuple()` method instead of this function.
"""
if isinstance(dt, datetime):
return struct_time(
[dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second] +
TIME_EMPTY_EXTRAS
)
elif isinstance(dt, date):
return struct_time(
[dt.year, dt.month, dt.day] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS
)
else:
raise NotImplementedError(
"Cannot convert %s to `struct_time`" % type(dt))
def struct_time_to_date(st):
"""
Return a `datetime.date` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return date(*st[:3])
def struct_time_to_datetime(st):
"""
Return a `datetime.datetime` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return datetime(*st[:6])
def trim_struct_time(st, strip_time=False):
"""
Return a `struct_time` based on the one provided but with the extra fields
`tm_wday`, `tm_yday`, and `tm_isdst` reset to default values.
If `strip_time` is set to true the time value are also set to zero:
`tm_hour`, `tm_min`, and `tm_sec`.
"""
if strip_time:
return struct_time(list(st[:3]) + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
else:
return struct_time(list(st[:6]) + TIME_EMPTY_EXTRAS)
def struct_time_to_jd(st):
"""
Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
"""
year, month, day = st[:3]
hours, minutes, seconds = st[3:6]
# Convert time of day to fraction of day
day += jdutil.hmsm_to_days(hours, minutes, seconds)
return jdutil.date_to_jd(year, month, day)
def _roll_negative_time_fields(year, month, day, hour, minute, second):
"""
Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long.
"""
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
day -= 1 # Subtract 1 for negative minutes
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
month -= 1 # Subtract 1 for negative minutes
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
year -= 1 # Subtract 1 for negative minutes
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second)
|
ixc/python-edtf
|
edtf/convert.py
|
_roll_negative_time_fields
|
python
|
def _roll_negative_time_fields(year, month, day, hour, minute, second):
if second < 0:
minute += int(second / 60.0) # Adjust by whole minute in secs
minute -= 1 # Subtract 1 for negative second
second %= 60 # Convert negative second to positive remainder
if minute < 0:
hour += int(minute / 60.0) # Adjust by whole hour in minutes
hour -= 1 # Subtract 1 for negative minutes
minute %= 60 # Convert negative minute to positive remainder
if hour < 0:
day += int(hour / 24.0) # Adjust by whole day in hours
day -= 1 # Subtract 1 for negative minutes
hour %= 24 # Convert negative hour to positive remainder
if day < 0:
month += int(day / 30.0) # Adjust by whole month in days (assume 30)
month -= 1 # Subtract 1 for negative minutes
day %= 30 # Convert negative day to positive remainder
if month < 0:
year += int(month / 12.0) # Adjust by whole year in months
year -= 1 # Subtract 1 for negative minutes
month %= 12 # Convert negative month to positive remainder
return (year, month, day, hour, minute, second)
|
Fix date/time fields which have nonsense negative values for any field
except for year by rolling the overall date/time value backwards, treating
negative values as relative offsets of the next higher unit.
For example minute=5, second=-63 becomes minute=3, second=57 (5 minutes
less 63 seconds)
This is very unsophisticated handling of negative values which we would
ideally do with `dateutil.relativedelta` but cannot because that class does
not support arbitrary dates, especially not negative years which is the
only case where these nonsense values are likely to occur anyway.
NOTE: To greatly simplify the logic we assume all months are 30 days long.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/convert.py#L109-L145
| null |
from time import struct_time
from datetime import date, datetime
from edtf import jdutil
TIME_EMPTY_TIME = [0, 0, 0] # tm_hour, tm_min, tm_sec
TIME_EMPTY_EXTRAS = [0, 0, -1] # tm_wday, tm_yday, tm_isdst
def dt_to_struct_time(dt):
"""
Convert a `datetime.date` or `datetime.datetime` to a `struct_time`
representation *with zero values* for data fields that we cannot always
rely on for ancient or far-future dates: tm_wday, tm_yday, tm_isdst
NOTE: If it wasn't for the requirement that the extra fields are unset
we could use the `timetuple()` method instead of this function.
"""
if isinstance(dt, datetime):
return struct_time(
[dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second] +
TIME_EMPTY_EXTRAS
)
elif isinstance(dt, date):
return struct_time(
[dt.year, dt.month, dt.day] + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS
)
else:
raise NotImplementedError(
"Cannot convert %s to `struct_time`" % type(dt))
def struct_time_to_date(st):
"""
Return a `datetime.date` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return date(*st[:3])
def struct_time_to_datetime(st):
"""
Return a `datetime.datetime` representing the provided `struct_time.
WARNING: This will fail for dates with years before 1 AD or after 9999 AD.
"""
return datetime(*st[:6])
def trim_struct_time(st, strip_time=False):
"""
Return a `struct_time` based on the one provided but with the extra fields
`tm_wday`, `tm_yday`, and `tm_isdst` reset to default values.
If `strip_time` is set to true the time value are also set to zero:
`tm_hour`, `tm_min`, and `tm_sec`.
"""
if strip_time:
return struct_time(list(st[:3]) + TIME_EMPTY_TIME + TIME_EMPTY_EXTRAS)
else:
return struct_time(list(st[:6]) + TIME_EMPTY_EXTRAS)
def struct_time_to_jd(st):
"""
Return a float number representing the Julian Date for the given
`struct_time`.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are ignored.
"""
year, month, day = st[:3]
hours, minutes, seconds = st[3:6]
# Convert time of day to fraction of day
day += jdutil.hmsm_to_days(hours, minutes, seconds)
return jdutil.date_to_jd(year, month, day)
def jd_to_struct_time(jd):
"""
Return a `struct_time` converted from a Julian Date float number.
WARNING: Conversion to then from Julian Date value to `struct_time` can be
inaccurate and lose or gain time, especially for BC (negative) years.
NOTE: extra fields `tm_wday`, `tm_yday`, and `tm_isdst` are set to default
values, not real ones.
"""
year, month, day = jdutil.jd_to_date(jd)
# Convert time of day from fraction of day
day_fraction = day - int(day)
hour, minute, second, ms = jdutil.days_to_hmsm(day_fraction)
day = int(day)
# This conversion can return negative values for items we do not want to be
# negative: month, day, hour, minute, second.
year, month, day, hour, minute, second = _roll_negative_time_fields(
year, month, day, hour, minute, second)
return struct_time(
[year, month, day, hour, minute, second] + TIME_EMPTY_EXTRAS
)
|
ixc/python-edtf
|
edtf/natlang/en.py
|
text_to_edtf
|
python
|
def text_to_edtf(text):
if not text:
return
t = text.lower()
# try parsing the whole thing
result = text_to_edtf_date(t)
if not result:
# split by list delims and move fwd with the first thing that returns a non-empty string.
# TODO: assemble multiple dates into a {} or [] structure.
for split in [",", ";", "or"]:
for list_item in t.split(split):
# try parsing as an interval - split by '-'
toks = list_item.split("-")
if len(toks) == 2:
d1 = toks[0].strip()
d2 = toks[1].strip()
# match looks from the beginning of the string, search
# looks anywhere.
if re.match(r'\d\D\b', d2): # 1-digit year partial e.g. 1868-9
if re.search(r'\b\d\d\d\d$', d1): # TODO: evaluate it and see if it's a year
d2 = d1[-4:-1] + d2
elif re.match(r'\d\d\b', d2): # 2-digit year partial e.g. 1809-10
if re.search(r'\b\d\d\d\d$', d1):
d2 = d1[-4:-2] + d2
else:
century_range_match = re.search(r'\b(\d\d)(th|st|nd|rd|)-(\d\d)(th|st|nd|rd) [cC]', "%s-%s" % (d1,d2))
if century_range_match:
g = century_range_match.groups()
d1 = "%sC" % g[0]
d2 = "%sC" % g[2]
r1 = text_to_edtf_date(d1)
r2 = text_to_edtf_date(d2)
if r1 and r2:
result = r1 + "/" + r2
return result
# is it an either/or year "1838/1862" - that has a different
# representation in EDTF. If it's 'both', then we use {}. If
# it's 'or' then we use []. Assuming the latter for now.
# This whole section could be more friendly.
else:
int_match = re.search(r"(\d\d\d\d)\/(\d\d\d\d)", list_item)
if int_match:
return "[%s, %s]" % (int_match.group(1), int_match.group(2))
result = text_to_edtf_date(list_item)
if result:
break
if result:
break
is_before = re.findall(r'\bbefore\b', t)
is_before = is_before or re.findall(r'\bearlier\b', t)
is_after = re.findall(r'\bafter\b', t)
is_after = is_after or re.findall(r'\bsince\b', t)
is_after = is_after or re.findall(r'\blater\b', t)
if is_before:
result = u"unknown/%s" % result
elif is_after:
result = u"%s/unknown" % result
return result
|
Generate EDTF string equivalent of a given natural language date string.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/natlang/en.py#L27-L102
|
[
"def text_to_edtf_date(text):\n \"\"\"\n Return EDTF string equivalent of a given natural language date string.\n\n The approach here is to parse the text twice, with different default\n dates. Then compare the results to see what differs - the parts that\n differ are undefined.\n \"\"\"\n if not text:\n return\n\n t = text.lower()\n result = ''\n\n for reject_re in REJECT_RULES:\n if re.match(reject_re, t):\n return\n\n # matches on '1800s'. Needs to happen before is_decade.\n could_be_century = re.findall(r'(\\d{2}00)s', t)\n # matches on '1800s' and '1910s'. Removes the 's'.\n # Needs to happen before is_uncertain because e.g. \"1860s?\"\n t, is_decade = re.subn(r'(\\d{3}0)s', r'\\1', t)\n\n # detect approximation signifiers\n # a few 'circa' abbreviations just before the year\n is_approximate = re.findall(r'\\b(ca?\\.?) ?\\d{4}', t)\n # the word 'circa' anywhere\n is_approximate = is_approximate or re.findall(r'\\bcirca\\b', t)\n # the word 'approx'/'around'/'about' anywhere\n is_approximate = is_approximate or \\\n re.findall(r'\\b(approx|around|about)', t)\n # a ~ before a year-ish number\n is_approximate = is_approximate or re.findall(r'\\b~\\d{4}', t)\n # a ~ at the beginning\n is_approximate = is_approximate or re.findall(r'^~', t)\n\n # detect uncertainty signifiers\n t, is_uncertain = re.subn(r'(\\d{4})\\?', r'\\1', t)\n # the words uncertain/maybe/guess anywhere\n is_uncertain = is_uncertain or re.findall(\n r'\\b(uncertain|possibly|maybe|guess)', t)\n\n # detect century forms\n is_century = re.findall(CENTURY_RE, t)\n\n # detect CE/BCE year form\n is_ce = re.findall(CE_RE, t)\n if is_century:\n result = \"%02dxx\" % (int(is_century[0][0]) - 1,)\n is_approximate = is_approximate or \\\n re.findall(r'\\b(ca?\\.?) ?' 
+ CENTURY_RE, t)\n is_uncertain = is_uncertain or re.findall(CENTURY_RE + r'\\?', t)\n\n try:\n is_bc = is_century[0][-1] in (\"bc\", \"bce\")\n if is_bc:\n result = \"-%s\" % result\n except IndexError:\n pass\n\n elif is_ce:\n result = \"%04d\" % (int(is_ce[0][0]))\n is_approximate = is_approximate or \\\n re.findall(r'\\b(ca?\\.?) ?' + CE_RE, t)\n is_uncertain = is_uncertain or re.findall(CE_RE + r'\\?', t)\n\n try:\n is_bc = is_ce[0][-1] in (\"bc\", \"bce\")\n if is_bc:\n result = \"-%s\" % result\n except IndexError:\n pass\n\n else:\n # try dateutil.parse\n\n try:\n # parse twice, using different defaults to see what was\n # parsed and what was guessed.\n dt1 = parse(\n t,\n dayfirst=appsettings.DAY_FIRST,\n yearfirst=False,\n fuzzy=True, # force a match, even if it's default date\n default=DEFAULT_DATE_1\n )\n\n dt2 = parse(\n t,\n dayfirst=appsettings.DAY_FIRST,\n yearfirst=False,\n fuzzy=True, # force a match, even if it's default date\n default=DEFAULT_DATE_2\n )\n\n except ValueError:\n return\n\n if dt1.date() == DEFAULT_DATE_1.date() and \\\n dt2.date() == DEFAULT_DATE_2.date():\n # couldn't parse anything - defaults are untouched.\n return\n\n date1 = dt1.isoformat()[:10]\n date2 = dt2.isoformat()[:10]\n\n # guess precision of 'unspecified' characters to use\n mentions_year = re.findall(r'\\byear\\b.+(in|during)\\b', t)\n mentions_month = re.findall(r'\\bmonth\\b.+(in|during)\\b', t)\n mentions_day = re.findall(r'\\bday\\b.+(in|during)\\b', t)\n\n for i in xrange(len(date1)):\n # if the given year could be a century (e.g. 
'1800s') then use\n # approximate/uncertain markers to decide whether we treat it as\n # a century or a decade.\n if i == 2 and could_be_century and \\\n not (is_approximate or is_uncertain):\n result += 'x'\n elif i == 3 and is_decade > 0:\n if mentions_year:\n result += 'u' # year precision\n else:\n result += 'x' # decade precision\n elif date1[i] == date2[i]:\n # since both attempts at parsing produced the same result\n # it must be parsed value, not a default\n result += date1[i]\n else:\n # different values were produced, meaning that it's likely\n # a default. Use 'unspecified'\n result += \"u\"\n\n # strip off unknown chars from end of string - except the first 4\n\n for i in reversed(xrange(len(result))):\n if result[i] not in ('u', 'x', '-'):\n smallest_length = 4\n\n if mentions_month:\n smallest_length = 7\n if mentions_day:\n smallest_length = 10\n\n limit = max(smallest_length, i + 1)\n result = result[:limit]\n break\n\n # check for seasons\n if \"spring\" in t:\n result = result[:4] + \"-21\" + result[7:]\n elif \"summer\" in t:\n result = result[:4] + \"-22\" + result[7:]\n elif \"autumn\" in t or \"fall\" in t:\n result = result[:4] + \"-23\" + result[7:]\n elif \"winter\" in t:\n result = result[:4] + \"-24\" + result[7:]\n\n # end dateutil post-parsing\n\n if is_uncertain:\n result += \"?\"\n\n if is_approximate:\n result += \"~\"\n\n # weed out bad parses\n if result.startswith(\"uu-uu\"):\n return None\n\n return result\n"
] |
"""Utilities to derive an EDTF string from an (English) natural language string."""
from datetime import datetime
from dateutil.parser import parse
import re
from edtf import appsettings
from six.moves import xrange
# two dates where every digit of an ISO date representation is different,
# and one is in the past and one is in the future.
# This is used in the dateutil parse to detect which elements weren't parsed.
DEFAULT_DATE_1 = datetime(1234, 1, 1, 0, 0)
DEFAULT_DATE_2 = datetime(5678, 10, 10, 0, 0)
SHORT_YEAR_RE = r'(-?)([\du])([\dxu])([\dxu])([\dxu])'
LONG_YEAR_RE = r'y(-?)([1-9]\d\d\d\d+)'
CENTURY_RE = r'(\d{1,2})(c\.?|(st|nd|rd|th) century)\s?(ad|ce|bc|bce)?'
CE_RE = r'(\d{1,4}) (ad|ce|bc|bce)'
# Set of RE rules that will cause us to abort text processing, since we know
# the results will be wrong.
REJECT_RULES = (
r'.*dynasty.*', # Don't parse '23rd Dynasty' to 'uuuu-uu-23'
)
def text_to_edtf_date(text):
"""
Return EDTF string equivalent of a given natural language date string.
The approach here is to parse the text twice, with different default
dates. Then compare the results to see what differs - the parts that
differ are undefined.
"""
if not text:
return
t = text.lower()
result = ''
for reject_re in REJECT_RULES:
if re.match(reject_re, t):
return
# matches on '1800s'. Needs to happen before is_decade.
could_be_century = re.findall(r'(\d{2}00)s', t)
# matches on '1800s' and '1910s'. Removes the 's'.
# Needs to happen before is_uncertain because e.g. "1860s?"
t, is_decade = re.subn(r'(\d{3}0)s', r'\1', t)
# detect approximation signifiers
# a few 'circa' abbreviations just before the year
is_approximate = re.findall(r'\b(ca?\.?) ?\d{4}', t)
# the word 'circa' anywhere
is_approximate = is_approximate or re.findall(r'\bcirca\b', t)
# the word 'approx'/'around'/'about' anywhere
is_approximate = is_approximate or \
re.findall(r'\b(approx|around|about)', t)
# a ~ before a year-ish number
is_approximate = is_approximate or re.findall(r'\b~\d{4}', t)
# a ~ at the beginning
is_approximate = is_approximate or re.findall(r'^~', t)
# detect uncertainty signifiers
t, is_uncertain = re.subn(r'(\d{4})\?', r'\1', t)
# the words uncertain/maybe/guess anywhere
is_uncertain = is_uncertain or re.findall(
r'\b(uncertain|possibly|maybe|guess)', t)
# detect century forms
is_century = re.findall(CENTURY_RE, t)
# detect CE/BCE year form
is_ce = re.findall(CE_RE, t)
if is_century:
result = "%02dxx" % (int(is_century[0][0]) - 1,)
is_approximate = is_approximate or \
re.findall(r'\b(ca?\.?) ?' + CENTURY_RE, t)
is_uncertain = is_uncertain or re.findall(CENTURY_RE + r'\?', t)
try:
is_bc = is_century[0][-1] in ("bc", "bce")
if is_bc:
result = "-%s" % result
except IndexError:
pass
elif is_ce:
result = "%04d" % (int(is_ce[0][0]))
is_approximate = is_approximate or \
re.findall(r'\b(ca?\.?) ?' + CE_RE, t)
is_uncertain = is_uncertain or re.findall(CE_RE + r'\?', t)
try:
is_bc = is_ce[0][-1] in ("bc", "bce")
if is_bc:
result = "-%s" % result
except IndexError:
pass
else:
# try dateutil.parse
try:
# parse twice, using different defaults to see what was
# parsed and what was guessed.
dt1 = parse(
t,
dayfirst=appsettings.DAY_FIRST,
yearfirst=False,
fuzzy=True, # force a match, even if it's default date
default=DEFAULT_DATE_1
)
dt2 = parse(
t,
dayfirst=appsettings.DAY_FIRST,
yearfirst=False,
fuzzy=True, # force a match, even if it's default date
default=DEFAULT_DATE_2
)
except ValueError:
return
if dt1.date() == DEFAULT_DATE_1.date() and \
dt2.date() == DEFAULT_DATE_2.date():
# couldn't parse anything - defaults are untouched.
return
date1 = dt1.isoformat()[:10]
date2 = dt2.isoformat()[:10]
# guess precision of 'unspecified' characters to use
mentions_year = re.findall(r'\byear\b.+(in|during)\b', t)
mentions_month = re.findall(r'\bmonth\b.+(in|during)\b', t)
mentions_day = re.findall(r'\bday\b.+(in|during)\b', t)
for i in xrange(len(date1)):
# if the given year could be a century (e.g. '1800s') then use
# approximate/uncertain markers to decide whether we treat it as
# a century or a decade.
if i == 2 and could_be_century and \
not (is_approximate or is_uncertain):
result += 'x'
elif i == 3 and is_decade > 0:
if mentions_year:
result += 'u' # year precision
else:
result += 'x' # decade precision
elif date1[i] == date2[i]:
# since both attempts at parsing produced the same result
# it must be parsed value, not a default
result += date1[i]
else:
# different values were produced, meaning that it's likely
# a default. Use 'unspecified'
result += "u"
# strip off unknown chars from end of string - except the first 4
for i in reversed(xrange(len(result))):
if result[i] not in ('u', 'x', '-'):
smallest_length = 4
if mentions_month:
smallest_length = 7
if mentions_day:
smallest_length = 10
limit = max(smallest_length, i + 1)
result = result[:limit]
break
# check for seasons
if "spring" in t:
result = result[:4] + "-21" + result[7:]
elif "summer" in t:
result = result[:4] + "-22" + result[7:]
elif "autumn" in t or "fall" in t:
result = result[:4] + "-23" + result[7:]
elif "winter" in t:
result = result[:4] + "-24" + result[7:]
# end dateutil post-parsing
if is_uncertain:
result += "?"
if is_approximate:
result += "~"
# weed out bad parses
if result.startswith("uu-uu"):
return None
return result
|
ixc/python-edtf
|
edtf/natlang/en.py
|
text_to_edtf_date
|
python
|
def text_to_edtf_date(text):
if not text:
return
t = text.lower()
result = ''
for reject_re in REJECT_RULES:
if re.match(reject_re, t):
return
# matches on '1800s'. Needs to happen before is_decade.
could_be_century = re.findall(r'(\d{2}00)s', t)
# matches on '1800s' and '1910s'. Removes the 's'.
# Needs to happen before is_uncertain because e.g. "1860s?"
t, is_decade = re.subn(r'(\d{3}0)s', r'\1', t)
# detect approximation signifiers
# a few 'circa' abbreviations just before the year
is_approximate = re.findall(r'\b(ca?\.?) ?\d{4}', t)
# the word 'circa' anywhere
is_approximate = is_approximate or re.findall(r'\bcirca\b', t)
# the word 'approx'/'around'/'about' anywhere
is_approximate = is_approximate or \
re.findall(r'\b(approx|around|about)', t)
# a ~ before a year-ish number
is_approximate = is_approximate or re.findall(r'\b~\d{4}', t)
# a ~ at the beginning
is_approximate = is_approximate or re.findall(r'^~', t)
# detect uncertainty signifiers
t, is_uncertain = re.subn(r'(\d{4})\?', r'\1', t)
# the words uncertain/maybe/guess anywhere
is_uncertain = is_uncertain or re.findall(
r'\b(uncertain|possibly|maybe|guess)', t)
# detect century forms
is_century = re.findall(CENTURY_RE, t)
# detect CE/BCE year form
is_ce = re.findall(CE_RE, t)
if is_century:
result = "%02dxx" % (int(is_century[0][0]) - 1,)
is_approximate = is_approximate or \
re.findall(r'\b(ca?\.?) ?' + CENTURY_RE, t)
is_uncertain = is_uncertain or re.findall(CENTURY_RE + r'\?', t)
try:
is_bc = is_century[0][-1] in ("bc", "bce")
if is_bc:
result = "-%s" % result
except IndexError:
pass
elif is_ce:
result = "%04d" % (int(is_ce[0][0]))
is_approximate = is_approximate or \
re.findall(r'\b(ca?\.?) ?' + CE_RE, t)
is_uncertain = is_uncertain or re.findall(CE_RE + r'\?', t)
try:
is_bc = is_ce[0][-1] in ("bc", "bce")
if is_bc:
result = "-%s" % result
except IndexError:
pass
else:
# try dateutil.parse
try:
# parse twice, using different defaults to see what was
# parsed and what was guessed.
dt1 = parse(
t,
dayfirst=appsettings.DAY_FIRST,
yearfirst=False,
fuzzy=True, # force a match, even if it's default date
default=DEFAULT_DATE_1
)
dt2 = parse(
t,
dayfirst=appsettings.DAY_FIRST,
yearfirst=False,
fuzzy=True, # force a match, even if it's default date
default=DEFAULT_DATE_2
)
except ValueError:
return
if dt1.date() == DEFAULT_DATE_1.date() and \
dt2.date() == DEFAULT_DATE_2.date():
# couldn't parse anything - defaults are untouched.
return
date1 = dt1.isoformat()[:10]
date2 = dt2.isoformat()[:10]
# guess precision of 'unspecified' characters to use
mentions_year = re.findall(r'\byear\b.+(in|during)\b', t)
mentions_month = re.findall(r'\bmonth\b.+(in|during)\b', t)
mentions_day = re.findall(r'\bday\b.+(in|during)\b', t)
for i in xrange(len(date1)):
# if the given year could be a century (e.g. '1800s') then use
# approximate/uncertain markers to decide whether we treat it as
# a century or a decade.
if i == 2 and could_be_century and \
not (is_approximate or is_uncertain):
result += 'x'
elif i == 3 and is_decade > 0:
if mentions_year:
result += 'u' # year precision
else:
result += 'x' # decade precision
elif date1[i] == date2[i]:
# since both attempts at parsing produced the same result
# it must be parsed value, not a default
result += date1[i]
else:
# different values were produced, meaning that it's likely
# a default. Use 'unspecified'
result += "u"
# strip off unknown chars from end of string - except the first 4
for i in reversed(xrange(len(result))):
if result[i] not in ('u', 'x', '-'):
smallest_length = 4
if mentions_month:
smallest_length = 7
if mentions_day:
smallest_length = 10
limit = max(smallest_length, i + 1)
result = result[:limit]
break
# check for seasons
if "spring" in t:
result = result[:4] + "-21" + result[7:]
elif "summer" in t:
result = result[:4] + "-22" + result[7:]
elif "autumn" in t or "fall" in t:
result = result[:4] + "-23" + result[7:]
elif "winter" in t:
result = result[:4] + "-24" + result[7:]
# end dateutil post-parsing
if is_uncertain:
result += "?"
if is_approximate:
result += "~"
# weed out bad parses
if result.startswith("uu-uu"):
return None
return result
|
Return EDTF string equivalent of a given natural language date string.
The approach here is to parse the text twice, with different default
dates. Then compare the results to see what differs - the parts that
differ are undefined.
|
train
|
https://github.com/ixc/python-edtf/blob/ec2124d3df75f8dd72571026380ce8dd16f3dd6b/edtf/natlang/en.py#L105-L275
| null |
"""Utilities to derive an EDTF string from an (English) natural language string."""
from datetime import datetime
from dateutil.parser import parse
import re
from edtf import appsettings
from six.moves import xrange
# two dates where every digit of an ISO date representation is different,
# and one is in the past and one is in the future.
# This is used in the dateutil parse to detect which elements weren't parsed.
DEFAULT_DATE_1 = datetime(1234, 1, 1, 0, 0)
DEFAULT_DATE_2 = datetime(5678, 10, 10, 0, 0)
SHORT_YEAR_RE = r'(-?)([\du])([\dxu])([\dxu])([\dxu])'
LONG_YEAR_RE = r'y(-?)([1-9]\d\d\d\d+)'
CENTURY_RE = r'(\d{1,2})(c\.?|(st|nd|rd|th) century)\s?(ad|ce|bc|bce)?'
CE_RE = r'(\d{1,4}) (ad|ce|bc|bce)'
# Set of RE rules that will cause us to abort text processing, since we know
# the results will be wrong.
REJECT_RULES = (
r'.*dynasty.*', # Don't parse '23rd Dynasty' to 'uuuu-uu-23'
)
def text_to_edtf(text):
"""
Generate EDTF string equivalent of a given natural language date string.
"""
if not text:
return
t = text.lower()
# try parsing the whole thing
result = text_to_edtf_date(t)
if not result:
# split by list delims and move fwd with the first thing that returns a non-empty string.
# TODO: assemble multiple dates into a {} or [] structure.
for split in [",", ";", "or"]:
for list_item in t.split(split):
# try parsing as an interval - split by '-'
toks = list_item.split("-")
if len(toks) == 2:
d1 = toks[0].strip()
d2 = toks[1].strip()
# match looks from the beginning of the string, search
# looks anywhere.
if re.match(r'\d\D\b', d2): # 1-digit year partial e.g. 1868-9
if re.search(r'\b\d\d\d\d$', d1): # TODO: evaluate it and see if it's a year
d2 = d1[-4:-1] + d2
elif re.match(r'\d\d\b', d2): # 2-digit year partial e.g. 1809-10
if re.search(r'\b\d\d\d\d$', d1):
d2 = d1[-4:-2] + d2
else:
century_range_match = re.search(r'\b(\d\d)(th|st|nd|rd|)-(\d\d)(th|st|nd|rd) [cC]', "%s-%s" % (d1,d2))
if century_range_match:
g = century_range_match.groups()
d1 = "%sC" % g[0]
d2 = "%sC" % g[2]
r1 = text_to_edtf_date(d1)
r2 = text_to_edtf_date(d2)
if r1 and r2:
result = r1 + "/" + r2
return result
# is it an either/or year "1838/1862" - that has a different
# representation in EDTF. If it's 'both', then we use {}. If
# it's 'or' then we use []. Assuming the latter for now.
# This whole section could be more friendly.
else:
int_match = re.search(r"(\d\d\d\d)\/(\d\d\d\d)", list_item)
if int_match:
return "[%s, %s]" % (int_match.group(1), int_match.group(2))
result = text_to_edtf_date(list_item)
if result:
break
if result:
break
is_before = re.findall(r'\bbefore\b', t)
is_before = is_before or re.findall(r'\bearlier\b', t)
is_after = re.findall(r'\bafter\b', t)
is_after = is_after or re.findall(r'\bsince\b', t)
is_after = is_after or re.findall(r'\blater\b', t)
if is_before:
result = u"unknown/%s" % result
elif is_after:
result = u"%s/unknown" % result
return result
|
cykl/infoqscraper
|
infoqscraper/cache.py
|
XDGCache.get_content
|
python
|
def get_content(self, url):
cache_path = self._url_to_path(url)
try:
with open(cache_path, 'rb') as f:
return f.read()
except IOError:
return None
|
Returns the content of a cached resource.
Args:
url: The url of the resource
Returns:
The content of the cached resource or None if not in the cache
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/cache.py#L56-L70
|
[
"def _url_to_path(self, url):\n return os.path.join(self.dir, url)\n"
] |
class XDGCache(object):
"""A disk cache for resources.
Remote resources can be cached to avoid to fetch them several times from the web server.
The resources are stored into the XDG_CACHE_HOME_DIR.
Attributes:
dir: Where to store the cached resources
"""
def __init__(self):
self.dir = self._find_dir()
def _find_dir(self):
home = os.path.expanduser("~")
xdg_cache_home = os.environ.get("XDG_CACHE_HOME", os.path.join(home, ".cache"))
return os.path.join(xdg_cache_home, "infoqscraper", "resources")
def _url_to_path(self, url):
return os.path.join(self.dir, url)
def get_path(self, url):
"""Returns the path of a cached resource.
Args:
url: The url of the resource
Returns:
The path to the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
if os.path.exists(cache_path):
return cache_path
return None
def put_content(self, url, content):
"""Stores the content of a resource into the disk cache.
Args:
url: The url of the resource
content: The content of the resource
Raises:
CacheError: If the content cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
try:
with open(cache_path, 'wb') as f:
f.write(content)
except IOError:
raise Error('Failed to cache content as %s for %s' % (cache_path, url))
def put_path(self, url, path):
"""Puts a resource already on disk into the disk cache.
Args:
url: The original url of the resource
path: The resource already available on disk
Raises:
CacheError: If the file cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
# Remove the resource already exist
try:
os.unlink(cache_path)
except OSError:
pass
try:
# First try hard link to avoid wasting disk space & overhead
os.link(path, cache_path)
except OSError:
try:
# Use file copy as fallaback
shutil.copyfile(path, cache_path)
except IOError:
raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url))
def clear(self):
"""Delete all the cached resources.
Raises:
OSError: If some file cannot be delete
"""
shutil.rmtree(self.dir)
@property
def size(self):
"""Returns the size of the cache in bytes."""
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size
|
cykl/infoqscraper
|
infoqscraper/cache.py
|
XDGCache.get_path
|
python
|
def get_path(self, url):
cache_path = self._url_to_path(url)
if os.path.exists(cache_path):
return cache_path
return None
|
Returns the path of a cached resource.
Args:
url: The url of the resource
Returns:
The path to the cached resource or None if not in the cache
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/cache.py#L72-L85
|
[
"def _url_to_path(self, url):\n return os.path.join(self.dir, url)\n"
] |
class XDGCache(object):
"""A disk cache for resources.
Remote resources can be cached to avoid to fetch them several times from the web server.
The resources are stored into the XDG_CACHE_HOME_DIR.
Attributes:
dir: Where to store the cached resources
"""
def __init__(self):
self.dir = self._find_dir()
def _find_dir(self):
home = os.path.expanduser("~")
xdg_cache_home = os.environ.get("XDG_CACHE_HOME", os.path.join(home, ".cache"))
return os.path.join(xdg_cache_home, "infoqscraper", "resources")
def _url_to_path(self, url):
return os.path.join(self.dir, url)
def get_content(self, url):
"""Returns the content of a cached resource.
Args:
url: The url of the resource
Returns:
The content of the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
try:
with open(cache_path, 'rb') as f:
return f.read()
except IOError:
return None
def put_content(self, url, content):
"""Stores the content of a resource into the disk cache.
Args:
url: The url of the resource
content: The content of the resource
Raises:
CacheError: If the content cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
try:
with open(cache_path, 'wb') as f:
f.write(content)
except IOError:
raise Error('Failed to cache content as %s for %s' % (cache_path, url))
def put_path(self, url, path):
"""Puts a resource already on disk into the disk cache.
Args:
url: The original url of the resource
path: The resource already available on disk
Raises:
CacheError: If the file cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
# Remove the resource already exist
try:
os.unlink(cache_path)
except OSError:
pass
try:
# First try hard link to avoid wasting disk space & overhead
os.link(path, cache_path)
except OSError:
try:
# Use file copy as fallaback
shutil.copyfile(path, cache_path)
except IOError:
raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url))
def clear(self):
"""Delete all the cached resources.
Raises:
OSError: If some file cannot be delete
"""
shutil.rmtree(self.dir)
@property
def size(self):
"""Returns the size of the cache in bytes."""
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size
|
cykl/infoqscraper
|
infoqscraper/cache.py
|
XDGCache.put_content
|
python
|
def put_content(self, url, content):
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
try:
with open(cache_path, 'wb') as f:
f.write(content)
except IOError:
raise Error('Failed to cache content as %s for %s' % (cache_path, url))
|
Stores the content of a resource into the disk cache.
Args:
url: The url of the resource
content: The content of the resource
Raises:
CacheError: If the content cannot be put in cache
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/cache.py#L87-L111
|
[
"def _url_to_path(self, url):\n return os.path.join(self.dir, url)\n"
] |
class XDGCache(object):
"""A disk cache for resources.
Remote resources can be cached to avoid to fetch them several times from the web server.
The resources are stored into the XDG_CACHE_HOME_DIR.
Attributes:
dir: Where to store the cached resources
"""
def __init__(self):
self.dir = self._find_dir()
def _find_dir(self):
home = os.path.expanduser("~")
xdg_cache_home = os.environ.get("XDG_CACHE_HOME", os.path.join(home, ".cache"))
return os.path.join(xdg_cache_home, "infoqscraper", "resources")
def _url_to_path(self, url):
return os.path.join(self.dir, url)
def get_content(self, url):
"""Returns the content of a cached resource.
Args:
url: The url of the resource
Returns:
The content of the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
try:
with open(cache_path, 'rb') as f:
return f.read()
except IOError:
return None
def get_path(self, url):
"""Returns the path of a cached resource.
Args:
url: The url of the resource
Returns:
The path to the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
if os.path.exists(cache_path):
return cache_path
return None
def put_path(self, url, path):
"""Puts a resource already on disk into the disk cache.
Args:
url: The original url of the resource
path: The resource already available on disk
Raises:
CacheError: If the file cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
# Remove the resource already exist
try:
os.unlink(cache_path)
except OSError:
pass
try:
# First try hard link to avoid wasting disk space & overhead
os.link(path, cache_path)
except OSError:
try:
# Use file copy as fallaback
shutil.copyfile(path, cache_path)
except IOError:
raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url))
def clear(self):
"""Delete all the cached resources.
Raises:
OSError: If some file cannot be delete
"""
shutil.rmtree(self.dir)
@property
def size(self):
"""Returns the size of the cache in bytes."""
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size
|
cykl/infoqscraper
|
infoqscraper/cache.py
|
XDGCache.put_path
|
python
|
def put_path(self, url, path):
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
# Remove the resource already exist
try:
os.unlink(cache_path)
except OSError:
pass
try:
# First try hard link to avoid wasting disk space & overhead
os.link(path, cache_path)
except OSError:
try:
# Use file copy as fallaback
shutil.copyfile(path, cache_path)
except IOError:
raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url))
|
Puts a resource already on disk into the disk cache.
Args:
url: The original url of the resource
path: The resource already available on disk
Raises:
CacheError: If the file cannot be put in cache
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/cache.py#L113-L147
|
[
"def _url_to_path(self, url):\n return os.path.join(self.dir, url)\n"
] |
class XDGCache(object):
"""A disk cache for resources.
Remote resources can be cached to avoid to fetch them several times from the web server.
The resources are stored into the XDG_CACHE_HOME_DIR.
Attributes:
dir: Where to store the cached resources
"""
def __init__(self):
self.dir = self._find_dir()
def _find_dir(self):
home = os.path.expanduser("~")
xdg_cache_home = os.environ.get("XDG_CACHE_HOME", os.path.join(home, ".cache"))
return os.path.join(xdg_cache_home, "infoqscraper", "resources")
def _url_to_path(self, url):
return os.path.join(self.dir, url)
def get_content(self, url):
"""Returns the content of a cached resource.
Args:
url: The url of the resource
Returns:
The content of the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
try:
with open(cache_path, 'rb') as f:
return f.read()
except IOError:
return None
def get_path(self, url):
"""Returns the path of a cached resource.
Args:
url: The url of the resource
Returns:
The path to the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
if os.path.exists(cache_path):
return cache_path
return None
def put_content(self, url, content):
"""Stores the content of a resource into the disk cache.
Args:
url: The url of the resource
content: The content of the resource
Raises:
CacheError: If the content cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
try:
with open(cache_path, 'wb') as f:
f.write(content)
except IOError:
raise Error('Failed to cache content as %s for %s' % (cache_path, url))
def clear(self):
"""Delete all the cached resources.
Raises:
OSError: If some file cannot be delete
"""
shutil.rmtree(self.dir)
@property
def size(self):
"""Returns the size of the cache in bytes."""
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size
|
cykl/infoqscraper
|
infoqscraper/cache.py
|
XDGCache.size
|
python
|
def size(self):
total_size = 0
for dir_path, dir_names, filenames in os.walk(self.dir):
for f in filenames:
fp = os.path.join(dir_path, f)
total_size += os.path.getsize(fp)
return total_size
|
Returns the size of the cache in bytes.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/cache.py#L158-L165
| null |
class XDGCache(object):
"""A disk cache for resources.
Remote resources can be cached to avoid to fetch them several times from the web server.
The resources are stored into the XDG_CACHE_HOME_DIR.
Attributes:
dir: Where to store the cached resources
"""
def __init__(self):
self.dir = self._find_dir()
def _find_dir(self):
home = os.path.expanduser("~")
xdg_cache_home = os.environ.get("XDG_CACHE_HOME", os.path.join(home, ".cache"))
return os.path.join(xdg_cache_home, "infoqscraper", "resources")
def _url_to_path(self, url):
return os.path.join(self.dir, url)
def get_content(self, url):
"""Returns the content of a cached resource.
Args:
url: The url of the resource
Returns:
The content of the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
try:
with open(cache_path, 'rb') as f:
return f.read()
except IOError:
return None
def get_path(self, url):
"""Returns the path of a cached resource.
Args:
url: The url of the resource
Returns:
The path to the cached resource or None if not in the cache
"""
cache_path = self._url_to_path(url)
if os.path.exists(cache_path):
return cache_path
return None
def put_content(self, url, content):
"""Stores the content of a resource into the disk cache.
Args:
url: The url of the resource
content: The content of the resource
Raises:
CacheError: If the content cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
try:
with open(cache_path, 'wb') as f:
f.write(content)
except IOError:
raise Error('Failed to cache content as %s for %s' % (cache_path, url))
def put_path(self, url, path):
"""Puts a resource already on disk into the disk cache.
Args:
url: The original url of the resource
path: The resource already available on disk
Raises:
CacheError: If the file cannot be put in cache
"""
cache_path = self._url_to_path(url)
# Ensure that cache directories exist
try:
dir = os.path.dirname(cache_path)
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise Error('Failed to create cache directories for ' % cache_path)
# Remove the resource already exist
try:
os.unlink(cache_path)
except OSError:
pass
try:
# First try hard link to avoid wasting disk space & overhead
os.link(path, cache_path)
except OSError:
try:
# Use file copy as fallaback
shutil.copyfile(path, cache_path)
except IOError:
raise Error('Failed to cache %s as %s for %s' % (path, cache_path, url))
def clear(self):
"""Delete all the cached resources.
Raises:
OSError: If some file cannot be delete
"""
shutil.rmtree(self.dir)
@property
|
cykl/infoqscraper
|
infoqscraper/scrap.py
|
get_summaries
|
python
|
def get_summaries(client, filter=None):
try:
index = 0
while True:
rb = _RightBarPage(client, index)
summaries = rb.summaries()
if filter is not None:
summaries = filter.filter(summaries)
for summary in summaries:
yield summary
index += len(summaries)
except StopIteration:
pass
|
Generate presentation summaries in a reverse chronological order.
A filter class can be supplied to filter summaries or bound the fetching process.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/scrap.py#L36-L55
|
[
"def filter(self, presentation_summaries):\n if self.page_count >= self.max_pages:\n raise StopIteration\n\n self.page_count += 1\n return presentation_summaries\n",
"def summaries(self):\n \"\"\"Return a list of all the presentation summaries contained in this page\"\"\"\n def create_summary(div):\n def get_id(div):\n return get_url(div).rsplit('/')[-1]\n\n def get_url(div):\n return client.get_url(div.find('h2', class_='itemtitle').a['href'])\n\n def get_desc(div):\n return div.p.get_text(strip=True)\n\n def get_auth(div):\n return div.find('span', class_='author').a['title']\n\n def get_date(div):\n str = div.find('span', class_='author').get_text()\n str = str.replace('\\n', ' ')\n str = str.replace(six.u('\\xa0'), ' ')\n match = re.search(r'on\\s+(\\w{3} [0-9]{1,2}, 20[0-9]{2})', str)\n return datetime.datetime.strptime(match.group(1), \"%b %d, %Y\")\n\n def get_title(div):\n return div.find('h2', class_='itemtitle').a['title']\n\n return {\n 'id': get_id(div),\n 'url': get_url(div),\n 'desc': get_desc(div),\n 'auth': get_auth(div),\n 'date': get_date(div),\n 'title': get_title(div),\n }\n\n videos = self.soup.findAll('div', {'class': 'news_type_video'})\n return [create_summary(div) for div in videos]\n"
] |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2014, Clément MATHIEU
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import base64
import bs4
import datetime
import re
from infoqscraper import client
import six
from six.moves import urllib
class MaxPagesFilter(object):
""" A summary filter set an upper bound on the number fetched pages"""
def __init__(self, max_pages):
self.max_pages = max_pages
self.page_count = 0
def filter(self, presentation_summaries):
if self.page_count >= self.max_pages:
raise StopIteration
self.page_count += 1
return presentation_summaries
class Presentation(object):
""" An InfoQ presentation.
"""
def __init__(self, client, id):
self.client = client
self.id = id
self.soup = self._fetch()
def _fetch(self):
"""Download the page and create the soup"""
url = client.get_url("/presentations/" + self.id)
content = self.client.fetch_no_cache(url).decode('utf-8')
return bs4.BeautifulSoup(content, "html.parser")
@property
def metadata(self):
def get_title(pres_div):
return pres_div.find('h1', class_="general").div.get_text().strip()
def get_date(pres_div):
strings = ''.join(pres_div.find('span', class_='author_general').strings)
match = re.search('on[\n ]+(.*\d{4})', strings)
if match:
return datetime.datetime.strptime(match.group(1), "%b %d, %Y")
else:
raise Exception("Failed to extract date (markup changed?)")
def get_author(pres_div):
return pres_div.find('span', class_='authors-list').find('a').get_text().strip()
def get_timecodes(pres_div):
for script in pres_div.find_all('script'):
mo = re.search("TIMES\s?=\s?new\s+Array.?\((\d+(,\d+)+)\)", script.get_text())
if mo:
return [int(tc) for tc in mo.group(1).split(',')]
def get_slides(pres_div):
for script in pres_div.find_all('script'):
mo = re.search("var\s+slides\s?=\s?new\s+Array.?\(('.+')\)", script.get_text())
if mo:
return [slide.replace('\'', '') for slide in mo.group(1).split(',')]
def get_video(pres_div):
for script in pres_div.find_all('script'):
mo = re.search('var jsclassref = \'(.*)\';', script.get_text())
if mo:
b64 = mo.group(1)
path = base64.b64decode(b64).decode('utf-8')
# Older presentations use flv and the video path does not contain
# the extension. Newer presentations use mp4 and include the extension.
if path.endswith(".mp4"):
return "mp4:%s" % path
elif path.endswith(".flv"):
return "flv:%s" % path[:-4]
else:
raise Exception("Unsupported video type: %s" % path)
def get_bio(div):
return div.find('p', id="biotext").get_text(strip=True)
def get_summary(div):
return "".join(div.find('p', id="summary").get_text("|", strip=True).split("|")[1:])
def get_about(div):
return div.find('p', id="conference").get_text(strip=True)
def get_demo_timings(pres_div):
for script in pres_div.find_all('script'):
timings = re.search("demoTimings\s+=\s+'([^']+)", script.get_text())
if timings:
return [int(t) for t in timings.group(1).split(',')]
return []
def add_pdf_if_exist(metadata, pres_div):
# The markup is not the same if authenticated or not
form = pres_div.find('form', id="pdfForm")
if form:
metadata['pdf'] = client.get_url('/pdfdownload.action?filename=') + urllib.parse.quote(form.input['value'], safe='')
else:
a = pres_div.find('a', class_='link-slides')
if a:
metadata['pdf'] = client.get_url(a['href'])
def add_mp3_if_exist(metadata, bc3):
# The markup is not the same if authenticated or not
form = bc3.find('form', id="mp3Form")
if form:
metadata['mp3'] = client.get_url('/mp3download.action?filename=') + urllib.parse.quote(form.input['value'], safe='')
else:
a = bc3.find('a', class_='link-mp3')
if a:
metadata['mp3'] = client.get_url(a['href'])
if not hasattr(self, "_metadata"):
pres_div = self.soup.find('div', class_='presentation_full')
metadata = {
'url': client.get_url("/presentations/" + self.id),
'title': get_title(pres_div),
'date' : get_date(pres_div),
'auth' : get_author(pres_div),
'timecodes': get_timecodes(self.soup),
'demo_timings': get_demo_timings(self.soup),
'slides': get_slides(self.soup),
'video_url': six.u("rtmpe://video.infoq.com/cfx/st/"),
'video_path': get_video(self.soup),
'bio': get_bio(pres_div),
'summary': get_summary(pres_div),
'about': get_about(pres_div),
}
add_mp3_if_exist(metadata, pres_div)
add_pdf_if_exist(metadata, pres_div)
self._metadata = metadata
return self._metadata
class _RightBarPage(object):
"""A page returned by /rightbar.action
This page lists all available presentations with pagination.
"""
def __init__(self, client, index):
self.client = client
self.index = index
@property
def soup(self):
"""Download the page and create the soup"""
try:
return self._soup
except AttributeError:
url = client.get_url("/presentations/%s" % self.index)
content = self.client.fetch_no_cache(url).decode('utf-8')
self._soup = bs4.BeautifulSoup(content, "html.parser")
return self._soup
def summaries(self):
"""Return a list of all the presentation summaries contained in this page"""
def create_summary(div):
def get_id(div):
return get_url(div).rsplit('/')[-1]
def get_url(div):
return client.get_url(div.find('h2', class_='itemtitle').a['href'])
def get_desc(div):
return div.p.get_text(strip=True)
def get_auth(div):
return div.find('span', class_='author').a['title']
def get_date(div):
str = div.find('span', class_='author').get_text()
str = str.replace('\n', ' ')
str = str.replace(six.u('\xa0'), ' ')
match = re.search(r'on\s+(\w{3} [0-9]{1,2}, 20[0-9]{2})', str)
return datetime.datetime.strptime(match.group(1), "%b %d, %Y")
def get_title(div):
return div.find('h2', class_='itemtitle').a['title']
return {
'id': get_id(div),
'url': get_url(div),
'desc': get_desc(div),
'auth': get_auth(div),
'date': get_date(div),
'title': get_title(div),
}
videos = self.soup.findAll('div', {'class': 'news_type_video'})
return [create_summary(div) for div in videos]
|
cykl/infoqscraper
|
infoqscraper/scrap.py
|
Presentation._fetch
|
python
|
def _fetch(self):
url = client.get_url("/presentations/" + self.id)
content = self.client.fetch_no_cache(url).decode('utf-8')
return bs4.BeautifulSoup(content, "html.parser")
|
Download the page and create the soup
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/scrap.py#L82-L86
|
[
"def get_url(path, scheme=\"http\"):\n \"\"\" Return the full InfoQ URL \"\"\"\n return scheme + \"://www.infoq.com\" + path\n"
] |
class Presentation(object):
""" An InfoQ presentation.
"""
def __init__(self, client, id):
self.client = client
self.id = id
self.soup = self._fetch()
@property
def metadata(self):
def get_title(pres_div):
return pres_div.find('h1', class_="general").div.get_text().strip()
def get_date(pres_div):
strings = ''.join(pres_div.find('span', class_='author_general').strings)
match = re.search('on[\n ]+(.*\d{4})', strings)
if match:
return datetime.datetime.strptime(match.group(1), "%b %d, %Y")
else:
raise Exception("Failed to extract date (markup changed?)")
def get_author(pres_div):
return pres_div.find('span', class_='authors-list').find('a').get_text().strip()
def get_timecodes(pres_div):
for script in pres_div.find_all('script'):
mo = re.search("TIMES\s?=\s?new\s+Array.?\((\d+(,\d+)+)\)", script.get_text())
if mo:
return [int(tc) for tc in mo.group(1).split(',')]
def get_slides(pres_div):
for script in pres_div.find_all('script'):
mo = re.search("var\s+slides\s?=\s?new\s+Array.?\(('.+')\)", script.get_text())
if mo:
return [slide.replace('\'', '') for slide in mo.group(1).split(',')]
def get_video(pres_div):
for script in pres_div.find_all('script'):
mo = re.search('var jsclassref = \'(.*)\';', script.get_text())
if mo:
b64 = mo.group(1)
path = base64.b64decode(b64).decode('utf-8')
# Older presentations use flv and the video path does not contain
# the extension. Newer presentations use mp4 and include the extension.
if path.endswith(".mp4"):
return "mp4:%s" % path
elif path.endswith(".flv"):
return "flv:%s" % path[:-4]
else:
raise Exception("Unsupported video type: %s" % path)
def get_bio(div):
return div.find('p', id="biotext").get_text(strip=True)
def get_summary(div):
return "".join(div.find('p', id="summary").get_text("|", strip=True).split("|")[1:])
def get_about(div):
return div.find('p', id="conference").get_text(strip=True)
def get_demo_timings(pres_div):
for script in pres_div.find_all('script'):
timings = re.search("demoTimings\s+=\s+'([^']+)", script.get_text())
if timings:
return [int(t) for t in timings.group(1).split(',')]
return []
def add_pdf_if_exist(metadata, pres_div):
# The markup is not the same if authenticated or not
form = pres_div.find('form', id="pdfForm")
if form:
metadata['pdf'] = client.get_url('/pdfdownload.action?filename=') + urllib.parse.quote(form.input['value'], safe='')
else:
a = pres_div.find('a', class_='link-slides')
if a:
metadata['pdf'] = client.get_url(a['href'])
def add_mp3_if_exist(metadata, bc3):
# The markup is not the same if authenticated or not
form = bc3.find('form', id="mp3Form")
if form:
metadata['mp3'] = client.get_url('/mp3download.action?filename=') + urllib.parse.quote(form.input['value'], safe='')
else:
a = bc3.find('a', class_='link-mp3')
if a:
metadata['mp3'] = client.get_url(a['href'])
if not hasattr(self, "_metadata"):
pres_div = self.soup.find('div', class_='presentation_full')
metadata = {
'url': client.get_url("/presentations/" + self.id),
'title': get_title(pres_div),
'date' : get_date(pres_div),
'auth' : get_author(pres_div),
'timecodes': get_timecodes(self.soup),
'demo_timings': get_demo_timings(self.soup),
'slides': get_slides(self.soup),
'video_url': six.u("rtmpe://video.infoq.com/cfx/st/"),
'video_path': get_video(self.soup),
'bio': get_bio(pres_div),
'summary': get_summary(pres_div),
'about': get_about(pres_div),
}
add_mp3_if_exist(metadata, pres_div)
add_pdf_if_exist(metadata, pres_div)
self._metadata = metadata
return self._metadata
|
cykl/infoqscraper
|
infoqscraper/scrap.py
|
_RightBarPage.soup
|
python
|
def soup(self):
try:
return self._soup
except AttributeError:
url = client.get_url("/presentations/%s" % self.index)
content = self.client.fetch_no_cache(url).decode('utf-8')
self._soup = bs4.BeautifulSoup(content, "html.parser")
return self._soup
|
Download the page and create the soup
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/scrap.py#L203-L212
|
[
"def get_url(path, scheme=\"http\"):\n \"\"\" Return the full InfoQ URL \"\"\"\n return scheme + \"://www.infoq.com\" + path\n"
] |
class _RightBarPage(object):
"""A page returned by /rightbar.action
This page lists all available presentations with pagination.
"""
def __init__(self, client, index):
self.client = client
self.index = index
@property
def summaries(self):
"""Return a list of all the presentation summaries contained in this page"""
def create_summary(div):
def get_id(div):
return get_url(div).rsplit('/')[-1]
def get_url(div):
return client.get_url(div.find('h2', class_='itemtitle').a['href'])
def get_desc(div):
return div.p.get_text(strip=True)
def get_auth(div):
return div.find('span', class_='author').a['title']
def get_date(div):
str = div.find('span', class_='author').get_text()
str = str.replace('\n', ' ')
str = str.replace(six.u('\xa0'), ' ')
match = re.search(r'on\s+(\w{3} [0-9]{1,2}, 20[0-9]{2})', str)
return datetime.datetime.strptime(match.group(1), "%b %d, %Y")
def get_title(div):
return div.find('h2', class_='itemtitle').a['title']
return {
'id': get_id(div),
'url': get_url(div),
'desc': get_desc(div),
'auth': get_auth(div),
'date': get_date(div),
'title': get_title(div),
}
videos = self.soup.findAll('div', {'class': 'news_type_video'})
return [create_summary(div) for div in videos]
|
cykl/infoqscraper
|
infoqscraper/scrap.py
|
_RightBarPage.summaries
|
python
|
def summaries(self):
def create_summary(div):
def get_id(div):
return get_url(div).rsplit('/')[-1]
def get_url(div):
return client.get_url(div.find('h2', class_='itemtitle').a['href'])
def get_desc(div):
return div.p.get_text(strip=True)
def get_auth(div):
return div.find('span', class_='author').a['title']
def get_date(div):
str = div.find('span', class_='author').get_text()
str = str.replace('\n', ' ')
str = str.replace(six.u('\xa0'), ' ')
match = re.search(r'on\s+(\w{3} [0-9]{1,2}, 20[0-9]{2})', str)
return datetime.datetime.strptime(match.group(1), "%b %d, %Y")
def get_title(div):
return div.find('h2', class_='itemtitle').a['title']
return {
'id': get_id(div),
'url': get_url(div),
'desc': get_desc(div),
'auth': get_auth(div),
'date': get_date(div),
'title': get_title(div),
}
videos = self.soup.findAll('div', {'class': 'news_type_video'})
return [create_summary(div) for div in videos]
|
Return a list of all the presentation summaries contained in this page
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/scrap.py#L214-L249
| null |
class _RightBarPage(object):
"""A page returned by /rightbar.action
This page lists all available presentations with pagination.
"""
def __init__(self, client, index):
self.client = client
self.index = index
@property
def soup(self):
"""Download the page and create the soup"""
try:
return self._soup
except AttributeError:
url = client.get_url("/presentations/%s" % self.index)
content = self.client.fetch_no_cache(url).decode('utf-8')
self._soup = bs4.BeautifulSoup(content, "html.parser")
return self._soup
|
cykl/infoqscraper
|
infoqscraper/convert.py
|
swf2png
|
python
|
def swf2png(swf_path, png_path, swfrender_path="swfrender"):
# Currently rely on swftools
#
# Would be great to have a native python dependency to convert swf into png or jpg.
# However it seems that pyswf isn't flawless. Some graphical elements (like the text!) are lost during
# the export.
try:
cmd = [swfrender_path, swf_path, '-o', png_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
raise ConversionError("Failed to convert SWF file %s.\n"
"\tCommand: %s\n"
"\tExit status: %s.\n"
"\tOutput:\n%s"
% (swf_path, " ".join(cmd), e.returncode, e.output))
|
Convert SWF slides into a PNG image
Raises:
OSError is raised if swfrender is not available.
ConversionError is raised if image cannot be created.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/convert.py#L340-L360
| null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Clément MATHIEU
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import os
import re
import shutil
import six
import subprocess
import tempfile
from infoqscraper import client
from infoqscraper import ConversionError
class Converter(object):
def __init__(self, presentation, output, **kwargs):
self.presentation = presentation
self.output = output
self.ffmpeg = kwargs['ffmpeg']
self.rtmpdump = kwargs['rtmpdump']
self.swfrender = kwargs['swfrender']
self.overwrite = kwargs['overwrite']
self.type = kwargs['type']
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir)
@property
def tmp_dir(self):
if not hasattr(self, "_tmp_dir"):
self._tmp_dir = tempfile.mkdtemp(prefix="infoq")
return self._tmp_dir
@property
def _audio_path(self):
return os.path.join(self.tmp_dir, "audio.ogg")
@property
def _video_path(self):
return os.path.join(self.tmp_dir, 'video.avi')
def create_presentation(self):
""" Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as self.output
DownloadError is raised if some resources cannot be fetched.
ConversionError is raised if the final video cannot be created.
"""
# Avoid wasting time and bandwidth if we known that conversion will fail.
if not self.overwrite and os.path.exists(self.output):
raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
video = self.download_video()
raw_slides = self.download_slides()
# ffmpeg does not support SWF
png_slides = self._convert_slides(raw_slides)
# Create one frame per second using the time code information
frame_pattern = self._prepare_frames(png_slides)
return self._assemble(video, frame_pattern)
def download_video(self):
"""Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
rvideo_path = self.presentation.metadata['video_path']
if self.presentation.client.cache:
video_path = self.presentation.client.cache.get_path(rvideo_path)
if not video_path:
video_path = self.download_video_no_cache()
self.presentation.client.cache.put_path(rvideo_path, video_path)
else:
video_path = self.download_video_no_cache()
return video_path
def download_video_no_cache(self):
"""Downloads the video.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
video_url = self.presentation.metadata['video_url']
video_path = self.presentation.metadata['video_path']
# After a while, when downloading a long video (> 1h), the RTMP server seems to reset the connection (rtmpdump
# returns exit code 2). The only way to get the full stream is to resume the download.
resume_download = True
while resume_download:
try:
cmd = [self.rtmpdump, '-q', '-e', '-r', video_url, '-y', video_path, "-o", self._video_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
resume_download = False
except subprocess.CalledProcessError as e:
if e.returncode != 2:
try:
os.unlink(self._video_path)
except OSError:
pass
raise client.DownloadError("Failed to download video at %s: rtmpdump exited with %s.\n\tOutput:\n%s"
% (video_url, e.returncode, e.output))
return self._video_path
def download_slides(self):
""" Download all SWF slides.
The location of the slides files are returned.
A DownloadError is raised if at least one of the slides cannot be download..
"""
return self.presentation.client.download_all(self.presentation.metadata['slides'], self.tmp_dir)
def _ffmpeg_legacy(self, audio, frame_pattern):
# Try to be compatible as much as possible with old ffmpeg releases (>= 0.7)
# - Do not use new syntax options
# - Do not use libx264, not available on old Ubuntu/Debian
# - Do not use -threads auto, not available on 0.8.*
# - Old releases are very picky regarding arguments position
# - -n is not supported on 0.8
#
# 0.5 (Debian Squeeze & Ubuntu 10.4) is not supported because of
# scaling issues with image2.
cmd = [
self.ffmpeg, "-v", "0",
"-i", audio,
"-f", "image2", "-r", "1", "-s", "hd720", "-i", frame_pattern,
"-map", "1:0", "-acodec", "libmp3lame", "-ab", "128k",
"-map", "0:1", "-vcodec", "mpeg4", "-vb", "2M", "-y", self.output
]
if not self.overwrite and os.path.exists(self.output):
# Handle already existing file manually since nor -n nor -nostdin is available on 0.8
raise Exception("File %s already exist and --overwrite not specified" % self.output)
return cmd
def _ffmpeg_h264(self, audio, frame_pattern):
return [
self.ffmpeg, "-v", "error",
"-i", audio,
"-r", "1", "-i", frame_pattern,
"-c:a", "copy",
"-c:v", "libx264", "-profile:v", "baseline", "-preset", "ultrafast", "-level", "3.0",
"-crf", "28", "-pix_fmt", "yuv420p",
"-s", "1280x720",
"-y" if self.overwrite else "-n",
self.output
]
def _ffmpeg_h264_overlay(self, video, frame_pattern):
cmd = [self.ffmpeg, "-i", video]
video_details = ""
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
video_details = e.output
fps_match = re.search(six.b('\S+(?=\s+tbr)'), video_details)
fps = float(fps_match.group(0))
timings = self.presentation.metadata['demo_timings'][:]
if len(timings) == 0 or timings[0] != 0:
slides_first = True
timings.insert(0, 0)
else:
slides_first = False
timings.append(float('inf'))
inputs = []
filter_complex = []
concat = []
for i, right_range in enumerate(timings[1:]):
left_range = timings[i]
duration = right_range - left_range
if left_range > 0:
inputs += ["-ss", str(left_range)]
if right_range != float('inf'):
inputs += ["-t", str(duration)]
inputs += ["-i", video]
if (i % 2 == 0) == slides_first:
inputs += [
"-f", "image2", "-r", "1", "-s", "hd720", "-start_number", str(left_range), "-i", frame_pattern
]
stream_id = i // 2 * 3
if not slides_first:
stream_id += 1
filter_complex += [
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=320:h=-1 [sp-{1:d}];".format(stream_id, i),
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=1280-320:h=-1[sl-{1:d}];".format(stream_id + 1, i),
"color=size=1280x720:c=Black [b-{0:d}];".format(i),
"[b-{0:d}][sl-{0:d}] overlay=shortest=1:x=0:y=0 [bsl-{0:d}];".format(i),
"[bsl-{0:d}][sp-{0:d}] overlay=shortest=1:x=main_w-320:y=main_h-overlay_h [c-{0:d}];".format(i)
]
else:
stream_id = i // 2 * 3
if slides_first:
stream_id += 2
filter_complex += [
"[{0:d}:v] scale='if(gt(a,16/9),1280,-1)':'if(gt(a,16/9),-1,720)' [c-{1:d}];".format(stream_id, i)
]
concat += ["[c-{0:d}] [{1:d}:a:0]".format(i, stream_id)]
concat += ["concat=n={0:d}:v=1:a=1 [v] [a]".format(len(timings) - 1)]
filter_script_path = os.path.join(self.tmp_dir, "filter")
with open(filter_script_path, 'w') as filter_script_file:
filter_script_file.write("\n".join(filter_complex))
filter_script_file.write("\n")
filter_script_file.write(" ".join(concat))
cmd = [self.ffmpeg, "-v", "error"]
cmd += inputs
cmd += [
"-filter_complex_script", filter_script_path,
"-map", "[v]", "-map", "[a]",
"-r", str(fps),
"-acodec", "libmp3lame", "-ab", "92k",
"-vcodec", "libx264", "-profile:v", "baseline", "-preset", "fast", "-level", "3.0", "-crf", "28",
"-y" if self.overwrite else "-n",
self.output
]
return cmd
def _assemble(self, audio, frame_pattern):
if self.type == "legacy":
cmd = self._ffmpeg_legacy(audio, frame_pattern)
elif self.type == "h264":
cmd = self._ffmpeg_h264(audio, frame_pattern)
elif self.type == "h264_overlay":
cmd = self._ffmpeg_h264_overlay(audio, frame_pattern)
else:
raise Exception("Unknown output type %s" % self.type)
self._run_command(cmd)
def _run_command(self, cmd):
try:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = "Failed to create final movie as %s.\n" \
"\tCommand: %s\n" \
"\tExit code: %s\n" \
"\tOutput:\n%s" % (self.output, " ".join(cmd), e.returncode, e.output)
if self.type != "legacy":
msg += "\n Please note that %s output format requires a recent version of ffmpeg and libx264." \
" Perhaps you should check your setup." \
% self.type
raise ConversionError(msg)
def _convert_slides(self, slides):
def convert(slide):
if slide.endswith("swf"):
png_slide = slide.replace(".swf", ".png")
swf2png(slide, png_slide, swfrender_path=self.swfrender)
return png_slide
elif slide.endswith("jpg"):
return slide
else:
raise Exception("Unsupported slide type: %s" % slide)
return [convert(s) for s in slides]
def _prepare_frames(self, slides):
timecodes = self.presentation.metadata['timecodes']
ext = os.path.splitext(slides[0])[1]
frame = 0
for slide_index, src in enumerate(slides):
for remaining in range(timecodes[slide_index], timecodes[slide_index+1]):
dst = os.path.join(self.tmp_dir, "frame-{0:04d}." + ext).format(frame)
try:
os.link(src, dst)
except OSError as e:
if e.errno == errno.EMLINK:
# Create a new reference file when the upper limit is reached
# (previous to Linux 3.7, btrfs had a very low limit)
shutil.copyfile(src, dst)
src = dst
else:
raise e
frame += 1
return os.path.join(self.tmp_dir, "frame-%04d." + ext)
|
cykl/infoqscraper
|
infoqscraper/convert.py
|
Converter.create_presentation
|
python
|
def create_presentation(self):
# Avoid wasting time and bandwidth if we known that conversion will fail.
if not self.overwrite and os.path.exists(self.output):
raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
video = self.download_video()
raw_slides = self.download_slides()
# ffmpeg does not support SWF
png_slides = self._convert_slides(raw_slides)
# Create one frame per second using the time code information
frame_pattern = self._prepare_frames(png_slides)
return self._assemble(video, frame_pattern)
|
Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as self.output
DownloadError is raised if some resources cannot be fetched.
ConversionError is raised if the final video cannot be created.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/convert.py#L70-L90
|
[
"def download_video(self):\n \"\"\"Downloads the video.\n\n If self.client.cache_enabled is True, then the disk cache is used.\n\n Returns:\n The path where the video has been saved.\n\n Raises:\n DownloadError: If the video cannot be downloaded.\n \"\"\"\n rvideo_path = self.presentation.metadata['video_path']\n\n if self.presentation.client.cache:\n video_path = self.presentation.client.cache.get_path(rvideo_path)\n if not video_path:\n video_path = self.download_video_no_cache()\n self.presentation.client.cache.put_path(rvideo_path, video_path)\n else:\n video_path = self.download_video_no_cache()\n\n return video_path\n",
"def download_slides(self):\n \"\"\" Download all SWF slides.\n\n The location of the slides files are returned.\n\n A DownloadError is raised if at least one of the slides cannot be download..\n \"\"\"\n return self.presentation.client.download_all(self.presentation.metadata['slides'], self.tmp_dir)\n",
"def _assemble(self, audio, frame_pattern):\n if self.type == \"legacy\":\n cmd = self._ffmpeg_legacy(audio, frame_pattern)\n elif self.type == \"h264\":\n cmd = self._ffmpeg_h264(audio, frame_pattern)\n elif self.type == \"h264_overlay\":\n cmd = self._ffmpeg_h264_overlay(audio, frame_pattern)\n else:\n raise Exception(\"Unknown output type %s\" % self.type)\n\n self._run_command(cmd)\n",
"def _convert_slides(self, slides):\n\n def convert(slide):\n if slide.endswith(\"swf\"):\n png_slide = slide.replace(\".swf\", \".png\")\n swf2png(slide, png_slide, swfrender_path=self.swfrender)\n return png_slide\n elif slide.endswith(\"jpg\"):\n return slide\n else:\n raise Exception(\"Unsupported slide type: %s\" % slide)\n\n return [convert(s) for s in slides]\n",
"def _prepare_frames(self, slides):\n timecodes = self.presentation.metadata['timecodes']\n ext = os.path.splitext(slides[0])[1]\n\n frame = 0\n for slide_index, src in enumerate(slides):\n for remaining in range(timecodes[slide_index], timecodes[slide_index+1]):\n dst = os.path.join(self.tmp_dir, \"frame-{0:04d}.\" + ext).format(frame)\n try:\n os.link(src, dst)\n except OSError as e:\n if e.errno == errno.EMLINK:\n # Create a new reference file when the upper limit is reached\n # (previous to Linux 3.7, btrfs had a very low limit)\n shutil.copyfile(src, dst)\n src = dst\n else:\n raise e\n\n frame += 1\n\n return os.path.join(self.tmp_dir, \"frame-%04d.\" + ext)\n"
] |
class Converter(object):
def __init__(self, presentation, output, **kwargs):
self.presentation = presentation
self.output = output
self.ffmpeg = kwargs['ffmpeg']
self.rtmpdump = kwargs['rtmpdump']
self.swfrender = kwargs['swfrender']
self.overwrite = kwargs['overwrite']
self.type = kwargs['type']
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir)
@property
def tmp_dir(self):
if not hasattr(self, "_tmp_dir"):
self._tmp_dir = tempfile.mkdtemp(prefix="infoq")
return self._tmp_dir
@property
def _audio_path(self):
return os.path.join(self.tmp_dir, "audio.ogg")
@property
def _video_path(self):
return os.path.join(self.tmp_dir, 'video.avi')
def download_video(self):
"""Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
rvideo_path = self.presentation.metadata['video_path']
if self.presentation.client.cache:
video_path = self.presentation.client.cache.get_path(rvideo_path)
if not video_path:
video_path = self.download_video_no_cache()
self.presentation.client.cache.put_path(rvideo_path, video_path)
else:
video_path = self.download_video_no_cache()
return video_path
def download_video_no_cache(self):
"""Downloads the video.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
video_url = self.presentation.metadata['video_url']
video_path = self.presentation.metadata['video_path']
# After a while, when downloading a long video (> 1h), the RTMP server seems to reset the connection (rtmpdump
# returns exit code 2). The only way to get the full stream is to resume the download.
resume_download = True
while resume_download:
try:
cmd = [self.rtmpdump, '-q', '-e', '-r', video_url, '-y', video_path, "-o", self._video_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
resume_download = False
except subprocess.CalledProcessError as e:
if e.returncode != 2:
try:
os.unlink(self._video_path)
except OSError:
pass
raise client.DownloadError("Failed to download video at %s: rtmpdump exited with %s.\n\tOutput:\n%s"
% (video_url, e.returncode, e.output))
return self._video_path
def download_slides(self):
""" Download all SWF slides.
The location of the slides files are returned.
A DownloadError is raised if at least one of the slides cannot be download..
"""
return self.presentation.client.download_all(self.presentation.metadata['slides'], self.tmp_dir)
def _ffmpeg_legacy(self, audio, frame_pattern):
# Try to be compatible as much as possible with old ffmpeg releases (>= 0.7)
# - Do not use new syntax options
# - Do not use libx264, not available on old Ubuntu/Debian
# - Do not use -threads auto, not available on 0.8.*
# - Old releases are very picky regarding arguments position
# - -n is not supported on 0.8
#
# 0.5 (Debian Squeeze & Ubuntu 10.4) is not supported because of
# scaling issues with image2.
cmd = [
self.ffmpeg, "-v", "0",
"-i", audio,
"-f", "image2", "-r", "1", "-s", "hd720", "-i", frame_pattern,
"-map", "1:0", "-acodec", "libmp3lame", "-ab", "128k",
"-map", "0:1", "-vcodec", "mpeg4", "-vb", "2M", "-y", self.output
]
if not self.overwrite and os.path.exists(self.output):
# Handle already existing file manually since nor -n nor -nostdin is available on 0.8
raise Exception("File %s already exist and --overwrite not specified" % self.output)
return cmd
def _ffmpeg_h264(self, audio, frame_pattern):
return [
self.ffmpeg, "-v", "error",
"-i", audio,
"-r", "1", "-i", frame_pattern,
"-c:a", "copy",
"-c:v", "libx264", "-profile:v", "baseline", "-preset", "ultrafast", "-level", "3.0",
"-crf", "28", "-pix_fmt", "yuv420p",
"-s", "1280x720",
"-y" if self.overwrite else "-n",
self.output
]
def _ffmpeg_h264_overlay(self, video, frame_pattern):
cmd = [self.ffmpeg, "-i", video]
video_details = ""
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
video_details = e.output
fps_match = re.search(six.b('\S+(?=\s+tbr)'), video_details)
fps = float(fps_match.group(0))
timings = self.presentation.metadata['demo_timings'][:]
if len(timings) == 0 or timings[0] != 0:
slides_first = True
timings.insert(0, 0)
else:
slides_first = False
timings.append(float('inf'))
inputs = []
filter_complex = []
concat = []
for i, right_range in enumerate(timings[1:]):
left_range = timings[i]
duration = right_range - left_range
if left_range > 0:
inputs += ["-ss", str(left_range)]
if right_range != float('inf'):
inputs += ["-t", str(duration)]
inputs += ["-i", video]
if (i % 2 == 0) == slides_first:
inputs += [
"-f", "image2", "-r", "1", "-s", "hd720", "-start_number", str(left_range), "-i", frame_pattern
]
stream_id = i // 2 * 3
if not slides_first:
stream_id += 1
filter_complex += [
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=320:h=-1 [sp-{1:d}];".format(stream_id, i),
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=1280-320:h=-1[sl-{1:d}];".format(stream_id + 1, i),
"color=size=1280x720:c=Black [b-{0:d}];".format(i),
"[b-{0:d}][sl-{0:d}] overlay=shortest=1:x=0:y=0 [bsl-{0:d}];".format(i),
"[bsl-{0:d}][sp-{0:d}] overlay=shortest=1:x=main_w-320:y=main_h-overlay_h [c-{0:d}];".format(i)
]
else:
stream_id = i // 2 * 3
if slides_first:
stream_id += 2
filter_complex += [
"[{0:d}:v] scale='if(gt(a,16/9),1280,-1)':'if(gt(a,16/9),-1,720)' [c-{1:d}];".format(stream_id, i)
]
concat += ["[c-{0:d}] [{1:d}:a:0]".format(i, stream_id)]
concat += ["concat=n={0:d}:v=1:a=1 [v] [a]".format(len(timings) - 1)]
filter_script_path = os.path.join(self.tmp_dir, "filter")
with open(filter_script_path, 'w') as filter_script_file:
filter_script_file.write("\n".join(filter_complex))
filter_script_file.write("\n")
filter_script_file.write(" ".join(concat))
cmd = [self.ffmpeg, "-v", "error"]
cmd += inputs
cmd += [
"-filter_complex_script", filter_script_path,
"-map", "[v]", "-map", "[a]",
"-r", str(fps),
"-acodec", "libmp3lame", "-ab", "92k",
"-vcodec", "libx264", "-profile:v", "baseline", "-preset", "fast", "-level", "3.0", "-crf", "28",
"-y" if self.overwrite else "-n",
self.output
]
return cmd
def _assemble(self, audio, frame_pattern):
if self.type == "legacy":
cmd = self._ffmpeg_legacy(audio, frame_pattern)
elif self.type == "h264":
cmd = self._ffmpeg_h264(audio, frame_pattern)
elif self.type == "h264_overlay":
cmd = self._ffmpeg_h264_overlay(audio, frame_pattern)
else:
raise Exception("Unknown output type %s" % self.type)
self._run_command(cmd)
def _run_command(self, cmd):
try:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = "Failed to create final movie as %s.\n" \
"\tCommand: %s\n" \
"\tExit code: %s\n" \
"\tOutput:\n%s" % (self.output, " ".join(cmd), e.returncode, e.output)
if self.type != "legacy":
msg += "\n Please note that %s output format requires a recent version of ffmpeg and libx264." \
" Perhaps you should check your setup." \
% self.type
raise ConversionError(msg)
def _convert_slides(self, slides):
def convert(slide):
if slide.endswith("swf"):
png_slide = slide.replace(".swf", ".png")
swf2png(slide, png_slide, swfrender_path=self.swfrender)
return png_slide
elif slide.endswith("jpg"):
return slide
else:
raise Exception("Unsupported slide type: %s" % slide)
return [convert(s) for s in slides]
def _prepare_frames(self, slides):
timecodes = self.presentation.metadata['timecodes']
ext = os.path.splitext(slides[0])[1]
frame = 0
for slide_index, src in enumerate(slides):
for remaining in range(timecodes[slide_index], timecodes[slide_index+1]):
dst = os.path.join(self.tmp_dir, "frame-{0:04d}." + ext).format(frame)
try:
os.link(src, dst)
except OSError as e:
if e.errno == errno.EMLINK:
# Create a new reference file when the upper limit is reached
# (previous to Linux 3.7, btrfs had a very low limit)
shutil.copyfile(src, dst)
src = dst
else:
raise e
frame += 1
return os.path.join(self.tmp_dir, "frame-%04d." + ext)
|
cykl/infoqscraper
|
infoqscraper/convert.py
|
Converter.download_video
|
python
|
def download_video(self):
rvideo_path = self.presentation.metadata['video_path']
if self.presentation.client.cache:
video_path = self.presentation.client.cache.get_path(rvideo_path)
if not video_path:
video_path = self.download_video_no_cache()
self.presentation.client.cache.put_path(rvideo_path, video_path)
else:
video_path = self.download_video_no_cache()
return video_path
|
Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/convert.py#L92-L113
|
[
"def download_video_no_cache(self):\n \"\"\"Downloads the video.\n\n Returns:\n The path where the video has been saved.\n\n Raises:\n DownloadError: If the video cannot be downloaded.\n \"\"\"\n video_url = self.presentation.metadata['video_url']\n video_path = self.presentation.metadata['video_path']\n\n # After a while, when downloading a long video (> 1h), the RTMP server seems to reset the connection (rtmpdump\n # returns exit code 2). The only way to get the full stream is to resume the download.\n resume_download = True\n while resume_download:\n try:\n cmd = [self.rtmpdump, '-q', '-e', '-r', video_url, '-y', video_path, \"-o\", self._video_path]\n subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n resume_download = False\n except subprocess.CalledProcessError as e:\n if e.returncode != 2:\n try:\n os.unlink(self._video_path)\n except OSError:\n pass\n\n raise client.DownloadError(\"Failed to download video at %s: rtmpdump exited with %s.\\n\\tOutput:\\n%s\"\n % (video_url, e.returncode, e.output))\n\n return self._video_path\n"
] |
class Converter(object):
def __init__(self, presentation, output, **kwargs):
self.presentation = presentation
self.output = output
self.ffmpeg = kwargs['ffmpeg']
self.rtmpdump = kwargs['rtmpdump']
self.swfrender = kwargs['swfrender']
self.overwrite = kwargs['overwrite']
self.type = kwargs['type']
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir)
@property
def tmp_dir(self):
if not hasattr(self, "_tmp_dir"):
self._tmp_dir = tempfile.mkdtemp(prefix="infoq")
return self._tmp_dir
@property
def _audio_path(self):
return os.path.join(self.tmp_dir, "audio.ogg")
@property
def _video_path(self):
return os.path.join(self.tmp_dir, 'video.avi')
def create_presentation(self):
""" Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as self.output
DownloadError is raised if some resources cannot be fetched.
ConversionError is raised if the final video cannot be created.
"""
# Avoid wasting time and bandwidth if we known that conversion will fail.
if not self.overwrite and os.path.exists(self.output):
raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
video = self.download_video()
raw_slides = self.download_slides()
# ffmpeg does not support SWF
png_slides = self._convert_slides(raw_slides)
# Create one frame per second using the time code information
frame_pattern = self._prepare_frames(png_slides)
return self._assemble(video, frame_pattern)
def download_video_no_cache(self):
"""Downloads the video.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
video_url = self.presentation.metadata['video_url']
video_path = self.presentation.metadata['video_path']
# After a while, when downloading a long video (> 1h), the RTMP server seems to reset the connection (rtmpdump
# returns exit code 2). The only way to get the full stream is to resume the download.
resume_download = True
while resume_download:
try:
cmd = [self.rtmpdump, '-q', '-e', '-r', video_url, '-y', video_path, "-o", self._video_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
resume_download = False
except subprocess.CalledProcessError as e:
if e.returncode != 2:
try:
os.unlink(self._video_path)
except OSError:
pass
raise client.DownloadError("Failed to download video at %s: rtmpdump exited with %s.\n\tOutput:\n%s"
% (video_url, e.returncode, e.output))
return self._video_path
def download_slides(self):
""" Download all SWF slides.
The location of the slides files are returned.
A DownloadError is raised if at least one of the slides cannot be download..
"""
return self.presentation.client.download_all(self.presentation.metadata['slides'], self.tmp_dir)
def _ffmpeg_legacy(self, audio, frame_pattern):
# Try to be compatible as much as possible with old ffmpeg releases (>= 0.7)
# - Do not use new syntax options
# - Do not use libx264, not available on old Ubuntu/Debian
# - Do not use -threads auto, not available on 0.8.*
# - Old releases are very picky regarding arguments position
# - -n is not supported on 0.8
#
# 0.5 (Debian Squeeze & Ubuntu 10.4) is not supported because of
# scaling issues with image2.
cmd = [
self.ffmpeg, "-v", "0",
"-i", audio,
"-f", "image2", "-r", "1", "-s", "hd720", "-i", frame_pattern,
"-map", "1:0", "-acodec", "libmp3lame", "-ab", "128k",
"-map", "0:1", "-vcodec", "mpeg4", "-vb", "2M", "-y", self.output
]
if not self.overwrite and os.path.exists(self.output):
# Handle already existing file manually since nor -n nor -nostdin is available on 0.8
raise Exception("File %s already exist and --overwrite not specified" % self.output)
return cmd
def _ffmpeg_h264(self, audio, frame_pattern):
return [
self.ffmpeg, "-v", "error",
"-i", audio,
"-r", "1", "-i", frame_pattern,
"-c:a", "copy",
"-c:v", "libx264", "-profile:v", "baseline", "-preset", "ultrafast", "-level", "3.0",
"-crf", "28", "-pix_fmt", "yuv420p",
"-s", "1280x720",
"-y" if self.overwrite else "-n",
self.output
]
def _ffmpeg_h264_overlay(self, video, frame_pattern):
cmd = [self.ffmpeg, "-i", video]
video_details = ""
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
video_details = e.output
fps_match = re.search(six.b('\S+(?=\s+tbr)'), video_details)
fps = float(fps_match.group(0))
timings = self.presentation.metadata['demo_timings'][:]
if len(timings) == 0 or timings[0] != 0:
slides_first = True
timings.insert(0, 0)
else:
slides_first = False
timings.append(float('inf'))
inputs = []
filter_complex = []
concat = []
for i, right_range in enumerate(timings[1:]):
left_range = timings[i]
duration = right_range - left_range
if left_range > 0:
inputs += ["-ss", str(left_range)]
if right_range != float('inf'):
inputs += ["-t", str(duration)]
inputs += ["-i", video]
if (i % 2 == 0) == slides_first:
inputs += [
"-f", "image2", "-r", "1", "-s", "hd720", "-start_number", str(left_range), "-i", frame_pattern
]
stream_id = i // 2 * 3
if not slides_first:
stream_id += 1
filter_complex += [
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=320:h=-1 [sp-{1:d}];".format(stream_id, i),
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=1280-320:h=-1[sl-{1:d}];".format(stream_id + 1, i),
"color=size=1280x720:c=Black [b-{0:d}];".format(i),
"[b-{0:d}][sl-{0:d}] overlay=shortest=1:x=0:y=0 [bsl-{0:d}];".format(i),
"[bsl-{0:d}][sp-{0:d}] overlay=shortest=1:x=main_w-320:y=main_h-overlay_h [c-{0:d}];".format(i)
]
else:
stream_id = i // 2 * 3
if slides_first:
stream_id += 2
filter_complex += [
"[{0:d}:v] scale='if(gt(a,16/9),1280,-1)':'if(gt(a,16/9),-1,720)' [c-{1:d}];".format(stream_id, i)
]
concat += ["[c-{0:d}] [{1:d}:a:0]".format(i, stream_id)]
concat += ["concat=n={0:d}:v=1:a=1 [v] [a]".format(len(timings) - 1)]
filter_script_path = os.path.join(self.tmp_dir, "filter")
with open(filter_script_path, 'w') as filter_script_file:
filter_script_file.write("\n".join(filter_complex))
filter_script_file.write("\n")
filter_script_file.write(" ".join(concat))
cmd = [self.ffmpeg, "-v", "error"]
cmd += inputs
cmd += [
"-filter_complex_script", filter_script_path,
"-map", "[v]", "-map", "[a]",
"-r", str(fps),
"-acodec", "libmp3lame", "-ab", "92k",
"-vcodec", "libx264", "-profile:v", "baseline", "-preset", "fast", "-level", "3.0", "-crf", "28",
"-y" if self.overwrite else "-n",
self.output
]
return cmd
def _assemble(self, audio, frame_pattern):
if self.type == "legacy":
cmd = self._ffmpeg_legacy(audio, frame_pattern)
elif self.type == "h264":
cmd = self._ffmpeg_h264(audio, frame_pattern)
elif self.type == "h264_overlay":
cmd = self._ffmpeg_h264_overlay(audio, frame_pattern)
else:
raise Exception("Unknown output type %s" % self.type)
self._run_command(cmd)
def _run_command(self, cmd):
try:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = "Failed to create final movie as %s.\n" \
"\tCommand: %s\n" \
"\tExit code: %s\n" \
"\tOutput:\n%s" % (self.output, " ".join(cmd), e.returncode, e.output)
if self.type != "legacy":
msg += "\n Please note that %s output format requires a recent version of ffmpeg and libx264." \
" Perhaps you should check your setup." \
% self.type
raise ConversionError(msg)
def _convert_slides(self, slides):
def convert(slide):
if slide.endswith("swf"):
png_slide = slide.replace(".swf", ".png")
swf2png(slide, png_slide, swfrender_path=self.swfrender)
return png_slide
elif slide.endswith("jpg"):
return slide
else:
raise Exception("Unsupported slide type: %s" % slide)
return [convert(s) for s in slides]
def _prepare_frames(self, slides):
timecodes = self.presentation.metadata['timecodes']
ext = os.path.splitext(slides[0])[1]
frame = 0
for slide_index, src in enumerate(slides):
for remaining in range(timecodes[slide_index], timecodes[slide_index+1]):
dst = os.path.join(self.tmp_dir, "frame-{0:04d}." + ext).format(frame)
try:
os.link(src, dst)
except OSError as e:
if e.errno == errno.EMLINK:
# Create a new reference file when the upper limit is reached
# (previous to Linux 3.7, btrfs had a very low limit)
shutil.copyfile(src, dst)
src = dst
else:
raise e
frame += 1
return os.path.join(self.tmp_dir, "frame-%04d." + ext)
|
cykl/infoqscraper
|
infoqscraper/convert.py
|
Converter.download_video_no_cache
|
python
|
def download_video_no_cache(self):
video_url = self.presentation.metadata['video_url']
video_path = self.presentation.metadata['video_path']
# After a while, when downloading a long video (> 1h), the RTMP server seems to reset the connection (rtmpdump
# returns exit code 2). The only way to get the full stream is to resume the download.
resume_download = True
while resume_download:
try:
cmd = [self.rtmpdump, '-q', '-e', '-r', video_url, '-y', video_path, "-o", self._video_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
resume_download = False
except subprocess.CalledProcessError as e:
if e.returncode != 2:
try:
os.unlink(self._video_path)
except OSError:
pass
raise client.DownloadError("Failed to download video at %s: rtmpdump exited with %s.\n\tOutput:\n%s"
% (video_url, e.returncode, e.output))
return self._video_path
|
Downloads the video.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/convert.py#L115-L145
| null |
class Converter(object):
def __init__(self, presentation, output, **kwargs):
self.presentation = presentation
self.output = output
self.ffmpeg = kwargs['ffmpeg']
self.rtmpdump = kwargs['rtmpdump']
self.swfrender = kwargs['swfrender']
self.overwrite = kwargs['overwrite']
self.type = kwargs['type']
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir)
@property
def tmp_dir(self):
if not hasattr(self, "_tmp_dir"):
self._tmp_dir = tempfile.mkdtemp(prefix="infoq")
return self._tmp_dir
@property
def _audio_path(self):
return os.path.join(self.tmp_dir, "audio.ogg")
@property
def _video_path(self):
return os.path.join(self.tmp_dir, 'video.avi')
def create_presentation(self):
""" Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as self.output
DownloadError is raised if some resources cannot be fetched.
ConversionError is raised if the final video cannot be created.
"""
# Avoid wasting time and bandwidth if we known that conversion will fail.
if not self.overwrite and os.path.exists(self.output):
raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
video = self.download_video()
raw_slides = self.download_slides()
# ffmpeg does not support SWF
png_slides = self._convert_slides(raw_slides)
# Create one frame per second using the time code information
frame_pattern = self._prepare_frames(png_slides)
return self._assemble(video, frame_pattern)
def download_video(self):
"""Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
rvideo_path = self.presentation.metadata['video_path']
if self.presentation.client.cache:
video_path = self.presentation.client.cache.get_path(rvideo_path)
if not video_path:
video_path = self.download_video_no_cache()
self.presentation.client.cache.put_path(rvideo_path, video_path)
else:
video_path = self.download_video_no_cache()
return video_path
def download_slides(self):
""" Download all SWF slides.
The location of the slides files are returned.
A DownloadError is raised if at least one of the slides cannot be download..
"""
return self.presentation.client.download_all(self.presentation.metadata['slides'], self.tmp_dir)
def _ffmpeg_legacy(self, audio, frame_pattern):
# Try to be compatible as much as possible with old ffmpeg releases (>= 0.7)
# - Do not use new syntax options
# - Do not use libx264, not available on old Ubuntu/Debian
# - Do not use -threads auto, not available on 0.8.*
# - Old releases are very picky regarding arguments position
# - -n is not supported on 0.8
#
# 0.5 (Debian Squeeze & Ubuntu 10.4) is not supported because of
# scaling issues with image2.
cmd = [
self.ffmpeg, "-v", "0",
"-i", audio,
"-f", "image2", "-r", "1", "-s", "hd720", "-i", frame_pattern,
"-map", "1:0", "-acodec", "libmp3lame", "-ab", "128k",
"-map", "0:1", "-vcodec", "mpeg4", "-vb", "2M", "-y", self.output
]
if not self.overwrite and os.path.exists(self.output):
# Handle already existing file manually since nor -n nor -nostdin is available on 0.8
raise Exception("File %s already exist and --overwrite not specified" % self.output)
return cmd
def _ffmpeg_h264(self, audio, frame_pattern):
return [
self.ffmpeg, "-v", "error",
"-i", audio,
"-r", "1", "-i", frame_pattern,
"-c:a", "copy",
"-c:v", "libx264", "-profile:v", "baseline", "-preset", "ultrafast", "-level", "3.0",
"-crf", "28", "-pix_fmt", "yuv420p",
"-s", "1280x720",
"-y" if self.overwrite else "-n",
self.output
]
def _ffmpeg_h264_overlay(self, video, frame_pattern):
cmd = [self.ffmpeg, "-i", video]
video_details = ""
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
video_details = e.output
fps_match = re.search(six.b('\S+(?=\s+tbr)'), video_details)
fps = float(fps_match.group(0))
timings = self.presentation.metadata['demo_timings'][:]
if len(timings) == 0 or timings[0] != 0:
slides_first = True
timings.insert(0, 0)
else:
slides_first = False
timings.append(float('inf'))
inputs = []
filter_complex = []
concat = []
for i, right_range in enumerate(timings[1:]):
left_range = timings[i]
duration = right_range - left_range
if left_range > 0:
inputs += ["-ss", str(left_range)]
if right_range != float('inf'):
inputs += ["-t", str(duration)]
inputs += ["-i", video]
if (i % 2 == 0) == slides_first:
inputs += [
"-f", "image2", "-r", "1", "-s", "hd720", "-start_number", str(left_range), "-i", frame_pattern
]
stream_id = i // 2 * 3
if not slides_first:
stream_id += 1
filter_complex += [
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=320:h=-1 [sp-{1:d}];".format(stream_id, i),
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=1280-320:h=-1[sl-{1:d}];".format(stream_id + 1, i),
"color=size=1280x720:c=Black [b-{0:d}];".format(i),
"[b-{0:d}][sl-{0:d}] overlay=shortest=1:x=0:y=0 [bsl-{0:d}];".format(i),
"[bsl-{0:d}][sp-{0:d}] overlay=shortest=1:x=main_w-320:y=main_h-overlay_h [c-{0:d}];".format(i)
]
else:
stream_id = i // 2 * 3
if slides_first:
stream_id += 2
filter_complex += [
"[{0:d}:v] scale='if(gt(a,16/9),1280,-1)':'if(gt(a,16/9),-1,720)' [c-{1:d}];".format(stream_id, i)
]
concat += ["[c-{0:d}] [{1:d}:a:0]".format(i, stream_id)]
concat += ["concat=n={0:d}:v=1:a=1 [v] [a]".format(len(timings) - 1)]
filter_script_path = os.path.join(self.tmp_dir, "filter")
with open(filter_script_path, 'w') as filter_script_file:
filter_script_file.write("\n".join(filter_complex))
filter_script_file.write("\n")
filter_script_file.write(" ".join(concat))
cmd = [self.ffmpeg, "-v", "error"]
cmd += inputs
cmd += [
"-filter_complex_script", filter_script_path,
"-map", "[v]", "-map", "[a]",
"-r", str(fps),
"-acodec", "libmp3lame", "-ab", "92k",
"-vcodec", "libx264", "-profile:v", "baseline", "-preset", "fast", "-level", "3.0", "-crf", "28",
"-y" if self.overwrite else "-n",
self.output
]
return cmd
def _assemble(self, audio, frame_pattern):
if self.type == "legacy":
cmd = self._ffmpeg_legacy(audio, frame_pattern)
elif self.type == "h264":
cmd = self._ffmpeg_h264(audio, frame_pattern)
elif self.type == "h264_overlay":
cmd = self._ffmpeg_h264_overlay(audio, frame_pattern)
else:
raise Exception("Unknown output type %s" % self.type)
self._run_command(cmd)
def _run_command(self, cmd):
try:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = "Failed to create final movie as %s.\n" \
"\tCommand: %s\n" \
"\tExit code: %s\n" \
"\tOutput:\n%s" % (self.output, " ".join(cmd), e.returncode, e.output)
if self.type != "legacy":
msg += "\n Please note that %s output format requires a recent version of ffmpeg and libx264." \
" Perhaps you should check your setup." \
% self.type
raise ConversionError(msg)
def _convert_slides(self, slides):
def convert(slide):
if slide.endswith("swf"):
png_slide = slide.replace(".swf", ".png")
swf2png(slide, png_slide, swfrender_path=self.swfrender)
return png_slide
elif slide.endswith("jpg"):
return slide
else:
raise Exception("Unsupported slide type: %s" % slide)
return [convert(s) for s in slides]
def _prepare_frames(self, slides):
timecodes = self.presentation.metadata['timecodes']
ext = os.path.splitext(slides[0])[1]
frame = 0
for slide_index, src in enumerate(slides):
for remaining in range(timecodes[slide_index], timecodes[slide_index+1]):
dst = os.path.join(self.tmp_dir, "frame-{0:04d}." + ext).format(frame)
try:
os.link(src, dst)
except OSError as e:
if e.errno == errno.EMLINK:
# Create a new reference file when the upper limit is reached
# (previous to Linux 3.7, btrfs had a very low limit)
shutil.copyfile(src, dst)
src = dst
else:
raise e
frame += 1
return os.path.join(self.tmp_dir, "frame-%04d." + ext)
|
cykl/infoqscraper
|
infoqscraper/convert.py
|
Converter.download_slides
|
python
|
def download_slides(self):
return self.presentation.client.download_all(self.presentation.metadata['slides'], self.tmp_dir)
|
Download all SWF slides.
The location of the slides files are returned.
A DownloadError is raised if at least one of the slides cannot be download..
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/convert.py#L147-L154
| null |
class Converter(object):
def __init__(self, presentation, output, **kwargs):
self.presentation = presentation
self.output = output
self.ffmpeg = kwargs['ffmpeg']
self.rtmpdump = kwargs['rtmpdump']
self.swfrender = kwargs['swfrender']
self.overwrite = kwargs['overwrite']
self.type = kwargs['type']
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(self.tmp_dir)
@property
def tmp_dir(self):
if not hasattr(self, "_tmp_dir"):
self._tmp_dir = tempfile.mkdtemp(prefix="infoq")
return self._tmp_dir
@property
def _audio_path(self):
return os.path.join(self.tmp_dir, "audio.ogg")
@property
def _video_path(self):
return os.path.join(self.tmp_dir, 'video.avi')
def create_presentation(self):
""" Create the presentation.
The audio track is mixed with the slides. The resulting file is saved as self.output
DownloadError is raised if some resources cannot be fetched.
ConversionError is raised if the final video cannot be created.
"""
# Avoid wasting time and bandwidth if we known that conversion will fail.
if not self.overwrite and os.path.exists(self.output):
raise ConversionError("File %s already exist and --overwrite not specified" % self.output)
video = self.download_video()
raw_slides = self.download_slides()
# ffmpeg does not support SWF
png_slides = self._convert_slides(raw_slides)
# Create one frame per second using the time code information
frame_pattern = self._prepare_frames(png_slides)
return self._assemble(video, frame_pattern)
def download_video(self):
"""Downloads the video.
If self.client.cache_enabled is True, then the disk cache is used.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
rvideo_path = self.presentation.metadata['video_path']
if self.presentation.client.cache:
video_path = self.presentation.client.cache.get_path(rvideo_path)
if not video_path:
video_path = self.download_video_no_cache()
self.presentation.client.cache.put_path(rvideo_path, video_path)
else:
video_path = self.download_video_no_cache()
return video_path
def download_video_no_cache(self):
"""Downloads the video.
Returns:
The path where the video has been saved.
Raises:
DownloadError: If the video cannot be downloaded.
"""
video_url = self.presentation.metadata['video_url']
video_path = self.presentation.metadata['video_path']
# After a while, when downloading a long video (> 1h), the RTMP server seems to reset the connection (rtmpdump
# returns exit code 2). The only way to get the full stream is to resume the download.
resume_download = True
while resume_download:
try:
cmd = [self.rtmpdump, '-q', '-e', '-r', video_url, '-y', video_path, "-o", self._video_path]
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
resume_download = False
except subprocess.CalledProcessError as e:
if e.returncode != 2:
try:
os.unlink(self._video_path)
except OSError:
pass
raise client.DownloadError("Failed to download video at %s: rtmpdump exited with %s.\n\tOutput:\n%s"
% (video_url, e.returncode, e.output))
return self._video_path
def _ffmpeg_legacy(self, audio, frame_pattern):
# Try to be compatible as much as possible with old ffmpeg releases (>= 0.7)
# - Do not use new syntax options
# - Do not use libx264, not available on old Ubuntu/Debian
# - Do not use -threads auto, not available on 0.8.*
# - Old releases are very picky regarding arguments position
# - -n is not supported on 0.8
#
# 0.5 (Debian Squeeze & Ubuntu 10.4) is not supported because of
# scaling issues with image2.
cmd = [
self.ffmpeg, "-v", "0",
"-i", audio,
"-f", "image2", "-r", "1", "-s", "hd720", "-i", frame_pattern,
"-map", "1:0", "-acodec", "libmp3lame", "-ab", "128k",
"-map", "0:1", "-vcodec", "mpeg4", "-vb", "2M", "-y", self.output
]
if not self.overwrite and os.path.exists(self.output):
# Handle already existing file manually since nor -n nor -nostdin is available on 0.8
raise Exception("File %s already exist and --overwrite not specified" % self.output)
return cmd
def _ffmpeg_h264(self, audio, frame_pattern):
return [
self.ffmpeg, "-v", "error",
"-i", audio,
"-r", "1", "-i", frame_pattern,
"-c:a", "copy",
"-c:v", "libx264", "-profile:v", "baseline", "-preset", "ultrafast", "-level", "3.0",
"-crf", "28", "-pix_fmt", "yuv420p",
"-s", "1280x720",
"-y" if self.overwrite else "-n",
self.output
]
def _ffmpeg_h264_overlay(self, video, frame_pattern):
cmd = [self.ffmpeg, "-i", video]
video_details = ""
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
video_details = e.output
fps_match = re.search(six.b('\S+(?=\s+tbr)'), video_details)
fps = float(fps_match.group(0))
timings = self.presentation.metadata['demo_timings'][:]
if len(timings) == 0 or timings[0] != 0:
slides_first = True
timings.insert(0, 0)
else:
slides_first = False
timings.append(float('inf'))
inputs = []
filter_complex = []
concat = []
for i, right_range in enumerate(timings[1:]):
left_range = timings[i]
duration = right_range - left_range
if left_range > 0:
inputs += ["-ss", str(left_range)]
if right_range != float('inf'):
inputs += ["-t", str(duration)]
inputs += ["-i", video]
if (i % 2 == 0) == slides_first:
inputs += [
"-f", "image2", "-r", "1", "-s", "hd720", "-start_number", str(left_range), "-i", frame_pattern
]
stream_id = i // 2 * 3
if not slides_first:
stream_id += 1
filter_complex += [
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=320:h=-1 [sp-{1:d}];".format(stream_id, i),
"[{0:d}:v] setpts=PTS-STARTPTS, scale=w=1280-320:h=-1[sl-{1:d}];".format(stream_id + 1, i),
"color=size=1280x720:c=Black [b-{0:d}];".format(i),
"[b-{0:d}][sl-{0:d}] overlay=shortest=1:x=0:y=0 [bsl-{0:d}];".format(i),
"[bsl-{0:d}][sp-{0:d}] overlay=shortest=1:x=main_w-320:y=main_h-overlay_h [c-{0:d}];".format(i)
]
else:
stream_id = i // 2 * 3
if slides_first:
stream_id += 2
filter_complex += [
"[{0:d}:v] scale='if(gt(a,16/9),1280,-1)':'if(gt(a,16/9),-1,720)' [c-{1:d}];".format(stream_id, i)
]
concat += ["[c-{0:d}] [{1:d}:a:0]".format(i, stream_id)]
concat += ["concat=n={0:d}:v=1:a=1 [v] [a]".format(len(timings) - 1)]
filter_script_path = os.path.join(self.tmp_dir, "filter")
with open(filter_script_path, 'w') as filter_script_file:
filter_script_file.write("\n".join(filter_complex))
filter_script_file.write("\n")
filter_script_file.write(" ".join(concat))
cmd = [self.ffmpeg, "-v", "error"]
cmd += inputs
cmd += [
"-filter_complex_script", filter_script_path,
"-map", "[v]", "-map", "[a]",
"-r", str(fps),
"-acodec", "libmp3lame", "-ab", "92k",
"-vcodec", "libx264", "-profile:v", "baseline", "-preset", "fast", "-level", "3.0", "-crf", "28",
"-y" if self.overwrite else "-n",
self.output
]
return cmd
def _assemble(self, audio, frame_pattern):
if self.type == "legacy":
cmd = self._ffmpeg_legacy(audio, frame_pattern)
elif self.type == "h264":
cmd = self._ffmpeg_h264(audio, frame_pattern)
elif self.type == "h264_overlay":
cmd = self._ffmpeg_h264_overlay(audio, frame_pattern)
else:
raise Exception("Unknown output type %s" % self.type)
self._run_command(cmd)
def _run_command(self, cmd):
try:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = "Failed to create final movie as %s.\n" \
"\tCommand: %s\n" \
"\tExit code: %s\n" \
"\tOutput:\n%s" % (self.output, " ".join(cmd), e.returncode, e.output)
if self.type != "legacy":
msg += "\n Please note that %s output format requires a recent version of ffmpeg and libx264." \
" Perhaps you should check your setup." \
% self.type
raise ConversionError(msg)
def _convert_slides(self, slides):
def convert(slide):
if slide.endswith("swf"):
png_slide = slide.replace(".swf", ".png")
swf2png(slide, png_slide, swfrender_path=self.swfrender)
return png_slide
elif slide.endswith("jpg"):
return slide
else:
raise Exception("Unsupported slide type: %s" % slide)
return [convert(s) for s in slides]
def _prepare_frames(self, slides):
timecodes = self.presentation.metadata['timecodes']
ext = os.path.splitext(slides[0])[1]
frame = 0
for slide_index, src in enumerate(slides):
for remaining in range(timecodes[slide_index], timecodes[slide_index+1]):
dst = os.path.join(self.tmp_dir, "frame-{0:04d}." + ext).format(frame)
try:
os.link(src, dst)
except OSError as e:
if e.errno == errno.EMLINK:
# Create a new reference file when the upper limit is reached
# (previous to Linux 3.7, btrfs had a very low limit)
shutil.copyfile(src, dst)
src = dst
else:
raise e
frame += 1
return os.path.join(self.tmp_dir, "frame-%04d." + ext)
|
cykl/infoqscraper
|
infoqscraper/client.py
|
InfoQ.login
|
python
|
def login(self, username, password):
url = get_url("/login.action", scheme="https")
params = {
'username': username,
'password': password,
'submit-login': '',
}
with contextlib.closing(self.opener.open(url, urllib.parse.urlencode(params))) as response:
if not "loginAction.jsp" in response.url:
raise AuthenticationError("Login failed. Unexpected redirection: %s" % response.url)
if not "resultMessage=success" in response.url:
raise AuthenticationError("Login failed.")
self.authenticated = True
|
Log in.
AuthenticationFailedException exception is raised if authentication fails.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/client.py#L62-L79
|
[
"def get_url(path, scheme=\"http\"):\n \"\"\" Return the full InfoQ URL \"\"\"\n return scheme + \"://www.infoq.com\" + path\n"
] |
class InfoQ(object):
""" InfoQ web client entry point
Attributes:
authenticated: If logged in or not
cache: None if caching is disable. A Cache object otherwise
"""
def __init__(self, cache_enabled=False):
self.authenticated = False
# InfoQ requires cookies to be logged in. Use a dedicated urllib opener
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http_cookiejar.CookieJar()))
self.cache = None
if cache_enabled:
self.enable_cache()
def enable_cache(self):
if not self.cache:
self.cache = cache.XDGCache()
def fetch(self, url):
if self.cache:
content = self.cache.get_content(url)
if not content:
content = self.fetch_no_cache(url)
self.cache.put_content(url, content)
else:
content = self.fetch_no_cache(url)
return content
def fetch_no_cache(self, url):
""" Fetch the resource specified and return its content.
DownloadError is raised if the resource cannot be fetched.
"""
try:
with contextlib.closing(self.opener.open(url)) as response:
# InfoQ does not send a 404 but a 302 redirecting to a valid URL...
if response.code != 200 or response.url == INFOQ_404_URL:
raise DownloadError("%s not found" % url)
return response.read()
except urllib.error.URLError as e:
raise DownloadError("Failed to get %s: %s" % (url, e))
def download(self, url, dir_path, filename=None):
""" Download the resources specified by url into dir_path. The resulting
file path is returned.
DownloadError is raised the resources cannot be downloaded.
"""
if not filename:
filename = url.rsplit('/', 1)[1]
path = os.path.join(dir_path, filename)
content = self.fetch(url)
with open(path, "wb") as f:
f.write(content)
return path
def download_all(self, urls, dir_path):
""" Download all the resources specified by urls into dir_path. The resulting
file paths is returned.
DownloadError is raised if at least one of the resources cannot be downloaded.
In the case already downloaded resources are erased.
"""
# TODO: Implement parallel download
filenames = []
try:
for url in urls:
filenames.append(self.download(url, dir_path))
except DownloadError as e:
for filename in filenames:
os.remove(filename)
raise e
return filenames
|
cykl/infoqscraper
|
infoqscraper/client.py
|
InfoQ.fetch_no_cache
|
python
|
def fetch_no_cache(self, url):
try:
with contextlib.closing(self.opener.open(url)) as response:
# InfoQ does not send a 404 but a 302 redirecting to a valid URL...
if response.code != 200 or response.url == INFOQ_404_URL:
raise DownloadError("%s not found" % url)
return response.read()
except urllib.error.URLError as e:
raise DownloadError("Failed to get %s: %s" % (url, e))
|
Fetch the resource specified and return its content.
DownloadError is raised if the resource cannot be fetched.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/client.py#L92-L105
| null |
class InfoQ(object):
""" InfoQ web client entry point
Attributes:
authenticated: If logged in or not
cache: None if caching is disable. A Cache object otherwise
"""
def __init__(self, cache_enabled=False):
self.authenticated = False
# InfoQ requires cookies to be logged in. Use a dedicated urllib opener
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http_cookiejar.CookieJar()))
self.cache = None
if cache_enabled:
self.enable_cache()
def enable_cache(self):
if not self.cache:
self.cache = cache.XDGCache()
def login(self, username, password):
""" Log in.
AuthenticationFailedException exception is raised if authentication fails.
"""
url = get_url("/login.action", scheme="https")
params = {
'username': username,
'password': password,
'submit-login': '',
}
with contextlib.closing(self.opener.open(url, urllib.parse.urlencode(params))) as response:
if not "loginAction.jsp" in response.url:
raise AuthenticationError("Login failed. Unexpected redirection: %s" % response.url)
if not "resultMessage=success" in response.url:
raise AuthenticationError("Login failed.")
self.authenticated = True
def fetch(self, url):
if self.cache:
content = self.cache.get_content(url)
if not content:
content = self.fetch_no_cache(url)
self.cache.put_content(url, content)
else:
content = self.fetch_no_cache(url)
return content
def download(self, url, dir_path, filename=None):
""" Download the resources specified by url into dir_path. The resulting
file path is returned.
DownloadError is raised the resources cannot be downloaded.
"""
if not filename:
filename = url.rsplit('/', 1)[1]
path = os.path.join(dir_path, filename)
content = self.fetch(url)
with open(path, "wb") as f:
f.write(content)
return path
def download_all(self, urls, dir_path):
""" Download all the resources specified by urls into dir_path. The resulting
file paths is returned.
DownloadError is raised if at least one of the resources cannot be downloaded.
In the case already downloaded resources are erased.
"""
# TODO: Implement parallel download
filenames = []
try:
for url in urls:
filenames.append(self.download(url, dir_path))
except DownloadError as e:
for filename in filenames:
os.remove(filename)
raise e
return filenames
|
cykl/infoqscraper
|
infoqscraper/client.py
|
InfoQ.download
|
python
|
def download(self, url, dir_path, filename=None):
if not filename:
filename = url.rsplit('/', 1)[1]
path = os.path.join(dir_path, filename)
content = self.fetch(url)
with open(path, "wb") as f:
f.write(content)
return path
|
Download the resources specified by url into dir_path. The resulting
file path is returned.
DownloadError is raised the resources cannot be downloaded.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/client.py#L107-L121
|
[
"def fetch(self, url):\n if self.cache:\n content = self.cache.get_content(url)\n if not content:\n content = self.fetch_no_cache(url)\n self.cache.put_content(url, content)\n else:\n content = self.fetch_no_cache(url)\n\n return content\n"
] |
class InfoQ(object):
""" InfoQ web client entry point
Attributes:
authenticated: If logged in or not
cache: None if caching is disable. A Cache object otherwise
"""
def __init__(self, cache_enabled=False):
self.authenticated = False
# InfoQ requires cookies to be logged in. Use a dedicated urllib opener
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http_cookiejar.CookieJar()))
self.cache = None
if cache_enabled:
self.enable_cache()
def enable_cache(self):
if not self.cache:
self.cache = cache.XDGCache()
def login(self, username, password):
""" Log in.
AuthenticationFailedException exception is raised if authentication fails.
"""
url = get_url("/login.action", scheme="https")
params = {
'username': username,
'password': password,
'submit-login': '',
}
with contextlib.closing(self.opener.open(url, urllib.parse.urlencode(params))) as response:
if not "loginAction.jsp" in response.url:
raise AuthenticationError("Login failed. Unexpected redirection: %s" % response.url)
if not "resultMessage=success" in response.url:
raise AuthenticationError("Login failed.")
self.authenticated = True
def fetch(self, url):
if self.cache:
content = self.cache.get_content(url)
if not content:
content = self.fetch_no_cache(url)
self.cache.put_content(url, content)
else:
content = self.fetch_no_cache(url)
return content
def fetch_no_cache(self, url):
""" Fetch the resource specified and return its content.
DownloadError is raised if the resource cannot be fetched.
"""
try:
with contextlib.closing(self.opener.open(url)) as response:
# InfoQ does not send a 404 but a 302 redirecting to a valid URL...
if response.code != 200 or response.url == INFOQ_404_URL:
raise DownloadError("%s not found" % url)
return response.read()
except urllib.error.URLError as e:
raise DownloadError("Failed to get %s: %s" % (url, e))
def download_all(self, urls, dir_path):
""" Download all the resources specified by urls into dir_path. The resulting
file paths is returned.
DownloadError is raised if at least one of the resources cannot be downloaded.
In the case already downloaded resources are erased.
"""
# TODO: Implement parallel download
filenames = []
try:
for url in urls:
filenames.append(self.download(url, dir_path))
except DownloadError as e:
for filename in filenames:
os.remove(filename)
raise e
return filenames
|
cykl/infoqscraper
|
infoqscraper/client.py
|
InfoQ.download_all
|
python
|
def download_all(self, urls, dir_path):
# TODO: Implement parallel download
filenames = []
try:
for url in urls:
filenames.append(self.download(url, dir_path))
except DownloadError as e:
for filename in filenames:
os.remove(filename)
raise e
return filenames
|
Download all the resources specified by urls into dir_path. The resulting
file paths is returned.
DownloadError is raised if at least one of the resources cannot be downloaded.
In the case already downloaded resources are erased.
|
train
|
https://github.com/cykl/infoqscraper/blob/4fc026b994f98a0a7fe8578e0c9a3a9664982b2e/infoqscraper/client.py#L123-L141
|
[
"def download(self, url, dir_path, filename=None):\n \"\"\" Download the resources specified by url into dir_path. The resulting\n file path is returned.\n\n DownloadError is raised the resources cannot be downloaded.\n \"\"\"\n if not filename:\n filename = url.rsplit('/', 1)[1]\n path = os.path.join(dir_path, filename)\n\n content = self.fetch(url)\n with open(path, \"wb\") as f:\n f.write(content)\n\n return path\n"
] |
class InfoQ(object):
""" InfoQ web client entry point
Attributes:
authenticated: If logged in or not
cache: None if caching is disable. A Cache object otherwise
"""
def __init__(self, cache_enabled=False):
self.authenticated = False
# InfoQ requires cookies to be logged in. Use a dedicated urllib opener
self.opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http_cookiejar.CookieJar()))
self.cache = None
if cache_enabled:
self.enable_cache()
def enable_cache(self):
if not self.cache:
self.cache = cache.XDGCache()
def login(self, username, password):
""" Log in.
AuthenticationFailedException exception is raised if authentication fails.
"""
url = get_url("/login.action", scheme="https")
params = {
'username': username,
'password': password,
'submit-login': '',
}
with contextlib.closing(self.opener.open(url, urllib.parse.urlencode(params))) as response:
if not "loginAction.jsp" in response.url:
raise AuthenticationError("Login failed. Unexpected redirection: %s" % response.url)
if not "resultMessage=success" in response.url:
raise AuthenticationError("Login failed.")
self.authenticated = True
def fetch(self, url):
if self.cache:
content = self.cache.get_content(url)
if not content:
content = self.fetch_no_cache(url)
self.cache.put_content(url, content)
else:
content = self.fetch_no_cache(url)
return content
def fetch_no_cache(self, url):
""" Fetch the resource specified and return its content.
DownloadError is raised if the resource cannot be fetched.
"""
try:
with contextlib.closing(self.opener.open(url)) as response:
# InfoQ does not send a 404 but a 302 redirecting to a valid URL...
if response.code != 200 or response.url == INFOQ_404_URL:
raise DownloadError("%s not found" % url)
return response.read()
except urllib.error.URLError as e:
raise DownloadError("Failed to get %s: %s" % (url, e))
def download(self, url, dir_path, filename=None):
""" Download the resources specified by url into dir_path. The resulting
file path is returned.
DownloadError is raised the resources cannot be downloaded.
"""
if not filename:
filename = url.rsplit('/', 1)[1]
path = os.path.join(dir_path, filename)
content = self.fetch(url)
with open(path, "wb") as f:
f.write(content)
return path
|
klmitch/requiem
|
requiem/request.py
|
HTTPRequest.send
|
python
|
def send(self):
# Pre-process the request
try:
self.procstack.proc_request(self)
except exc.ShortCircuit, e:
self._debug("Request pre-processing short-circuited")
# Short-circuited; we have an (already processed) response
return e.response
self._debug("Sending %r request to %r (body %r, headers %r)",
self.method, self.url, self.body, self.headers)
# Issue the request
(resp, content) = self.client.request(self.url, self.method,
self.body, self.headers,
self.max_redirects)
# Save the body in the response
resp.body = content
# Do any processing on the response that's desired
try:
self.proc_response(resp)
except:
# Process the exception
result = self.procstack.proc_exception(*sys.exc_info())
if not result:
# Not handled, re-raise it
raise
else:
# Handled and we have a fully post-processed response
return result
# Return the response, post-processing it
return self.procstack.proc_response(resp)
|
Issue the request.
Uses httplib2.Http support for handling redirects. Returns an
httplib2.Response, which may be augmented by the
proc_response() method.
Note that the default implementation of proc_response() causes
an appropriate exception to be raised if the response code is
>= 400.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/request.py#L76-L122
|
[
"def proc_response(self, resp):\n \"\"\"Process response hook.\n\n Process non-redirect responses received by the send() method.\n May augment the response. The default implementation causes\n an exception to be raised if the response status code is >=\n 400.\n \"\"\"\n\n # Raise exceptions for error responses\n if resp.status >= 400:\n e = exc.exception_map.get(resp.status, exc.HTTPException)\n self._debug(\" Response was a %d fault, raising %s\",\n resp.status, e.__name__)\n raise e(resp)\n"
] |
class HTTPRequest(object):
"""Represent and perform HTTP requests.
Implements the dictionary access protocol to modify headers
(headers can also be accessed directly at the 'headers' attribute)
and the stream protocol to build up the body. Handles
redirections under control of the class attribute 'max_redirects'.
Understands schemes supported by the specified client, which must
be compatible with the httplib2.Http object.
"""
max_redirects = 10
def __init__(self, method, url, client, procstack,
body=None, headers=None, debug=None):
"""Initialize a request.
The method and url must be specified. The body and headers
are optional, and may be manipulated after instantiating the
object.
"""
# Save the relevant data
self.method = method.upper()
self.url = url
self.client = client
self.procstack = procstack
self.body = body or ''
self.headers = hdrs.HeaderDict()
self._debug = debug or (lambda *args, **kwargs: None)
# Set up the headers...
if headers:
self.headers.update(headers)
self._debug("Initialized %r request for %r", self.method, self.url)
def write(self, data):
"""Write data to the body."""
self._debug("Adding %r to request body", data)
# Add the written data to our body
self.body += data
def flush(self):
"""Flush body stream--no-op for compatibility."""
# Do-nothing to allow stream compatibility
pass
def proc_response(self, resp):
"""Process response hook.
Process non-redirect responses received by the send() method.
May augment the response. The default implementation causes
an exception to be raised if the response status code is >=
400.
"""
# Raise exceptions for error responses
if resp.status >= 400:
e = exc.exception_map.get(resp.status, exc.HTTPException)
self._debug(" Response was a %d fault, raising %s",
resp.status, e.__name__)
raise e(resp)
def __getitem__(self, item):
"""Allow headers to be retrieved via dictionary access."""
# Headers are done by item access
return self.headers[item.title()]
def __setitem__(self, item, value):
"""Allow headers to be set via dictionary access."""
# Headers are done by item access
self.headers[item.title()] = value
def __delitem__(self, item):
"""Allow headers to be removed via dictionary access."""
# Headers are done by item access
del self.headers[item.title()]
def __contains__(self, item):
"""Allow header presence to be discovered via dictionary access."""
# Headers are done by item access
return item.title() in self.headers
def __len__(self):
"""Obtain the number of headers present on the request."""
# Headers are done by item access
return len(self.headers)
|
klmitch/requiem
|
requiem/request.py
|
HTTPRequest.proc_response
|
python
|
def proc_response(self, resp):
# Raise exceptions for error responses
if resp.status >= 400:
e = exc.exception_map.get(resp.status, exc.HTTPException)
self._debug(" Response was a %d fault, raising %s",
resp.status, e.__name__)
raise e(resp)
|
Process response hook.
Process non-redirect responses received by the send() method.
May augment the response. The default implementation causes
an exception to be raised if the response status code is >=
400.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/request.py#L124-L138
| null |
class HTTPRequest(object):
"""Represent and perform HTTP requests.
Implements the dictionary access protocol to modify headers
(headers can also be accessed directly at the 'headers' attribute)
and the stream protocol to build up the body. Handles
redirections under control of the class attribute 'max_redirects'.
Understands schemes supported by the specified client, which must
be compatible with the httplib2.Http object.
"""
max_redirects = 10
def __init__(self, method, url, client, procstack,
body=None, headers=None, debug=None):
"""Initialize a request.
The method and url must be specified. The body and headers
are optional, and may be manipulated after instantiating the
object.
"""
# Save the relevant data
self.method = method.upper()
self.url = url
self.client = client
self.procstack = procstack
self.body = body or ''
self.headers = hdrs.HeaderDict()
self._debug = debug or (lambda *args, **kwargs: None)
# Set up the headers...
if headers:
self.headers.update(headers)
self._debug("Initialized %r request for %r", self.method, self.url)
def write(self, data):
"""Write data to the body."""
self._debug("Adding %r to request body", data)
# Add the written data to our body
self.body += data
def flush(self):
"""Flush body stream--no-op for compatibility."""
# Do-nothing to allow stream compatibility
pass
def send(self):
"""Issue the request.
Uses httplib2.Http support for handling redirects. Returns an
httplib2.Response, which may be augmented by the
proc_response() method.
Note that the default implementation of proc_response() causes
an appropriate exception to be raised if the response code is
>= 400.
"""
# Pre-process the request
try:
self.procstack.proc_request(self)
except exc.ShortCircuit, e:
self._debug("Request pre-processing short-circuited")
# Short-circuited; we have an (already processed) response
return e.response
self._debug("Sending %r request to %r (body %r, headers %r)",
self.method, self.url, self.body, self.headers)
# Issue the request
(resp, content) = self.client.request(self.url, self.method,
self.body, self.headers,
self.max_redirects)
# Save the body in the response
resp.body = content
# Do any processing on the response that's desired
try:
self.proc_response(resp)
except:
# Process the exception
result = self.procstack.proc_exception(*sys.exc_info())
if not result:
# Not handled, re-raise it
raise
else:
# Handled and we have a fully post-processed response
return result
# Return the response, post-processing it
return self.procstack.proc_response(resp)
def __getitem__(self, item):
"""Allow headers to be retrieved via dictionary access."""
# Headers are done by item access
return self.headers[item.title()]
def __setitem__(self, item, value):
"""Allow headers to be set via dictionary access."""
# Headers are done by item access
self.headers[item.title()] = value
def __delitem__(self, item):
"""Allow headers to be removed via dictionary access."""
# Headers are done by item access
del self.headers[item.title()]
def __contains__(self, item):
"""Allow header presence to be discovered via dictionary access."""
# Headers are done by item access
return item.title() in self.headers
def __len__(self):
"""Obtain the number of headers present on the request."""
# Headers are done by item access
return len(self.headers)
|
klmitch/requiem
|
requiem/processor.py
|
_safe_call
|
python
|
def _safe_call(obj, methname, *args, **kwargs):
meth = getattr(obj, methname, None)
if meth is None or not callable(meth):
return
return meth(*args, **kwargs)
|
Safely calls the method with the given methname on the given
object. Remaining positional and keyword arguments are passed to
the method. The return value is None, if the method is not
available, or the return value of the method.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/processor.py#L57-L69
| null |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from requiem import exceptions as exc
__all__ = ['Processor']
class Processor(object):
"""
Class for pre-processing requests and post-processing responses
and exceptions. It is not necessary for processors to inherit
from this class.
"""
def proc_request(self, req):
"""
Pre-processes requests. The req object may be modified--in
place--in any way. May return a response object to
short-circuit other request processing, including submission.
"""
pass
def proc_response(self, resp):
"""
Post-processes responses. The resp object may be modified--in
place--in any way. Return values are ignored.
"""
pass
def proc_exception(self, exc_type, exc_value, traceback):
"""
Post-processes exceptions. May return a response to halt
exception processing.
"""
pass
class ProcessorStack(list):
"""
A list subclass for processor stacks, defining three
domain-specific methods: proc_request(), proc_response(), and
proc_exception().
"""
def proc_request(self, req):
"""
Pre-process a request through all processors in the stack, in
order. If any processor's proc_request() method returns a
value other than None, that value is treated as a response and
post-processed through the proc_response() methods of the
processors preceding that processor in the stack. (Note that
the response returned this way is not passed to the
processor's proc_response() method.) Such a response will
then be attached to a ShortCircuit exception.
For convenience, returns the request passed to the method.
"""
for idx in range(len(self)):
resp = _safe_call(self[idx], 'proc_request', req)
# Do we have a response?
if resp is not None:
# Short-circuit
raise exc.ShortCircuit(self.proc_response(resp, idx - 1))
# Return the request we were passed
return req
def proc_response(self, resp, startidx=None):
"""
Post-process a response through all processors in the stack,
in reverse order. For convenience, returns the response
passed to the method.
The startidx argument is an internal interface only used by
the proc_request() and proc_exception() methods to process a
response through a subset of response processors.
"""
# If we're empty, bail out early
if not self:
return resp
# Select appropriate starting index
if startidx is None:
startidx = len(self)
for idx in range(startidx, -1, -1):
_safe_call(self[idx], 'proc_response', resp)
# Return the response we were passed
return resp
def proc_exception(self, exc_type, exc_value, traceback):
"""
Post-process an exception through all processors in the stack,
in reverse order. The exception so post-processed is any
exception raised by the Request object's proc_response()
method; if the httplib2.Http raises an exception, that
exception will not be processed by this mechanism.
Exception processors may return a response object to preempt
exception processing. The response object will be
post-processed with the proc_response() method on the
remaining processors in the stack.
Note that, if the exception has a 'response' attribute, each
processor's proc_response() method will be called on it prior
to calling proc_exception().
The return value will be None if the exception was not
handled, or a response object returned by one of the
processors.
"""
# If we're empty, bail out early
if not self:
return
for idx in range(len(self), -1, -1):
# First, process the response...
if hasattr(exc_value, 'response'):
_safe_call(self[idx], 'proc_response', exc_value.response)
resp = _safe_call(self[idx], 'proc_exception',
exc_type, exc_value, traceback)
# If we have a response, finish processing and return it
if resp:
return self.proc_response(resp, idx - 1)
|
klmitch/requiem
|
requiem/processor.py
|
ProcessorStack.proc_request
|
python
|
def proc_request(self, req):
for idx in range(len(self)):
resp = _safe_call(self[idx], 'proc_request', req)
# Do we have a response?
if resp is not None:
# Short-circuit
raise exc.ShortCircuit(self.proc_response(resp, idx - 1))
# Return the request we were passed
return req
|
Pre-process a request through all processors in the stack, in
order. If any processor's proc_request() method returns a
value other than None, that value is treated as a response and
post-processed through the proc_response() methods of the
processors preceding that processor in the stack. (Note that
the response returned this way is not passed to the
processor's proc_response() method.) Such a response will
then be attached to a ShortCircuit exception.
For convenience, returns the request passed to the method.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/processor.py#L79-L102
|
[
"def _safe_call(obj, methname, *args, **kwargs):\n \"\"\"\n Safely calls the method with the given methname on the given\n object. Remaining positional and keyword arguments are passed to\n the method. The return value is None, if the method is not\n available, or the return value of the method.\n \"\"\"\n\n meth = getattr(obj, methname, None)\n if meth is None or not callable(meth):\n return\n\n return meth(*args, **kwargs)\n",
"def proc_response(self, resp, startidx=None):\n \"\"\"\n Post-process a response through all processors in the stack,\n in reverse order. For convenience, returns the response\n passed to the method.\n\n The startidx argument is an internal interface only used by\n the proc_request() and proc_exception() methods to process a\n response through a subset of response processors.\n \"\"\"\n\n # If we're empty, bail out early\n if not self:\n return resp\n\n # Select appropriate starting index\n if startidx is None:\n startidx = len(self)\n\n for idx in range(startidx, -1, -1):\n _safe_call(self[idx], 'proc_response', resp)\n\n # Return the response we were passed\n return resp\n"
] |
class ProcessorStack(list):
"""
A list subclass for processor stacks, defining three
domain-specific methods: proc_request(), proc_response(), and
proc_exception().
"""
def proc_response(self, resp, startidx=None):
"""
Post-process a response through all processors in the stack,
in reverse order. For convenience, returns the response
passed to the method.
The startidx argument is an internal interface only used by
the proc_request() and proc_exception() methods to process a
response through a subset of response processors.
"""
# If we're empty, bail out early
if not self:
return resp
# Select appropriate starting index
if startidx is None:
startidx = len(self)
for idx in range(startidx, -1, -1):
_safe_call(self[idx], 'proc_response', resp)
# Return the response we were passed
return resp
def proc_exception(self, exc_type, exc_value, traceback):
"""
Post-process an exception through all processors in the stack,
in reverse order. The exception so post-processed is any
exception raised by the Request object's proc_response()
method; if the httplib2.Http raises an exception, that
exception will not be processed by this mechanism.
Exception processors may return a response object to preempt
exception processing. The response object will be
post-processed with the proc_response() method on the
remaining processors in the stack.
Note that, if the exception has a 'response' attribute, each
processor's proc_response() method will be called on it prior
to calling proc_exception().
The return value will be None if the exception was not
handled, or a response object returned by one of the
processors.
"""
# If we're empty, bail out early
if not self:
return
for idx in range(len(self), -1, -1):
# First, process the response...
if hasattr(exc_value, 'response'):
_safe_call(self[idx], 'proc_response', exc_value.response)
resp = _safe_call(self[idx], 'proc_exception',
exc_type, exc_value, traceback)
# If we have a response, finish processing and return it
if resp:
return self.proc_response(resp, idx - 1)
|
klmitch/requiem
|
requiem/processor.py
|
ProcessorStack.proc_response
|
python
|
def proc_response(self, resp, startidx=None):
# If we're empty, bail out early
if not self:
return resp
# Select appropriate starting index
if startidx is None:
startidx = len(self)
for idx in range(startidx, -1, -1):
_safe_call(self[idx], 'proc_response', resp)
# Return the response we were passed
return resp
|
Post-process a response through all processors in the stack,
in reverse order. For convenience, returns the response
passed to the method.
The startidx argument is an internal interface only used by
the proc_request() and proc_exception() methods to process a
response through a subset of response processors.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/processor.py#L104-L127
|
[
"def _safe_call(obj, methname, *args, **kwargs):\n \"\"\"\n Safely calls the method with the given methname on the given\n object. Remaining positional and keyword arguments are passed to\n the method. The return value is None, if the method is not\n available, or the return value of the method.\n \"\"\"\n\n meth = getattr(obj, methname, None)\n if meth is None or not callable(meth):\n return\n\n return meth(*args, **kwargs)\n"
] |
class ProcessorStack(list):
"""
A list subclass for processor stacks, defining three
domain-specific methods: proc_request(), proc_response(), and
proc_exception().
"""
def proc_request(self, req):
"""
Pre-process a request through all processors in the stack, in
order. If any processor's proc_request() method returns a
value other than None, that value is treated as a response and
post-processed through the proc_response() methods of the
processors preceding that processor in the stack. (Note that
the response returned this way is not passed to the
processor's proc_response() method.) Such a response will
then be attached to a ShortCircuit exception.
For convenience, returns the request passed to the method.
"""
for idx in range(len(self)):
resp = _safe_call(self[idx], 'proc_request', req)
# Do we have a response?
if resp is not None:
# Short-circuit
raise exc.ShortCircuit(self.proc_response(resp, idx - 1))
# Return the request we were passed
return req
def proc_exception(self, exc_type, exc_value, traceback):
"""
Post-process an exception through all processors in the stack,
in reverse order. The exception so post-processed is any
exception raised by the Request object's proc_response()
method; if the httplib2.Http raises an exception, that
exception will not be processed by this mechanism.
Exception processors may return a response object to preempt
exception processing. The response object will be
post-processed with the proc_response() method on the
remaining processors in the stack.
Note that, if the exception has a 'response' attribute, each
processor's proc_response() method will be called on it prior
to calling proc_exception().
The return value will be None if the exception was not
handled, or a response object returned by one of the
processors.
"""
# If we're empty, bail out early
if not self:
return
for idx in range(len(self), -1, -1):
# First, process the response...
if hasattr(exc_value, 'response'):
_safe_call(self[idx], 'proc_response', exc_value.response)
resp = _safe_call(self[idx], 'proc_exception',
exc_type, exc_value, traceback)
# If we have a response, finish processing and return it
if resp:
return self.proc_response(resp, idx - 1)
|
klmitch/requiem
|
requiem/processor.py
|
ProcessorStack.proc_exception
|
python
|
def proc_exception(self, exc_type, exc_value, traceback):
# If we're empty, bail out early
if not self:
return
for idx in range(len(self), -1, -1):
# First, process the response...
if hasattr(exc_value, 'response'):
_safe_call(self[idx], 'proc_response', exc_value.response)
resp = _safe_call(self[idx], 'proc_exception',
exc_type, exc_value, traceback)
# If we have a response, finish processing and return it
if resp:
return self.proc_response(resp, idx - 1)
|
Post-process an exception through all processors in the stack,
in reverse order. The exception so post-processed is any
exception raised by the Request object's proc_response()
method; if the httplib2.Http raises an exception, that
exception will not be processed by this mechanism.
Exception processors may return a response object to preempt
exception processing. The response object will be
post-processed with the proc_response() method on the
remaining processors in the stack.
Note that, if the exception has a 'response' attribute, each
processor's proc_response() method will be called on it prior
to calling proc_exception().
The return value will be None if the exception was not
handled, or a response object returned by one of the
processors.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/processor.py#L129-L165
|
[
"def _safe_call(obj, methname, *args, **kwargs):\n \"\"\"\n Safely calls the method with the given methname on the given\n object. Remaining positional and keyword arguments are passed to\n the method. The return value is None, if the method is not\n available, or the return value of the method.\n \"\"\"\n\n meth = getattr(obj, methname, None)\n if meth is None or not callable(meth):\n return\n\n return meth(*args, **kwargs)\n",
"def proc_response(self, resp, startidx=None):\n \"\"\"\n Post-process a response through all processors in the stack,\n in reverse order. For convenience, returns the response\n passed to the method.\n\n The startidx argument is an internal interface only used by\n the proc_request() and proc_exception() methods to process a\n response through a subset of response processors.\n \"\"\"\n\n # If we're empty, bail out early\n if not self:\n return resp\n\n # Select appropriate starting index\n if startidx is None:\n startidx = len(self)\n\n for idx in range(startidx, -1, -1):\n _safe_call(self[idx], 'proc_response', resp)\n\n # Return the response we were passed\n return resp\n"
] |
class ProcessorStack(list):
"""
A list subclass for processor stacks, defining three
domain-specific methods: proc_request(), proc_response(), and
proc_exception().
"""
def proc_request(self, req):
"""
Pre-process a request through all processors in the stack, in
order. If any processor's proc_request() method returns a
value other than None, that value is treated as a response and
post-processed through the proc_response() methods of the
processors preceding that processor in the stack. (Note that
the response returned this way is not passed to the
processor's proc_response() method.) Such a response will
then be attached to a ShortCircuit exception.
For convenience, returns the request passed to the method.
"""
for idx in range(len(self)):
resp = _safe_call(self[idx], 'proc_request', req)
# Do we have a response?
if resp is not None:
# Short-circuit
raise exc.ShortCircuit(self.proc_response(resp, idx - 1))
# Return the request we were passed
return req
def proc_response(self, resp, startidx=None):
"""
Post-process a response through all processors in the stack,
in reverse order. For convenience, returns the response
passed to the method.
The startidx argument is an internal interface only used by
the proc_request() and proc_exception() methods to process a
response through a subset of response processors.
"""
# If we're empty, bail out early
if not self:
return resp
# Select appropriate starting index
if startidx is None:
startidx = len(self)
for idx in range(startidx, -1, -1):
_safe_call(self[idx], 'proc_response', resp)
# Return the response we were passed
return resp
|
klmitch/requiem
|
requiem/jsclient.py
|
JSONRequest.proc_response
|
python
|
def proc_response(self, resp):
# Try to interpret any JSON
try:
resp.obj = json.loads(resp.body)
self._debug(" Received entity: %r", resp.obj)
except ValueError:
resp.obj = None
self._debug(" No received entity; body %r", resp.body)
# Now, call superclass method for error handling
super(JSONRequest, self).proc_response(resp)
|
Process JSON data found in the response.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/jsclient.py#L56-L68
|
[
"def proc_response(self, resp):\n \"\"\"Process response hook.\n\n Process non-redirect responses received by the send() method.\n May augment the response. The default implementation causes\n an exception to be raised if the response status code is >=\n 400.\n \"\"\"\n\n # Raise exceptions for error responses\n if resp.status >= 400:\n e = exc.exception_map.get(resp.status, exc.HTTPException)\n self._debug(\" Response was a %d fault, raising %s\",\n resp.status, e.__name__)\n raise e(resp)\n"
] |
class JSONRequest(request.HTTPRequest):
"""Variant of HTTPRequest to process JSON data in responses."""
|
klmitch/requiem
|
requiem/jsclient.py
|
JSONClient._attach_obj
|
python
|
def _attach_obj(self, req, obj):
# Attach the object to the request
json.dump(obj, req)
# Also set the content-type header
req['content-type'] = self._content_type
|
Helper method to attach obj to req as JSON data.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/jsclient.py#L92-L99
| null |
class JSONClient(client.RESTClient):
"""Process JSON data in requests and responses.
Augments RESTClient to include the _attach_obj() helper method,
for attaching JSON objects to requests. Also uses JSONRequest in
preference to HTTPRequest, so that JSON data in responses is
processed.
"""
_req_class = JSONRequest
_content_type = 'application/json'
def __init__(self, baseurl, headers=None, debug=False, client=None):
"""Override RESTClient.__init__() to set an Accept header."""
# Initialize superclass
super(JSONClient, self).__init__(baseurl, headers, debug, client)
# Set the accept header
self._headers.setdefault('accept', self._content_type)
|
klmitch/requiem
|
requiem/headers.py
|
HeaderDict.fromkeys
|
python
|
def fromkeys(cls, seq, v=None):
return super(HeaderDict, cls).fromkeys(cls,
[s.title() for s in seq], v)
|
Override dict.fromkeys() to title-case keys.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/headers.py#L57-L61
| null |
class HeaderDict(dict):
"""Class for representing a dictionary where keys are header names."""
def __contains__(self, k):
"""Override dict.__contains__() to title-case keys."""
return super(HeaderDict, self).__contains__(k.title())
def __delitem__(self, k):
"""Override dict.__delitem__() to title-case keys."""
return super(HeaderDict, self).__delitem__(k.title())
def __init__(self, d=None, **kwargs):
"""Override dict.__init__() to title-case keys."""
# Initialize ourself as if we were empty...
super(HeaderDict, self).__init__()
# Use our own update method
self.update(d, **kwargs)
def __getitem__(self, k):
"""Override dict.__getitem__() to title-case keys."""
return super(HeaderDict, self).__getitem__(k.title())
def __setitem__(self, k, v):
"""Override dict.__setitem__() to title-case keys."""
return super(HeaderDict, self).__setitem__(k.title(), v)
def copy(self):
"""Override dict.copy() to return a HeaderDict instance."""
return self.__class__(self)
@classmethod
def get(self, k, d=None):
"""Override dict.get() to title-case keys."""
return super(HeaderDict, self).get(k.title(), d)
def has_key(self, k):
"""Override dict.has_key() to title-case keys."""
return super(HeaderDict, self).has_key(k.title())
def setdefault(self, k, d=None):
"""Override dict.setdefault() to title-case keys."""
return super(HeaderDict, self).setdefault(k.title(), d)
def update(self, e=None, **f):
"""Override dict.update() to title-case keys."""
# Handle e first
if e is not None:
if hasattr(e, 'keys'):
for k in e:
self[k.title()] = e[k]
else:
for (k, v) in e:
self[k.title()] = v
# Now handle f
if len(f):
for k in f:
self[k.title()] = f[k]
|
klmitch/requiem
|
requiem/headers.py
|
HeaderDict.get
|
python
|
def get(self, k, d=None):
return super(HeaderDict, self).get(k.title(), d)
|
Override dict.get() to title-case keys.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/headers.py#L63-L66
| null |
class HeaderDict(dict):
"""Class for representing a dictionary where keys are header names."""
def __contains__(self, k):
"""Override dict.__contains__() to title-case keys."""
return super(HeaderDict, self).__contains__(k.title())
def __delitem__(self, k):
"""Override dict.__delitem__() to title-case keys."""
return super(HeaderDict, self).__delitem__(k.title())
def __init__(self, d=None, **kwargs):
"""Override dict.__init__() to title-case keys."""
# Initialize ourself as if we were empty...
super(HeaderDict, self).__init__()
# Use our own update method
self.update(d, **kwargs)
def __getitem__(self, k):
"""Override dict.__getitem__() to title-case keys."""
return super(HeaderDict, self).__getitem__(k.title())
def __setitem__(self, k, v):
"""Override dict.__setitem__() to title-case keys."""
return super(HeaderDict, self).__setitem__(k.title(), v)
def copy(self):
"""Override dict.copy() to return a HeaderDict instance."""
return self.__class__(self)
@classmethod
def fromkeys(cls, seq, v=None):
"""Override dict.fromkeys() to title-case keys."""
return super(HeaderDict, cls).fromkeys(cls,
[s.title() for s in seq], v)
def has_key(self, k):
"""Override dict.has_key() to title-case keys."""
return super(HeaderDict, self).has_key(k.title())
def setdefault(self, k, d=None):
"""Override dict.setdefault() to title-case keys."""
return super(HeaderDict, self).setdefault(k.title(), d)
def update(self, e=None, **f):
"""Override dict.update() to title-case keys."""
# Handle e first
if e is not None:
if hasattr(e, 'keys'):
for k in e:
self[k.title()] = e[k]
else:
for (k, v) in e:
self[k.title()] = v
# Now handle f
if len(f):
for k in f:
self[k.title()] = f[k]
|
klmitch/requiem
|
requiem/headers.py
|
HeaderDict.setdefault
|
python
|
def setdefault(self, k, d=None):
return super(HeaderDict, self).setdefault(k.title(), d)
|
Override dict.setdefault() to title-case keys.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/headers.py#L73-L76
| null |
class HeaderDict(dict):
"""Class for representing a dictionary where keys are header names."""
def __contains__(self, k):
"""Override dict.__contains__() to title-case keys."""
return super(HeaderDict, self).__contains__(k.title())
def __delitem__(self, k):
"""Override dict.__delitem__() to title-case keys."""
return super(HeaderDict, self).__delitem__(k.title())
def __init__(self, d=None, **kwargs):
"""Override dict.__init__() to title-case keys."""
# Initialize ourself as if we were empty...
super(HeaderDict, self).__init__()
# Use our own update method
self.update(d, **kwargs)
def __getitem__(self, k):
"""Override dict.__getitem__() to title-case keys."""
return super(HeaderDict, self).__getitem__(k.title())
def __setitem__(self, k, v):
"""Override dict.__setitem__() to title-case keys."""
return super(HeaderDict, self).__setitem__(k.title(), v)
def copy(self):
"""Override dict.copy() to return a HeaderDict instance."""
return self.__class__(self)
@classmethod
def fromkeys(cls, seq, v=None):
"""Override dict.fromkeys() to title-case keys."""
return super(HeaderDict, cls).fromkeys(cls,
[s.title() for s in seq], v)
def get(self, k, d=None):
"""Override dict.get() to title-case keys."""
return super(HeaderDict, self).get(k.title(), d)
def has_key(self, k):
"""Override dict.has_key() to title-case keys."""
return super(HeaderDict, self).has_key(k.title())
def update(self, e=None, **f):
"""Override dict.update() to title-case keys."""
# Handle e first
if e is not None:
if hasattr(e, 'keys'):
for k in e:
self[k.title()] = e[k]
else:
for (k, v) in e:
self[k.title()] = v
# Now handle f
if len(f):
for k in f:
self[k.title()] = f[k]
|
klmitch/requiem
|
requiem/headers.py
|
HeaderDict.update
|
python
|
def update(self, e=None, **f):
# Handle e first
if e is not None:
if hasattr(e, 'keys'):
for k in e:
self[k.title()] = e[k]
else:
for (k, v) in e:
self[k.title()] = v
# Now handle f
if len(f):
for k in f:
self[k.title()] = f[k]
|
Override dict.update() to title-case keys.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/headers.py#L78-L93
| null |
class HeaderDict(dict):
"""Class for representing a dictionary where keys are header names."""
def __contains__(self, k):
"""Override dict.__contains__() to title-case keys."""
return super(HeaderDict, self).__contains__(k.title())
def __delitem__(self, k):
"""Override dict.__delitem__() to title-case keys."""
return super(HeaderDict, self).__delitem__(k.title())
def __init__(self, d=None, **kwargs):
"""Override dict.__init__() to title-case keys."""
# Initialize ourself as if we were empty...
super(HeaderDict, self).__init__()
# Use our own update method
self.update(d, **kwargs)
def __getitem__(self, k):
"""Override dict.__getitem__() to title-case keys."""
return super(HeaderDict, self).__getitem__(k.title())
def __setitem__(self, k, v):
"""Override dict.__setitem__() to title-case keys."""
return super(HeaderDict, self).__setitem__(k.title(), v)
def copy(self):
"""Override dict.copy() to return a HeaderDict instance."""
return self.__class__(self)
@classmethod
def fromkeys(cls, seq, v=None):
"""Override dict.fromkeys() to title-case keys."""
return super(HeaderDict, cls).fromkeys(cls,
[s.title() for s in seq], v)
def get(self, k, d=None):
"""Override dict.get() to title-case keys."""
return super(HeaderDict, self).get(k.title(), d)
def has_key(self, k):
"""Override dict.has_key() to title-case keys."""
return super(HeaderDict, self).has_key(k.title())
def setdefault(self, k, d=None):
"""Override dict.setdefault() to title-case keys."""
return super(HeaderDict, self).setdefault(k.title(), d)
|
klmitch/requiem
|
requiem/decorators.py
|
_getcallargs
|
python
|
def _getcallargs(func, positional, named):
args, varargs, varkw, defaults = inspect.getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter
# unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg, subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg, str):
return arg in arg2value
return arg in assigned_tuple_params
# Inject a place-holder for the request and get the self and the
# req_name
positional = positional[:1] + (None,) + positional[1:]
theSelf = positional[0]
req_name = args[1]
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
# Start with our positional parameters...
for arg, value in zip(args, positional):
assign(arg, value)
# Deal with the variable argument list...
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
# Exclusion rules on keyword arguments
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
# Fill in any missing values with the defaults
if defaults:
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
# Handle the **names
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
# Anything left over?
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
# Return the mapping and the name of the request argument
return arg2value, theSelf, req_name
|
Get the mapping of arguments to values.
Generates a dict, with keys being the function argument names
(including the names of the * and ** arguments, if any), and
values the respective bound values from 'positional' and 'named'.
A parameter for the request is injected. Returns a tuple of the
dict, the object the method is being called on, and the name of
the injected request argument.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/decorators.py#L40-L147
|
[
"def assign(arg, value):\n if isinstance(arg, str):\n arg2value[arg] = value\n else:\n assigned_tuple_params.append(arg)\n value = iter(value)\n for i, subarg in enumerate(arg):\n try:\n subvalue = next(value)\n except StopIteration:\n raise ValueError('need more than %d %s to unpack' %\n (i, 'values' if i > 1 else 'value'))\n assign(subarg, subvalue)\n try:\n next(value)\n except StopIteration:\n pass\n else:\n raise ValueError('too many values to unpack')\n",
"def is_assigned(arg):\n if isinstance(arg, str):\n return arg in arg2value\n return arg in assigned_tuple_params\n"
] |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import urllib
import urlparse
from requiem import headers as hdrs
__all__ = ['restmethod']
# Custom version of inspect.getcallargs(). We need this because:
#
# 1. inspect.getcallargs() does not exist prior to Python 2.7.
#
# 2. We need to inject an argument for request objects, and make it
# accessible to our caller.
#
# 3. We need to additionally return the object the method is acting
# on. (This function only works on methods.)
#
# Note that this implementation is largely copied straight from Python
# 2.7 inspect.py, with the addition of a few comments and the changes
# in behavior noted above.
def _urljoin(left, right):
"""Join two URLs.
Takes URLs specified by left and right and joins them into a
single URL. If right is an absolute URL, it is returned directly.
This differs from urlparse.urljoin() in that the latter always
chops off the left-most component of left unless it is trailed by
'/', which is not the behavior we want.
"""
# Handle the tricky case of right being a full URL
tmp = urlparse.urlparse(right)
if tmp.scheme or tmp.netloc:
# Go ahead and use urlparse.urljoin()
return urlparse.urljoin(left, right)
# Check for slashes
joincond = (left[-1:], right[:1])
if joincond == ('/', '/'):
# Too many, preserve only one
return left + right[1:]
elif '/' in joincond:
# Just one; great!
return left + right
else:
# Not enough; add one
return left + '/' + right
def restmethod(method, reluri, *qargs, **headers):
"""Decorate a method to inject an HTTPRequest.
Generates an HTTPRequest using the given HTTP method and relative
URI. If additional positional arguments are present, they are
expected to be strings that name function arguments that should be
included as the query parameters of the URL. If additional
keyword arguments are present, the keywords are expected to name
function arguments and the values are expected to name headers to
set from those values. The request is injected as the first
function argument after the 'self' argument.
Note that two attributes must exist on the object the method is
called on: the '_baseurl' attribute specifies the URL that reluri
is relative to; and the '_make_req' attribute specifies a method
that instantiates an HTTPRequest from a method and full url (which
will include query arguments).
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Process the arguments against the original function
argmap, theSelf, req_name = _getcallargs(func, args, kwargs)
# Build the URL
url = _urljoin(theSelf._baseurl, reluri.format(**argmap))
# Build the query string, as needed
if qargs:
query = dict([(k, argmap[k]) for k in qargs
if argmap[k] is not None])
if query:
url += '?%s' % urllib.urlencode(query)
# Build the headers, if needed
hlist = None
if headers:
hlist = hdrs.HeaderDict()
for aname, hname in headers.items():
if argmap[aname]:
hlist[hname] = argmap[aname]
if not hlist:
# If there are no headers, don't send any
hlist = None
# Now, build the request and pass it to the method
argmap[req_name] = theSelf._make_req(method, url,
func.__name__, hlist)
# Call the method
return func(**argmap)
# Return the function wrapper
return wrapper
# Return the actual decorator
return decorator
|
klmitch/requiem
|
requiem/decorators.py
|
_urljoin
|
python
|
def _urljoin(left, right):
# Handle the tricky case of right being a full URL
tmp = urlparse.urlparse(right)
if tmp.scheme or tmp.netloc:
# Go ahead and use urlparse.urljoin()
return urlparse.urljoin(left, right)
# Check for slashes
joincond = (left[-1:], right[:1])
if joincond == ('/', '/'):
# Too many, preserve only one
return left + right[1:]
elif '/' in joincond:
# Just one; great!
return left + right
else:
# Not enough; add one
return left + '/' + right
|
Join two URLs.
Takes URLs specified by left and right and joins them into a
single URL. If right is an absolute URL, it is returned directly.
This differs from urlparse.urljoin() in that the latter always
chops off the left-most component of left unless it is trailed by
'/', which is not the behavior we want.
|
train
|
https://github.com/klmitch/requiem/blob/0b3b5252e1b3487af732a8666b3bdc2e7035fef5/requiem/decorators.py#L150-L176
| null |
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import urllib
import urlparse
from requiem import headers as hdrs
__all__ = ['restmethod']
# Custom version of inspect.getcallargs(). We need this because:
#
# 1. inspect.getcallargs() does not exist prior to Python 2.7.
#
# 2. We need to inject an argument for request objects, and make it
# accessible to our caller.
#
# 3. We need to additionally return the object the method is acting
# on. (This function only works on methods.)
#
# Note that this implementation is largely copied straight from Python
# 2.7 inspect.py, with the addition of a few comments and the changes
# in behavior noted above.
def _getcallargs(func, positional, named):
"""Get the mapping of arguments to values.
Generates a dict, with keys being the function argument names
(including the names of the * and ** arguments, if any), and
values the respective bound values from 'positional' and 'named'.
A parameter for the request is injected. Returns a tuple of the
dict, the object the method is being called on, and the name of
the injected request argument.
"""
args, varargs, varkw, defaults = inspect.getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter
# unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg, subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg, str):
return arg in arg2value
return arg in assigned_tuple_params
# Inject a place-holder for the request and get the self and the
# req_name
positional = positional[:1] + (None,) + positional[1:]
theSelf = positional[0]
req_name = args[1]
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
# Start with our positional parameters...
for arg, value in zip(args, positional):
assign(arg, value)
# Deal with the variable argument list...
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
# Exclusion rules on keyword arguments
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
# Fill in any missing values with the defaults
if defaults:
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
# Handle the **names
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
# Anything left over?
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
# Return the mapping and the name of the request argument
return arg2value, theSelf, req_name
def restmethod(method, reluri, *qargs, **headers):
"""Decorate a method to inject an HTTPRequest.
Generates an HTTPRequest using the given HTTP method and relative
URI. If additional positional arguments are present, they are
expected to be strings that name function arguments that should be
included as the query parameters of the URL. If additional
keyword arguments are present, the keywords are expected to name
function arguments and the values are expected to name headers to
set from those values. The request is injected as the first
function argument after the 'self' argument.
Note that two attributes must exist on the object the method is
called on: the '_baseurl' attribute specifies the URL that reluri
is relative to; and the '_make_req' attribute specifies a method
that instantiates an HTTPRequest from a method and full url (which
will include query arguments).
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Process the arguments against the original function
argmap, theSelf, req_name = _getcallargs(func, args, kwargs)
# Build the URL
url = _urljoin(theSelf._baseurl, reluri.format(**argmap))
# Build the query string, as needed
if qargs:
query = dict([(k, argmap[k]) for k in qargs
if argmap[k] is not None])
if query:
url += '?%s' % urllib.urlencode(query)
# Build the headers, if needed
hlist = None
if headers:
hlist = hdrs.HeaderDict()
for aname, hname in headers.items():
if argmap[aname]:
hlist[hname] = argmap[aname]
if not hlist:
# If there are no headers, don't send any
hlist = None
# Now, build the request and pass it to the method
argmap[req_name] = theSelf._make_req(method, url,
func.__name__, hlist)
# Call the method
return func(**argmap)
# Return the function wrapper
return wrapper
# Return the actual decorator
return decorator
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.