repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
andy29485/embypy | embypy/utils/connector.py | Connector.post | python | async def post(self, path, data={}, send_raw=False, **params):
'''sends post request
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
Returns
-------
requests.models.Response
the response that was given
'''
url = self.get_url(path, **params)
jstr = json.dumps(data)
for i in range(self.tries+1):
try:
if send_raw:
resp = await self.session.post(url, data=data, timeout=self.timeout)
else:
resp = await self.session.post(url, data=jstr, timeout=self.timeout)
if await self._process_resp(resp):
return resp
else:
continue
except aiohttp.ClientConnectionError:
if i >= self.tries:
raise aiohttp.ClientConnectionError(
'Emby server is probably down'
) | sends post request
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
Returns
-------
requests.models.Response
the response that was given | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/utils/connector.py#L339-L375 | [
"def get_url(self, path='/', websocket=False, remote=True,\n attach_api_key=True, userId=None, pass_uid=False, **query):\n '''construct a url for an emby request\n\n Parameters\n ----------\n path : str\n uri path(excluding domain and port) of get request for emby\n websocket : bool, optional\n ... | class Connector:
'''Class responsible for comunication with emby
Parameters
----------
url : str
url to connect to
api-key : str
api key generated by emby, used for authentication
token : str
similar to api key, but is meant for user logins
address-remote : str, optional
alt url to connect to, pulic facing (see notes)
ssl : str, optional
path to ssl certificate - for self signed certs
userid : str, optional
emby id of the user you wish to connect as
username : str, optional
username for login (see notes)
password : str, optional
password for login (see notes)
device_id : str
device id as registered in emby
timeout : int
number of seconds to wait before timeout for a request
tries : int
number of times to try a request before throwing an error
loop : asyncio.AbstractEventLoop
if given all calls should be awaitable
Notes
-----
This class/object should NOT be used (except internally).
Tf a address-remote url is given, then that will be used for output,
such as the `embypy.objects.EmbyObject.url` atribute.
`url` will always be used when making requests - thus I recomend using
the local address for `url` and the remote address
for `address-remote`
username/password authentication is not supported as of yet
'''
def __init__(self, url, **kargs):
if ('api_key' not in kargs or 'device_id' not in kargs) and \
('username' not in kargs or 'password' not in kargs):
raise ValueError('provide api key and device id or username/password')
urlremote = kargs.get('address-remote')
self.ssl = kargs.get('ssl', True)
self.userid = kargs.get('userid')
self.token = kargs.get('token')
self.api_key = kargs.get('api_key', self.token)
self.username = kargs.get('username')
self.password = kargs.get('password')
self.device_id = kargs.get('device_id', 'EmbyPy')
self.timeout = kargs.get('timeout', 30)
self.tries = kargs.get('tries', 3)
self.loop = kargs.get('loop', asyncio.get_event_loop())
self.url = urlparse(url)
self.urlremote = urlparse(urlremote) if urlremote else urlremote
if self.ssl and type(self.ssl) == str:
self.ssl = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self.ssl.load_verify_locations(cafile=self.ssl)
conn = aiohttp.TCPConnector(ssl_context=self.ssl)
self.session = aiohttp.ClientSession(
headers={
'Authorization':
'MediaBrowser Client="{0}",Device="{0}",DeviceId="{1}",Version="{2}"'
.format('EmbyPy', self.device_id, __version__)
},
connector=conn
)
#connect to websocket is user wants to
if 'ws' in kargs:
self.ws = WebSocket(self, self.get_url(websocket=True), self.ssl)
else:
self.ws = None
# authenticate to emby if password was given
if self.password and self.username and not self.token:
self.login_sync()
def __del__(self):
try:
Connector.sync_run(self.session.close())
except:
self.loop.run_until_complete(self.session.close())
@staticmethod
def sync_run(f):
if asyncio.iscoroutinefunction(f):
f = f()
if asyncio.iscoroutine(f):
return asyncio.get_event_loop().run_until_complete(f)
elif callable(f):
return f()
else:
return f
def get_sync(self, *args, **kargs):
return self.sync_run(self.get(*args, **kargs))
def delete_sync(self, *args, **kargs):
return self.sync_run(self.delete(*args, **kargs))
def post_sync(self, *args, **kargs):
return self.sync_run(self.post(*args, **kargs))
def getJson_sync(self, *args, **kargs):
return self.sync_run(self.getJson(*args, **kargs))
def login_sync(self):
return self.sync_run(self.login())
async def login(self):
data = await self.post('/Users/AuthenticateByName',
data={
'username':self.username,
'pw':self.password,
},
send_raw=True,
format='json',
)
data = await data.json()
self.token = data.get('AccessToken', '')
self.userid = data.get('User', {}).get('Id')
self.api_key = self.token
self.session._default_headers.update(
{'X-MediaBrowser-Token': self.token}
)
def get_url(self, path='/', websocket=False, remote=True,
attach_api_key=True, userId=None, pass_uid=False, **query):
'''construct a url for an emby request
Parameters
----------
path : str
uri path(excluding domain and port) of get request for emby
websocket : bool, optional
if true, then `ws(s)` are used instead of `http(s)`
remote : bool, optional
if true, remote-address is used (default True)
attach_api_key : bool, optional
if true, apikey is added to the query (default True)
userId : str, optional
uid to use, if none, default is used
pass_uid : bool, optional
if true, uid is added to the query (default False)
query : karg dict
additional parameters to set (part of url after the `?`)
Also See
--------
get :
getJson :
post :
delete :
Returns
-------
full url
'''
userId = userId or self.userid
if attach_api_key and self.api_key:
query.update({'api_key':self.api_key, 'deviceId': self.device_id})
if pass_uid:
query['userId'] = userId
if remote:
url = self.urlremote or self.url
else:
url = self.url
if websocket:
scheme = {'http':'ws', 'https':'wss'}[url.scheme]
else:
scheme = url.scheme
netloc = url.netloc + '/emby'
url = urlunparse((scheme, netloc, path, '', '{params}', '')).format(
UserId = userId,
ApiKey = self.api_key,
DeviceId = self.device_id,
params = urlencode(query)
)
return url[:-1] if url[-1] == '?' else url
async def _process_resp(self, resp):
if (not resp or resp.status == 401) and self.username:
await self.login()
await resp.close()
return False
return True
def add_on_message(self, func):
'''add function that handles websocket messages'''
return self.ws.on_message.append(func)
async def get(self, path, **query):
'''return a get request
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
getJson :
Returns
-------
requests.models.Response
the response that was given
'''
url = self.get_url(path, **query)
for i in range(self.tries+1):
try:
resp = await self.session.get(url, timeout=self.timeout)
if await self._process_resp(resp):
return resp
else:
continue
except aiohttp.ClientConnectionError:
if i >= self.tries:
raise aiohttp.ClientConnectionError(
'Emby server is probably down'
)
async def delete(self, path, **query):
'''send a delete request
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
Returns
-------
requests.models.Response
the response that was given
'''
url = self.get_url(path, **query)
for i in range(self.tries+1):
try:
resp = await self.session.delete(url, timeout=self.timeout)
if await self._process_resp(resp):
return resp
else:
continue
except aiohttp.ClientConnectionError:
if i >= self.tries:
raise aiohttp.ClientConnectionError(
'Emby server is probably down'
)
async def getJson(self, path, **query):
'''wrapper for get, parses response as json
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
get :
Returns
-------
dict
the response content as a dict
'''
for i in range(self.tries+1):
try:
return await (await self.get(path, **query)).json()
except:
if i >= self.tries:
raise
|
andy29485/embypy | embypy/utils/connector.py | Connector.getJson | python | async def getJson(self, path, **query):
'''wrapper for get, parses response as json
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
get :
Returns
-------
dict
the response content as a dict
'''
for i in range(self.tries+1):
try:
return await (await self.get(path, **query)).json()
except:
if i >= self.tries:
raise | wrapper for get, parses response as json
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
get :
Returns
-------
dict
the response content as a dict | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/utils/connector.py#L378-L403 | [
"async def get(self, path, **query):\n '''return a get request\n\n Parameters\n ----------\n path : str\n same as get_url\n query : kargs dict\n additional info to pass to get_url\n\n See Also\n --------\n get_url :\n getJson :\n\n Returns\n -------\n requests.models.Response\n the response... | class Connector:
'''Class responsible for comunication with emby
Parameters
----------
url : str
url to connect to
api-key : str
api key generated by emby, used for authentication
token : str
similar to api key, but is meant for user logins
address-remote : str, optional
alt url to connect to, pulic facing (see notes)
ssl : str, optional
path to ssl certificate - for self signed certs
userid : str, optional
emby id of the user you wish to connect as
username : str, optional
username for login (see notes)
password : str, optional
password for login (see notes)
device_id : str
device id as registered in emby
timeout : int
number of seconds to wait before timeout for a request
tries : int
number of times to try a request before throwing an error
loop : asyncio.AbstractEventLoop
if given all calls should be awaitable
Notes
-----
This class/object should NOT be used (except internally).
Tf a address-remote url is given, then that will be used for output,
such as the `embypy.objects.EmbyObject.url` atribute.
`url` will always be used when making requests - thus I recomend using
the local address for `url` and the remote address
for `address-remote`
username/password authentication is not supported as of yet
'''
def __init__(self, url, **kargs):
if ('api_key' not in kargs or 'device_id' not in kargs) and \
('username' not in kargs or 'password' not in kargs):
raise ValueError('provide api key and device id or username/password')
urlremote = kargs.get('address-remote')
self.ssl = kargs.get('ssl', True)
self.userid = kargs.get('userid')
self.token = kargs.get('token')
self.api_key = kargs.get('api_key', self.token)
self.username = kargs.get('username')
self.password = kargs.get('password')
self.device_id = kargs.get('device_id', 'EmbyPy')
self.timeout = kargs.get('timeout', 30)
self.tries = kargs.get('tries', 3)
self.loop = kargs.get('loop', asyncio.get_event_loop())
self.url = urlparse(url)
self.urlremote = urlparse(urlremote) if urlremote else urlremote
if self.ssl and type(self.ssl) == str:
self.ssl = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self.ssl.load_verify_locations(cafile=self.ssl)
conn = aiohttp.TCPConnector(ssl_context=self.ssl)
self.session = aiohttp.ClientSession(
headers={
'Authorization':
'MediaBrowser Client="{0}",Device="{0}",DeviceId="{1}",Version="{2}"'
.format('EmbyPy', self.device_id, __version__)
},
connector=conn
)
#connect to websocket is user wants to
if 'ws' in kargs:
self.ws = WebSocket(self, self.get_url(websocket=True), self.ssl)
else:
self.ws = None
# authenticate to emby if password was given
if self.password and self.username and not self.token:
self.login_sync()
def __del__(self):
try:
Connector.sync_run(self.session.close())
except:
self.loop.run_until_complete(self.session.close())
@staticmethod
def sync_run(f):
if asyncio.iscoroutinefunction(f):
f = f()
if asyncio.iscoroutine(f):
return asyncio.get_event_loop().run_until_complete(f)
elif callable(f):
return f()
else:
return f
def get_sync(self, *args, **kargs):
return self.sync_run(self.get(*args, **kargs))
def delete_sync(self, *args, **kargs):
return self.sync_run(self.delete(*args, **kargs))
def post_sync(self, *args, **kargs):
return self.sync_run(self.post(*args, **kargs))
def getJson_sync(self, *args, **kargs):
return self.sync_run(self.getJson(*args, **kargs))
def login_sync(self):
return self.sync_run(self.login())
async def login(self):
data = await self.post('/Users/AuthenticateByName',
data={
'username':self.username,
'pw':self.password,
},
send_raw=True,
format='json',
)
data = await data.json()
self.token = data.get('AccessToken', '')
self.userid = data.get('User', {}).get('Id')
self.api_key = self.token
self.session._default_headers.update(
{'X-MediaBrowser-Token': self.token}
)
def get_url(self, path='/', websocket=False, remote=True,
attach_api_key=True, userId=None, pass_uid=False, **query):
'''construct a url for an emby request
Parameters
----------
path : str
uri path(excluding domain and port) of get request for emby
websocket : bool, optional
if true, then `ws(s)` are used instead of `http(s)`
remote : bool, optional
if true, remote-address is used (default True)
attach_api_key : bool, optional
if true, apikey is added to the query (default True)
userId : str, optional
uid to use, if none, default is used
pass_uid : bool, optional
if true, uid is added to the query (default False)
query : karg dict
additional parameters to set (part of url after the `?`)
Also See
--------
get :
getJson :
post :
delete :
Returns
-------
full url
'''
userId = userId or self.userid
if attach_api_key and self.api_key:
query.update({'api_key':self.api_key, 'deviceId': self.device_id})
if pass_uid:
query['userId'] = userId
if remote:
url = self.urlremote or self.url
else:
url = self.url
if websocket:
scheme = {'http':'ws', 'https':'wss'}[url.scheme]
else:
scheme = url.scheme
netloc = url.netloc + '/emby'
url = urlunparse((scheme, netloc, path, '', '{params}', '')).format(
UserId = userId,
ApiKey = self.api_key,
DeviceId = self.device_id,
params = urlencode(query)
)
return url[:-1] if url[-1] == '?' else url
async def _process_resp(self, resp):
if (not resp or resp.status == 401) and self.username:
await self.login()
await resp.close()
return False
return True
def add_on_message(self, func):
'''add function that handles websocket messages'''
return self.ws.on_message.append(func)
async def get(self, path, **query):
'''return a get request
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
getJson :
Returns
-------
requests.models.Response
the response that was given
'''
url = self.get_url(path, **query)
for i in range(self.tries+1):
try:
resp = await self.session.get(url, timeout=self.timeout)
if await self._process_resp(resp):
return resp
else:
continue
except aiohttp.ClientConnectionError:
if i >= self.tries:
raise aiohttp.ClientConnectionError(
'Emby server is probably down'
)
async def delete(self, path, **query):
'''send a delete request
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
Returns
-------
requests.models.Response
the response that was given
'''
url = self.get_url(path, **query)
for i in range(self.tries+1):
try:
resp = await self.session.delete(url, timeout=self.timeout)
if await self._process_resp(resp):
return resp
else:
continue
except aiohttp.ClientConnectionError:
if i >= self.tries:
raise aiohttp.ClientConnectionError(
'Emby server is probably down'
)
async def post(self, path, data={}, send_raw=False, **params):
'''sends post request
Parameters
----------
path : str
same as get_url
query : kargs dict
additional info to pass to get_url
See Also
--------
get_url :
Returns
-------
requests.models.Response
the response that was given
'''
url = self.get_url(path, **params)
jstr = json.dumps(data)
for i in range(self.tries+1):
try:
if send_raw:
resp = await self.session.post(url, data=data, timeout=self.timeout)
else:
resp = await self.session.post(url, data=jstr, timeout=self.timeout)
if await self._process_resp(resp):
return resp
else:
continue
except aiohttp.ClientConnectionError:
if i >= self.tries:
raise aiohttp.ClientConnectionError(
'Emby server is probably down'
)
|
andy29485/embypy | embypy/emby.py | Emby.info | python | async def info(self, obj_id=None):
'''Get info about object id
|coro|
Parameters
----------
obj_id : str, list
if not provided, server info is retured(as a dict).
Otherwise, an object with that id is returned
(or objects if `obj_id` is a list).
'''
if obj_id:
try:
return await self.process(obj_id)
except JSONDecodeError:
raise LookupError('Error object with that id does not exist', obj_id)
else:
return await self.connector.getJson('/system/info/public', remote=False) | Get info about object id
|coro|
Parameters
----------
obj_id : str, list
if not provided, server info is retured(as a dict).
Otherwise, an object with that id is returned
(or objects if `obj_id` is a list). | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/emby.py#L39-L57 | [
"async def process(self, object_dict):\n '''[for internal use] convert json/dict into python object\n\n |coro|\n\n Parameters\n ----------\n object_dict : dict\n json representation of object from emby\n\n Notes\n -----\n if a string is given, it is assumed to be an id, obj is returned.\n if a list is g... | class Emby(objects.EmbyObject):
'''Emby connection class, an object of this type should be created
to communicate with emby
Parameters
----------
url : str
url to the server (e.g. http://127.0.0.1:8096/)
api_key : str, optional
key obtained from server dashboard
device_id : str, optional
device id to pass to emby
username : str, optional
username to login, this+password can be used instead of an apikey
password : str, optional
password for user to login as
Attributes
----------
connector : embypy.utils.connector.Connector
Object used to make api requests, do not use
'''
def __init__(self, url, **kargs):
connector = Connector(url, **kargs)
super().__init__({'ItemId':'', 'Name':''}, connector)
def info_sync(self, obj_id=None):
return self.connector.sync_run(self.info(obj_id))
def search_sync(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
return self.connector.sync_run(self.search(query, sort_map, strict_sort))
async def search(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
'''Sends a search request to emby, returns results
|coro|
Parameters
----------
query : str
the search string to send to emby
sort_map : dict
is a dict of strings to ints. Strings should be item types, and
the ints are the priority of those types(for sorting).
lower valued(0) will appear first.
strict_sort : bool
if True, then only item types in the keys of sortmap will be
included in the results
Returns
-------
list
list of emby objects
'''
search_params = {
'remote' : False,
'searchTerm' : query
}
if strict_sort:
search_params['IncludeItemTypes'] = ','.join(sort_map.keys())
json = await self.connector.getJson('/Search/Hints/', **search_params)
items = await self.process(json["SearchHints"])
m_size = len(sort_map)
items = sorted(items, key = lambda x : sort_map.get(x.type, m_size))
return items
def latest_sync(self, userId=None, itemTypes='', groupItems=False):
return self.connector.sync_run(self.latest(userId, itemTypes, groupItems))
async def latest(self, userId=None, itemTypes='', groupItems=False):
'''returns list of latest items
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
itemTypes: str
if provided, then the list will only include items
if that type - gets passed to the emby api
see https://github.com/MediaBrowser/Emby/wiki/Item-Types
Returns
-------
list
the itmes that will appear as latest (for user if id was given)
'''
json = await self.connector.getJson('/Users/{UserId}/Items/Latest',
remote=False,
userId=userId,
IncludeItemTypes=itemTypes,
GroupItems=groupItems
)
return await self.process(json)
def nextUp_sync(self, userId=None):
return self.connector.sync_run(self.nextUp(userId))
async def nextUp(self, userId=None):
'''returns list of items marked as `next up`
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
Returns
-------
list
the itmes that will appear as next up
(for user if id was given)
'''
json = await self.connector.getJson('/Shows/NextUp',
pass_uid=True,
remote=False,
userId=userId
)
return await self.process(json)
def update_sync(self):
self.connector.sync_run(self.update())
async def update(self):
'''
reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recomended to use the `*_force` properties, which will
only update the cache after data is retrived.
'''
keys = self.extras.keys()
self.extras = {}
for key in keys:
try:
func = getattr(self, key, None)
if callable(func):
func()
except:
pass
def create_playlist_sync(self, name, *songs):
return self.connector.sync_run(self.create_playlist(name, *songs))
async def create_playlist(self, name, *songs):
'''create a new playlist
|coro|
Parameters
----------
name : str
name of new playlist
songs : array_like
list of song ids to add to playlist
'''
data = {'Name': name}
ids = [i.id for i in (await self.process(songs))]
if ids:
data['Ids'] = ','.join(ids)
# TODO - return playlist not status
return await self.connector.post('/Playlists',
data=data,
pass_uid=True,
remote=False
)
@property
def albums_sync(self):
return self.connector.sync_run(self.albums)
@property
async def albums(self):
'''returns list of all albums.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Album`
'''
return self.extras.get('albums') or \
await self.albums_force
@property
def albums_force_sync(self):
return self.connector.sync_run(self.albums_force)
@property
async def albums_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicAlbum',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['albums'] = items
return items
@property
def songs_sync(self):
return self.connector.sync_run(self.songs)
@property
async def songs(self):
'''returns list of all songs.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio`
'''
return self.extras.get('songs') or await self.songs_force
@property
def songs_force_sync(self):
return self.connector.sync_run(self.songs_force)
@property
async def songs_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Audio',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['songs'] = items
return items
@property
def playlists_sync(self):
return self.connector.sync_run(self.playlists)
@property
async def playlists(self):
'''returns list of all playlists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Playlist`
'''
return self.extras.get('playlists') or \
await self.playlists_force
@property
def playlists_force_sync(self):
return self.connector.sync_run(self.playlists_force)
@property
async def playlists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Playlist',
Fields = 'Path,ParentId,Overview',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['playlists'] = items
return items
@property
def artists_sync(self):
return self.connector.sync_run(self.artists)
@property
async def artists(self):
'''returns list of all song artists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Artist`
'''
return self.extras.get('artists', []) or \
await self.artists_force
@property
def artists_force_sync(self):
return self.connector.sync_run(self.artists_force)
@property
async def artists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicArtist',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['artists'] = items
return items
@property
def movies_sync(self):
return self.connector.sync_run(self.movies)
@property
async def movies(self):
'''returns list of all movies.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie`
'''
return self.extras.get('movies', []) or \
await self.movies_force
@property
def movies_force_sync(self):
return self.connector.sync_run(self.movies_force)
@property
async def movies_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Movie',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['movies'] = items
return items
@property
def series_sync(self):
return self.connector.sync_run(self.series)
@property
async def series(self):
'''returns a list of all series in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series`
'''
return self.extras.get('series', []) or \
await self.series_force
@property
def series_force_sync(self):
return self.connector.sync_run(self.series_force)
@property
async def series_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Series',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['series'] = items
return items
@property
def episodes_sync(self):
return self.connector.sync_run(self.episodes)
@property
async def episodes(self):
'''returns a list of all episodes in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Episode`
'''
return self.extras.get('episodes', []) or \
await self.episodes_force
@property
def episodes_force_sync(self):
return self.connector.sync_run(self.episodes_force)
@property
async def episodes_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Episode',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['episodes'] = items
return items
@property
def devices_sync(self):
return self.connector.sync_run(self.devices)
@property
async def devices(self):
'''returns a list of all devices connected to emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Devices`
'''
return self.extras.get('devices', []) or \
await self.devices_force
@property
def devices_force_sync(self):
return self.connector.sync_run(self.devices_force)
@property
async def devices_force(self):
items = await self.connector.getJson('/Devices', remote = False)
items = await self.process(items)
self.extras['devices'] = items
return items
@property
def users_sync(self):
return self.connector.sync_run(self.users)
@property
async def users(self):
'''returns a list of all users.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Users`
'''
return self.extras.get('users', []) or \
await self.users_force
@property
def users_force_sync(self):
return self.connector.sync_run(self.users_force)
@property
async def users_force(self):
items = await self.connector.getJson('/Users', remote = False)
items = await self.process(items)
self.extras['users'] = items
return items
|
andy29485/embypy | embypy/emby.py | Emby.search | python | async def search(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
'''Sends a search request to emby, returns results
|coro|
Parameters
----------
query : str
the search string to send to emby
sort_map : dict
is a dict of strings to ints. Strings should be item types, and
the ints are the priority of those types(for sorting).
lower valued(0) will appear first.
strict_sort : bool
if True, then only item types in the keys of sortmap will be
included in the results
Returns
-------
list
list of emby objects
'''
search_params = {
'remote' : False,
'searchTerm' : query
}
if strict_sort:
search_params['IncludeItemTypes'] = ','.join(sort_map.keys())
json = await self.connector.getJson('/Search/Hints/', **search_params)
items = await self.process(json["SearchHints"])
m_size = len(sort_map)
items = sorted(items, key = lambda x : sort_map.get(x.type, m_size))
return items | Sends a search request to emby, returns results
|coro|
Parameters
----------
query : str
the search string to send to emby
sort_map : dict
is a dict of strings to ints. Strings should be item types, and
the ints are the priority of those types(for sorting).
lower valued(0) will appear first.
strict_sort : bool
if True, then only item types in the keys of sortmap will be
included in the results
Returns
-------
list
list of emby objects | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/emby.py#L64-L101 | [
"async def process(self, object_dict):\n '''[for internal use] convert json/dict into python object\n\n |coro|\n\n Parameters\n ----------\n object_dict : dict\n json representation of object from emby\n\n Notes\n -----\n if a string is given, it is assumed to be an id, obj is returned.\n if a list is g... | class Emby(objects.EmbyObject):
'''Emby connection class, an object of this type should be created
to communicate with emby
Parameters
----------
url : str
url to the server (e.g. http://127.0.0.1:8096/)
api_key : str, optional
key obtained from server dashboard
device_id : str, optional
device id to pass to emby
username : str, optional
username to login, this+password can be used instead of an apikey
password : str, optional
password for user to login as
Attributes
----------
connector : embypy.utils.connector.Connector
Object used to make api requests, do not use
'''
def __init__(self, url, **kargs):
connector = Connector(url, **kargs)
super().__init__({'ItemId':'', 'Name':''}, connector)
def info_sync(self, obj_id=None):
return self.connector.sync_run(self.info(obj_id))
async def info(self, obj_id=None):
'''Get info about object id
|coro|
Parameters
----------
obj_id : str, list
if not provided, server info is retured(as a dict).
Otherwise, an object with that id is returned
(or objects if `obj_id` is a list).
'''
if obj_id:
try:
return await self.process(obj_id)
except JSONDecodeError:
raise LookupError('Error object with that id does not exist', obj_id)
else:
return await self.connector.getJson('/system/info/public', remote=False)
def search_sync(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
return self.connector.sync_run(self.search(query, sort_map, strict_sort))
def latest_sync(self, userId=None, itemTypes='', groupItems=False):
return self.connector.sync_run(self.latest(userId, itemTypes, groupItems))
async def latest(self, userId=None, itemTypes='', groupItems=False):
'''returns list of latest items
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
itemTypes: str
if provided, then the list will only include items
if that type - gets passed to the emby api
see https://github.com/MediaBrowser/Emby/wiki/Item-Types
Returns
-------
list
the itmes that will appear as latest (for user if id was given)
'''
json = await self.connector.getJson('/Users/{UserId}/Items/Latest',
remote=False,
userId=userId,
IncludeItemTypes=itemTypes,
GroupItems=groupItems
)
return await self.process(json)
def nextUp_sync(self, userId=None):
return self.connector.sync_run(self.nextUp(userId))
async def nextUp(self, userId=None):
'''returns list of items marked as `next up`
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
Returns
-------
list
the itmes that will appear as next up
(for user if id was given)
'''
json = await self.connector.getJson('/Shows/NextUp',
pass_uid=True,
remote=False,
userId=userId
)
return await self.process(json)
def update_sync(self):
self.connector.sync_run(self.update())
async def update(self):
'''
reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recomended to use the `*_force` properties, which will
only update the cache after data is retrived.
'''
keys = self.extras.keys()
self.extras = {}
for key in keys:
try:
func = getattr(self, key, None)
if callable(func):
func()
except:
pass
def create_playlist_sync(self, name, *songs):
return self.connector.sync_run(self.create_playlist(name, *songs))
async def create_playlist(self, name, *songs):
'''create a new playlist
|coro|
Parameters
----------
name : str
name of new playlist
songs : array_like
list of song ids to add to playlist
'''
data = {'Name': name}
ids = [i.id for i in (await self.process(songs))]
if ids:
data['Ids'] = ','.join(ids)
# TODO - return playlist not status
return await self.connector.post('/Playlists',
data=data,
pass_uid=True,
remote=False
)
@property
def albums_sync(self):
return self.connector.sync_run(self.albums)
@property
async def albums(self):
'''returns list of all albums.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Album`
'''
return self.extras.get('albums') or \
await self.albums_force
@property
def albums_force_sync(self):
return self.connector.sync_run(self.albums_force)
@property
async def albums_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicAlbum',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['albums'] = items
return items
@property
def songs_sync(self):
return self.connector.sync_run(self.songs)
@property
async def songs(self):
'''returns list of all songs.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio`
'''
return self.extras.get('songs') or await self.songs_force
@property
def songs_force_sync(self):
return self.connector.sync_run(self.songs_force)
@property
async def songs_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Audio',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['songs'] = items
return items
@property
def playlists_sync(self):
return self.connector.sync_run(self.playlists)
@property
async def playlists(self):
'''returns list of all playlists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Playlist`
'''
return self.extras.get('playlists') or \
await self.playlists_force
@property
def playlists_force_sync(self):
return self.connector.sync_run(self.playlists_force)
@property
async def playlists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Playlist',
Fields = 'Path,ParentId,Overview',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['playlists'] = items
return items
@property
def artists_sync(self):
return self.connector.sync_run(self.artists)
@property
async def artists(self):
'''returns list of all song artists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Artist`
'''
return self.extras.get('artists', []) or \
await self.artists_force
@property
def artists_force_sync(self):
return self.connector.sync_run(self.artists_force)
@property
async def artists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicArtist',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['artists'] = items
return items
@property
def movies_sync(self):
return self.connector.sync_run(self.movies)
@property
async def movies(self):
'''returns list of all movies.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie`
'''
return self.extras.get('movies', []) or \
await self.movies_force
@property
def movies_force_sync(self):
return self.connector.sync_run(self.movies_force)
@property
async def movies_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Movie',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['movies'] = items
return items
@property
def series_sync(self):
return self.connector.sync_run(self.series)
@property
async def series(self):
'''returns a list of all series in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series`
'''
return self.extras.get('series', []) or \
await self.series_force
@property
def series_force_sync(self):
return self.connector.sync_run(self.series_force)
@property
async def series_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Series',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['series'] = items
return items
@property
def episodes_sync(self):
return self.connector.sync_run(self.episodes)
@property
async def episodes(self):
'''returns a list of all episodes in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Episode`
'''
return self.extras.get('episodes', []) or \
await self.episodes_force
@property
def episodes_force_sync(self):
return self.connector.sync_run(self.episodes_force)
@property
async def episodes_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Episode',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['episodes'] = items
return items
@property
def devices_sync(self):
return self.connector.sync_run(self.devices)
@property
async def devices(self):
'''returns a list of all devices connected to emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Devices`
'''
return self.extras.get('devices', []) or \
await self.devices_force
@property
def devices_force_sync(self):
return self.connector.sync_run(self.devices_force)
@property
async def devices_force(self):
items = await self.connector.getJson('/Devices', remote = False)
items = await self.process(items)
self.extras['devices'] = items
return items
@property
def users_sync(self):
return self.connector.sync_run(self.users)
@property
async def users(self):
'''returns a list of all users.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Users`
'''
return self.extras.get('users', []) or \
await self.users_force
@property
def users_force_sync(self):
return self.connector.sync_run(self.users_force)
@property
async def users_force(self):
items = await self.connector.getJson('/Users', remote = False)
items = await self.process(items)
self.extras['users'] = items
return items
|
andy29485/embypy | embypy/emby.py | Emby.nextUp | python | async def nextUp(self, userId=None):
'''returns list of items marked as `next up`
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
Returns
-------
list
the itmes that will appear as next up
(for user if id was given)
'''
json = await self.connector.getJson('/Shows/NextUp',
pass_uid=True,
remote=False,
userId=userId
)
return await self.process(json) | returns list of items marked as `next up`
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
Returns
-------
list
the itmes that will appear as next up
(for user if id was given) | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/emby.py#L137-L159 | [
"async def process(self, object_dict):\n '''[for internal use] convert json/dict into python object\n\n |coro|\n\n Parameters\n ----------\n object_dict : dict\n json representation of object from emby\n\n Notes\n -----\n if a string is given, it is assumed to be an id, obj is returned.\n if a list is g... | class Emby(objects.EmbyObject):
'''Emby connection class, an object of this type should be created
to communicate with emby
Parameters
----------
url : str
url to the server (e.g. http://127.0.0.1:8096/)
api_key : str, optional
key obtained from server dashboard
device_id : str, optional
device id to pass to emby
username : str, optional
username to login, this+password can be used instead of an apikey
password : str, optional
password for user to login as
Attributes
----------
connector : embypy.utils.connector.Connector
Object used to make api requests, do not use
'''
def __init__(self, url, **kargs):
connector = Connector(url, **kargs)
super().__init__({'ItemId':'', 'Name':''}, connector)
def info_sync(self, obj_id=None):
return self.connector.sync_run(self.info(obj_id))
async def info(self, obj_id=None):
'''Get info about object id
|coro|
Parameters
----------
obj_id : str, list
if not provided, server info is retured(as a dict).
Otherwise, an object with that id is returned
(or objects if `obj_id` is a list).
'''
if obj_id:
try:
return await self.process(obj_id)
except JSONDecodeError:
raise LookupError('Error object with that id does not exist', obj_id)
else:
return await self.connector.getJson('/system/info/public', remote=False)
def search_sync(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
return self.connector.sync_run(self.search(query, sort_map, strict_sort))
async def search(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
'''Sends a search request to emby, returns results
|coro|
Parameters
----------
query : str
the search string to send to emby
sort_map : dict
is a dict of strings to ints. Strings should be item types, and
the ints are the priority of those types(for sorting).
lower valued(0) will appear first.
strict_sort : bool
if True, then only item types in the keys of sortmap will be
included in the results
Returns
-------
list
list of emby objects
'''
search_params = {
'remote' : False,
'searchTerm' : query
}
if strict_sort:
search_params['IncludeItemTypes'] = ','.join(sort_map.keys())
json = await self.connector.getJson('/Search/Hints/', **search_params)
items = await self.process(json["SearchHints"])
m_size = len(sort_map)
items = sorted(items, key = lambda x : sort_map.get(x.type, m_size))
return items
def latest_sync(self, userId=None, itemTypes='', groupItems=False):
return self.connector.sync_run(self.latest(userId, itemTypes, groupItems))
async def latest(self, userId=None, itemTypes='', groupItems=False):
'''returns list of latest items
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
itemTypes: str
if provided, then the list will only include items
if that type - gets passed to the emby api
see https://github.com/MediaBrowser/Emby/wiki/Item-Types
Returns
-------
list
the itmes that will appear as latest (for user if id was given)
'''
json = await self.connector.getJson('/Users/{UserId}/Items/Latest',
remote=False,
userId=userId,
IncludeItemTypes=itemTypes,
GroupItems=groupItems
)
return await self.process(json)
def nextUp_sync(self, userId=None):
return self.connector.sync_run(self.nextUp(userId))
def update_sync(self):
self.connector.sync_run(self.update())
async def update(self):
'''
reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recomended to use the `*_force` properties, which will
only update the cache after data is retrived.
'''
keys = self.extras.keys()
self.extras = {}
for key in keys:
try:
func = getattr(self, key, None)
if callable(func):
func()
except:
pass
def create_playlist_sync(self, name, *songs):
return self.connector.sync_run(self.create_playlist(name, *songs))
async def create_playlist(self, name, *songs):
'''create a new playlist
|coro|
Parameters
----------
name : str
name of new playlist
songs : array_like
list of song ids to add to playlist
'''
data = {'Name': name}
ids = [i.id for i in (await self.process(songs))]
if ids:
data['Ids'] = ','.join(ids)
# TODO - return playlist not status
return await self.connector.post('/Playlists',
data=data,
pass_uid=True,
remote=False
)
@property
def albums_sync(self):
return self.connector.sync_run(self.albums)
@property
async def albums(self):
'''returns list of all albums.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Album`
'''
return self.extras.get('albums') or \
await self.albums_force
@property
def albums_force_sync(self):
return self.connector.sync_run(self.albums_force)
@property
async def albums_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicAlbum',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['albums'] = items
return items
@property
def songs_sync(self):
return self.connector.sync_run(self.songs)
@property
async def songs(self):
'''returns list of all songs.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio`
'''
return self.extras.get('songs') or await self.songs_force
@property
def songs_force_sync(self):
return self.connector.sync_run(self.songs_force)
@property
async def songs_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Audio',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['songs'] = items
return items
@property
def playlists_sync(self):
return self.connector.sync_run(self.playlists)
@property
async def playlists(self):
'''returns list of all playlists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Playlist`
'''
return self.extras.get('playlists') or \
await self.playlists_force
@property
def playlists_force_sync(self):
return self.connector.sync_run(self.playlists_force)
@property
async def playlists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Playlist',
Fields = 'Path,ParentId,Overview',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['playlists'] = items
return items
@property
def artists_sync(self):
return self.connector.sync_run(self.artists)
@property
async def artists(self):
'''returns list of all song artists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Artist`
'''
return self.extras.get('artists', []) or \
await self.artists_force
@property
def artists_force_sync(self):
return self.connector.sync_run(self.artists_force)
@property
async def artists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicArtist',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['artists'] = items
return items
@property
def movies_sync(self):
return self.connector.sync_run(self.movies)
@property
async def movies(self):
'''returns list of all movies.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie`
'''
return self.extras.get('movies', []) or \
await self.movies_force
@property
def movies_force_sync(self):
return self.connector.sync_run(self.movies_force)
@property
async def movies_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Movie',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['movies'] = items
return items
@property
def series_sync(self):
return self.connector.sync_run(self.series)
@property
async def series(self):
'''returns a list of all series in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series`
'''
return self.extras.get('series', []) or \
await self.series_force
@property
def series_force_sync(self):
return self.connector.sync_run(self.series_force)
@property
async def series_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Series',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['series'] = items
return items
@property
def episodes_sync(self):
return self.connector.sync_run(self.episodes)
@property
async def episodes(self):
'''returns a list of all episodes in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Episode`
'''
return self.extras.get('episodes', []) or \
await self.episodes_force
@property
def episodes_force_sync(self):
return self.connector.sync_run(self.episodes_force)
@property
async def episodes_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Episode',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['episodes'] = items
return items
@property
def devices_sync(self):
return self.connector.sync_run(self.devices)
@property
async def devices(self):
'''returns a list of all devices connected to emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Devices`
'''
return self.extras.get('devices', []) or \
await self.devices_force
@property
def devices_force_sync(self):
return self.connector.sync_run(self.devices_force)
@property
async def devices_force(self):
items = await self.connector.getJson('/Devices', remote = False)
items = await self.process(items)
self.extras['devices'] = items
return items
@property
def users_sync(self):
return self.connector.sync_run(self.users)
@property
async def users(self):
'''returns a list of all users.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Users`
'''
return self.extras.get('users', []) or \
await self.users_force
@property
def users_force_sync(self):
return self.connector.sync_run(self.users_force)
@property
async def users_force(self):
items = await self.connector.getJson('/Users', remote = False)
items = await self.process(items)
self.extras['users'] = items
return items
|
andy29485/embypy | embypy/emby.py | Emby.update | python | async def update(self):
'''
reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recomended to use the `*_force` properties, which will
only update the cache after data is retrived.
'''
keys = self.extras.keys()
self.extras = {}
for key in keys:
try:
func = getattr(self, key, None)
if callable(func):
func()
except:
pass | reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recomended to use the `*_force` properties, which will
only update the cache after data is retrived. | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/emby.py#L165-L185 | null | class Emby(objects.EmbyObject):
'''Emby connection class, an object of this type should be created
to communicate with emby
Parameters
----------
url : str
url to the server (e.g. http://127.0.0.1:8096/)
api_key : str, optional
key obtained from server dashboard
device_id : str, optional
device id to pass to emby
username : str, optional
username to login, this+password can be used instead of an apikey
password : str, optional
password for user to login as
Attributes
----------
connector : embypy.utils.connector.Connector
Object used to make api requests, do not use
'''
def __init__(self, url, **kargs):
connector = Connector(url, **kargs)
super().__init__({'ItemId':'', 'Name':''}, connector)
def info_sync(self, obj_id=None):
return self.connector.sync_run(self.info(obj_id))
async def info(self, obj_id=None):
'''Get info about object id
|coro|
Parameters
----------
obj_id : str, list
if not provided, server info is retured(as a dict).
Otherwise, an object with that id is returned
(or objects if `obj_id` is a list).
'''
if obj_id:
try:
return await self.process(obj_id)
except JSONDecodeError:
raise LookupError('Error object with that id does not exist', obj_id)
else:
return await self.connector.getJson('/system/info/public', remote=False)
def search_sync(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
return self.connector.sync_run(self.search(query, sort_map, strict_sort))
async def search(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
'''Sends a search request to emby, returns results
|coro|
Parameters
----------
query : str
the search string to send to emby
sort_map : dict
is a dict of strings to ints. Strings should be item types, and
the ints are the priority of those types(for sorting).
lower valued(0) will appear first.
strict_sort : bool
if True, then only item types in the keys of sortmap will be
included in the results
Returns
-------
list
list of emby objects
'''
search_params = {
'remote' : False,
'searchTerm' : query
}
if strict_sort:
search_params['IncludeItemTypes'] = ','.join(sort_map.keys())
json = await self.connector.getJson('/Search/Hints/', **search_params)
items = await self.process(json["SearchHints"])
m_size = len(sort_map)
items = sorted(items, key = lambda x : sort_map.get(x.type, m_size))
return items
def latest_sync(self, userId=None, itemTypes='', groupItems=False):
return self.connector.sync_run(self.latest(userId, itemTypes, groupItems))
async def latest(self, userId=None, itemTypes='', groupItems=False):
'''returns list of latest items
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
itemTypes: str
if provided, then the list will only include items
if that type - gets passed to the emby api
see https://github.com/MediaBrowser/Emby/wiki/Item-Types
Returns
-------
list
the itmes that will appear as latest (for user if id was given)
'''
json = await self.connector.getJson('/Users/{UserId}/Items/Latest',
remote=False,
userId=userId,
IncludeItemTypes=itemTypes,
GroupItems=groupItems
)
return await self.process(json)
def nextUp_sync(self, userId=None):
return self.connector.sync_run(self.nextUp(userId))
async def nextUp(self, userId=None):
'''returns list of items marked as `next up`
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
Returns
-------
list
the itmes that will appear as next up
(for user if id was given)
'''
json = await self.connector.getJson('/Shows/NextUp',
pass_uid=True,
remote=False,
userId=userId
)
return await self.process(json)
def update_sync(self):
self.connector.sync_run(self.update())
def create_playlist_sync(self, name, *songs):
return self.connector.sync_run(self.create_playlist(name, *songs))
async def create_playlist(self, name, *songs):
'''create a new playlist
|coro|
Parameters
----------
name : str
name of new playlist
songs : array_like
list of song ids to add to playlist
'''
data = {'Name': name}
ids = [i.id for i in (await self.process(songs))]
if ids:
data['Ids'] = ','.join(ids)
# TODO - return playlist not status
return await self.connector.post('/Playlists',
data=data,
pass_uid=True,
remote=False
)
@property
def albums_sync(self):
return self.connector.sync_run(self.albums)
@property
async def albums(self):
'''returns list of all albums.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Album`
'''
return self.extras.get('albums') or \
await self.albums_force
@property
def albums_force_sync(self):
return self.connector.sync_run(self.albums_force)
@property
async def albums_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicAlbum',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['albums'] = items
return items
@property
def songs_sync(self):
return self.connector.sync_run(self.songs)
@property
async def songs(self):
'''returns list of all songs.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio`
'''
return self.extras.get('songs') or await self.songs_force
@property
def songs_force_sync(self):
return self.connector.sync_run(self.songs_force)
@property
async def songs_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Audio',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['songs'] = items
return items
@property
def playlists_sync(self):
return self.connector.sync_run(self.playlists)
@property
async def playlists(self):
'''returns list of all playlists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Playlist`
'''
return self.extras.get('playlists') or \
await self.playlists_force
@property
def playlists_force_sync(self):
return self.connector.sync_run(self.playlists_force)
@property
async def playlists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Playlist',
Fields = 'Path,ParentId,Overview',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['playlists'] = items
return items
@property
def artists_sync(self):
return self.connector.sync_run(self.artists)
@property
async def artists(self):
'''returns list of all song artists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Artist`
'''
return self.extras.get('artists', []) or \
await self.artists_force
@property
def artists_force_sync(self):
return self.connector.sync_run(self.artists_force)
@property
async def artists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicArtist',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['artists'] = items
return items
@property
def movies_sync(self):
return self.connector.sync_run(self.movies)
@property
async def movies(self):
'''returns list of all movies.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie`
'''
return self.extras.get('movies', []) or \
await self.movies_force
@property
def movies_force_sync(self):
return self.connector.sync_run(self.movies_force)
@property
async def movies_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Movie',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['movies'] = items
return items
@property
def series_sync(self):
return self.connector.sync_run(self.series)
@property
async def series(self):
'''returns a list of all series in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series`
'''
return self.extras.get('series', []) or \
await self.series_force
@property
def series_force_sync(self):
return self.connector.sync_run(self.series_force)
@property
async def series_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Series',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['series'] = items
return items
@property
def episodes_sync(self):
return self.connector.sync_run(self.episodes)
@property
async def episodes(self):
'''returns a list of all episodes in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Episode`
'''
return self.extras.get('episodes', []) or \
await self.episodes_force
@property
def episodes_force_sync(self):
return self.connector.sync_run(self.episodes_force)
@property
async def episodes_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Episode',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['episodes'] = items
return items
@property
def devices_sync(self):
return self.connector.sync_run(self.devices)
@property
async def devices(self):
'''returns a list of all devices connected to emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Devices`
'''
return self.extras.get('devices', []) or \
await self.devices_force
@property
def devices_force_sync(self):
return self.connector.sync_run(self.devices_force)
@property
async def devices_force(self):
items = await self.connector.getJson('/Devices', remote = False)
items = await self.process(items)
self.extras['devices'] = items
return items
@property
def users_sync(self):
return self.connector.sync_run(self.users)
@property
async def users(self):
'''returns a list of all users.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Users`
'''
return self.extras.get('users', []) or \
await self.users_force
@property
def users_force_sync(self):
return self.connector.sync_run(self.users_force)
@property
async def users_force(self):
items = await self.connector.getJson('/Users', remote = False)
items = await self.process(items)
self.extras['users'] = items
return items
|
andy29485/embypy | embypy/emby.py | Emby.create_playlist | python | async def create_playlist(self, name, *songs):
'''create a new playlist
|coro|
Parameters
----------
name : str
name of new playlist
songs : array_like
list of song ids to add to playlist
'''
data = {'Name': name}
ids = [i.id for i in (await self.process(songs))]
if ids:
data['Ids'] = ','.join(ids)
# TODO - return playlist not status
return await self.connector.post('/Playlists',
data=data,
pass_uid=True,
remote=False
) | create a new playlist
|coro|
Parameters
----------
name : str
name of new playlist
songs : array_like
list of song ids to add to playlist | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/emby.py#L190-L213 | [
"async def process(self, object_dict):\n '''[for internal use] convert json/dict into python object\n\n |coro|\n\n Parameters\n ----------\n object_dict : dict\n json representation of object from emby\n\n Notes\n -----\n if a string is given, it is assumed to be an id, obj is returned.\n if a list is g... | class Emby(objects.EmbyObject):
'''Emby connection class, an object of this type should be created
to communicate with emby
Parameters
----------
url : str
url to the server (e.g. http://127.0.0.1:8096/)
api_key : str, optional
key obtained from server dashboard
device_id : str, optional
device id to pass to emby
username : str, optional
username to login, this+password can be used instead of an apikey
password : str, optional
password for user to login as
Attributes
----------
connector : embypy.utils.connector.Connector
Object used to make api requests, do not use
'''
def __init__(self, url, **kargs):
connector = Connector(url, **kargs)
super().__init__({'ItemId':'', 'Name':''}, connector)
def info_sync(self, obj_id=None):
return self.connector.sync_run(self.info(obj_id))
async def info(self, obj_id=None):
'''Get info about object id
|coro|
Parameters
----------
obj_id : str, list
if not provided, server info is retured(as a dict).
Otherwise, an object with that id is returned
(or objects if `obj_id` is a list).
'''
if obj_id:
try:
return await self.process(obj_id)
except JSONDecodeError:
raise LookupError('Error object with that id does not exist', obj_id)
else:
return await self.connector.getJson('/system/info/public', remote=False)
def search_sync(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
return self.connector.sync_run(self.search(query, sort_map, strict_sort))
async def search(self, query,
sort_map = {'BoxSet':0,'Series':1,'Movie':2,'Audio':3,'Person':4},
strict_sort = False):
'''Sends a search request to emby, returns results
|coro|
Parameters
----------
query : str
the search string to send to emby
sort_map : dict
is a dict of strings to ints. Strings should be item types, and
the ints are the priority of those types(for sorting).
lower valued(0) will appear first.
strict_sort : bool
if True, then only item types in the keys of sortmap will be
included in the results
Returns
-------
list
list of emby objects
'''
search_params = {
'remote' : False,
'searchTerm' : query
}
if strict_sort:
search_params['IncludeItemTypes'] = ','.join(sort_map.keys())
json = await self.connector.getJson('/Search/Hints/', **search_params)
items = await self.process(json["SearchHints"])
m_size = len(sort_map)
items = sorted(items, key = lambda x : sort_map.get(x.type, m_size))
return items
def latest_sync(self, userId=None, itemTypes='', groupItems=False):
return self.connector.sync_run(self.latest(userId, itemTypes, groupItems))
async def latest(self, userId=None, itemTypes='', groupItems=False):
'''returns list of latest items
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
itemTypes: str
if provided, then the list will only include items
if that type - gets passed to the emby api
see https://github.com/MediaBrowser/Emby/wiki/Item-Types
Returns
-------
list
the itmes that will appear as latest (for user if id was given)
'''
json = await self.connector.getJson('/Users/{UserId}/Items/Latest',
remote=False,
userId=userId,
IncludeItemTypes=itemTypes,
GroupItems=groupItems
)
return await self.process(json)
def nextUp_sync(self, userId=None):
return self.connector.sync_run(self.nextUp(userId))
async def nextUp(self, userId=None):
'''returns list of items marked as `next up`
|coro|
Parameters
----------
userId : str
if provided, then the list returned is
the one that that use will see.
Returns
-------
list
the itmes that will appear as next up
(for user if id was given)
'''
json = await self.connector.getJson('/Shows/NextUp',
pass_uid=True,
remote=False,
userId=userId
)
return await self.process(json)
def update_sync(self):
self.connector.sync_run(self.update())
async def update(self):
'''
reload all cached information
|coro|
Notes
-----
This is a slow process, and will remove the cache before updating.
Thus it is recomended to use the `*_force` properties, which will
only update the cache after data is retrived.
'''
keys = self.extras.keys()
self.extras = {}
for key in keys:
try:
func = getattr(self, key, None)
if callable(func):
func()
except:
pass
def create_playlist_sync(self, name, *songs):
return self.connector.sync_run(self.create_playlist(name, *songs))
@property
def albums_sync(self):
return self.connector.sync_run(self.albums)
@property
async def albums(self):
'''returns list of all albums.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Album`
'''
return self.extras.get('albums') or \
await self.albums_force
@property
def albums_force_sync(self):
return self.connector.sync_run(self.albums_force)
@property
async def albums_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicAlbum',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['albums'] = items
return items
@property
def songs_sync(self):
return self.connector.sync_run(self.songs)
@property
async def songs(self):
'''returns list of all songs.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Audio`
'''
return self.extras.get('songs') or await self.songs_force
@property
def songs_force_sync(self):
return self.connector.sync_run(self.songs_force)
@property
async def songs_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Audio',
Fields = 'Path,ParentId,Overview,Genres,Tags,Artists',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['songs'] = items
return items
@property
def playlists_sync(self):
return self.connector.sync_run(self.playlists)
@property
async def playlists(self):
'''returns list of all playlists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Playlist`
'''
return self.extras.get('playlists') or \
await self.playlists_force
@property
def playlists_force_sync(self):
return self.connector.sync_run(self.playlists_force)
@property
async def playlists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Playlist',
Fields = 'Path,ParentId,Overview',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['playlists'] = items
return items
@property
def artists_sync(self):
return self.connector.sync_run(self.artists)
@property
async def artists(self):
'''returns list of all song artists.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Artist`
'''
return self.extras.get('artists', []) or \
await self.artists_force
@property
def artists_force_sync(self):
return self.connector.sync_run(self.artists_force)
@property
async def artists_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'MusicArtist',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['artists'] = items
return items
@property
def movies_sync(self):
return self.connector.sync_run(self.movies)
@property
async def movies(self):
'''returns list of all movies.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Movie`
'''
return self.extras.get('movies', []) or \
await self.movies_force
@property
def movies_force_sync(self):
return self.connector.sync_run(self.movies_force)
@property
async def movies_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Movie',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['movies'] = items
return items
@property
def series_sync(self):
return self.connector.sync_run(self.series)
@property
async def series(self):
'''returns a list of all series in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Series`
'''
return self.extras.get('series', []) or \
await self.series_force
@property
def series_force_sync(self):
return self.connector.sync_run(self.series_force)
@property
async def series_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Series',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['series'] = items
return items
@property
def episodes_sync(self):
return self.connector.sync_run(self.episodes)
@property
async def episodes(self):
'''returns a list of all episodes in emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Episode`
'''
return self.extras.get('episodes', []) or \
await self.episodes_force
@property
def episodes_force_sync(self):
return self.connector.sync_run(self.episodes_force)
@property
async def episodes_force(self):
items = await self.connector.getJson(
'/Users/{UserId}/Items',
remote = False,
format = 'json',
Recursive = 'true',
IncludeItemTypes = 'Episode',
Fields = 'Path,ParentId,Overview,Genres,Tags',
SortBy = 'SortName',
SortOrder = 'Ascending'
)
items = await self.process(items)
self.extras['episodes'] = items
return items
@property
def devices_sync(self):
return self.connector.sync_run(self.devices)
@property
async def devices(self):
'''returns a list of all devices connected to emby.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Devices`
'''
return self.extras.get('devices', []) or \
await self.devices_force
@property
def devices_force_sync(self):
return self.connector.sync_run(self.devices_force)
@property
async def devices_force(self):
items = await self.connector.getJson('/Devices', remote = False)
items = await self.process(items)
self.extras['devices'] = items
return items
@property
def users_sync(self):
return self.connector.sync_run(self.users)
@property
async def users(self):
'''returns a list of all users.
|force|
|coro|
Returns
-------
list
of type :class:`embypy.objects.Users`
'''
return self.extras.get('users', []) or \
await self.users_force
@property
def users_force_sync(self):
return self.connector.sync_run(self.users_force)
@property
async def users_force(self):
items = await self.connector.getJson('/Users', remote = False)
items = await self.process(items)
self.extras['users'] = items
return items
|
andy29485/embypy | embypy/objects/object.py | EmbyObject.percentage_played | python | def percentage_played(self):
'''returns played percentage [0,1] of item'''
played = self.object_dict.get('UserData', {}).get('PlaybackPositionTicks')
total = self.object_dict.get('RunTimeTicks') or 1
return (played or 0)/total | returns played percentage [0,1] of item | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/objects/object.py#L88-L92 | null | class EmbyObject:
'''Deafult EMby Object Template
Parameters
----------
object_dict : dict
dictionary with json info returned from emby
connector: embypy.utils.connector.Connector
connector object to make upstream api calls
save : bool
if true, append to list of existing objects
saves space/increases speed/reduces issues
only set to false if creating a temp object that will be thrown out
'''
known_objects = {}
def __init__(self, object_dict, connector, save=True):
self.connector = connector
self.object_dict = object_dict
self.extras = {}
if save:
EmbyObject.known_objects[object_dict.get('Id')] = self
def __eq__(self, other):
return isinstance(other, EmbyObject) and self.id == other.id
@property
def id(self):
'''string with hexidecimal hash representing the id of this
object in emby
'''
return self.object_dict.get('Id') or self.object_dict.get('ItemId')
@property
def name(self):
'''name of the item
See Also
--------
post :
'''
return self.object_dict.get('Name', '')
@name.setter
def name(self, value):
self.object_dict['Name'] = value
@property
def title(self):
'''same as name
See Also
--------
post :
'''
return self.name
@title.setter
def title(self, value):
self.name = value
@property
def path(self):
'''get the filepath of the media file (not url)
See Also
--------
url :
'''
return self.object_dict.get('Path', '')
@property
def watched(self):
'''returns True it item has been watched'''
return self.object_dict.get('UserData', {}).get('Played')
@property
def played(self):
'''same as `watched`'''
return self.watched
@property
@property
def play_count(self):
'''returns users playcount for item'''
return self.object_dict.get('UserData', {}).get('PlayCount', 0)
@property
def favorite(self):
'''returns True if user favorited item'''
return self.object_dict.get('UserData', {}).get('IsFavorite', False)
def setFavorite_sync(self, value=True):
self.connector.sync_run(self.setFavorite(value))
def setWatched_sync(self, value=True):
self.connector.sync_run(self.setWatched(value))
async def _mark(self, type, value):
url = '/Users/{{UserId}}/{type}/{id}'.format(type=type, id=self.id)
if value:
(await self.connector.post(url)).close()
else:
(await self.connector.delete(url)).close()
async def setFavorite(self, value=True):
await self._mark('FavoriteItems', value)
async def setWatched(self, value=True):
await self._mark('PlayedItems', value)
@property
def type(self):
'''get the object type (general)
See Also
--------
media_type :
'''
return self.object_dict.get('Type', 'Object')
@property
def media_type(self):
'''get the object type (specific)
See Also
--------
type :
'''
return self.object_dict.get('MediaType', 'Object')
@property
def genres(self):
'''list of genres
See Also
--------
post :
tags :
'''
return self.object_dict.get('Genres', [])
@genres.setter
def genres(self, genres : list):
self.object_dict['Genres'] = genres
@property
def tags(self):
'''list of tags
See Also
--------
post :
genres :
'''
return self.object_dict.get('Tags', [])
@tags.setter
def tags(self, tags : list):
self.object_dict['Tags'] = tags
@property
def overview(self):
'''the description of the item
See Also
--------
post :
'''
return self.object_dict.get('Overview', '')
@overview.setter
def overview(self, value):
self.object_dict['Overview'] = value
@property
def community_rating(self):
'''int [0-10] with the rating of the item
See Also
--------
post :
'''
return self.object_dict.get('CommunityRating', 0)
@community_rating.setter
def community_rating(self, value):
self.object_dict['CommunityRating'] = value
@property
def primary_image_url(self):
'''url of the main poster image'''
path = '/Items/{}/Images/Primary'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
@property
def parent_id(self):
'''id of the parent object
See Also
--------
parent :
'''
return self.object_dict.get('ParentId')
@property
def parent_sync(self):
return self.connector.sync_run(self.parent)
@property
async def parent(self):
'''parent object as a subclass of EmbyObject
|coro|
'''
if self.parent_id:
return await self.process(self.parent_id)
else:
return None
@property
def url(self):
'''url of the item
Notes
-----
if remote-adderes was given, then that is used as the base
'''
path = '/web/itemdetails.html?id={}'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
def update_sync(self):
return self.connector.sync_run(self.update())
def refresh_sync(self):
return self.connector.sync_run(self.update())
async def update(self, fields=''):
'''reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
'''
path = 'Users/{{UserId}}/Items/{}'.format(self.id)
info = await self.connector.getJson(path,
remote=False,
Fields='Path,Overview,'+fields
)
self.object_dict.update(info)
self.extras = {}
return self
async def refresh(self, fields=''):
'''Same as update
|coro|
See Also
--------
update :
'''
return await self.update()
def send_sync(self):
return self.connector.sync_run(self.send())
def post_sync(self):
return self.connector.sync_run(self.send())
async def send(self):
'''send data that was changed to emby
|coro|
This should be used after using any of the setter. Not necessarily
immediately, but soon after.
See Also
--------
post: same thing
update :
refresh :
Returns
-------
aiohttp.ClientResponse or None if nothing needed updating
'''
# Why does the whole dict need to be sent?
# because emby is dumb, and will break if I don't
path = 'Items/{}'.format(self.id)
resp = await self.connector.post(path, data=self.object_dict, remote=False)
if resp.status == 400:
await EmbyObject(self.object_dict, self.connector).update()
resp = await self.connector.post(path,data=self.object_dict,remote=False)
return resp
async def post(self):
'''Same as send
|coro|
See Also
--------
send :
'''
return await self.send()
async def process(self, object_dict):
'''[for internal use] convert json/dict into python object
|coro|
Parameters
----------
object_dict : dict
json representation of object from emby
Notes
-----
if a string is given, it is assumed to be an id, obj is returned.
if a list is given, this method is called for each item in list.
Returns
-------
EmbyObject
the object that is represented by the json dict
list
if input is a list, list is returned
'''
# if ID was given, create dummy object and update it to get full dict
try:
if type(object_dict) == str:
existing = EmbyObject.known_objects.get(object_dict)
if existing:
return existing
obj = EmbyObject({"Id":object_dict}, self.connector, save=False)
object_dict = (await obj.update()).object_dict
except:
return None
# if nothing was given, return it back
# if already created object was given, return it back too
if not object_dict or isinstance(object_dict, EmbyObject):
return object_dict
# if a json dict that's really just a list was given,
# convert to list
if type(object_dict) == dict and \
set(object_dict.keys()) == {'Items', 'TotalRecordCount'}:
object_dict = object_dict['Items']
# if a list was given,
# process each item in list
if type(object_dict) == list:
items = []
for item in object_dict:
item = await self.process(item)
if item:
items.append(item)
return items
# otherwise we probably have an object dict
# so we should process that
# if dict has no id, it's a fake
if 'Id' not in object_dict and 'ItemId' not in object_dict:
return object_dict
# if object is already stored,
# update with existing info and return
itemId = object_dict.get('Id', object_dict.get('ItemId'))
existing = EmbyObject.known_objects.get(itemId)
if existing:
existing.object_dict.update(object_dict)
return existing
import embypy.objects.folders
import embypy.objects.videos
import embypy.objects.misc
# if objectc is not already stored,
# figure out its type (if unknown use this base class)
# create an object with subclass of that type
# return
if 'Type' in object_dict:
if object_dict['Type'] == 'Audio':
return embypy.objects.misc.Audio(object_dict, self.connector)
if object_dict['Type'] == 'Person':
return embypy.objects.misc.Person(object_dict, self.connector)
if object_dict['Type'] == 'Video':
return embypy.objects.videos.Video(object_dict, self.connector)
if object_dict['Type'] == 'Movie':
return embypy.objects.videos.Movie(object_dict, self.connector)
if object_dict['Type'] == 'Trailer':
return embypy.objects.videos.Trailer(object_dict, self.connector)
if object_dict['Type'] == 'AdultVideo':
return embypy.objects.videos.AdultVideo(object_dict, self.connector)
if object_dict['Type'] == 'MusicVideo':
return embypy.objects.videos.MusicVideo(object_dict, self.connector)
if object_dict['Type'] == 'Episode':
return embypy.objects.videos.Episode(object_dict, self.connector)
if object_dict['Type'] == 'Folder':
return embypy.objects.folders.Folder(object_dict, self.connector)
if object_dict['Type'] == 'Playlist':
return embypy.objects.folders.Playlist(object_dict, self.connector)
if object_dict['Type'] == 'BoxSet':
return embypy.objects.folders.BoxSet(object_dict, self.connector)
if object_dict['Type'] == 'MusicAlbum':
return embypy.objects.folders.MusicAlbum(object_dict, self.connector)
if object_dict['Type'] == 'MusicArtist':
return embypy.objects.folders.MusicArtist(object_dict, self.connector)
if object_dict['Type'] == 'Season':
return embypy.objects.folders.Season(object_dict, self.connector)
if object_dict['Type'] == 'Series':
return embypy.objects.folders.Series(object_dict, self.connector)
if object_dict['Type'] == 'Game':
return embypy.objects.misc.Game(object_dict, self.connector)
if object_dict['Type'] == 'GameSystem':
return embypy.objects.folders.GameSystem(object_dict, self.connector)
if object_dict['Type'] == 'Photo':
return embypy.objects.misc.Photo(object_dict, self.connector)
if object_dict['Type'] == 'Book':
return embypy.objects.misc.Book(object_dict, self.connector)
if object_dict['Type'] == 'Image':
return embypy.objects.misc.Image(object_dict, self.connector)
elif 'AppName' in object_dict:
return embypy.objects.misc.Device(object_dict, self.connector)
elif 'HasPassword' in object_dict:
return embypy.objects.misc.User(object_dict, self.connector)
return EmbyObject(object_dict, self.connector)
def __str__(self):
return self.name
def __repr__(self):
return '<{} {}>'.format(self.type, self.id)
|
andy29485/embypy | embypy/objects/object.py | EmbyObject.url | python | def url(self):
'''url of the item
Notes
-----
if remote-adderes was given, then that is used as the base
'''
path = '/web/itemdetails.html?id={}'.format(self.id)
return self.connector.get_url(path, attach_api_key=False) | url of the item
Notes
-----
if remote-adderes was given, then that is used as the base | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/objects/object.py#L233-L241 | null | class EmbyObject:
'''Deafult EMby Object Template
Parameters
----------
object_dict : dict
dictionary with json info returned from emby
connector: embypy.utils.connector.Connector
connector object to make upstream api calls
save : bool
if true, append to list of existing objects
saves space/increases speed/reduces issues
only set to false if creating a temp object that will be thrown out
'''
known_objects = {}
def __init__(self, object_dict, connector, save=True):
self.connector = connector
self.object_dict = object_dict
self.extras = {}
if save:
EmbyObject.known_objects[object_dict.get('Id')] = self
def __eq__(self, other):
return isinstance(other, EmbyObject) and self.id == other.id
@property
def id(self):
'''string with hexidecimal hash representing the id of this
object in emby
'''
return self.object_dict.get('Id') or self.object_dict.get('ItemId')
@property
def name(self):
'''name of the item
See Also
--------
post :
'''
return self.object_dict.get('Name', '')
@name.setter
def name(self, value):
self.object_dict['Name'] = value
@property
def title(self):
'''same as name
See Also
--------
post :
'''
return self.name
@title.setter
def title(self, value):
self.name = value
@property
def path(self):
'''get the filepath of the media file (not url)
See Also
--------
url :
'''
return self.object_dict.get('Path', '')
@property
def watched(self):
'''returns True it item has been watched'''
return self.object_dict.get('UserData', {}).get('Played')
@property
def played(self):
'''same as `watched`'''
return self.watched
@property
def percentage_played(self):
'''returns played percentage [0,1] of item'''
played = self.object_dict.get('UserData', {}).get('PlaybackPositionTicks')
total = self.object_dict.get('RunTimeTicks') or 1
return (played or 0)/total
@property
def play_count(self):
'''returns users playcount for item'''
return self.object_dict.get('UserData', {}).get('PlayCount', 0)
@property
def favorite(self):
'''returns True if user favorited item'''
return self.object_dict.get('UserData', {}).get('IsFavorite', False)
def setFavorite_sync(self, value=True):
self.connector.sync_run(self.setFavorite(value))
def setWatched_sync(self, value=True):
self.connector.sync_run(self.setWatched(value))
async def _mark(self, type, value):
url = '/Users/{{UserId}}/{type}/{id}'.format(type=type, id=self.id)
if value:
(await self.connector.post(url)).close()
else:
(await self.connector.delete(url)).close()
async def setFavorite(self, value=True):
await self._mark('FavoriteItems', value)
async def setWatched(self, value=True):
await self._mark('PlayedItems', value)
@property
def type(self):
'''get the object type (general)
See Also
--------
media_type :
'''
return self.object_dict.get('Type', 'Object')
@property
def media_type(self):
'''get the object type (specific)
See Also
--------
type :
'''
return self.object_dict.get('MediaType', 'Object')
@property
def genres(self):
'''list of genres
See Also
--------
post :
tags :
'''
return self.object_dict.get('Genres', [])
@genres.setter
def genres(self, genres : list):
self.object_dict['Genres'] = genres
@property
def tags(self):
'''list of tags
See Also
--------
post :
genres :
'''
return self.object_dict.get('Tags', [])
@tags.setter
def tags(self, tags : list):
self.object_dict['Tags'] = tags
@property
def overview(self):
'''the description of the item
See Also
--------
post :
'''
return self.object_dict.get('Overview', '')
@overview.setter
def overview(self, value):
self.object_dict['Overview'] = value
@property
def community_rating(self):
'''int [0-10] with the rating of the item
See Also
--------
post :
'''
return self.object_dict.get('CommunityRating', 0)
@community_rating.setter
def community_rating(self, value):
self.object_dict['CommunityRating'] = value
@property
def primary_image_url(self):
'''url of the main poster image'''
path = '/Items/{}/Images/Primary'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
@property
def parent_id(self):
'''id of the parent object
See Also
--------
parent :
'''
return self.object_dict.get('ParentId')
@property
def parent_sync(self):
return self.connector.sync_run(self.parent)
@property
async def parent(self):
'''parent object as a subclass of EmbyObject
|coro|
'''
if self.parent_id:
return await self.process(self.parent_id)
else:
return None
@property
def update_sync(self):
return self.connector.sync_run(self.update())
def refresh_sync(self):
return self.connector.sync_run(self.update())
async def update(self, fields=''):
'''reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
'''
path = 'Users/{{UserId}}/Items/{}'.format(self.id)
info = await self.connector.getJson(path,
remote=False,
Fields='Path,Overview,'+fields
)
self.object_dict.update(info)
self.extras = {}
return self
async def refresh(self, fields=''):
'''Same as update
|coro|
See Also
--------
update :
'''
return await self.update()
def send_sync(self):
return self.connector.sync_run(self.send())
def post_sync(self):
return self.connector.sync_run(self.send())
async def send(self):
'''send data that was changed to emby
|coro|
This should be used after using any of the setter. Not necessarily
immediately, but soon after.
See Also
--------
post: same thing
update :
refresh :
Returns
-------
aiohttp.ClientResponse or None if nothing needed updating
'''
# Why does the whole dict need to be sent?
# because emby is dumb, and will break if I don't
path = 'Items/{}'.format(self.id)
resp = await self.connector.post(path, data=self.object_dict, remote=False)
if resp.status == 400:
await EmbyObject(self.object_dict, self.connector).update()
resp = await self.connector.post(path,data=self.object_dict,remote=False)
return resp
async def post(self):
'''Same as send
|coro|
See Also
--------
send :
'''
return await self.send()
async def process(self, object_dict):
'''[for internal use] convert json/dict into python object
|coro|
Parameters
----------
object_dict : dict
json representation of object from emby
Notes
-----
if a string is given, it is assumed to be an id, obj is returned.
if a list is given, this method is called for each item in list.
Returns
-------
EmbyObject
the object that is represented by the json dict
list
if input is a list, list is returned
'''
# if ID was given, create dummy object and update it to get full dict
try:
if type(object_dict) == str:
existing = EmbyObject.known_objects.get(object_dict)
if existing:
return existing
obj = EmbyObject({"Id":object_dict}, self.connector, save=False)
object_dict = (await obj.update()).object_dict
except:
return None
# if nothing was given, return it back
# if already created object was given, return it back too
if not object_dict or isinstance(object_dict, EmbyObject):
return object_dict
# if a json dict that's really just a list was given,
# convert to list
if type(object_dict) == dict and \
set(object_dict.keys()) == {'Items', 'TotalRecordCount'}:
object_dict = object_dict['Items']
# if a list was given,
# process each item in list
if type(object_dict) == list:
items = []
for item in object_dict:
item = await self.process(item)
if item:
items.append(item)
return items
# otherwise we probably have an object dict
# so we should process that
# if dict has no id, it's a fake
if 'Id' not in object_dict and 'ItemId' not in object_dict:
return object_dict
# if object is already stored,
# update with existing info and return
itemId = object_dict.get('Id', object_dict.get('ItemId'))
existing = EmbyObject.known_objects.get(itemId)
if existing:
existing.object_dict.update(object_dict)
return existing
import embypy.objects.folders
import embypy.objects.videos
import embypy.objects.misc
# if objectc is not already stored,
# figure out its type (if unknown use this base class)
# create an object with subclass of that type
# return
if 'Type' in object_dict:
if object_dict['Type'] == 'Audio':
return embypy.objects.misc.Audio(object_dict, self.connector)
if object_dict['Type'] == 'Person':
return embypy.objects.misc.Person(object_dict, self.connector)
if object_dict['Type'] == 'Video':
return embypy.objects.videos.Video(object_dict, self.connector)
if object_dict['Type'] == 'Movie':
return embypy.objects.videos.Movie(object_dict, self.connector)
if object_dict['Type'] == 'Trailer':
return embypy.objects.videos.Trailer(object_dict, self.connector)
if object_dict['Type'] == 'AdultVideo':
return embypy.objects.videos.AdultVideo(object_dict, self.connector)
if object_dict['Type'] == 'MusicVideo':
return embypy.objects.videos.MusicVideo(object_dict, self.connector)
if object_dict['Type'] == 'Episode':
return embypy.objects.videos.Episode(object_dict, self.connector)
if object_dict['Type'] == 'Folder':
return embypy.objects.folders.Folder(object_dict, self.connector)
if object_dict['Type'] == 'Playlist':
return embypy.objects.folders.Playlist(object_dict, self.connector)
if object_dict['Type'] == 'BoxSet':
return embypy.objects.folders.BoxSet(object_dict, self.connector)
if object_dict['Type'] == 'MusicAlbum':
return embypy.objects.folders.MusicAlbum(object_dict, self.connector)
if object_dict['Type'] == 'MusicArtist':
return embypy.objects.folders.MusicArtist(object_dict, self.connector)
if object_dict['Type'] == 'Season':
return embypy.objects.folders.Season(object_dict, self.connector)
if object_dict['Type'] == 'Series':
return embypy.objects.folders.Series(object_dict, self.connector)
if object_dict['Type'] == 'Game':
return embypy.objects.misc.Game(object_dict, self.connector)
if object_dict['Type'] == 'GameSystem':
return embypy.objects.folders.GameSystem(object_dict, self.connector)
if object_dict['Type'] == 'Photo':
return embypy.objects.misc.Photo(object_dict, self.connector)
if object_dict['Type'] == 'Book':
return embypy.objects.misc.Book(object_dict, self.connector)
if object_dict['Type'] == 'Image':
return embypy.objects.misc.Image(object_dict, self.connector)
elif 'AppName' in object_dict:
return embypy.objects.misc.Device(object_dict, self.connector)
elif 'HasPassword' in object_dict:
return embypy.objects.misc.User(object_dict, self.connector)
return EmbyObject(object_dict, self.connector)
def __str__(self):
return self.name
def __repr__(self):
return '<{} {}>'.format(self.type, self.id)
|
andy29485/embypy | embypy/objects/object.py | EmbyObject.update | python | async def update(self, fields=''):
'''reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
'''
path = 'Users/{{UserId}}/Items/{}'.format(self.id)
info = await self.connector.getJson(path,
remote=False,
Fields='Path,Overview,'+fields
)
self.object_dict.update(info)
self.extras = {}
return self | reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post : | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/objects/object.py#L249-L272 | null | class EmbyObject:
'''Deafult EMby Object Template
Parameters
----------
object_dict : dict
dictionary with json info returned from emby
connector: embypy.utils.connector.Connector
connector object to make upstream api calls
save : bool
if true, append to list of existing objects
saves space/increases speed/reduces issues
only set to false if creating a temp object that will be thrown out
'''
known_objects = {}
def __init__(self, object_dict, connector, save=True):
self.connector = connector
self.object_dict = object_dict
self.extras = {}
if save:
EmbyObject.known_objects[object_dict.get('Id')] = self
def __eq__(self, other):
return isinstance(other, EmbyObject) and self.id == other.id
@property
def id(self):
'''string with hexidecimal hash representing the id of this
object in emby
'''
return self.object_dict.get('Id') or self.object_dict.get('ItemId')
@property
def name(self):
'''name of the item
See Also
--------
post :
'''
return self.object_dict.get('Name', '')
@name.setter
def name(self, value):
self.object_dict['Name'] = value
@property
def title(self):
'''same as name
See Also
--------
post :
'''
return self.name
@title.setter
def title(self, value):
self.name = value
@property
def path(self):
'''get the filepath of the media file (not url)
See Also
--------
url :
'''
return self.object_dict.get('Path', '')
@property
def watched(self):
'''returns True it item has been watched'''
return self.object_dict.get('UserData', {}).get('Played')
@property
def played(self):
'''same as `watched`'''
return self.watched
@property
def percentage_played(self):
'''returns played percentage [0,1] of item'''
played = self.object_dict.get('UserData', {}).get('PlaybackPositionTicks')
total = self.object_dict.get('RunTimeTicks') or 1
return (played or 0)/total
@property
def play_count(self):
'''returns users playcount for item'''
return self.object_dict.get('UserData', {}).get('PlayCount', 0)
@property
def favorite(self):
'''returns True if user favorited item'''
return self.object_dict.get('UserData', {}).get('IsFavorite', False)
def setFavorite_sync(self, value=True):
self.connector.sync_run(self.setFavorite(value))
def setWatched_sync(self, value=True):
self.connector.sync_run(self.setWatched(value))
async def _mark(self, type, value):
url = '/Users/{{UserId}}/{type}/{id}'.format(type=type, id=self.id)
if value:
(await self.connector.post(url)).close()
else:
(await self.connector.delete(url)).close()
async def setFavorite(self, value=True):
await self._mark('FavoriteItems', value)
async def setWatched(self, value=True):
await self._mark('PlayedItems', value)
@property
def type(self):
'''get the object type (general)
See Also
--------
media_type :
'''
return self.object_dict.get('Type', 'Object')
@property
def media_type(self):
'''get the object type (specific)
See Also
--------
type :
'''
return self.object_dict.get('MediaType', 'Object')
@property
def genres(self):
'''list of genres
See Also
--------
post :
tags :
'''
return self.object_dict.get('Genres', [])
@genres.setter
def genres(self, genres : list):
self.object_dict['Genres'] = genres
@property
def tags(self):
'''list of tags
See Also
--------
post :
genres :
'''
return self.object_dict.get('Tags', [])
@tags.setter
def tags(self, tags : list):
self.object_dict['Tags'] = tags
@property
def overview(self):
'''the description of the item
See Also
--------
post :
'''
return self.object_dict.get('Overview', '')
@overview.setter
def overview(self, value):
self.object_dict['Overview'] = value
@property
def community_rating(self):
'''int [0-10] with the rating of the item
See Also
--------
post :
'''
return self.object_dict.get('CommunityRating', 0)
@community_rating.setter
def community_rating(self, value):
self.object_dict['CommunityRating'] = value
@property
def primary_image_url(self):
'''url of the main poster image'''
path = '/Items/{}/Images/Primary'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
@property
def parent_id(self):
'''id of the parent object
See Also
--------
parent :
'''
return self.object_dict.get('ParentId')
@property
def parent_sync(self):
return self.connector.sync_run(self.parent)
@property
async def parent(self):
'''parent object as a subclass of EmbyObject
|coro|
'''
if self.parent_id:
return await self.process(self.parent_id)
else:
return None
@property
def url(self):
'''url of the item
Notes
-----
if remote-adderes was given, then that is used as the base
'''
path = '/web/itemdetails.html?id={}'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
def update_sync(self):
return self.connector.sync_run(self.update())
def refresh_sync(self):
return self.connector.sync_run(self.update())
async def refresh(self, fields=''):
'''Same as update
|coro|
See Also
--------
update :
'''
return await self.update()
def send_sync(self):
return self.connector.sync_run(self.send())
def post_sync(self):
return self.connector.sync_run(self.send())
async def send(self):
'''send data that was changed to emby
|coro|
This should be used after using any of the setter. Not necessarily
immediately, but soon after.
See Also
--------
post: same thing
update :
refresh :
Returns
-------
aiohttp.ClientResponse or None if nothing needed updating
'''
# Why does the whole dict need to be sent?
# because emby is dumb, and will break if I don't
path = 'Items/{}'.format(self.id)
resp = await self.connector.post(path, data=self.object_dict, remote=False)
if resp.status == 400:
await EmbyObject(self.object_dict, self.connector).update()
resp = await self.connector.post(path,data=self.object_dict,remote=False)
return resp
async def post(self):
'''Same as send
|coro|
See Also
--------
send :
'''
return await self.send()
async def process(self, object_dict):
'''[for internal use] convert json/dict into python object
|coro|
Parameters
----------
object_dict : dict
json representation of object from emby
Notes
-----
if a string is given, it is assumed to be an id, obj is returned.
if a list is given, this method is called for each item in list.
Returns
-------
EmbyObject
the object that is represented by the json dict
list
if input is a list, list is returned
'''
# if ID was given, create dummy object and update it to get full dict
try:
if type(object_dict) == str:
existing = EmbyObject.known_objects.get(object_dict)
if existing:
return existing
obj = EmbyObject({"Id":object_dict}, self.connector, save=False)
object_dict = (await obj.update()).object_dict
except:
return None
# if nothing was given, return it back
# if already created object was given, return it back too
if not object_dict or isinstance(object_dict, EmbyObject):
return object_dict
# if a json dict that's really just a list was given,
# convert to list
if type(object_dict) == dict and \
set(object_dict.keys()) == {'Items', 'TotalRecordCount'}:
object_dict = object_dict['Items']
# if a list was given,
# process each item in list
if type(object_dict) == list:
items = []
for item in object_dict:
item = await self.process(item)
if item:
items.append(item)
return items
# otherwise we probably have an object dict
# so we should process that
# if dict has no id, it's a fake
if 'Id' not in object_dict and 'ItemId' not in object_dict:
return object_dict
# if object is already stored,
# update with existing info and return
itemId = object_dict.get('Id', object_dict.get('ItemId'))
existing = EmbyObject.known_objects.get(itemId)
if existing:
existing.object_dict.update(object_dict)
return existing
import embypy.objects.folders
import embypy.objects.videos
import embypy.objects.misc
# if objectc is not already stored,
# figure out its type (if unknown use this base class)
# create an object with subclass of that type
# return
if 'Type' in object_dict:
if object_dict['Type'] == 'Audio':
return embypy.objects.misc.Audio(object_dict, self.connector)
if object_dict['Type'] == 'Person':
return embypy.objects.misc.Person(object_dict, self.connector)
if object_dict['Type'] == 'Video':
return embypy.objects.videos.Video(object_dict, self.connector)
if object_dict['Type'] == 'Movie':
return embypy.objects.videos.Movie(object_dict, self.connector)
if object_dict['Type'] == 'Trailer':
return embypy.objects.videos.Trailer(object_dict, self.connector)
if object_dict['Type'] == 'AdultVideo':
return embypy.objects.videos.AdultVideo(object_dict, self.connector)
if object_dict['Type'] == 'MusicVideo':
return embypy.objects.videos.MusicVideo(object_dict, self.connector)
if object_dict['Type'] == 'Episode':
return embypy.objects.videos.Episode(object_dict, self.connector)
if object_dict['Type'] == 'Folder':
return embypy.objects.folders.Folder(object_dict, self.connector)
if object_dict['Type'] == 'Playlist':
return embypy.objects.folders.Playlist(object_dict, self.connector)
if object_dict['Type'] == 'BoxSet':
return embypy.objects.folders.BoxSet(object_dict, self.connector)
if object_dict['Type'] == 'MusicAlbum':
return embypy.objects.folders.MusicAlbum(object_dict, self.connector)
if object_dict['Type'] == 'MusicArtist':
return embypy.objects.folders.MusicArtist(object_dict, self.connector)
if object_dict['Type'] == 'Season':
return embypy.objects.folders.Season(object_dict, self.connector)
if object_dict['Type'] == 'Series':
return embypy.objects.folders.Series(object_dict, self.connector)
if object_dict['Type'] == 'Game':
return embypy.objects.misc.Game(object_dict, self.connector)
if object_dict['Type'] == 'GameSystem':
return embypy.objects.folders.GameSystem(object_dict, self.connector)
if object_dict['Type'] == 'Photo':
return embypy.objects.misc.Photo(object_dict, self.connector)
if object_dict['Type'] == 'Book':
return embypy.objects.misc.Book(object_dict, self.connector)
if object_dict['Type'] == 'Image':
return embypy.objects.misc.Image(object_dict, self.connector)
elif 'AppName' in object_dict:
return embypy.objects.misc.Device(object_dict, self.connector)
elif 'HasPassword' in object_dict:
return embypy.objects.misc.User(object_dict, self.connector)
return EmbyObject(object_dict, self.connector)
def __str__(self):
return self.name
def __repr__(self):
return '<{} {}>'.format(self.type, self.id)
|
andy29485/embypy | embypy/objects/object.py | EmbyObject.send | python | async def send(self):
'''send data that was changed to emby
|coro|
This should be used after using any of the setter. Not necessarily
immediately, but soon after.
See Also
--------
post: same thing
update :
refresh :
Returns
-------
aiohttp.ClientResponse or None if nothing needed updating
'''
# Why does the whole dict need to be sent?
# because emby is dumb, and will break if I don't
path = 'Items/{}'.format(self.id)
resp = await self.connector.post(path, data=self.object_dict, remote=False)
if resp.status == 400:
await EmbyObject(self.object_dict, self.connector).update()
resp = await self.connector.post(path,data=self.object_dict,remote=False)
return resp | send data that was changed to emby
|coro|
This should be used after using any of the setter. Not necessarily
immediately, but soon after.
See Also
--------
post: same thing
update :
refresh :
Returns
-------
aiohttp.ClientResponse or None if nothing needed updating | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/objects/object.py#L291-L316 | null | class EmbyObject:
'''Deafult EMby Object Template
Parameters
----------
object_dict : dict
dictionary with json info returned from emby
connector: embypy.utils.connector.Connector
connector object to make upstream api calls
save : bool
if true, append to list of existing objects
saves space/increases speed/reduces issues
only set to false if creating a temp object that will be thrown out
'''
known_objects = {}
def __init__(self, object_dict, connector, save=True):
self.connector = connector
self.object_dict = object_dict
self.extras = {}
if save:
EmbyObject.known_objects[object_dict.get('Id')] = self
def __eq__(self, other):
return isinstance(other, EmbyObject) and self.id == other.id
@property
def id(self):
'''string with hexidecimal hash representing the id of this
object in emby
'''
return self.object_dict.get('Id') or self.object_dict.get('ItemId')
@property
def name(self):
'''name of the item
See Also
--------
post :
'''
return self.object_dict.get('Name', '')
@name.setter
def name(self, value):
self.object_dict['Name'] = value
@property
def title(self):
'''same as name
See Also
--------
post :
'''
return self.name
@title.setter
def title(self, value):
self.name = value
@property
def path(self):
'''get the filepath of the media file (not url)
See Also
--------
url :
'''
return self.object_dict.get('Path', '')
@property
def watched(self):
'''returns True it item has been watched'''
return self.object_dict.get('UserData', {}).get('Played')
@property
def played(self):
'''same as `watched`'''
return self.watched
@property
def percentage_played(self):
'''returns played percentage [0,1] of item'''
played = self.object_dict.get('UserData', {}).get('PlaybackPositionTicks')
total = self.object_dict.get('RunTimeTicks') or 1
return (played or 0)/total
@property
def play_count(self):
'''returns users playcount for item'''
return self.object_dict.get('UserData', {}).get('PlayCount', 0)
@property
def favorite(self):
'''returns True if user favorited item'''
return self.object_dict.get('UserData', {}).get('IsFavorite', False)
def setFavorite_sync(self, value=True):
self.connector.sync_run(self.setFavorite(value))
def setWatched_sync(self, value=True):
self.connector.sync_run(self.setWatched(value))
async def _mark(self, type, value):
url = '/Users/{{UserId}}/{type}/{id}'.format(type=type, id=self.id)
if value:
(await self.connector.post(url)).close()
else:
(await self.connector.delete(url)).close()
async def setFavorite(self, value=True):
await self._mark('FavoriteItems', value)
async def setWatched(self, value=True):
await self._mark('PlayedItems', value)
@property
def type(self):
'''get the object type (general)
See Also
--------
media_type :
'''
return self.object_dict.get('Type', 'Object')
@property
def media_type(self):
'''get the object type (specific)
See Also
--------
type :
'''
return self.object_dict.get('MediaType', 'Object')
@property
def genres(self):
'''list of genres
See Also
--------
post :
tags :
'''
return self.object_dict.get('Genres', [])
@genres.setter
def genres(self, genres : list):
self.object_dict['Genres'] = genres
@property
def tags(self):
'''list of tags
See Also
--------
post :
genres :
'''
return self.object_dict.get('Tags', [])
@tags.setter
def tags(self, tags : list):
self.object_dict['Tags'] = tags
@property
def overview(self):
'''the description of the item
See Also
--------
post :
'''
return self.object_dict.get('Overview', '')
@overview.setter
def overview(self, value):
self.object_dict['Overview'] = value
@property
def community_rating(self):
'''int [0-10] with the rating of the item
See Also
--------
post :
'''
return self.object_dict.get('CommunityRating', 0)
@community_rating.setter
def community_rating(self, value):
self.object_dict['CommunityRating'] = value
@property
def primary_image_url(self):
'''url of the main poster image'''
path = '/Items/{}/Images/Primary'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
@property
def parent_id(self):
'''id of the parent object
See Also
--------
parent :
'''
return self.object_dict.get('ParentId')
@property
def parent_sync(self):
return self.connector.sync_run(self.parent)
@property
async def parent(self):
'''parent object as a subclass of EmbyObject
|coro|
'''
if self.parent_id:
return await self.process(self.parent_id)
else:
return None
@property
def url(self):
'''url of the item
Notes
-----
if remote-adderes was given, then that is used as the base
'''
path = '/web/itemdetails.html?id={}'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
def update_sync(self):
return self.connector.sync_run(self.update())
def refresh_sync(self):
return self.connector.sync_run(self.update())
async def update(self, fields=''):
'''reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
'''
path = 'Users/{{UserId}}/Items/{}'.format(self.id)
info = await self.connector.getJson(path,
remote=False,
Fields='Path,Overview,'+fields
)
self.object_dict.update(info)
self.extras = {}
return self
async def refresh(self, fields=''):
'''Same as update
|coro|
See Also
--------
update :
'''
return await self.update()
def send_sync(self):
return self.connector.sync_run(self.send())
def post_sync(self):
return self.connector.sync_run(self.send())
async def post(self):
'''Same as send
|coro|
See Also
--------
send :
'''
return await self.send()
async def process(self, object_dict):
'''[for internal use] convert json/dict into python object
|coro|
Parameters
----------
object_dict : dict
json representation of object from emby
Notes
-----
if a string is given, it is assumed to be an id, obj is returned.
if a list is given, this method is called for each item in list.
Returns
-------
EmbyObject
the object that is represented by the json dict
list
if input is a list, list is returned
'''
# if ID was given, create dummy object and update it to get full dict
try:
if type(object_dict) == str:
existing = EmbyObject.known_objects.get(object_dict)
if existing:
return existing
obj = EmbyObject({"Id":object_dict}, self.connector, save=False)
object_dict = (await obj.update()).object_dict
except:
return None
# if nothing was given, return it back
# if already created object was given, return it back too
if not object_dict or isinstance(object_dict, EmbyObject):
return object_dict
# if a json dict that's really just a list was given,
# convert to list
if type(object_dict) == dict and \
set(object_dict.keys()) == {'Items', 'TotalRecordCount'}:
object_dict = object_dict['Items']
# if a list was given,
# process each item in list
if type(object_dict) == list:
items = []
for item in object_dict:
item = await self.process(item)
if item:
items.append(item)
return items
# otherwise we probably have an object dict
# so we should process that
# if dict has no id, it's a fake
if 'Id' not in object_dict and 'ItemId' not in object_dict:
return object_dict
# if object is already stored,
# update with existing info and return
itemId = object_dict.get('Id', object_dict.get('ItemId'))
existing = EmbyObject.known_objects.get(itemId)
if existing:
existing.object_dict.update(object_dict)
return existing
import embypy.objects.folders
import embypy.objects.videos
import embypy.objects.misc
# if objectc is not already stored,
# figure out its type (if unknown use this base class)
# create an object with subclass of that type
# return
if 'Type' in object_dict:
if object_dict['Type'] == 'Audio':
return embypy.objects.misc.Audio(object_dict, self.connector)
if object_dict['Type'] == 'Person':
return embypy.objects.misc.Person(object_dict, self.connector)
if object_dict['Type'] == 'Video':
return embypy.objects.videos.Video(object_dict, self.connector)
if object_dict['Type'] == 'Movie':
return embypy.objects.videos.Movie(object_dict, self.connector)
if object_dict['Type'] == 'Trailer':
return embypy.objects.videos.Trailer(object_dict, self.connector)
if object_dict['Type'] == 'AdultVideo':
return embypy.objects.videos.AdultVideo(object_dict, self.connector)
if object_dict['Type'] == 'MusicVideo':
return embypy.objects.videos.MusicVideo(object_dict, self.connector)
if object_dict['Type'] == 'Episode':
return embypy.objects.videos.Episode(object_dict, self.connector)
if object_dict['Type'] == 'Folder':
return embypy.objects.folders.Folder(object_dict, self.connector)
if object_dict['Type'] == 'Playlist':
return embypy.objects.folders.Playlist(object_dict, self.connector)
if object_dict['Type'] == 'BoxSet':
return embypy.objects.folders.BoxSet(object_dict, self.connector)
if object_dict['Type'] == 'MusicAlbum':
return embypy.objects.folders.MusicAlbum(object_dict, self.connector)
if object_dict['Type'] == 'MusicArtist':
return embypy.objects.folders.MusicArtist(object_dict, self.connector)
if object_dict['Type'] == 'Season':
return embypy.objects.folders.Season(object_dict, self.connector)
if object_dict['Type'] == 'Series':
return embypy.objects.folders.Series(object_dict, self.connector)
if object_dict['Type'] == 'Game':
return embypy.objects.misc.Game(object_dict, self.connector)
if object_dict['Type'] == 'GameSystem':
return embypy.objects.folders.GameSystem(object_dict, self.connector)
if object_dict['Type'] == 'Photo':
return embypy.objects.misc.Photo(object_dict, self.connector)
if object_dict['Type'] == 'Book':
return embypy.objects.misc.Book(object_dict, self.connector)
if object_dict['Type'] == 'Image':
return embypy.objects.misc.Image(object_dict, self.connector)
elif 'AppName' in object_dict:
return embypy.objects.misc.Device(object_dict, self.connector)
elif 'HasPassword' in object_dict:
return embypy.objects.misc.User(object_dict, self.connector)
return EmbyObject(object_dict, self.connector)
def __str__(self):
return self.name
def __repr__(self):
return '<{} {}>'.format(self.type, self.id)
|
andy29485/embypy | embypy/objects/object.py | EmbyObject.process | python | async def process(self, object_dict):
'''[for internal use] convert json/dict into python object
|coro|
Parameters
----------
object_dict : dict
json representation of object from emby
Notes
-----
if a string is given, it is assumed to be an id, obj is returned.
if a list is given, this method is called for each item in list.
Returns
-------
EmbyObject
the object that is represented by the json dict
list
if input is a list, list is returned
'''
# if ID was given, create dummy object and update it to get full dict
try:
if type(object_dict) == str:
existing = EmbyObject.known_objects.get(object_dict)
if existing:
return existing
obj = EmbyObject({"Id":object_dict}, self.connector, save=False)
object_dict = (await obj.update()).object_dict
except:
return None
# if nothing was given, return it back
# if already created object was given, return it back too
if not object_dict or isinstance(object_dict, EmbyObject):
return object_dict
# if a json dict that's really just a list was given,
# convert to list
if type(object_dict) == dict and \
set(object_dict.keys()) == {'Items', 'TotalRecordCount'}:
object_dict = object_dict['Items']
# if a list was given,
# process each item in list
if type(object_dict) == list:
items = []
for item in object_dict:
item = await self.process(item)
if item:
items.append(item)
return items
# otherwise we probably have an object dict
# so we should process that
# if dict has no id, it's a fake
if 'Id' not in object_dict and 'ItemId' not in object_dict:
return object_dict
# if object is already stored,
# update with existing info and return
itemId = object_dict.get('Id', object_dict.get('ItemId'))
existing = EmbyObject.known_objects.get(itemId)
if existing:
existing.object_dict.update(object_dict)
return existing
import embypy.objects.folders
import embypy.objects.videos
import embypy.objects.misc
# if objectc is not already stored,
# figure out its type (if unknown use this base class)
# create an object with subclass of that type
# return
if 'Type' in object_dict:
if object_dict['Type'] == 'Audio':
return embypy.objects.misc.Audio(object_dict, self.connector)
if object_dict['Type'] == 'Person':
return embypy.objects.misc.Person(object_dict, self.connector)
if object_dict['Type'] == 'Video':
return embypy.objects.videos.Video(object_dict, self.connector)
if object_dict['Type'] == 'Movie':
return embypy.objects.videos.Movie(object_dict, self.connector)
if object_dict['Type'] == 'Trailer':
return embypy.objects.videos.Trailer(object_dict, self.connector)
if object_dict['Type'] == 'AdultVideo':
return embypy.objects.videos.AdultVideo(object_dict, self.connector)
if object_dict['Type'] == 'MusicVideo':
return embypy.objects.videos.MusicVideo(object_dict, self.connector)
if object_dict['Type'] == 'Episode':
return embypy.objects.videos.Episode(object_dict, self.connector)
if object_dict['Type'] == 'Folder':
return embypy.objects.folders.Folder(object_dict, self.connector)
if object_dict['Type'] == 'Playlist':
return embypy.objects.folders.Playlist(object_dict, self.connector)
if object_dict['Type'] == 'BoxSet':
return embypy.objects.folders.BoxSet(object_dict, self.connector)
if object_dict['Type'] == 'MusicAlbum':
return embypy.objects.folders.MusicAlbum(object_dict, self.connector)
if object_dict['Type'] == 'MusicArtist':
return embypy.objects.folders.MusicArtist(object_dict, self.connector)
if object_dict['Type'] == 'Season':
return embypy.objects.folders.Season(object_dict, self.connector)
if object_dict['Type'] == 'Series':
return embypy.objects.folders.Series(object_dict, self.connector)
if object_dict['Type'] == 'Game':
return embypy.objects.misc.Game(object_dict, self.connector)
if object_dict['Type'] == 'GameSystem':
return embypy.objects.folders.GameSystem(object_dict, self.connector)
if object_dict['Type'] == 'Photo':
return embypy.objects.misc.Photo(object_dict, self.connector)
if object_dict['Type'] == 'Book':
return embypy.objects.misc.Book(object_dict, self.connector)
if object_dict['Type'] == 'Image':
return embypy.objects.misc.Image(object_dict, self.connector)
elif 'AppName' in object_dict:
return embypy.objects.misc.Device(object_dict, self.connector)
elif 'HasPassword' in object_dict:
return embypy.objects.misc.User(object_dict, self.connector)
return EmbyObject(object_dict, self.connector) | [for internal use] convert json/dict into python object
|coro|
Parameters
----------
object_dict : dict
json representation of object from emby
Notes
-----
if a string is given, it is assumed to be an id, obj is returned.
if a list is given, this method is called for each item in list.
Returns
-------
EmbyObject
the object that is represented by the json dict
list
if input is a list, list is returned | train | https://github.com/andy29485/embypy/blob/cde658d380965caaf4789d4d182d045b0346797b/embypy/objects/object.py#L329-L453 | [
"async def update(self, fields=''):\n '''reload object info from emby\n\n |coro|\n\n Parameters\n ----------\n fields : str\n additional fields to request when updating\n\n See Also\n --------\n refresh : same thing\n send :\n post :\n '''\n path = 'Users/{{UserId}}/Items/{}'.format(self.id)\n ... | class EmbyObject:
'''Deafult EMby Object Template
Parameters
----------
object_dict : dict
dictionary with json info returned from emby
connector: embypy.utils.connector.Connector
connector object to make upstream api calls
save : bool
if true, append to list of existing objects
saves space/increases speed/reduces issues
only set to false if creating a temp object that will be thrown out
'''
known_objects = {}
def __init__(self, object_dict, connector, save=True):
self.connector = connector
self.object_dict = object_dict
self.extras = {}
if save:
EmbyObject.known_objects[object_dict.get('Id')] = self
def __eq__(self, other):
return isinstance(other, EmbyObject) and self.id == other.id
@property
def id(self):
'''string with hexidecimal hash representing the id of this
object in emby
'''
return self.object_dict.get('Id') or self.object_dict.get('ItemId')
@property
def name(self):
'''name of the item
See Also
--------
post :
'''
return self.object_dict.get('Name', '')
@name.setter
def name(self, value):
self.object_dict['Name'] = value
@property
def title(self):
'''same as name
See Also
--------
post :
'''
return self.name
@title.setter
def title(self, value):
self.name = value
@property
def path(self):
'''get the filepath of the media file (not url)
See Also
--------
url :
'''
return self.object_dict.get('Path', '')
@property
def watched(self):
'''returns True it item has been watched'''
return self.object_dict.get('UserData', {}).get('Played')
@property
def played(self):
'''same as `watched`'''
return self.watched
@property
def percentage_played(self):
'''returns played percentage [0,1] of item'''
played = self.object_dict.get('UserData', {}).get('PlaybackPositionTicks')
total = self.object_dict.get('RunTimeTicks') or 1
return (played or 0)/total
@property
def play_count(self):
'''returns users playcount for item'''
return self.object_dict.get('UserData', {}).get('PlayCount', 0)
@property
def favorite(self):
'''returns True if user favorited item'''
return self.object_dict.get('UserData', {}).get('IsFavorite', False)
def setFavorite_sync(self, value=True):
self.connector.sync_run(self.setFavorite(value))
def setWatched_sync(self, value=True):
self.connector.sync_run(self.setWatched(value))
async def _mark(self, type, value):
url = '/Users/{{UserId}}/{type}/{id}'.format(type=type, id=self.id)
if value:
(await self.connector.post(url)).close()
else:
(await self.connector.delete(url)).close()
async def setFavorite(self, value=True):
await self._mark('FavoriteItems', value)
async def setWatched(self, value=True):
await self._mark('PlayedItems', value)
@property
def type(self):
'''get the object type (general)
See Also
--------
media_type :
'''
return self.object_dict.get('Type', 'Object')
@property
def media_type(self):
'''get the object type (specific)
See Also
--------
type :
'''
return self.object_dict.get('MediaType', 'Object')
@property
def genres(self):
'''list of genres
See Also
--------
post :
tags :
'''
return self.object_dict.get('Genres', [])
@genres.setter
def genres(self, genres : list):
self.object_dict['Genres'] = genres
@property
def tags(self):
'''list of tags
See Also
--------
post :
genres :
'''
return self.object_dict.get('Tags', [])
@tags.setter
def tags(self, tags : list):
self.object_dict['Tags'] = tags
@property
def overview(self):
'''the description of the item
See Also
--------
post :
'''
return self.object_dict.get('Overview', '')
@overview.setter
def overview(self, value):
self.object_dict['Overview'] = value
@property
def community_rating(self):
'''int [0-10] with the rating of the item
See Also
--------
post :
'''
return self.object_dict.get('CommunityRating', 0)
@community_rating.setter
def community_rating(self, value):
self.object_dict['CommunityRating'] = value
@property
def primary_image_url(self):
'''url of the main poster image'''
path = '/Items/{}/Images/Primary'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
@property
def parent_id(self):
'''id of the parent object
See Also
--------
parent :
'''
return self.object_dict.get('ParentId')
@property
def parent_sync(self):
return self.connector.sync_run(self.parent)
@property
async def parent(self):
'''parent object as a subclass of EmbyObject
|coro|
'''
if self.parent_id:
return await self.process(self.parent_id)
else:
return None
@property
def url(self):
'''url of the item
Notes
-----
if remote-adderes was given, then that is used as the base
'''
path = '/web/itemdetails.html?id={}'.format(self.id)
return self.connector.get_url(path, attach_api_key=False)
def update_sync(self):
return self.connector.sync_run(self.update())
def refresh_sync(self):
return self.connector.sync_run(self.update())
async def update(self, fields=''):
'''reload object info from emby
|coro|
Parameters
----------
fields : str
additional fields to request when updating
See Also
--------
refresh : same thing
send :
post :
'''
path = 'Users/{{UserId}}/Items/{}'.format(self.id)
info = await self.connector.getJson(path,
remote=False,
Fields='Path,Overview,'+fields
)
self.object_dict.update(info)
self.extras = {}
return self
async def refresh(self, fields=''):
'''Same as update
|coro|
See Also
--------
update :
'''
return await self.update()
def send_sync(self):
return self.connector.sync_run(self.send())
def post_sync(self):
return self.connector.sync_run(self.send())
async def send(self):
'''send data that was changed to emby
|coro|
This should be used after using any of the setter. Not necessarily
immediately, but soon after.
See Also
--------
post: same thing
update :
refresh :
Returns
-------
aiohttp.ClientResponse or None if nothing needed updating
'''
# Why does the whole dict need to be sent?
# because emby is dumb, and will break if I don't
path = 'Items/{}'.format(self.id)
resp = await self.connector.post(path, data=self.object_dict, remote=False)
if resp.status == 400:
await EmbyObject(self.object_dict, self.connector).update()
resp = await self.connector.post(path,data=self.object_dict,remote=False)
return resp
async def post(self):
'''Same as send
|coro|
See Also
--------
send :
'''
return await self.send()
def __str__(self):
return self.name
def __repr__(self):
return '<{} {}>'.format(self.type, self.id)
|
major/supernova | supernova/credentials.py | get_user_password | python | def get_user_password(env, param, force=False):
username = utils.assemble_username(env, param)
if not utils.confirm_credential_display(force):
return
# Retrieve the credential from the keychain
password = password_get(username)
if password:
return (username, password)
else:
return False | Allows the user to print the credential for a particular keyring entry
to the screen | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L34-L50 | [
"def assemble_username(env, param):\n return \"{0}:{1}\".format(env, param)\n",
"def confirm_credential_display(force=False):\n if force:\n return True\n\n msg = \"\"\"\n [WARNING] Your credential is about to be displayed on screen.\n If this is really what you want, type 'y' and press enter... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles all of the interactions with the operating system's keyring
"""
import re
import keyring
import six
from . import utils
try:
import gi.require_version
gi.require_version('GnomeKeyring', '1.0')
except ImportError:
pass
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username))
def password_get(username=None):
"""
Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned.
"""
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii')
def set_user_password(environment, parameter, password):
"""
Sets a user's password in the keyring storage
"""
username = '%s:%s' % (environment, parameter)
return password_set(username, password)
def password_set(username=None, password=None):
"""
Stores a password in a keychain for a particular environment and
configuration parameter pair.
"""
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False
def prep_shell_environment(nova_env, nova_creds):
"""
Appends new variables to the current shell environment temporarily.
"""
new_env = {}
for key, value in prep_nova_creds(nova_env, nova_creds):
if type(value) == six.binary_type:
value = value.decode()
new_env[key] = value
return new_env
def prep_nova_creds(nova_env, nova_creds):
"""
Finds relevant config options in the supernova config and cleans them
up for novaclient.
"""
try:
raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
except KeyError:
msg = "{0} was not found in your supernova configuration "\
"file".format(nova_env)
raise KeyError(msg)
proxy_re = re.compile(r"(^http_proxy|^https_proxy)")
creds = []
for param, value in raw_creds.items():
if not proxy_re.match(param):
param = param.upper()
if not hasattr(value, 'startswith'):
continue
# Get values from the keyring if we find a USE_KEYRING constant
if value.startswith("USE_KEYRING"):
username, credential = pull_env_credential(nova_env, param,
value)
else:
credential = value.strip("\"'")
# Make sure we got something valid from the configuration file or
# the keyring
if not credential:
raise LookupError("No matching credentials found in keyring")
creds.append((param, credential))
return creds
|
major/supernova | supernova/credentials.py | pull_env_credential | python | def pull_env_credential(env, param, value):
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username)) | Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L53-L70 | [
"def assemble_username(env, param):\n return \"{0}:{1}\".format(env, param)\n",
"def password_get(username=None):\n \"\"\"\n Retrieves a password from the keychain based on the environment and\n configuration parameter pair.\n\n If this fails, None is returned.\n \"\"\"\n password = keyring.g... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles all of the interactions with the operating system's keyring
"""
import re
import keyring
import six
from . import utils
try:
import gi.require_version
gi.require_version('GnomeKeyring', '1.0')
except ImportError:
pass
def get_user_password(env, param, force=False):
"""
Allows the user to print the credential for a particular keyring entry
to the screen
"""
username = utils.assemble_username(env, param)
if not utils.confirm_credential_display(force):
return
# Retrieve the credential from the keychain
password = password_get(username)
if password:
return (username, password)
else:
return False
def password_get(username=None):
"""
Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned.
"""
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii')
def set_user_password(environment, parameter, password):
"""
Sets a user's password in the keyring storage
"""
username = '%s:%s' % (environment, parameter)
return password_set(username, password)
def password_set(username=None, password=None):
"""
Stores a password in a keychain for a particular environment and
configuration parameter pair.
"""
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False
def prep_shell_environment(nova_env, nova_creds):
"""
Appends new variables to the current shell environment temporarily.
"""
new_env = {}
for key, value in prep_nova_creds(nova_env, nova_creds):
if type(value) == six.binary_type:
value = value.decode()
new_env[key] = value
return new_env
def prep_nova_creds(nova_env, nova_creds):
"""
Finds relevant config options in the supernova config and cleans them
up for novaclient.
"""
try:
raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
except KeyError:
msg = "{0} was not found in your supernova configuration "\
"file".format(nova_env)
raise KeyError(msg)
proxy_re = re.compile(r"(^http_proxy|^https_proxy)")
creds = []
for param, value in raw_creds.items():
if not proxy_re.match(param):
param = param.upper()
if not hasattr(value, 'startswith'):
continue
# Get values from the keyring if we find a USE_KEYRING constant
if value.startswith("USE_KEYRING"):
username, credential = pull_env_credential(nova_env, param,
value)
else:
credential = value.strip("\"'")
# Make sure we got something valid from the configuration file or
# the keyring
if not credential:
raise LookupError("No matching credentials found in keyring")
creds.append((param, credential))
return creds
|
major/supernova | supernova/credentials.py | password_get | python | def password_get(username=None):
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii') | Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L73-L87 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles all of the interactions with the operating system's keyring
"""
import re
import keyring
import six
from . import utils
try:
import gi.require_version
gi.require_version('GnomeKeyring', '1.0')
except ImportError:
pass
def get_user_password(env, param, force=False):
"""
Allows the user to print the credential for a particular keyring entry
to the screen
"""
username = utils.assemble_username(env, param)
if not utils.confirm_credential_display(force):
return
# Retrieve the credential from the keychain
password = password_get(username)
if password:
return (username, password)
else:
return False
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username))
def set_user_password(environment, parameter, password):
"""
Sets a user's password in the keyring storage
"""
username = '%s:%s' % (environment, parameter)
return password_set(username, password)
def password_set(username=None, password=None):
"""
Stores a password in a keychain for a particular environment and
configuration parameter pair.
"""
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False
def prep_shell_environment(nova_env, nova_creds):
"""
Appends new variables to the current shell environment temporarily.
"""
new_env = {}
for key, value in prep_nova_creds(nova_env, nova_creds):
if type(value) == six.binary_type:
value = value.decode()
new_env[key] = value
return new_env
def prep_nova_creds(nova_env, nova_creds):
"""
Finds relevant config options in the supernova config and cleans them
up for novaclient.
"""
try:
raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
except KeyError:
msg = "{0} was not found in your supernova configuration "\
"file".format(nova_env)
raise KeyError(msg)
proxy_re = re.compile(r"(^http_proxy|^https_proxy)")
creds = []
for param, value in raw_creds.items():
if not proxy_re.match(param):
param = param.upper()
if not hasattr(value, 'startswith'):
continue
# Get values from the keyring if we find a USE_KEYRING constant
if value.startswith("USE_KEYRING"):
username, credential = pull_env_credential(nova_env, param,
value)
else:
credential = value.strip("\"'")
# Make sure we got something valid from the configuration file or
# the keyring
if not credential:
raise LookupError("No matching credentials found in keyring")
creds.append((param, credential))
return creds
|
major/supernova | supernova/credentials.py | set_user_password | python | def set_user_password(environment, parameter, password):
username = '%s:%s' % (environment, parameter)
return password_set(username, password) | Sets a user's password in the keyring storage | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L90-L95 | [
"def password_set(username=None, password=None):\n \"\"\"\n Stores a password in a keychain for a particular environment and\n configuration parameter pair.\n \"\"\"\n result = keyring.set_password('supernova', username, password)\n\n # NOTE: keyring returns None when the storage is successful. T... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles all of the interactions with the operating system's keyring
"""
import re
import keyring
import six
from . import utils
try:
import gi.require_version
gi.require_version('GnomeKeyring', '1.0')
except ImportError:
pass
def get_user_password(env, param, force=False):
"""
Allows the user to print the credential for a particular keyring entry
to the screen
"""
username = utils.assemble_username(env, param)
if not utils.confirm_credential_display(force):
return
# Retrieve the credential from the keychain
password = password_get(username)
if password:
return (username, password)
else:
return False
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username))
def password_get(username=None):
"""
Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned.
"""
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii')
def password_set(username=None, password=None):
"""
Stores a password in a keychain for a particular environment and
configuration parameter pair.
"""
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False
def prep_shell_environment(nova_env, nova_creds):
"""
Appends new variables to the current shell environment temporarily.
"""
new_env = {}
for key, value in prep_nova_creds(nova_env, nova_creds):
if type(value) == six.binary_type:
value = value.decode()
new_env[key] = value
return new_env
def prep_nova_creds(nova_env, nova_creds):
"""
Finds relevant config options in the supernova config and cleans them
up for novaclient.
"""
try:
raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
except KeyError:
msg = "{0} was not found in your supernova configuration "\
"file".format(nova_env)
raise KeyError(msg)
proxy_re = re.compile(r"(^http_proxy|^https_proxy)")
creds = []
for param, value in raw_creds.items():
if not proxy_re.match(param):
param = param.upper()
if not hasattr(value, 'startswith'):
continue
# Get values from the keyring if we find a USE_KEYRING constant
if value.startswith("USE_KEYRING"):
username, credential = pull_env_credential(nova_env, param,
value)
else:
credential = value.strip("\"'")
# Make sure we got something valid from the configuration file or
# the keyring
if not credential:
raise LookupError("No matching credentials found in keyring")
creds.append((param, credential))
return creds
|
major/supernova | supernova/credentials.py | password_set | python | def password_set(username=None, password=None):
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False | Stores a password in a keychain for a particular environment and
configuration parameter pair. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L98-L109 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles all of the interactions with the operating system's keyring
"""
import re
import keyring
import six
from . import utils
try:
import gi.require_version
gi.require_version('GnomeKeyring', '1.0')
except ImportError:
pass
def get_user_password(env, param, force=False):
"""
Allows the user to print the credential for a particular keyring entry
to the screen
"""
username = utils.assemble_username(env, param)
if not utils.confirm_credential_display(force):
return
# Retrieve the credential from the keychain
password = password_get(username)
if password:
return (username, password)
else:
return False
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username))
def password_get(username=None):
"""
Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned.
"""
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii')
def set_user_password(environment, parameter, password):
"""
Sets a user's password in the keyring storage
"""
username = '%s:%s' % (environment, parameter)
return password_set(username, password)
def prep_shell_environment(nova_env, nova_creds):
"""
Appends new variables to the current shell environment temporarily.
"""
new_env = {}
for key, value in prep_nova_creds(nova_env, nova_creds):
if type(value) == six.binary_type:
value = value.decode()
new_env[key] = value
return new_env
def prep_nova_creds(nova_env, nova_creds):
"""
Finds relevant config options in the supernova config and cleans them
up for novaclient.
"""
try:
raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
except KeyError:
msg = "{0} was not found in your supernova configuration "\
"file".format(nova_env)
raise KeyError(msg)
proxy_re = re.compile(r"(^http_proxy|^https_proxy)")
creds = []
for param, value in raw_creds.items():
if not proxy_re.match(param):
param = param.upper()
if not hasattr(value, 'startswith'):
continue
# Get values from the keyring if we find a USE_KEYRING constant
if value.startswith("USE_KEYRING"):
username, credential = pull_env_credential(nova_env, param,
value)
else:
credential = value.strip("\"'")
# Make sure we got something valid from the configuration file or
# the keyring
if not credential:
raise LookupError("No matching credentials found in keyring")
creds.append((param, credential))
return creds
|
major/supernova | supernova/credentials.py | prep_shell_environment | python | def prep_shell_environment(nova_env, nova_creds):
new_env = {}
for key, value in prep_nova_creds(nova_env, nova_creds):
if type(value) == six.binary_type:
value = value.decode()
new_env[key] = value
return new_env | Appends new variables to the current shell environment temporarily. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L112-L123 | [
"def prep_nova_creds(nova_env, nova_creds):\n \"\"\"\n Finds relevant config options in the supernova config and cleans them\n up for novaclient.\n \"\"\"\n try:\n raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])\n except KeyError:\n msg = \"{0} was not found i... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles all of the interactions with the operating system's keyring
"""
import re
import keyring
import six
from . import utils
try:
import gi.require_version
gi.require_version('GnomeKeyring', '1.0')
except ImportError:
pass
def get_user_password(env, param, force=False):
"""
Allows the user to print the credential for a particular keyring entry
to the screen
"""
username = utils.assemble_username(env, param)
if not utils.confirm_credential_display(force):
return
# Retrieve the credential from the keychain
password = password_get(username)
if password:
return (username, password)
else:
return False
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username))
def password_get(username=None):
"""
Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned.
"""
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii')
def set_user_password(environment, parameter, password):
"""
Sets a user's password in the keyring storage
"""
username = '%s:%s' % (environment, parameter)
return password_set(username, password)
def password_set(username=None, password=None):
"""
Stores a password in a keychain for a particular environment and
configuration parameter pair.
"""
result = keyring.set_password('supernova', username, password)
# NOTE: keyring returns None when the storage is successful. That's weird.
if result is None:
return True
else:
return False
def prep_nova_creds(nova_env, nova_creds):
"""
Finds relevant config options in the supernova config and cleans them
up for novaclient.
"""
try:
raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
except KeyError:
msg = "{0} was not found in your supernova configuration "\
"file".format(nova_env)
raise KeyError(msg)
proxy_re = re.compile(r"(^http_proxy|^https_proxy)")
creds = []
for param, value in raw_creds.items():
if not proxy_re.match(param):
param = param.upper()
if not hasattr(value, 'startswith'):
continue
# Get values from the keyring if we find a USE_KEYRING constant
if value.startswith("USE_KEYRING"):
username, credential = pull_env_credential(nova_env, param,
value)
else:
credential = value.strip("\"'")
# Make sure we got something valid from the configuration file or
# the keyring
if not credential:
raise LookupError("No matching credentials found in keyring")
creds.append((param, credential))
return creds
|
major/supernova | supernova/credentials.py | prep_nova_creds | python | def prep_nova_creds(nova_env, nova_creds):
try:
raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
except KeyError:
msg = "{0} was not found in your supernova configuration "\
"file".format(nova_env)
raise KeyError(msg)
proxy_re = re.compile(r"(^http_proxy|^https_proxy)")
creds = []
for param, value in raw_creds.items():
if not proxy_re.match(param):
param = param.upper()
if not hasattr(value, 'startswith'):
continue
# Get values from the keyring if we find a USE_KEYRING constant
if value.startswith("USE_KEYRING"):
username, credential = pull_env_credential(nova_env, param,
value)
else:
credential = value.strip("\"'")
# Make sure we got something valid from the configuration file or
# the keyring
if not credential:
raise LookupError("No matching credentials found in keyring")
creds.append((param, credential))
return creds | Finds relevant config options in the supernova config and cleans them
up for novaclient. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/credentials.py#L126-L162 | [
"def pull_env_credential(env, param, value):\n \"\"\"\n Dissects a keyring credential lookup string from the supernova config file\n and returns the username/password combo\n \"\"\"\n rex = \"USE_KEYRING\\[([\\x27\\x22])(.*)\\\\1\\]\"\n\n # This is the old-style, per-environment keyring credential... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Handles all of the interactions with the operating system's keyring
"""
import re
import keyring
import six
from . import utils
try:
import gi.require_version
gi.require_version('GnomeKeyring', '1.0')
except ImportError:
pass
def get_user_password(env, param, force=False):
"""
Allows the user to print the credential for a particular keyring entry
to the screen
"""
username = utils.assemble_username(env, param)
if not utils.confirm_credential_display(force):
return
# Retrieve the credential from the keychain
password = password_get(username)
if password:
return (username, password)
else:
return False
def pull_env_credential(env, param, value):
"""
Dissects a keyring credential lookup string from the supernova config file
and returns the username/password combo
"""
rex = "USE_KEYRING\[([\x27\x22])(.*)\\1\]"
# This is the old-style, per-environment keyring credential
if value == "USE_KEYRING":
username = utils.assemble_username(env, param)
# This is the new-style, global keyring credential that can be applied
# to multiple environments
else:
global_identifier = re.match(rex, value).group(2)
username = utils.assemble_username('global', global_identifier)
return (username, password_get(username))
def password_get(username=None):
"""
Retrieves a password from the keychain based on the environment and
configuration parameter pair.
If this fails, None is returned.
"""
password = keyring.get_password('supernova', username)
if password is None:
split_username = tuple(username.split(':'))
msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
"with: supernova-keyring -s {0} {1}").format(*split_username)
raise LookupError(msg)
else:
return password.encode('ascii')
def set_user_password(environment, parameter, password):
    """
    Store a user's password in the keyring under 'environment:parameter'.

    Returns True when storage succeeded, False otherwise.
    """
    return password_set('{0}:{1}'.format(environment, parameter), password)
def password_set(username=None, password=None):
    """
    Store *password* in the keychain under the 'supernova' service for the
    given environment/parameter pair.

    Returns True on success, False otherwise.
    """
    outcome = keyring.set_password('supernova', username, password)
    # NOTE: keyring signals success by returning None. That's weird.
    return outcome is None
def prep_shell_environment(nova_env, nova_creds):
    """
    Build the dict of extra environment variables for the chosen nova
    environment, decoding any byte-string credential values to text.
    """
    # prep_nova_creds yields (key, value) pairs; byte values (as returned
    # by the keyring lookups) are decoded so the mapping is all-text.
    return {
        key: value.decode() if type(value) == six.binary_type else value
        for key, value in prep_nova_creds(nova_env, nova_creds)
    }
|
major/supernova | supernova/config.py | load_config | python | def load_config(config_file_override=False):
supernova_config = get_config_file(config_file_override)
supernova_config_dir = get_config_directory(config_file_override)
if not supernova_config and not supernova_config_dir:
raise Exception("Couldn't find a valid configuration file to parse")
nova_creds = ConfigObj()
# Can we successfully read the configuration file?
if supernova_config:
try:
nova_creds.merge(ConfigObj(supernova_config))
except:
raise("There's an error in your configuration file")
if supernova_config_dir:
for dir_file in os.listdir(supernova_config_dir):
full_path = ''.join((supernova_config_dir, dir_file))
try:
nova_creds.merge(ConfigObj(full_path))
except:
msg = "Skipping '%s', Parsing Error.".format(full_path)
print(msg)
create_dynamic_configs(nova_creds)
return nova_creds | Pulls the supernova configuration file and reads it | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/config.py#L40-L69 | [
"def get_config_file(override_files=False):\n \"\"\"\n Looks for the most specific configuration file available. An override\n can be provided as a string if needed.\n \"\"\"\n if override_files:\n if isinstance(override_files, six.string_types):\n possible_configs = [override_file... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Takes care of the basic setup of the config files and does some preliminary
sanity checks
"""
import copy
import os
from configobj import ConfigObj
import six
def run_config(config_file_override=False):
    """
    Run sanity checks and return the parsed supernova credentials.

    Any exception raised by load_config() (missing or unparsable
    configuration) propagates to the caller unchanged.
    """
    # The previous try/except here only re-raised the exception, so it
    # was a no-op wrapper; call load_config() directly.
    return load_config(config_file_override)
def get_config_file(override_files=False):
    """
    Return the path of the most specific configuration file available, or
    False when none exists. An override can be provided as a string path.

    Raises Exception when the override is not a string.
    """
    if override_files:
        if not isinstance(override_files, six.string_types):
            raise Exception("Config file override must be a string")
        candidates = [override_files]
    else:
        xdg_config_home = (os.environ.get('XDG_CONFIG_HOME') or
                           os.path.expanduser('~/.config'))
        candidates = [os.path.join(xdg_config_home, "supernova"),
                      os.path.expanduser("~/.supernova"),
                      ".supernova"]
    # The list is ordered least-specific first, so walk it backwards and
    # take the first existing file.
    for candidate in reversed(candidates):
        if os.path.isfile(candidate):
            return candidate
    return False
def get_config_directory(override_files=False):
    """
    Return the most specific configuration directory available (used for
    loading individual per-environment config files), or False when no
    candidate directory exists.
    """
    if override_files:
        candidates = [override_files]
    else:
        xdg_config_home = (os.environ.get('XDG_CONFIG_HOME') or
                           os.path.expanduser('~/.config'))
        candidates = [os.path.join(xdg_config_home, "supernova.d/"),
                      os.path.expanduser("~/.supernova.d/"),
                      ".supernova.d/"]
    # Least-specific first in the list; scan backwards for the winner.
    for candidate in reversed(candidates):
        if os.path.isdir(candidate):
            return candidate
    return False
def create_dynamic_configs(config,
                           dynamic_attrs=('OS_REGION_NAME', 'OS_TENANT_NAME'),
                           delimiter=';'):
    """
    Expand sections whose dynamic attributes hold delimiter-separated
    values into one generated section per value, mutating *config* in
    place.

    For example a section 'prod' with OS_REGION_NAME = 'a;b' becomes two
    sections 'prod-a' and 'prod-b', each a full copy of the original with
    the attribute narrowed to a single value and SUPERNOVA_GROUP set to
    the original section name; the original 'prod' section is deleted.

    Raises ValueError when *config* is not a ConfigObj.
    """
    if not isinstance(config, ConfigObj):
        raise ValueError("config should be ConfigObj, not %s" % type(config))
    # Copy the section list up front: sections are added and deleted
    # while iterating, so we must not walk the live list.
    sections = copy.copy(config.sections)
    for section in sections:
        delete_original_section = False
        for dynamic_attr in dynamic_attrs:
            # Check to see if we should generate new sections.
            if delimiter in config[section].get(dynamic_attr, ''):
                for new_section_arg in config[section][dynamic_attr].split(
                        delimiter):
                    new_section = section + '-' + new_section_arg
                    # Use default section
                    config[new_section] = {}
                    # Copy the existing section config.
                    config[new_section].update(config[section])
                    config[new_section][dynamic_attr] = new_section_arg
                    # We are eventually going to delete the old section.
                    # Lets use it as a supernova group
                    config[new_section]['SUPERNOVA_GROUP'] = section
                    delete_original_section = True
        if delete_original_section:
            # We are done, lets remove the original section
            del config[section]
|
major/supernova | supernova/config.py | get_config_file | python | def get_config_file(override_files=False):
if override_files:
if isinstance(override_files, six.string_types):
possible_configs = [override_files]
else:
raise Exception("Config file override must be a string")
else:
xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
os.path.expanduser('~/.config')
possible_configs = [os.path.join(xdg_config_home, "supernova"),
os.path.expanduser("~/.supernova"),
".supernova"]
for config_file in reversed(possible_configs):
if os.path.isfile(config_file):
return config_file
return False | Looks for the most specific configuration file available. An override
can be provided as a string if needed. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/config.py#L72-L93 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Takes care of the basic setup of the config files and does some preliminary
sanity checks
"""
import copy
import os
from configobj import ConfigObj
import six
def run_config(config_file_override=False):
"""
Runs sanity checks and prepares the global nova_creds variable
"""
try:
nova_creds = load_config(config_file_override)
except:
raise
return nova_creds
def load_config(config_file_override=False):
    """
    Pull the supernova configuration file and directory, parse them, and
    return the merged ConfigObj of credentials.

    Raises Exception when no configuration source can be found or when
    the main configuration file cannot be parsed; unparsable files inside
    the configuration directory are skipped with a warning.
    """
    supernova_config = get_config_file(config_file_override)
    supernova_config_dir = get_config_directory(config_file_override)

    if not supernova_config and not supernova_config_dir:
        raise Exception("Couldn't find a valid configuration file to parse")

    nova_creds = ConfigObj()

    # Can we successfully read the configuration file?
    if supernova_config:
        try:
            nova_creds.merge(ConfigObj(supernova_config))
        except Exception:
            # BUG FIX: the old code did `raise("...")`, which tries to
            # raise a plain string -> TypeError, losing the message.
            raise Exception("There's an error in your configuration file")

    if supernova_config_dir:
        for dir_file in os.listdir(supernova_config_dir):
            full_path = ''.join((supernova_config_dir, dir_file))
            try:
                nova_creds.merge(ConfigObj(full_path))
            except Exception:
                # BUG FIX: '%s' is not a str.format() placeholder, so the
                # old message printed a literal '%s' instead of the path.
                print("Skipping '{0}', Parsing Error.".format(full_path))

    create_dynamic_configs(nova_creds)
    return nova_creds
def get_config_directory(override_files=False):
"""
Looks for the most specific configuration directory possible, in order to
load individual configuration files.
"""
if override_files:
possible_dirs = [override_files]
else:
xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
os.path.expanduser('~/.config')
possible_dirs = [os.path.join(xdg_config_home, "supernova.d/"),
os.path.expanduser("~/.supernova.d/"),
".supernova.d/"]
for config_dir in reversed(possible_dirs):
if os.path.isdir(config_dir):
return config_dir
return False
def create_dynamic_configs(config,
dynamic_attrs=('OS_REGION_NAME', 'OS_TENANT_NAME'),
delimiter=';'):
if not isinstance(config, ConfigObj):
raise ValueError("config should be ConfigObj, not %s" % type(config))
sections = copy.copy(config.sections)
for section in sections:
delete_original_section = False
for dynamic_attr in dynamic_attrs:
# Check to see if we should generate new sections.
if delimiter in config[section].get(dynamic_attr, ''):
for new_section_arg in config[section][dynamic_attr].split(
delimiter):
new_section = section + '-' + new_section_arg
# Use default section
config[new_section] = {}
# Copy the existing section config.
config[new_section].update(config[section])
config[new_section][dynamic_attr] = new_section_arg
# We are eventually going to delete the old section.
# Lets use it as a supernova group
config[new_section]['SUPERNOVA_GROUP'] = section
delete_original_section = True
if delete_original_section:
# We are done, lets remove the original section
del config[section]
|
major/supernova | supernova/config.py | get_config_directory | python | def get_config_directory(override_files=False):
if override_files:
possible_dirs = [override_files]
else:
xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
os.path.expanduser('~/.config')
possible_dirs = [os.path.join(xdg_config_home, "supernova.d/"),
os.path.expanduser("~/.supernova.d/"),
".supernova.d/"]
for config_dir in reversed(possible_dirs):
if os.path.isdir(config_dir):
return config_dir
return False | Looks for the most specific configuration directory possible, in order to
load individual configuration files. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/config.py#L96-L115 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Takes care of the basic setup of the config files and does some preliminary
sanity checks
"""
import copy
import os
from configobj import ConfigObj
import six
def run_config(config_file_override=False):
"""
Runs sanity checks and prepares the global nova_creds variable
"""
try:
nova_creds = load_config(config_file_override)
except:
raise
return nova_creds
def load_config(config_file_override=False):
"""
Pulls the supernova configuration file and reads it
"""
supernova_config = get_config_file(config_file_override)
supernova_config_dir = get_config_directory(config_file_override)
if not supernova_config and not supernova_config_dir:
raise Exception("Couldn't find a valid configuration file to parse")
nova_creds = ConfigObj()
# Can we successfully read the configuration file?
if supernova_config:
try:
nova_creds.merge(ConfigObj(supernova_config))
except:
raise("There's an error in your configuration file")
if supernova_config_dir:
for dir_file in os.listdir(supernova_config_dir):
full_path = ''.join((supernova_config_dir, dir_file))
try:
nova_creds.merge(ConfigObj(full_path))
except:
msg = "Skipping '%s', Parsing Error.".format(full_path)
print(msg)
create_dynamic_configs(nova_creds)
return nova_creds
def get_config_file(override_files=False):
"""
Looks for the most specific configuration file available. An override
can be provided as a string if needed.
"""
if override_files:
if isinstance(override_files, six.string_types):
possible_configs = [override_files]
else:
raise Exception("Config file override must be a string")
else:
xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
os.path.expanduser('~/.config')
possible_configs = [os.path.join(xdg_config_home, "supernova"),
os.path.expanduser("~/.supernova"),
".supernova"]
for config_file in reversed(possible_configs):
if os.path.isfile(config_file):
return config_file
return False
def create_dynamic_configs(config,
dynamic_attrs=('OS_REGION_NAME', 'OS_TENANT_NAME'),
delimiter=';'):
if not isinstance(config, ConfigObj):
raise ValueError("config should be ConfigObj, not %s" % type(config))
sections = copy.copy(config.sections)
for section in sections:
delete_original_section = False
for dynamic_attr in dynamic_attrs:
# Check to see if we should generate new sections.
if delimiter in config[section].get(dynamic_attr, ''):
for new_section_arg in config[section][dynamic_attr].split(
delimiter):
new_section = section + '-' + new_section_arg
# Use default section
config[new_section] = {}
# Copy the existing section config.
config[new_section].update(config[section])
config[new_section][dynamic_attr] = new_section_arg
# We are eventually going to delete the old section.
# Lets use it as a supernova group
config[new_section]['SUPERNOVA_GROUP'] = section
delete_original_section = True
if delete_original_section:
# We are done, lets remove the original section
del config[section]
|
major/supernova | supernova/supernova.py | execute_executable | python | def execute_executable(nova_args, env_vars):
process = subprocess.Popen(nova_args,
stdout=sys.stdout,
stderr=subprocess.PIPE,
env=env_vars)
process.wait()
return process | Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/supernova.py#L32-L44 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the actual class that runs novaclient (or the executable chosen by
the user)
"""
import copy
import os
import subprocess
import sys
import click
from . import credentials
def check_for_debug(supernova_args, nova_args):
    """
    Prepend the appropriate debug flag to the executable's arguments when
    the user asked for debugging.

    Heat is a corner case: it takes -d rather than --debug.
    """
    if supernova_args['debug']:
        flag = '-d ' if supernova_args['executable'] == 'heat' else '--debug '
        nova_args.insert(0, flag)
    return nova_args
def check_for_executable(supernova_args, env_vars):
    """
    Decide which executable supernova should run and record it in
    supernova_args (mutated and returned).

    Priority: an executable chosen on the command line wins; otherwise
    the OS_EXECUTABLE environment variable; otherwise plain 'nova'.
    """
    if supernova_args.get('executable', 'default') != 'default':
        # The command line already picked one; leave it alone.
        return supernova_args
    supernova_args['executable'] = env_vars.get('OS_EXECUTABLE', 'nova')
    return supernova_args
def check_for_bypass_url(raw_creds, nova_args):
    """
    Return nova_args, prefixed with the --bypass-url option when the
    credentials define a BYPASS_URL (which must travel as a command-line
    argument, not an environment variable).
    """
    if 'BYPASS_URL' not in raw_creds:
        return nova_args
    return ['--bypass-url', raw_creds['BYPASS_URL']] + nova_args
def handle_stderr(stderr_pipe):
    """
    Read the captured stderr and, when it is non-empty, display it after
    the command's stdout under a highlighted header.

    Always returns True.
    """
    captured = stderr_pipe.read()
    if len(captured) > 0:
        click.secho("\n__ Error Output {0}".format('_'*62), fg='white',
                    bold=True)
        click.echo(captured)
    return True
def run_command(nova_creds, nova_args, supernova_args):
    """
    Sets the environment variables for the executable, runs the executable,
    and handles the output.

    ``nova_creds`` is the parsed credentials mapping, ``nova_args`` the
    argument list to forward to the executable, and ``supernova_args``
    the supernova-level options (must contain 'nova_env' and 'debug';
    'executable' and 'quiet' are optional).

    Returns the child process's exit code.
    """
    nova_env = supernova_args['nova_env']
    # (gtmanfred) make a copy of this object. If we don't copy it, the insert
    # to 0 happens multiple times because it is the same object in memory.
    nova_args = copy.copy(nova_args)
    # Get the environment variables ready
    env_vars = os.environ.copy()
    env_vars.update(credentials.prep_shell_environment(nova_env,
                                                       nova_creds))
    # BYPASS_URL is a weird one, so we need to send it as an argument,
    # not an environment variable.
    nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)
    # Check for OS_EXECUTABLE
    supernova_args = check_for_executable(supernova_args, env_vars)
    # Check for a debug override
    nova_args = check_for_debug(supernova_args, nova_args)
    # Print a small message for the user (very helpful for groups)
    msg = "Running %s against %s..." % (supernova_args.get('executable'),
                                        nova_env)
    if not supernova_args.get('quiet'):
        click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'), msg))
    # Call executable and connect stdout to the current terminal
    # so that any unicode characters from the executable's list will be
    # displayed appropriately.
    #
    # In other news, I hate how python 2.6 does unicode.
    nova_args.insert(0, supernova_args['executable'])
    nova_args = [nova_arg.strip() for nova_arg in nova_args]
    process = execute_executable(nova_args, env_vars)
    # If the user asked us to be quiet, then let's not print stderr
    if not supernova_args.get('quiet'):
        handle_stderr(process.stderr)
    return process.returncode
|
major/supernova | supernova/supernova.py | check_for_debug | python | def check_for_debug(supernova_args, nova_args):
# Heat requires special handling for debug arguments
if supernova_args['debug'] and supernova_args['executable'] == 'heat':
nova_args.insert(0, '-d ')
elif supernova_args['debug']:
nova_args.insert(0, '--debug ')
return nova_args | If the user wanted to run the executable with debugging enabled, we need
to apply the correct arguments to the executable.
Heat is a corner case since it uses -d instead of --debug. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/supernova.py#L47-L60 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the actual class that runs novaclient (or the executable chosen by
the user)
"""
import copy
import os
import subprocess
import sys
import click
from . import credentials
def execute_executable(nova_args, env_vars):
    """
    Launch the user's chosen executable and wait for it to finish.

    stdout is wired straight to the current terminal, while stderr is
    captured on a pipe so the caller can decide whether to display it.
    Returns the finished Popen object.
    """
    child = subprocess.Popen(
        nova_args,
        stdout=sys.stdout,
        stderr=subprocess.PIPE,
        env=env_vars,
    )
    child.wait()
    return child
def check_for_executable(supernova_args, env_vars):
"""
It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority.
"""
exe = supernova_args.get('executable', 'default')
if exe != 'default':
return supernova_args
if 'OS_EXECUTABLE' in env_vars.keys():
supernova_args['executable'] = env_vars['OS_EXECUTABLE']
return supernova_args
supernova_args['executable'] = 'nova'
return supernova_args
def check_for_bypass_url(raw_creds, nova_args):
"""
Return a list of extra args that need to be passed on cmdline to nova.
"""
if 'BYPASS_URL' in raw_creds.keys():
bypass_args = ['--bypass-url', raw_creds['BYPASS_URL']]
nova_args = bypass_args + nova_args
return nova_args
def handle_stderr(stderr_pipe):
"""
Takes stderr from the command's output and displays it AFTER the stdout
is printed by run_command().
"""
stderr_output = stderr_pipe.read()
if len(stderr_output) > 0:
click.secho("\n__ Error Output {0}".format('_'*62), fg='white',
bold=True)
click.echo(stderr_output)
return True
def run_command(nova_creds, nova_args, supernova_args):
"""
Sets the environment variables for the executable, runs the executable,
and handles the output.
"""
nova_env = supernova_args['nova_env']
# (gtmanfred) make a copy of this object. If we don't copy it, the insert
# to 0 happens multiple times because it is the same object in memory.
nova_args = copy.copy(nova_args)
# Get the environment variables ready
env_vars = os.environ.copy()
env_vars.update(credentials.prep_shell_environment(nova_env,
nova_creds))
# BYPASS_URL is a weird one, so we need to send it as an argument,
# not an environment variable.
nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)
# Check for OS_EXECUTABLE
supernova_args = check_for_executable(supernova_args, env_vars)
# Check for a debug override
nova_args = check_for_debug(supernova_args, nova_args)
# Print a small message for the user (very helpful for groups)
msg = "Running %s against %s..." % (supernova_args.get('executable'),
nova_env)
if not supernova_args.get('quiet'):
click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'), msg))
# Call executable and connect stdout to the current terminal
# so that any unicode characters from the executable's list will be
# displayed appropriately.
#
# In other news, I hate how python 2.6 does unicode.
nova_args.insert(0, supernova_args['executable'])
nova_args = [nova_arg.strip() for nova_arg in nova_args]
process = execute_executable(nova_args, env_vars)
# If the user asked us to be quiet, then let's not print stderr
if not supernova_args.get('quiet'):
handle_stderr(process.stderr)
return process.returncode
|
major/supernova | supernova/supernova.py | check_for_executable | python | def check_for_executable(supernova_args, env_vars):
exe = supernova_args.get('executable', 'default')
if exe != 'default':
return supernova_args
if 'OS_EXECUTABLE' in env_vars.keys():
supernova_args['executable'] = env_vars['OS_EXECUTABLE']
return supernova_args
supernova_args['executable'] = 'nova'
return supernova_args | It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/supernova.py#L63-L77 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the actual class that runs novaclient (or the executable chosen by
the user)
"""
import copy
import os
import subprocess
import sys
import click
from . import credentials
def execute_executable(nova_args, env_vars):
"""
Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly.
"""
process = subprocess.Popen(nova_args,
stdout=sys.stdout,
stderr=subprocess.PIPE,
env=env_vars)
process.wait()
return process
def check_for_debug(supernova_args, nova_args):
"""
If the user wanted to run the executable with debugging enabled, we need
to apply the correct arguments to the executable.
Heat is a corner case since it uses -d instead of --debug.
"""
# Heat requires special handling for debug arguments
if supernova_args['debug'] and supernova_args['executable'] == 'heat':
nova_args.insert(0, '-d ')
elif supernova_args['debug']:
nova_args.insert(0, '--debug ')
return nova_args
def check_for_bypass_url(raw_creds, nova_args):
"""
Return a list of extra args that need to be passed on cmdline to nova.
"""
if 'BYPASS_URL' in raw_creds.keys():
bypass_args = ['--bypass-url', raw_creds['BYPASS_URL']]
nova_args = bypass_args + nova_args
return nova_args
def handle_stderr(stderr_pipe):
"""
Takes stderr from the command's output and displays it AFTER the stdout
is printed by run_command().
"""
stderr_output = stderr_pipe.read()
if len(stderr_output) > 0:
click.secho("\n__ Error Output {0}".format('_'*62), fg='white',
bold=True)
click.echo(stderr_output)
return True
def run_command(nova_creds, nova_args, supernova_args):
"""
Sets the environment variables for the executable, runs the executable,
and handles the output.
"""
nova_env = supernova_args['nova_env']
# (gtmanfred) make a copy of this object. If we don't copy it, the insert
# to 0 happens multiple times because it is the same object in memory.
nova_args = copy.copy(nova_args)
# Get the environment variables ready
env_vars = os.environ.copy()
env_vars.update(credentials.prep_shell_environment(nova_env,
nova_creds))
# BYPASS_URL is a weird one, so we need to send it as an argument,
# not an environment variable.
nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)
# Check for OS_EXECUTABLE
supernova_args = check_for_executable(supernova_args, env_vars)
# Check for a debug override
nova_args = check_for_debug(supernova_args, nova_args)
# Print a small message for the user (very helpful for groups)
msg = "Running %s against %s..." % (supernova_args.get('executable'),
nova_env)
if not supernova_args.get('quiet'):
click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'), msg))
# Call executable and connect stdout to the current terminal
# so that any unicode characters from the executable's list will be
# displayed appropriately.
#
# In other news, I hate how python 2.6 does unicode.
nova_args.insert(0, supernova_args['executable'])
nova_args = [nova_arg.strip() for nova_arg in nova_args]
process = execute_executable(nova_args, env_vars)
# If the user asked us to be quiet, then let's not print stderr
if not supernova_args.get('quiet'):
handle_stderr(process.stderr)
return process.returncode
|
major/supernova | supernova/supernova.py | check_for_bypass_url | python | def check_for_bypass_url(raw_creds, nova_args):
if 'BYPASS_URL' in raw_creds.keys():
bypass_args = ['--bypass-url', raw_creds['BYPASS_URL']]
nova_args = bypass_args + nova_args
return nova_args | Return a list of extra args that need to be passed on cmdline to nova. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/supernova.py#L80-L88 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the actual class that runs novaclient (or the executable chosen by
the user)
"""
import copy
import os
import subprocess
import sys
import click
from . import credentials
def execute_executable(nova_args, env_vars):
"""
Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly.
"""
process = subprocess.Popen(nova_args,
stdout=sys.stdout,
stderr=subprocess.PIPE,
env=env_vars)
process.wait()
return process
def check_for_debug(supernova_args, nova_args):
"""
If the user wanted to run the executable with debugging enabled, we need
to apply the correct arguments to the executable.
Heat is a corner case since it uses -d instead of --debug.
"""
# Heat requires special handling for debug arguments
if supernova_args['debug'] and supernova_args['executable'] == 'heat':
nova_args.insert(0, '-d ')
elif supernova_args['debug']:
nova_args.insert(0, '--debug ')
return nova_args
def check_for_executable(supernova_args, env_vars):
"""
It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority.
"""
exe = supernova_args.get('executable', 'default')
if exe != 'default':
return supernova_args
if 'OS_EXECUTABLE' in env_vars.keys():
supernova_args['executable'] = env_vars['OS_EXECUTABLE']
return supernova_args
supernova_args['executable'] = 'nova'
return supernova_args
def handle_stderr(stderr_pipe):
"""
Takes stderr from the command's output and displays it AFTER the stdout
is printed by run_command().
"""
stderr_output = stderr_pipe.read()
if len(stderr_output) > 0:
click.secho("\n__ Error Output {0}".format('_'*62), fg='white',
bold=True)
click.echo(stderr_output)
return True
def run_command(nova_creds, nova_args, supernova_args):
"""
Sets the environment variables for the executable, runs the executable,
and handles the output.
"""
nova_env = supernova_args['nova_env']
# (gtmanfred) make a copy of this object. If we don't copy it, the insert
# to 0 happens multiple times because it is the same object in memory.
nova_args = copy.copy(nova_args)
# Get the environment variables ready
env_vars = os.environ.copy()
env_vars.update(credentials.prep_shell_environment(nova_env,
nova_creds))
# BYPASS_URL is a weird one, so we need to send it as an argument,
# not an environment variable.
nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)
# Check for OS_EXECUTABLE
supernova_args = check_for_executable(supernova_args, env_vars)
# Check for a debug override
nova_args = check_for_debug(supernova_args, nova_args)
# Print a small message for the user (very helpful for groups)
msg = "Running %s against %s..." % (supernova_args.get('executable'),
nova_env)
if not supernova_args.get('quiet'):
click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'), msg))
# Call executable and connect stdout to the current terminal
# so that any unicode characters from the executable's list will be
# displayed appropriately.
#
# In other news, I hate how python 2.6 does unicode.
nova_args.insert(0, supernova_args['executable'])
nova_args = [nova_arg.strip() for nova_arg in nova_args]
process = execute_executable(nova_args, env_vars)
# If the user asked us to be quiet, then let's not print stderr
if not supernova_args.get('quiet'):
handle_stderr(process.stderr)
return process.returncode
|
major/supernova | supernova/supernova.py | handle_stderr | python | def handle_stderr(stderr_pipe):
stderr_output = stderr_pipe.read()
if len(stderr_output) > 0:
click.secho("\n__ Error Output {0}".format('_'*62), fg='white',
bold=True)
click.echo(stderr_output)
return True | Takes stderr from the command's output and displays it AFTER the stdout
is printed by run_command(). | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/supernova.py#L91-L103 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the actual class that runs novaclient (or the executable chosen by
the user)
"""
import copy
import os
import subprocess
import sys
import click
from . import credentials
def execute_executable(nova_args, env_vars):
"""
Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly.
"""
process = subprocess.Popen(nova_args,
stdout=sys.stdout,
stderr=subprocess.PIPE,
env=env_vars)
process.wait()
return process
def check_for_debug(supernova_args, nova_args):
"""
If the user wanted to run the executable with debugging enabled, we need
to apply the correct arguments to the executable.
Heat is a corner case since it uses -d instead of --debug.
"""
# Heat requires special handling for debug arguments
if supernova_args['debug'] and supernova_args['executable'] == 'heat':
nova_args.insert(0, '-d ')
elif supernova_args['debug']:
nova_args.insert(0, '--debug ')
return nova_args
def check_for_executable(supernova_args, env_vars):
"""
It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority.
"""
exe = supernova_args.get('executable', 'default')
if exe != 'default':
return supernova_args
if 'OS_EXECUTABLE' in env_vars.keys():
supernova_args['executable'] = env_vars['OS_EXECUTABLE']
return supernova_args
supernova_args['executable'] = 'nova'
return supernova_args
def check_for_bypass_url(raw_creds, nova_args):
"""
Return a list of extra args that need to be passed on cmdline to nova.
"""
if 'BYPASS_URL' in raw_creds.keys():
bypass_args = ['--bypass-url', raw_creds['BYPASS_URL']]
nova_args = bypass_args + nova_args
return nova_args
def run_command(nova_creds, nova_args, supernova_args):
"""
Sets the environment variables for the executable, runs the executable,
and handles the output.
"""
nova_env = supernova_args['nova_env']
# (gtmanfred) make a copy of this object. If we don't copy it, the insert
# to 0 happens multiple times because it is the same object in memory.
nova_args = copy.copy(nova_args)
# Get the environment variables ready
env_vars = os.environ.copy()
env_vars.update(credentials.prep_shell_environment(nova_env,
nova_creds))
# BYPASS_URL is a weird one, so we need to send it as an argument,
# not an environment variable.
nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)
# Check for OS_EXECUTABLE
supernova_args = check_for_executable(supernova_args, env_vars)
# Check for a debug override
nova_args = check_for_debug(supernova_args, nova_args)
# Print a small message for the user (very helpful for groups)
msg = "Running %s against %s..." % (supernova_args.get('executable'),
nova_env)
if not supernova_args.get('quiet'):
click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'), msg))
# Call executable and connect stdout to the current terminal
# so that any unicode characters from the executable's list will be
# displayed appropriately.
#
# In other news, I hate how python 2.6 does unicode.
nova_args.insert(0, supernova_args['executable'])
nova_args = [nova_arg.strip() for nova_arg in nova_args]
process = execute_executable(nova_args, env_vars)
# If the user asked us to be quiet, then let's not print stderr
if not supernova_args.get('quiet'):
handle_stderr(process.stderr)
return process.returncode
|
major/supernova | supernova/supernova.py | run_command | python | def run_command(nova_creds, nova_args, supernova_args):
nova_env = supernova_args['nova_env']
# (gtmanfred) make a copy of this object. If we don't copy it, the insert
# to 0 happens multiple times because it is the same object in memory.
nova_args = copy.copy(nova_args)
# Get the environment variables ready
env_vars = os.environ.copy()
env_vars.update(credentials.prep_shell_environment(nova_env,
nova_creds))
# BYPASS_URL is a weird one, so we need to send it as an argument,
# not an environment variable.
nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)
# Check for OS_EXECUTABLE
supernova_args = check_for_executable(supernova_args, env_vars)
# Check for a debug override
nova_args = check_for_debug(supernova_args, nova_args)
# Print a small message for the user (very helpful for groups)
msg = "Running %s against %s..." % (supernova_args.get('executable'),
nova_env)
if not supernova_args.get('quiet'):
click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'), msg))
# Call executable and connect stdout to the current terminal
# so that any unicode characters from the executable's list will be
# displayed appropriately.
#
# In other news, I hate how python 2.6 does unicode.
nova_args.insert(0, supernova_args['executable'])
nova_args = [nova_arg.strip() for nova_arg in nova_args]
process = execute_executable(nova_args, env_vars)
# If the user asked us to be quiet, then let's not print stderr
if not supernova_args.get('quiet'):
handle_stderr(process.stderr)
return process.returncode | Sets the environment variables for the executable, runs the executable,
and handles the output. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/supernova.py#L106-L150 | [
"def prep_shell_environment(nova_env, nova_creds):\n \"\"\"\n Appends new variables to the current shell environment temporarily.\n \"\"\"\n new_env = {}\n\n for key, value in prep_nova_creds(nova_env, nova_creds):\n if type(value) == six.binary_type:\n value = value.decode()\n ... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the actual class that runs novaclient (or the executable chosen by
the user)
"""
import copy
import os
import subprocess
import sys
import click
from . import credentials
def execute_executable(nova_args, env_vars):
"""
Executes the executable given by the user.
Hey, I know this method has a silly name, but I write the code here and
I'm silly.
"""
process = subprocess.Popen(nova_args,
stdout=sys.stdout,
stderr=subprocess.PIPE,
env=env_vars)
process.wait()
return process
def check_for_debug(supernova_args, nova_args):
"""
If the user wanted to run the executable with debugging enabled, we need
to apply the correct arguments to the executable.
Heat is a corner case since it uses -d instead of --debug.
"""
# Heat requires special handling for debug arguments
if supernova_args['debug'] and supernova_args['executable'] == 'heat':
nova_args.insert(0, '-d ')
elif supernova_args['debug']:
nova_args.insert(0, '--debug ')
return nova_args
def check_for_executable(supernova_args, env_vars):
"""
It's possible that a user might set their custom executable via an
environment variable. If we detect one, we should add it to supernova's
arguments ONLY IF an executable wasn't set on the command line. The
command line executable must take priority.
"""
exe = supernova_args.get('executable', 'default')
if exe != 'default':
return supernova_args
if 'OS_EXECUTABLE' in env_vars.keys():
supernova_args['executable'] = env_vars['OS_EXECUTABLE']
return supernova_args
supernova_args['executable'] = 'nova'
return supernova_args
def check_for_bypass_url(raw_creds, nova_args):
"""
Return a list of extra args that need to be passed on cmdline to nova.
"""
if 'BYPASS_URL' in raw_creds.keys():
bypass_args = ['--bypass-url', raw_creds['BYPASS_URL']]
nova_args = bypass_args + nova_args
return nova_args
def handle_stderr(stderr_pipe):
"""
Takes stderr from the command's output and displays it AFTER the stdout
is printed by run_command().
"""
stderr_output = stderr_pipe.read()
if len(stderr_output) > 0:
click.secho("\n__ Error Output {0}".format('_'*62), fg='white',
bold=True)
click.echo(stderr_output)
return True
|
major/supernova | supernova/utils.py | check_environment_presets | python | def check_environment_presets():
presets = [x for x in os.environ.copy().keys() if x.startswith('NOVA_') or
x.startswith('OS_')]
if len(presets) < 1:
return True
else:
click.echo("_" * 80)
click.echo("*WARNING* Found existing environment variables that may "
"cause conflicts:")
for preset in presets:
click.echo(" - %s" % preset)
click.echo("_" * 80)
return False | Checks for environment variables that can cause problems with supernova | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/utils.py#L29-L44 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains many of the shared utility functions
"""
import os
import click
def assemble_username(env, param):
return "{0}:{1}".format(env, param)
def confirm_credential_display(force=False):
if force:
return True
msg = """
[WARNING] Your credential is about to be displayed on screen.
If this is really what you want, type 'y' and press enter."""
result = click.confirm(text=msg)
return result
def get_envs_in_group(group_name, nova_creds):
"""
Takes a group_name and finds any environments that have a SUPERNOVA_GROUP
configuration line that matches the group_name.
"""
envs = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
if group_name in supernova_groups:
envs.append(key)
elif group_name == 'all':
envs.append(key)
return envs
def is_valid_environment(env, nova_creds):
"""
Checks to see if the configuration file contains a section for our
requested environment.
"""
if env in nova_creds.keys():
return env
else:
return False
def is_valid_group(group_name, nova_creds):
"""
Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option.
"""
valid_groups = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
valid_groups.extend(supernova_groups)
valid_groups.append('all')
if group_name in valid_groups:
return True
else:
return False
def rm_prefix(name):
"""
Removes nova_ os_ novaclient_ prefix from string.
"""
if name.startswith('nova_'):
return name[5:]
elif name.startswith('novaclient_'):
return name[11:]
elif name.startswith('os_'):
return name[3:]
else:
return name
|
major/supernova | supernova/utils.py | get_envs_in_group | python | def get_envs_in_group(group_name, nova_creds):
envs = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
if group_name in supernova_groups:
envs.append(key)
elif group_name == 'all':
envs.append(key)
return envs | Takes a group_name and finds any environments that have a SUPERNOVA_GROUP
configuration line that matches the group_name. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/utils.py#L59-L73 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains many of the shared utility functions
"""
import os
import click
def assemble_username(env, param):
return "{0}:{1}".format(env, param)
def check_environment_presets():
"""
Checks for environment variables that can cause problems with supernova
"""
presets = [x for x in os.environ.copy().keys() if x.startswith('NOVA_') or
x.startswith('OS_')]
if len(presets) < 1:
return True
else:
click.echo("_" * 80)
click.echo("*WARNING* Found existing environment variables that may "
"cause conflicts:")
for preset in presets:
click.echo(" - %s" % preset)
click.echo("_" * 80)
return False
def confirm_credential_display(force=False):
if force:
return True
msg = """
[WARNING] Your credential is about to be displayed on screen.
If this is really what you want, type 'y' and press enter."""
result = click.confirm(text=msg)
return result
def is_valid_environment(env, nova_creds):
"""
Checks to see if the configuration file contains a section for our
requested environment.
"""
if env in nova_creds.keys():
return env
else:
return False
def is_valid_group(group_name, nova_creds):
"""
Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option.
"""
valid_groups = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
valid_groups.extend(supernova_groups)
valid_groups.append('all')
if group_name in valid_groups:
return True
else:
return False
def rm_prefix(name):
"""
Removes nova_ os_ novaclient_ prefix from string.
"""
if name.startswith('nova_'):
return name[5:]
elif name.startswith('novaclient_'):
return name[11:]
elif name.startswith('os_'):
return name[3:]
else:
return name
|
major/supernova | supernova/utils.py | is_valid_group | python | def is_valid_group(group_name, nova_creds):
valid_groups = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
valid_groups.extend(supernova_groups)
valid_groups.append('all')
if group_name in valid_groups:
return True
else:
return False | Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/utils.py#L87-L102 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains many of the shared utility functions
"""
import os
import click
def assemble_username(env, param):
return "{0}:{1}".format(env, param)
def check_environment_presets():
"""
Checks for environment variables that can cause problems with supernova
"""
presets = [x for x in os.environ.copy().keys() if x.startswith('NOVA_') or
x.startswith('OS_')]
if len(presets) < 1:
return True
else:
click.echo("_" * 80)
click.echo("*WARNING* Found existing environment variables that may "
"cause conflicts:")
for preset in presets:
click.echo(" - %s" % preset)
click.echo("_" * 80)
return False
def confirm_credential_display(force=False):
if force:
return True
msg = """
[WARNING] Your credential is about to be displayed on screen.
If this is really what you want, type 'y' and press enter."""
result = click.confirm(text=msg)
return result
def get_envs_in_group(group_name, nova_creds):
"""
Takes a group_name and finds any environments that have a SUPERNOVA_GROUP
configuration line that matches the group_name.
"""
envs = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
if group_name in supernova_groups:
envs.append(key)
elif group_name == 'all':
envs.append(key)
return envs
def is_valid_environment(env, nova_creds):
"""
Checks to see if the configuration file contains a section for our
requested environment.
"""
if env in nova_creds.keys():
return env
else:
return False
def rm_prefix(name):
"""
Removes nova_ os_ novaclient_ prefix from string.
"""
if name.startswith('nova_'):
return name[5:]
elif name.startswith('novaclient_'):
return name[11:]
elif name.startswith('os_'):
return name[3:]
else:
return name
|
major/supernova | supernova/utils.py | rm_prefix | python | def rm_prefix(name):
if name.startswith('nova_'):
return name[5:]
elif name.startswith('novaclient_'):
return name[11:]
elif name.startswith('os_'):
return name[3:]
else:
return name | Removes nova_ os_ novaclient_ prefix from string. | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/utils.py#L105-L116 | null | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains many of the shared utility functions
"""
import os
import click
def assemble_username(env, param):
return "{0}:{1}".format(env, param)
def check_environment_presets():
"""
Checks for environment variables that can cause problems with supernova
"""
presets = [x for x in os.environ.copy().keys() if x.startswith('NOVA_') or
x.startswith('OS_')]
if len(presets) < 1:
return True
else:
click.echo("_" * 80)
click.echo("*WARNING* Found existing environment variables that may "
"cause conflicts:")
for preset in presets:
click.echo(" - %s" % preset)
click.echo("_" * 80)
return False
def confirm_credential_display(force=False):
if force:
return True
msg = """
[WARNING] Your credential is about to be displayed on screen.
If this is really what you want, type 'y' and press enter."""
result = click.confirm(text=msg)
return result
def get_envs_in_group(group_name, nova_creds):
"""
Takes a group_name and finds any environments that have a SUPERNOVA_GROUP
configuration line that matches the group_name.
"""
envs = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
if group_name in supernova_groups:
envs.append(key)
elif group_name == 'all':
envs.append(key)
return envs
def is_valid_environment(env, nova_creds):
"""
Checks to see if the configuration file contains a section for our
requested environment.
"""
if env in nova_creds.keys():
return env
else:
return False
def is_valid_group(group_name, nova_creds):
"""
Checks to see if the configuration file contains a SUPERNOVA_GROUP
configuration option.
"""
valid_groups = []
for key, value in nova_creds.items():
supernova_groups = value.get('SUPERNOVA_GROUP', [])
if hasattr(supernova_groups, 'startswith'):
supernova_groups = [supernova_groups]
valid_groups.extend(supernova_groups)
valid_groups.append('all')
if group_name in valid_groups:
return True
else:
return False
|
major/supernova | supernova/executable.py | run_supernova | python | def run_supernova(ctx, executable, debug, quiet, environment, command, conf,
echo, dashboard):
# Retrieve our credentials from the configuration file
try:
nova_creds = config.run_config(config_file_override=conf)
except Exception as e:
msg = ("\n There's an error in your configuration file:\n\n"
" {0}\n").format(e)
click.echo(msg)
ctx.exit(1)
# Warn the user if there are potentially conflicting environment variables
# already set in the user's environment.
utils.check_environment_presets()
# Is our environment argument a single environment or a supernova group?
if utils.is_valid_group(environment, nova_creds):
envs = utils.get_envs_in_group(environment, nova_creds)
elif ',' in environment:
envs = []
for env in environment.split(','):
if utils.is_valid_group(env, nova_creds):
envs.extend(utils.get_envs_in_group(env, nova_creds))
else:
envs.append(env)
elif environment.startswith('/') and environment.endswith('/'):
envs = [nova_env for nova_env in nova_creds.keys()
if re.search(environment[1:-1], nova_env)]
else:
envs = [environment]
# These are arguments for supernova and not the executable that supernova
# will eventually call.
supernova_args = {
'debug': debug,
'executable': executable,
'quiet': quiet,
'echo': echo,
'dashboard': dashboard,
}
# If the user specified a single environment, we need to verify that the
# environment actually exists in their configuration file.
if len(envs) == 1 and not utils.is_valid_environment(envs[0], nova_creds):
msg = ("\nCouldn't find an environment called '{0}' in your "
"configuration file.\nTry supernova --list to see all "
"configured environments.\n".format(envs[0]))
click.echo(msg)
ctx.exit(1)
if supernova_args['echo']:
if len(envs) > 1:
msg = ("\nCan't echo a group of environments.\nSpecify a single "
"environment when using --echo.")
click.echo(msg)
ctx.exit(1)
env = credentials.prep_shell_environment(envs[0], nova_creds)
for k in env:
click.echo('{0}={1}'.format(k, env[k]))
ctx.exit(0)
if supernova_args['dashboard']:
if len(envs) > 1:
msg = ("\nCan't open dashboard for a group of environments.\n"
"Specify a single environment when using --dashboard.")
click.echo(msg)
ctx.exit(1)
url = nova_creds[envs[0]].get('SUPERNOVA_DASHBOARD_URL')
if url is None:
msg = ("\nNo SUPERNOVA_DASHBOARD_URL specified "
"for environment: %s" % envs[0])
click.echo(msg)
ctx.exit(1)
webbrowser.open(url)
ctx.exit(0)
if len(command) == 0:
msg = ("\nMissing arguments to pass to executable Run supernova "
"--help for examples.\n".format(envs[0]))
click.echo(msg)
ctx.exit(1)
nova_args = list(command)
# Loop through the single environment (if the user specified one) or all
# of the environments in a supernova group (if the user specified a group).
for env in envs:
supernova_args['nova_env'] = env
returncode = supernova.run_command(nova_creds, nova_args,
supernova_args)
# NOTE(major): The return code here is the one that comes back from the
# OS_EXECUTABLE that supernova runs (by default, 'nova'). When using
# supernova groups, the return code is the one returned by the executable
# for the last environment in the group.
#
# It's not ideal, but it's all I can think of for now. ;)
sys.exit(returncode) | You can use supernova with many OpenStack clients and avoid the pain of
managing multiple sets of environment variables. Getting started is easy
and there's some documentation that can help:
http://supernova.readthedocs.org/
The first step is to get your environment variables packed into a
configuration file, usually in ~/.supernova. The docs (linked above) have
some good examples that you can fill in via copy/paste.
Once you have a configuration ready to go, replace 'prod' below with one
of your configured environments and try some of these commands:
supernova prod list (Lists instances via novaclient)
supernova prod image-list (Lists images via novaclient)
supernova prod boot ... (Boots an instance via novaclient)
Have questions, bugs, or comments? Head on over to Github and open an
issue or submit a pull request!
https://github.com/major/supernova | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/executable.py#L132-L253 | [
"def run_config(config_file_override=False):\n \"\"\"\n Runs sanity checks and prepares the global nova_creds variable\n \"\"\"\n try:\n nova_creds = load_config(config_file_override)\n except:\n raise\n return nova_creds\n",
"def prep_shell_environment(nova_env, nova_creds):\n ... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the functions needed for supernova and supernova-keyring commands
to run
"""
import re
import sys
import webbrowser
import click
from . import config
from . import credentials
from . import supernova
from . import utils
def print_env_list(ctx, param, value):
if not value or ctx.resilient_parsing:
return
conf = ctx.params.get('conf')
# Retrieve our credentials from the configuration file
try:
nova_creds = config.run_config(config_file_override=conf)
except Exception as e:
msg = ("\n There's an error in your configuration file:\n\n"
" {0}\n").format(e)
click.echo(msg)
ctx.exit(1)
for env in nova_creds.keys():
nova_env = dict(nova_creds.get('DEFAULT', {}), **nova_creds[env])
envheader = '__ %s ' % click.style(env, fg='green')
click.echo(envheader.ljust(86, '_'))
for param, value in sorted(nova_env.items()):
click.echo(' %s: %s' % (param.upper().ljust(25), value))
ctx.exit()
def print_env_short_list(ctx, param, value):
if not value or ctx.resilient_parsing:
return
conf = ctx.params.get('conf')
# Retrieve our credentials from the configuration file
try:
nova_creds = config.run_config(config_file_override=conf)
except Exception as e:
msg = ("\n There's an error in your configuration file:\n\n"
" {0}\n").format(e)
click.echo(msg)
ctx.exit(1)
row = 1
for nova_env in nova_creds.keys():
executable = "nova"
auth_url = ""
for param, value in sorted(nova_creds[nova_env].items()):
if param.upper() == 'OS_EXECUTABLE':
executable = value
if param.upper() == 'OS_AUTH_URL' and 'inova' in value:
executable = 'inova'
if param.upper() == 'OS_AUTH_URL':
auth_url = value
color = 'green'
if row % 2 == 0:
color = 'red'
env = click.style(nova_env, fg=color)
click.echo("%s (%s) @ %s" % (env, executable, auth_url))
row += 1
ctx.exit()
# NOTE(major): This tells click to allow us to put arguments/options after
# the environment is specified. See the note just before the
# supernova.run_command() call below for more clarity.
command_settings = {
'ignore_unknown_options': True,
}
@click.command(context_settings=command_settings)
# This is a little hacky, but we cannot directly set the default to nova
# because then we have no way of determining if the option was supplied on the
# command line, or simply set to the default. We need that information so that
# we can properly set the executable to run in this order:
# 1. executable provided on command line
# 2. executable from environment variable
# 3. fallback to nova as a default if neither 1 nor 2 are set
@click.option('--executable', '-x', default='default',
help='Command to run [default: nova]', show_default=False)
@click.option('--debug', '-d', default=False, is_flag=True,
help="Enable debugging", show_default=True)
@click.option('--conf', '-c', default=None, is_flag=False,
help="Manually specify a supernova configuration file")
@click.option('--quiet', '-q', default=False, show_default=False,
is_flag=True,
help="Display the least amount of output possible")
@click.option('--echo', '-e', default=None, is_flag=True,
help="Print the specified environment and exit")
@click.option('--dashboard', '-D', default=None, is_flag=True,
help="Open dashboard in browser for specified environment")
@click.argument('environment', nargs=1)
@click.argument('command', nargs=-1)
@click.version_option()
@click.option('--list', '-l', is_flag=True, callback=print_env_list,
expose_value=False, is_eager=False, default=False,
help="List all configured environments")
@click.option('--shortlist', '-s', is_flag=True, callback=print_env_short_list,
expose_value=False, is_eager=False, default=False,
help="List all configured environments in shorter format")
@click.pass_context
@click.command()
@click.option('--get', '-g', 'action', flag_value='get_credential',
help='retrieve a credential from keyring storage')
@click.option('--set', '-s', 'action', flag_value='set_credential',
help='store a credential in keyring storage',)
@click.argument('environment', nargs=1)
@click.argument('parameter', nargs=1)
@click.pass_context
def run_supernova_keyring(ctx, action, environment, parameter):
"""
Sets or retrieves credentials stored in your system's keyring using the
python-keyring module.
Global credentials can be shared between multiple configuration sections:
\b
[prod]
OS_PASSWORD=USE_KEYRING['sso_password']
...
\b
[staging]
OS_PASSWORD=USE_KEYRING['my sso_password']
...
You could retrieve or set the global credential using these commands:
\b
supernova -g global sso_password <= get the credential
supernova -s global sso_password <= set the credential
Local credentials are intended for use with only one configuration section:
\b
[prod]
OS_PASSWORD=USE_KEYRING
...
\b
[staging]
OS_PASSWORD=USE_KEYRING
...
You could retrieve or set the local credential using these commands:
\b
supernova -g prod OS_PASSWORD <= get the credential for prod
supernova -s prod OS_PASSWORD <= set the credential for prod
\b
supernova -g staging OS_PASSWORD <= get the credential for staging
supernova -s staging OS_PASSWORD <= set the credential for staging
Full documentation:
\b
http://supernova.readthedocs.org/en/latest/configuring/
"""
if action == 'get_credential':
result = credentials.get_user_password(env=environment,
param=parameter)
if not result:
click.echo("\nUnable to find a credential matching the data "
"provided.")
ctx.exit(1)
else:
click.echo("\nFound credential for {0}: {1}".format(*result))
ctx.exit()
elif action == 'set_credential':
msg = """
Preparing to set a credential in the keyring for:
- Environment : {0}
- Parameter : {1}
If this is correct, enter the corresponding credential to store in your keyring
or press CTRL-C to abort""".format(environment, parameter)
credential = click.prompt(text=msg, hide_input=True)
result = credentials.set_user_password(environment=environment,
parameter=parameter,
password=credential)
if result:
click.echo("\nSuccessfully stored.")
ctx.exit()
else:
click.echo("\nUnable to store your credential.")
ctx.exit(1)
else:
click.secho("ERROR: must specify --get or --set", bold=True)
click.echo(ctx.get_help())
ctx.exit()
|
major/supernova | supernova/executable.py | run_supernova_keyring | python | def run_supernova_keyring(ctx, action, environment, parameter):
if action == 'get_credential':
result = credentials.get_user_password(env=environment,
param=parameter)
if not result:
click.echo("\nUnable to find a credential matching the data "
"provided.")
ctx.exit(1)
else:
click.echo("\nFound credential for {0}: {1}".format(*result))
ctx.exit()
elif action == 'set_credential':
msg = """
Preparing to set a credential in the keyring for:
- Environment : {0}
- Parameter : {1}
If this is correct, enter the corresponding credential to store in your keyring
or press CTRL-C to abort""".format(environment, parameter)
credential = click.prompt(text=msg, hide_input=True)
result = credentials.set_user_password(environment=environment,
parameter=parameter,
password=credential)
if result:
click.echo("\nSuccessfully stored.")
ctx.exit()
else:
click.echo("\nUnable to store your credential.")
ctx.exit(1)
else:
click.secho("ERROR: must specify --get or --set", bold=True)
click.echo(ctx.get_help())
ctx.exit() | Sets or retrieves credentials stored in your system's keyring using the
python-keyring module.
Global credentials can be shared between multiple configuration sections:
\b
[prod]
OS_PASSWORD=USE_KEYRING['sso_password']
...
\b
[staging]
OS_PASSWORD=USE_KEYRING['my sso_password']
...
You could retrieve or set the global credential using these commands:
\b
supernova -g global sso_password <= get the credential
supernova -s global sso_password <= set the credential
Local credentials are intended for use with only one configuration section:
\b
[prod]
OS_PASSWORD=USE_KEYRING
...
\b
[staging]
OS_PASSWORD=USE_KEYRING
...
You could retrieve or set the local credential using these commands:
\b
supernova -g prod OS_PASSWORD <= get the credential for prod
supernova -s prod OS_PASSWORD <= set the credential for prod
\b
supernova -g staging OS_PASSWORD <= get the credential for staging
supernova -s staging OS_PASSWORD <= set the credential for staging
Full documentation:
\b
http://supernova.readthedocs.org/en/latest/configuring/ | train | https://github.com/major/supernova/blob/4a217ae53c1c05567014b047c0b6b9dea2d383b3/supernova/executable.py#L264-L350 | [
"def get_user_password(env, param, force=False):\n \"\"\"\n Allows the user to print the credential for a particular keyring entry\n to the screen\n \"\"\"\n username = utils.assemble_username(env, param)\n\n if not utils.confirm_credential_display(force):\n return\n\n # Retrieve the cre... | #
# Copyright 2014 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Contains the functions needed for supernova and supernova-keyring commands
to run
"""
import re
import sys
import webbrowser
import click
from . import config
from . import credentials
from . import supernova
from . import utils
def print_env_list(ctx, param, value):
if not value or ctx.resilient_parsing:
return
conf = ctx.params.get('conf')
# Retrieve our credentials from the configuration file
try:
nova_creds = config.run_config(config_file_override=conf)
except Exception as e:
msg = ("\n There's an error in your configuration file:\n\n"
" {0}\n").format(e)
click.echo(msg)
ctx.exit(1)
for env in nova_creds.keys():
nova_env = dict(nova_creds.get('DEFAULT', {}), **nova_creds[env])
envheader = '__ %s ' % click.style(env, fg='green')
click.echo(envheader.ljust(86, '_'))
for param, value in sorted(nova_env.items()):
click.echo(' %s: %s' % (param.upper().ljust(25), value))
ctx.exit()
def print_env_short_list(ctx, param, value):
if not value or ctx.resilient_parsing:
return
conf = ctx.params.get('conf')
# Retrieve our credentials from the configuration file
try:
nova_creds = config.run_config(config_file_override=conf)
except Exception as e:
msg = ("\n There's an error in your configuration file:\n\n"
" {0}\n").format(e)
click.echo(msg)
ctx.exit(1)
row = 1
for nova_env in nova_creds.keys():
executable = "nova"
auth_url = ""
for param, value in sorted(nova_creds[nova_env].items()):
if param.upper() == 'OS_EXECUTABLE':
executable = value
if param.upper() == 'OS_AUTH_URL' and 'inova' in value:
executable = 'inova'
if param.upper() == 'OS_AUTH_URL':
auth_url = value
color = 'green'
if row % 2 == 0:
color = 'red'
env = click.style(nova_env, fg=color)
click.echo("%s (%s) @ %s" % (env, executable, auth_url))
row += 1
ctx.exit()
# NOTE(major): This tells click to allow us to put arguments/options after
# the environment is specified. See the note just before the
# supernova.run_command() call below for more clarity.
command_settings = {
'ignore_unknown_options': True,
}
@click.command(context_settings=command_settings)
# This is a little hacky, but we cannot directly set the default to nova
# because then we have no way of determining if the option was supplied on the
# command line, or simply set to the default. We need that information so that
# we can properly set the executable to run in this order:
# 1. executable provided on command line
# 2. executable from environment variable
# 3. fallback to nova as a default if neither 1 nor 2 are set
@click.option('--executable', '-x', default='default',
help='Command to run [default: nova]', show_default=False)
@click.option('--debug', '-d', default=False, is_flag=True,
help="Enable debugging", show_default=True)
@click.option('--conf', '-c', default=None, is_flag=False,
help="Manually specify a supernova configuration file")
@click.option('--quiet', '-q', default=False, show_default=False,
is_flag=True,
help="Display the least amount of output possible")
@click.option('--echo', '-e', default=None, is_flag=True,
help="Print the specified environment and exit")
@click.option('--dashboard', '-D', default=None, is_flag=True,
help="Open dashboard in browser for specified environment")
@click.argument('environment', nargs=1)
@click.argument('command', nargs=-1)
@click.version_option()
@click.option('--list', '-l', is_flag=True, callback=print_env_list,
expose_value=False, is_eager=False, default=False,
help="List all configured environments")
@click.option('--shortlist', '-s', is_flag=True, callback=print_env_short_list,
expose_value=False, is_eager=False, default=False,
help="List all configured environments in shorter format")
@click.pass_context
def run_supernova(ctx, executable, debug, quiet, environment, command, conf,
echo, dashboard):
"""
You can use supernova with many OpenStack clients and avoid the pain of
managing multiple sets of environment variables. Getting started is easy
and there's some documentation that can help:
http://supernova.readthedocs.org/
The first step is to get your environment variables packed into a
configuration file, usually in ~/.supernova. The docs (linked above) have
some good examples that you can fill in via copy/paste.
Once you have a configuration ready to go, replace 'prod' below with one
of your configured environments and try some of these commands:
supernova prod list (Lists instances via novaclient)
supernova prod image-list (Lists images via novaclient)
supernova prod boot ... (Boots an instance via novaclient)
Have questions, bugs, or comments? Head on over to Github and open an
issue or submit a pull request!
https://github.com/major/supernova
"""
# Retrieve our credentials from the configuration file
try:
nova_creds = config.run_config(config_file_override=conf)
except Exception as e:
msg = ("\n There's an error in your configuration file:\n\n"
" {0}\n").format(e)
click.echo(msg)
ctx.exit(1)
# Warn the user if there are potentially conflicting environment variables
# already set in the user's environment.
utils.check_environment_presets()
# Is our environment argument a single environment or a supernova group?
if utils.is_valid_group(environment, nova_creds):
envs = utils.get_envs_in_group(environment, nova_creds)
elif ',' in environment:
envs = []
for env in environment.split(','):
if utils.is_valid_group(env, nova_creds):
envs.extend(utils.get_envs_in_group(env, nova_creds))
else:
envs.append(env)
elif environment.startswith('/') and environment.endswith('/'):
envs = [nova_env for nova_env in nova_creds.keys()
if re.search(environment[1:-1], nova_env)]
else:
envs = [environment]
# These are arguments for supernova and not the executable that supernova
# will eventually call.
supernova_args = {
'debug': debug,
'executable': executable,
'quiet': quiet,
'echo': echo,
'dashboard': dashboard,
}
# If the user specified a single environment, we need to verify that the
# environment actually exists in their configuration file.
if len(envs) == 1 and not utils.is_valid_environment(envs[0], nova_creds):
msg = ("\nCouldn't find an environment called '{0}' in your "
"configuration file.\nTry supernova --list to see all "
"configured environments.\n".format(envs[0]))
click.echo(msg)
ctx.exit(1)
if supernova_args['echo']:
if len(envs) > 1:
msg = ("\nCan't echo a group of environments.\nSpecify a single "
"environment when using --echo.")
click.echo(msg)
ctx.exit(1)
env = credentials.prep_shell_environment(envs[0], nova_creds)
for k in env:
click.echo('{0}={1}'.format(k, env[k]))
ctx.exit(0)
if supernova_args['dashboard']:
if len(envs) > 1:
msg = ("\nCan't open dashboard for a group of environments.\n"
"Specify a single environment when using --dashboard.")
click.echo(msg)
ctx.exit(1)
url = nova_creds[envs[0]].get('SUPERNOVA_DASHBOARD_URL')
if url is None:
msg = ("\nNo SUPERNOVA_DASHBOARD_URL specified "
"for environment: %s" % envs[0])
click.echo(msg)
ctx.exit(1)
webbrowser.open(url)
ctx.exit(0)
if len(command) == 0:
msg = ("\nMissing arguments to pass to executable Run supernova "
"--help for examples.\n".format(envs[0]))
click.echo(msg)
ctx.exit(1)
nova_args = list(command)
# Loop through the single environment (if the user specified one) or all
# of the environments in a supernova group (if the user specified a group).
for env in envs:
supernova_args['nova_env'] = env
returncode = supernova.run_command(nova_creds, nova_args,
supernova_args)
# NOTE(major): The return code here is the one that comes back from the
# OS_EXECUTABLE that supernova runs (by default, 'nova'). When using
# supernova groups, the return code is the one returned by the executable
# for the last environment in the group.
#
# It's not ideal, but it's all I can think of for now. ;)
sys.exit(returncode)
@click.command()
@click.option('--get', '-g', 'action', flag_value='get_credential',
help='retrieve a credential from keyring storage')
@click.option('--set', '-s', 'action', flag_value='set_credential',
help='store a credential in keyring storage',)
@click.argument('environment', nargs=1)
@click.argument('parameter', nargs=1)
@click.pass_context
|
erikrose/nose-progressive | noseprogressive/tracebacks.py | format_traceback | python | def format_traceback(extracted_tb,
exc_type,
exc_value,
cwd='',
term=None,
function_color=12,
dim_color=8,
editor='vi',
template=DEFAULT_EDITOR_SHORTCUT_TEMPLATE):
def format_shortcut(editor,
path,
line_number,
function=None):
"""Return a pretty-printed editor shortcut."""
return template.format(editor=editor,
line_number=line_number or 0,
path=path,
function=function or u'',
hash_if_function=u' # ' if function else u'',
function_format=term.color(function_color),
# Underline is also nice and doesn't make us
# worry about appearance on different background
# colors.
normal=term.normal,
dim_format=term.color(dim_color) + term.bold,
line_number_max_width=line_number_max_width,
term=term)
template += '\n' # Newlines are awkward to express on the command line.
extracted_tb = _unicode_decode_extracted_tb(extracted_tb)
if not term:
term = Terminal()
if extracted_tb:
# Shorten file paths:
for i, (file, line_number, function, text) in enumerate(extracted_tb):
extracted_tb[i] = human_path(src(file), cwd), line_number, function, text
line_number_max_width = len(unicode(max(the_line for _, the_line, _, _ in extracted_tb)))
# Stack frames:
for i, (path, line_number, function, text) in enumerate(extracted_tb):
text = (text and text.strip()) or u''
yield (format_shortcut(editor, path, line_number, function) +
(u' %s\n' % text))
# Exception:
if exc_type is SyntaxError:
# Format a SyntaxError to look like our other traceback lines.
# SyntaxErrors have a format different from other errors and include a
# file path which looks out of place in our newly highlit, editor-
# shortcutted world.
if hasattr(exc_value, 'filename') and hasattr(exc_value, 'lineno'):
exc_lines = [format_shortcut(editor, exc_value.filename, exc_value.lineno)]
formatted_exception = format_exception_only(SyntaxError, exc_value)[1:]
else:
# The logcapture plugin may format exceptions as strings,
# stripping them of the full filename and lineno
exc_lines = []
formatted_exception = format_exception_only(SyntaxError, exc_value)
formatted_exception.append(u'(Try --nologcapture for a more detailed traceback)\n')
else:
exc_lines = []
formatted_exception = format_exception_only(exc_type, exc_value)
exc_lines.extend([_decode(f) for f in formatted_exception])
yield u''.join(exc_lines) | Return an iterable of formatted Unicode traceback frames.
Also include a pseudo-frame at the end representing the exception itself.
Format things more compactly than the stock formatter, and make every
frame an editor shortcut. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/tracebacks.py#L21-L95 | [
"def human_path(path, cwd):\n \"\"\"Return the most human-readable representation of the given path.\n\n If an absolute path is given that's within the current directory, convert\n it to a relative path to shorten it. Otherwise, return the absolute path.\n\n \"\"\"\n # TODO: Canonicalize the path to ... | """Fancy traceback formatting"""
import os
from sys import version_info
from traceback import extract_tb, format_exception_only
from blessings import Terminal
from nose.util import src
from noseprogressive.utils import human_path
DEFAULT_EDITOR_SHORTCUT_TEMPLATE = (u' {dim_format}{editor} '
'+{line_number:<{line_number_max_width}} '
'{path}{normal}'
'{function_format}{hash_if_function}'
'{function}{normal}')
# Adapted from unittest:
def extract_relevant_tb(tb, exctype, is_test_failure):
"""Return extracted traceback frame 4-tuples that aren't unittest ones.
This used to be _exc_info_to_string().
"""
# Skip test runner traceback levels:
while tb and _is_unittest_frame(tb):
tb = tb.tb_next
if is_test_failure:
# Skip assert*() traceback levels:
length = _count_relevant_tb_levels(tb)
return extract_tb(tb, length)
return extract_tb(tb)
def _decode(string):
"""Decode a string as if it were UTF-8, swallowing errors. Turn Nones into
"None", which is more helpful than crashing.
In Python 2, extract_tb() returns simple strings. We arbitrarily guess that
UTF-8 is the encoding and use "replace" mode for undecodable chars. I'm
guessing that in Python 3 we've come to our senses and everything's
Unicode. We'll see when we add Python 3 to the tox config.
"""
if string is None:
return 'None'
return string if isinstance(string, unicode) else string.decode('utf-8', 'replace')
def _unicode_decode_extracted_tb(extracted_tb):
"""Return a traceback with the string elements translated into Unicode."""
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb]
def _is_unittest_frame(tb):
"""Return whether the given frame is something other than a unittest one."""
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(tb):
"""Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``.
"""
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1
else:
contiguous_unittest_frames = 0
tb = tb.tb_next
return length - contiguous_unittest_frames
|
erikrose/nose-progressive | noseprogressive/tracebacks.py | extract_relevant_tb | python | def extract_relevant_tb(tb, exctype, is_test_failure):
# Skip test runner traceback levels:
while tb and _is_unittest_frame(tb):
tb = tb.tb_next
if is_test_failure:
# Skip assert*() traceback levels:
length = _count_relevant_tb_levels(tb)
return extract_tb(tb, length)
return extract_tb(tb) | Return extracted traceback frame 4-tuples that aren't unittest ones.
This used to be _exc_info_to_string(). | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/tracebacks.py#L100-L113 | [
"def _is_unittest_frame(tb):\n \"\"\"Return whether the given frame is something other than a unittest one.\"\"\"\n return '__unittest' in tb.tb_frame.f_globals\n",
"def _count_relevant_tb_levels(tb):\n \"\"\"Return the number of frames in ``tb`` before all that's left is unittest frames.\n\n Unlike i... | """Fancy traceback formatting"""
import os
from sys import version_info
from traceback import extract_tb, format_exception_only
from blessings import Terminal
from nose.util import src
from noseprogressive.utils import human_path
DEFAULT_EDITOR_SHORTCUT_TEMPLATE = (u' {dim_format}{editor} '
'+{line_number:<{line_number_max_width}} '
'{path}{normal}'
'{function_format}{hash_if_function}'
'{function}{normal}')
def format_traceback(extracted_tb,
exc_type,
exc_value,
cwd='',
term=None,
function_color=12,
dim_color=8,
editor='vi',
template=DEFAULT_EDITOR_SHORTCUT_TEMPLATE):
"""Return an iterable of formatted Unicode traceback frames.
Also include a pseudo-frame at the end representing the exception itself.
Format things more compactly than the stock formatter, and make every
frame an editor shortcut.
"""
def format_shortcut(editor,
path,
line_number,
function=None):
"""Return a pretty-printed editor shortcut."""
return template.format(editor=editor,
line_number=line_number or 0,
path=path,
function=function or u'',
hash_if_function=u' # ' if function else u'',
function_format=term.color(function_color),
# Underline is also nice and doesn't make us
# worry about appearance on different background
# colors.
normal=term.normal,
dim_format=term.color(dim_color) + term.bold,
line_number_max_width=line_number_max_width,
term=term)
template += '\n' # Newlines are awkward to express on the command line.
extracted_tb = _unicode_decode_extracted_tb(extracted_tb)
if not term:
term = Terminal()
if extracted_tb:
# Shorten file paths:
for i, (file, line_number, function, text) in enumerate(extracted_tb):
extracted_tb[i] = human_path(src(file), cwd), line_number, function, text
line_number_max_width = len(unicode(max(the_line for _, the_line, _, _ in extracted_tb)))
# Stack frames:
for i, (path, line_number, function, text) in enumerate(extracted_tb):
text = (text and text.strip()) or u''
yield (format_shortcut(editor, path, line_number, function) +
(u' %s\n' % text))
# Exception:
if exc_type is SyntaxError:
# Format a SyntaxError to look like our other traceback lines.
# SyntaxErrors have a format different from other errors and include a
# file path which looks out of place in our newly highlit, editor-
# shortcutted world.
if hasattr(exc_value, 'filename') and hasattr(exc_value, 'lineno'):
exc_lines = [format_shortcut(editor, exc_value.filename, exc_value.lineno)]
formatted_exception = format_exception_only(SyntaxError, exc_value)[1:]
else:
# The logcapture plugin may format exceptions as strings,
# stripping them of the full filename and lineno
exc_lines = []
formatted_exception = format_exception_only(SyntaxError, exc_value)
formatted_exception.append(u'(Try --nologcapture for a more detailed traceback)\n')
else:
exc_lines = []
formatted_exception = format_exception_only(exc_type, exc_value)
exc_lines.extend([_decode(f) for f in formatted_exception])
yield u''.join(exc_lines)
# Adapted from unittest:
def _decode(string):
"""Decode a string as if it were UTF-8, swallowing errors. Turn Nones into
"None", which is more helpful than crashing.
In Python 2, extract_tb() returns simple strings. We arbitrarily guess that
UTF-8 is the encoding and use "replace" mode for undecodable chars. I'm
guessing that in Python 3 we've come to our senses and everything's
Unicode. We'll see when we add Python 3 to the tox config.
"""
if string is None:
return 'None'
return string if isinstance(string, unicode) else string.decode('utf-8', 'replace')
def _unicode_decode_extracted_tb(extracted_tb):
"""Return a traceback with the string elements translated into Unicode."""
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb]
def _is_unittest_frame(tb):
"""Return whether the given frame is something other than a unittest one."""
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(tb):
"""Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``.
"""
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1
else:
contiguous_unittest_frames = 0
tb = tb.tb_next
return length - contiguous_unittest_frames
|
erikrose/nose-progressive | noseprogressive/tracebacks.py | _unicode_decode_extracted_tb | python | def _unicode_decode_extracted_tb(extracted_tb):
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb] | Return a traceback with the string elements translated into Unicode. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/tracebacks.py#L131-L134 | null | """Fancy traceback formatting"""
import os
from sys import version_info
from traceback import extract_tb, format_exception_only
from blessings import Terminal
from nose.util import src
from noseprogressive.utils import human_path
DEFAULT_EDITOR_SHORTCUT_TEMPLATE = (u' {dim_format}{editor} '
'+{line_number:<{line_number_max_width}} '
'{path}{normal}'
'{function_format}{hash_if_function}'
'{function}{normal}')
def format_traceback(extracted_tb,
exc_type,
exc_value,
cwd='',
term=None,
function_color=12,
dim_color=8,
editor='vi',
template=DEFAULT_EDITOR_SHORTCUT_TEMPLATE):
"""Return an iterable of formatted Unicode traceback frames.
Also include a pseudo-frame at the end representing the exception itself.
Format things more compactly than the stock formatter, and make every
frame an editor shortcut.
"""
def format_shortcut(editor,
path,
line_number,
function=None):
"""Return a pretty-printed editor shortcut."""
return template.format(editor=editor,
line_number=line_number or 0,
path=path,
function=function or u'',
hash_if_function=u' # ' if function else u'',
function_format=term.color(function_color),
# Underline is also nice and doesn't make us
# worry about appearance on different background
# colors.
normal=term.normal,
dim_format=term.color(dim_color) + term.bold,
line_number_max_width=line_number_max_width,
term=term)
template += '\n' # Newlines are awkward to express on the command line.
extracted_tb = _unicode_decode_extracted_tb(extracted_tb)
if not term:
term = Terminal()
if extracted_tb:
# Shorten file paths:
for i, (file, line_number, function, text) in enumerate(extracted_tb):
extracted_tb[i] = human_path(src(file), cwd), line_number, function, text
line_number_max_width = len(unicode(max(the_line for _, the_line, _, _ in extracted_tb)))
# Stack frames:
for i, (path, line_number, function, text) in enumerate(extracted_tb):
text = (text and text.strip()) or u''
yield (format_shortcut(editor, path, line_number, function) +
(u' %s\n' % text))
# Exception:
if exc_type is SyntaxError:
# Format a SyntaxError to look like our other traceback lines.
# SyntaxErrors have a format different from other errors and include a
# file path which looks out of place in our newly highlit, editor-
# shortcutted world.
if hasattr(exc_value, 'filename') and hasattr(exc_value, 'lineno'):
exc_lines = [format_shortcut(editor, exc_value.filename, exc_value.lineno)]
formatted_exception = format_exception_only(SyntaxError, exc_value)[1:]
else:
# The logcapture plugin may format exceptions as strings,
# stripping them of the full filename and lineno
exc_lines = []
formatted_exception = format_exception_only(SyntaxError, exc_value)
formatted_exception.append(u'(Try --nologcapture for a more detailed traceback)\n')
else:
exc_lines = []
formatted_exception = format_exception_only(exc_type, exc_value)
exc_lines.extend([_decode(f) for f in formatted_exception])
yield u''.join(exc_lines)
# Adapted from unittest:
def extract_relevant_tb(tb, exctype, is_test_failure):
"""Return extracted traceback frame 4-tuples that aren't unittest ones.
This used to be _exc_info_to_string().
"""
# Skip test runner traceback levels:
while tb and _is_unittest_frame(tb):
tb = tb.tb_next
if is_test_failure:
# Skip assert*() traceback levels:
length = _count_relevant_tb_levels(tb)
return extract_tb(tb, length)
return extract_tb(tb)
def _decode(string):
"""Decode a string as if it were UTF-8, swallowing errors. Turn Nones into
"None", which is more helpful than crashing.
In Python 2, extract_tb() returns simple strings. We arbitrarily guess that
UTF-8 is the encoding and use "replace" mode for undecodable chars. I'm
guessing that in Python 3 we've come to our senses and everything's
Unicode. We'll see when we add Python 3 to the tox config.
"""
if string is None:
return 'None'
return string if isinstance(string, unicode) else string.decode('utf-8', 'replace')
def _is_unittest_frame(tb):
"""Return whether the given frame is something other than a unittest one."""
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(tb):
"""Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``.
"""
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1
else:
contiguous_unittest_frames = 0
tb = tb.tb_next
return length - contiguous_unittest_frames
|
erikrose/nose-progressive | noseprogressive/tracebacks.py | _count_relevant_tb_levels | python | def _count_relevant_tb_levels(tb):
length = contiguous_unittest_frames = 0
while tb:
length += 1
if _is_unittest_frame(tb):
contiguous_unittest_frames += 1
else:
contiguous_unittest_frames = 0
tb = tb.tb_next
return length - contiguous_unittest_frames | Return the number of frames in ``tb`` before all that's left is unittest frames.
Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
unittest frame, which means we don't bail out as soon as somebody uses the
mock library, which defines ``__unittest``. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/tracebacks.py#L142-L158 | [
"def _is_unittest_frame(tb):\n \"\"\"Return whether the given frame is something other than a unittest one.\"\"\"\n return '__unittest' in tb.tb_frame.f_globals\n"
] | """Fancy traceback formatting"""
import os
from sys import version_info
from traceback import extract_tb, format_exception_only
from blessings import Terminal
from nose.util import src
from noseprogressive.utils import human_path
DEFAULT_EDITOR_SHORTCUT_TEMPLATE = (u' {dim_format}{editor} '
'+{line_number:<{line_number_max_width}} '
'{path}{normal}'
'{function_format}{hash_if_function}'
'{function}{normal}')
def format_traceback(extracted_tb,
exc_type,
exc_value,
cwd='',
term=None,
function_color=12,
dim_color=8,
editor='vi',
template=DEFAULT_EDITOR_SHORTCUT_TEMPLATE):
"""Return an iterable of formatted Unicode traceback frames.
Also include a pseudo-frame at the end representing the exception itself.
Format things more compactly than the stock formatter, and make every
frame an editor shortcut.
"""
def format_shortcut(editor,
path,
line_number,
function=None):
"""Return a pretty-printed editor shortcut."""
return template.format(editor=editor,
line_number=line_number or 0,
path=path,
function=function or u'',
hash_if_function=u' # ' if function else u'',
function_format=term.color(function_color),
# Underline is also nice and doesn't make us
# worry about appearance on different background
# colors.
normal=term.normal,
dim_format=term.color(dim_color) + term.bold,
line_number_max_width=line_number_max_width,
term=term)
template += '\n' # Newlines are awkward to express on the command line.
extracted_tb = _unicode_decode_extracted_tb(extracted_tb)
if not term:
term = Terminal()
if extracted_tb:
# Shorten file paths:
for i, (file, line_number, function, text) in enumerate(extracted_tb):
extracted_tb[i] = human_path(src(file), cwd), line_number, function, text
line_number_max_width = len(unicode(max(the_line for _, the_line, _, _ in extracted_tb)))
# Stack frames:
for i, (path, line_number, function, text) in enumerate(extracted_tb):
text = (text and text.strip()) or u''
yield (format_shortcut(editor, path, line_number, function) +
(u' %s\n' % text))
# Exception:
if exc_type is SyntaxError:
# Format a SyntaxError to look like our other traceback lines.
# SyntaxErrors have a format different from other errors and include a
# file path which looks out of place in our newly highlit, editor-
# shortcutted world.
if hasattr(exc_value, 'filename') and hasattr(exc_value, 'lineno'):
exc_lines = [format_shortcut(editor, exc_value.filename, exc_value.lineno)]
formatted_exception = format_exception_only(SyntaxError, exc_value)[1:]
else:
# The logcapture plugin may format exceptions as strings,
# stripping them of the full filename and lineno
exc_lines = []
formatted_exception = format_exception_only(SyntaxError, exc_value)
formatted_exception.append(u'(Try --nologcapture for a more detailed traceback)\n')
else:
exc_lines = []
formatted_exception = format_exception_only(exc_type, exc_value)
exc_lines.extend([_decode(f) for f in formatted_exception])
yield u''.join(exc_lines)
# Adapted from unittest:
def extract_relevant_tb(tb, exctype, is_test_failure):
"""Return extracted traceback frame 4-tuples that aren't unittest ones.
This used to be _exc_info_to_string().
"""
# Skip test runner traceback levels:
while tb and _is_unittest_frame(tb):
tb = tb.tb_next
if is_test_failure:
# Skip assert*() traceback levels:
length = _count_relevant_tb_levels(tb)
return extract_tb(tb, length)
return extract_tb(tb)
def _decode(string):
"""Decode a string as if it were UTF-8, swallowing errors. Turn Nones into
"None", which is more helpful than crashing.
In Python 2, extract_tb() returns simple strings. We arbitrarily guess that
UTF-8 is the encoding and use "replace" mode for undecodable chars. I'm
guessing that in Python 3 we've come to our senses and everything's
Unicode. We'll see when we add Python 3 to the tox config.
"""
if string is None:
return 'None'
return string if isinstance(string, unicode) else string.decode('utf-8', 'replace')
def _unicode_decode_extracted_tb(extracted_tb):
"""Return a traceback with the string elements translated into Unicode."""
return [(_decode(file), line_number, _decode(function), _decode(text))
for file, line_number, function, text in extracted_tb]
def _is_unittest_frame(tb):
"""Return whether the given frame is something other than a unittest one."""
return '__unittest' in tb.tb_frame.f_globals
|
erikrose/nose-progressive | noseprogressive/wrapping.py | cmdloop | python | def cmdloop(self, *args, **kwargs):
def unwrapping_raw_input(*args, **kwargs):
"""Call raw_input(), making sure it finds an unwrapped stdout."""
wrapped_stdout = sys.stdout
sys.stdout = wrapped_stdout.stream
ret = orig_raw_input(*args, **kwargs)
sys.stdout = wrapped_stdout
return ret
try:
orig_raw_input = raw_input
except NameError:
orig_raw_input = input
if hasattr(sys.stdout, 'stream'):
__builtin__.raw_input = unwrapping_raw_input
# else if capture plugin has replaced it with a StringIO, don't bother.
try:
# Interesting things happen when you try to not reference the
# superclass explicitly.
ret = cmd.Cmd.cmdloop(self, *args, **kwargs)
finally:
__builtin__.raw_input = orig_raw_input
return ret | Call pdb's cmdloop, making readline work.
Patch raw_input so it sees the original stdin and stdout, lest
readline refuse to work.
The C implementation of raw_input uses readline functionality only if
both stdin and stdout are from a terminal AND are FILE*s (not
PyObject*s): http://bugs.python.org/issue5727 and
https://bugzilla.redhat.com/show_bug.cgi?id=448864 | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/wrapping.py#L10-L45 | null | """Facilities for wrapping stderr and stdout and dealing with the fallout"""
from __future__ import with_statement
import __builtin__
import cmd
import pdb
import sys
def set_trace(*args, **kwargs):
"""Call pdb.set_trace, making sure it receives the unwrapped stdout.
This is so we don't keep drawing progress bars over debugger output.
"""
# There's no stream attr if capture plugin is enabled:
out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
# Python 2.5 can't put an explicit kwarg and **kwargs in the same function
# call.
kwargs['stdout'] = out
debugger = pdb.Pdb(*args, **kwargs)
# Ordinarily (and in a silly fashion), pdb refuses to use raw_input() if
# you pass it a stream on instantiation. Fix that:
debugger.use_rawinput = True
debugger.set_trace(sys._getframe().f_back)
class StreamWrapper(object):
"""Wrapper for stdout/stderr to do progress bar dodging"""
# An outer class so isinstance() works in begin()
def __init__(self, stream, plugin):
self.stream = stream
self._plugin = plugin
def __getattr__(self, name):
return getattr(self.stream, name)
def write(self, data):
if hasattr(self._plugin, 'bar'):
with self._plugin.bar.dodging():
self.stream.write(data)
else:
# Some things write to stderr before the bar is inited.
self.stream.write(data)
|
erikrose/nose-progressive | noseprogressive/wrapping.py | set_trace | python | def set_trace(*args, **kwargs):
# There's no stream attr if capture plugin is enabled:
out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
# Python 2.5 can't put an explicit kwarg and **kwargs in the same function
# call.
kwargs['stdout'] = out
debugger = pdb.Pdb(*args, **kwargs)
# Ordinarily (and in a silly fashion), pdb refuses to use raw_input() if
# you pass it a stream on instantiation. Fix that:
debugger.use_rawinput = True
debugger.set_trace(sys._getframe().f_back) | Call pdb.set_trace, making sure it receives the unwrapped stdout.
This is so we don't keep drawing progress bars over debugger output. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/wrapping.py#L48-L66 | null | """Facilities for wrapping stderr and stdout and dealing with the fallout"""
from __future__ import with_statement
import __builtin__
import cmd
import pdb
import sys
def cmdloop(self, *args, **kwargs):
"""Call pdb's cmdloop, making readline work.
Patch raw_input so it sees the original stdin and stdout, lest
readline refuse to work.
The C implementation of raw_input uses readline functionality only if
both stdin and stdout are from a terminal AND are FILE*s (not
PyObject*s): http://bugs.python.org/issue5727 and
https://bugzilla.redhat.com/show_bug.cgi?id=448864
"""
def unwrapping_raw_input(*args, **kwargs):
"""Call raw_input(), making sure it finds an unwrapped stdout."""
wrapped_stdout = sys.stdout
sys.stdout = wrapped_stdout.stream
ret = orig_raw_input(*args, **kwargs)
sys.stdout = wrapped_stdout
return ret
try:
orig_raw_input = raw_input
except NameError:
orig_raw_input = input
if hasattr(sys.stdout, 'stream'):
__builtin__.raw_input = unwrapping_raw_input
# else if capture plugin has replaced it with a StringIO, don't bother.
try:
# Interesting things happen when you try to not reference the
# superclass explicitly.
ret = cmd.Cmd.cmdloop(self, *args, **kwargs)
finally:
__builtin__.raw_input = orig_raw_input
return ret
class StreamWrapper(object):
"""Wrapper for stdout/stderr to do progress bar dodging"""
# An outer class so isinstance() works in begin()
def __init__(self, stream, plugin):
self.stream = stream
self._plugin = plugin
def __getattr__(self, name):
return getattr(self.stream, name)
def write(self, data):
if hasattr(self._plugin, 'bar'):
with self._plugin.bar.dodging():
self.stream.write(data)
else:
# Some things write to stderr before the bar is inited.
self.stream.write(data)
|
erikrose/nose-progressive | noseprogressive/plugin.py | ProgressivePlugin.begin | python | def begin(self):
# The calls to begin/finalize end up like this: a call to begin() on
# instance A of the plugin, then a paired begin/finalize for each test
# on instance B, then a final call to finalize() on instance A.
# TODO: Do only if isatty.
self._stderr.append(sys.stderr)
sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
self._stdout.append(sys.stdout)
sys.stdout = StreamWrapper(sys.stdout, self)
self._set_trace.append(pdb.set_trace)
pdb.set_trace = set_trace
self._cmdloop.append(pdb.Pdb.cmdloop)
pdb.Pdb.cmdloop = cmdloop
# nosetests changes directories to the tests dir when run from a
# distribution dir, so save the original cwd for relativizing paths.
self._cwd = '' if self.conf.options.absolute_paths else getcwd() | Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/plugin.py#L27-L54 | null | class ProgressivePlugin(Plugin):
"""A nose plugin which has a progress bar and formats tracebacks for humans"""
name = 'progressive'
_totalTests = 0
score = 10000 # Grab stdout and stderr before the capture plugin.
def __init__(self, *args, **kwargs):
super(ProgressivePlugin, self).__init__(*args, **kwargs)
# Same wrapping pattern as the built-in capture plugin. The lists
# shouldn't be necessary, but they don't cost much, and I have to
# wonder why capture uses them.
self._stderr, self._stdout, self._set_trace, self._cmdloop = \
[], [], [], []
def finalize(self, result):
"""Put monkeypatches back as we found them."""
sys.stderr = self._stderr.pop()
sys.stdout = self._stdout.pop()
pdb.set_trace = self._set_trace.pop()
pdb.Pdb.cmdloop = self._cmdloop.pop()
def options(self, parser, env):
super(ProgressivePlugin, self).options(parser, env)
parser.add_option('--progressive-editor',
type='string',
dest='editor',
default=env.get('NOSE_PROGRESSIVE_EDITOR',
env.get('EDITOR', 'vi')),
help='The editor to use for the shortcuts in '
'tracebacks. Defaults to the value of $EDITOR '
'and then "vi". [NOSE_PROGRESSIVE_EDITOR]')
parser.add_option('--progressive-abs',
action='store_true',
dest='absolute_paths',
default=env.get('NOSE_PROGRESSIVE_ABSOLUTE_PATHS', False),
help='Display paths in traceback as absolute, '
'rather than relative to the current working '
'directory. [NOSE_PROGRESSIVE_ABSOLUTE_PATHS]')
parser.add_option('--progressive-advisories',
action='store_true',
dest='show_advisories',
default=env.get('NOSE_PROGRESSIVE_ADVISORIES', False),
help='Show skips and deprecation exceptions in '
'addition to failures and errors. '
'[NOSE_PROGRESSIVE_ADVISORIES]')
parser.add_option('--progressive-with-styling',
action='store_true',
dest='with_styling',
default=env.get('NOSE_PROGRESSIVE_WITH_STYLING', False),
help='nose-progressive automatically omits bold and '
'color formatting when its output is directed '
'to a non-terminal. Specifying '
'--progressive-with-styling forces such '
'styling to be output regardless. '
'[NOSE_PROGRESSIVE_WITH_STYLING]')
parser.add_option('--progressive-with-bar',
action='store_true',
dest='with_bar',
default=env.get('NOSE_PROGRESSIVE_WITH_BAR', False),
help='nose-progressive automatically omits the '
'progress bar when its output is directed to a '
'non-terminal. Specifying '
'--progressive-with-bar forces the bar to be '
'output regardless. This option implies '
'--progressive-with-styling. '
'[NOSE_PROGRESSIVE_WITH_BAR]')
parser.add_option('--progressive-function-color',
type='int',
dest='function_color',
default=env.get('NOSE_PROGRESSIVE_FUNCTION_COLOR', 12),
help='Color of function names in tracebacks. An '
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_FUNCTION_COLOR]')
parser.add_option('--progressive-dim-color',
type='int',
dest='dim_color',
default=env.get('NOSE_PROGRESSIVE_DIM_COLOR', 8),
help='Color of de-emphasized text (like editor '
'shortcuts) in tracebacks. An ANSI color '
'expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_DIM_COLOR]')
parser.add_option('--progressive-bar-filled-color',
type='int',
dest='bar_filled_color',
default=env.get('NOSE_PROGRESSIVE_BAR_FILLED_COLOR', 8),
help="Color of the progress bar's filled portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_FILLED_COLOR]')
parser.add_option('--progressive-bar-empty-color',
type='int',
dest='bar_empty_color',
default=env.get('NOSE_PROGRESSIVE_BAR_EMPTY_COLOR', 7),
help="Color of the progress bar's empty portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_EMPTY_COLOR]')
parser.add_option('--progressive-editor-shortcut-template',
type='string',
dest='editor_shortcut_template',
default=env.get(
'NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE',
DEFAULT_EDITOR_SHORTCUT_TEMPLATE),
help='A str.format() template for the non-code lines'
' of the traceback. '
'[NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE]')
def configure(self, options, conf):
"""Turn style-forcing on if bar-forcing is on.
It'd be messy to position the bar but still have the rest of the
terminal capabilities emit ''.
"""
super(ProgressivePlugin, self).configure(options, conf)
if (getattr(options, 'verbosity', 0) > 1 and
getattr(options, 'enable_plugin_id', False)):
# TODO: Can we forcibly disable the ID plugin?
print ('Using --with-id and --verbosity=2 or higher with '
'nose-progressive causes visualization errors. Remove one '
'or the other to avoid a mess.')
if options.with_bar:
options.with_styling = True
def prepareTestLoader(self, loader):
"""Insert ourselves into loader calls to count tests.
The top-level loader call often returns lazy results, like a LazySuite.
This is a problem, as we would destroy the suite by iterating over it
to count the tests. Consequently, we monkeypatch the top-level loader
call to do the load twice: once for the actual test running and again
to yield something we can iterate over to do the count.
"""
def capture_suite(orig_method, *args, **kwargs):
"""Intercept calls to the loader before they get lazy.
Re-execute them to grab a copy of the possibly lazy suite, and
count the tests therein.
"""
self._totalTests += orig_method(*args, **kwargs).countTestCases()
# Clear out the loader's cache. Otherwise, it never finds any tests
# for the actual test run:
loader._visitedPaths = set()
return orig_method(*args, **kwargs)
# TODO: If there's ever a practical need, also patch loader.suiteClass
# or even TestProgram.createTests. createTests seems to be main top-
# level caller of loader methods, and nose.core.collector() (which
# isn't even called in nose) is an alternate one.
if hasattr(loader, 'loadTestsFromNames'):
loader.loadTestsFromNames = partial(capture_suite,
loader.loadTestsFromNames)
def prepareTestRunner(self, runner):
"""Replace TextTestRunner with something that prints fewer dots."""
return ProgressiveRunner(self._cwd,
self._totalTests,
runner.stream,
verbosity=self.conf.verbosity,
config=self.conf) # So we don't get a default
# NoPlugins manager
def prepareTestResult(self, result):
"""Hang onto the progress bar so the StreamWrappers can grab it."""
self.bar = result.bar
|
erikrose/nose-progressive | noseprogressive/plugin.py | ProgressivePlugin.finalize | python | def finalize(self, result):
sys.stderr = self._stderr.pop()
sys.stdout = self._stdout.pop()
pdb.set_trace = self._set_trace.pop()
pdb.Pdb.cmdloop = self._cmdloop.pop() | Put monkeypatches back as we found them. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/plugin.py#L56-L61 | null | class ProgressivePlugin(Plugin):
"""A nose plugin which has a progress bar and formats tracebacks for humans"""
name = 'progressive'
_totalTests = 0
score = 10000 # Grab stdout and stderr before the capture plugin.
def __init__(self, *args, **kwargs):
super(ProgressivePlugin, self).__init__(*args, **kwargs)
# Same wrapping pattern as the built-in capture plugin. The lists
# shouldn't be necessary, but they don't cost much, and I have to
# wonder why capture uses them.
self._stderr, self._stdout, self._set_trace, self._cmdloop = \
[], [], [], []
def begin(self):
"""Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger.
"""
# The calls to begin/finalize end up like this: a call to begin() on
# instance A of the plugin, then a paired begin/finalize for each test
# on instance B, then a final call to finalize() on instance A.
# TODO: Do only if isatty.
self._stderr.append(sys.stderr)
sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
self._stdout.append(sys.stdout)
sys.stdout = StreamWrapper(sys.stdout, self)
self._set_trace.append(pdb.set_trace)
pdb.set_trace = set_trace
self._cmdloop.append(pdb.Pdb.cmdloop)
pdb.Pdb.cmdloop = cmdloop
# nosetests changes directories to the tests dir when run from a
# distribution dir, so save the original cwd for relativizing paths.
self._cwd = '' if self.conf.options.absolute_paths else getcwd()
def options(self, parser, env):
super(ProgressivePlugin, self).options(parser, env)
parser.add_option('--progressive-editor',
type='string',
dest='editor',
default=env.get('NOSE_PROGRESSIVE_EDITOR',
env.get('EDITOR', 'vi')),
help='The editor to use for the shortcuts in '
'tracebacks. Defaults to the value of $EDITOR '
'and then "vi". [NOSE_PROGRESSIVE_EDITOR]')
parser.add_option('--progressive-abs',
action='store_true',
dest='absolute_paths',
default=env.get('NOSE_PROGRESSIVE_ABSOLUTE_PATHS', False),
help='Display paths in traceback as absolute, '
'rather than relative to the current working '
'directory. [NOSE_PROGRESSIVE_ABSOLUTE_PATHS]')
parser.add_option('--progressive-advisories',
action='store_true',
dest='show_advisories',
default=env.get('NOSE_PROGRESSIVE_ADVISORIES', False),
help='Show skips and deprecation exceptions in '
'addition to failures and errors. '
'[NOSE_PROGRESSIVE_ADVISORIES]')
parser.add_option('--progressive-with-styling',
action='store_true',
dest='with_styling',
default=env.get('NOSE_PROGRESSIVE_WITH_STYLING', False),
help='nose-progressive automatically omits bold and '
'color formatting when its output is directed '
'to a non-terminal. Specifying '
'--progressive-with-styling forces such '
'styling to be output regardless. '
'[NOSE_PROGRESSIVE_WITH_STYLING]')
parser.add_option('--progressive-with-bar',
action='store_true',
dest='with_bar',
default=env.get('NOSE_PROGRESSIVE_WITH_BAR', False),
help='nose-progressive automatically omits the '
'progress bar when its output is directed to a '
'non-terminal. Specifying '
'--progressive-with-bar forces the bar to be '
'output regardless. This option implies '
'--progressive-with-styling. '
'[NOSE_PROGRESSIVE_WITH_BAR]')
parser.add_option('--progressive-function-color',
type='int',
dest='function_color',
default=env.get('NOSE_PROGRESSIVE_FUNCTION_COLOR', 12),
help='Color of function names in tracebacks. An '
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_FUNCTION_COLOR]')
parser.add_option('--progressive-dim-color',
type='int',
dest='dim_color',
default=env.get('NOSE_PROGRESSIVE_DIM_COLOR', 8),
help='Color of de-emphasized text (like editor '
'shortcuts) in tracebacks. An ANSI color '
'expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_DIM_COLOR]')
parser.add_option('--progressive-bar-filled-color',
type='int',
dest='bar_filled_color',
default=env.get('NOSE_PROGRESSIVE_BAR_FILLED_COLOR', 8),
help="Color of the progress bar's filled portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_FILLED_COLOR]')
parser.add_option('--progressive-bar-empty-color',
type='int',
dest='bar_empty_color',
default=env.get('NOSE_PROGRESSIVE_BAR_EMPTY_COLOR', 7),
help="Color of the progress bar's empty portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_EMPTY_COLOR]')
parser.add_option('--progressive-editor-shortcut-template',
type='string',
dest='editor_shortcut_template',
default=env.get(
'NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE',
DEFAULT_EDITOR_SHORTCUT_TEMPLATE),
help='A str.format() template for the non-code lines'
' of the traceback. '
'[NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE]')
def configure(self, options, conf):
"""Turn style-forcing on if bar-forcing is on.
It'd be messy to position the bar but still have the rest of the
terminal capabilities emit ''.
"""
super(ProgressivePlugin, self).configure(options, conf)
if (getattr(options, 'verbosity', 0) > 1 and
getattr(options, 'enable_plugin_id', False)):
# TODO: Can we forcibly disable the ID plugin?
print ('Using --with-id and --verbosity=2 or higher with '
'nose-progressive causes visualization errors. Remove one '
'or the other to avoid a mess.')
if options.with_bar:
options.with_styling = True
def prepareTestLoader(self, loader):
"""Insert ourselves into loader calls to count tests.
The top-level loader call often returns lazy results, like a LazySuite.
This is a problem, as we would destroy the suite by iterating over it
to count the tests. Consequently, we monkeypatch the top-level loader
call to do the load twice: once for the actual test running and again
to yield something we can iterate over to do the count.
"""
def capture_suite(orig_method, *args, **kwargs):
"""Intercept calls to the loader before they get lazy.
Re-execute them to grab a copy of the possibly lazy suite, and
count the tests therein.
"""
self._totalTests += orig_method(*args, **kwargs).countTestCases()
# Clear out the loader's cache. Otherwise, it never finds any tests
# for the actual test run:
loader._visitedPaths = set()
return orig_method(*args, **kwargs)
# TODO: If there's ever a practical need, also patch loader.suiteClass
# or even TestProgram.createTests. createTests seems to be main top-
# level caller of loader methods, and nose.core.collector() (which
# isn't even called in nose) is an alternate one.
if hasattr(loader, 'loadTestsFromNames'):
loader.loadTestsFromNames = partial(capture_suite,
loader.loadTestsFromNames)
def prepareTestRunner(self, runner):
"""Replace TextTestRunner with something that prints fewer dots."""
return ProgressiveRunner(self._cwd,
self._totalTests,
runner.stream,
verbosity=self.conf.verbosity,
config=self.conf) # So we don't get a default
# NoPlugins manager
def prepareTestResult(self, result):
"""Hang onto the progress bar so the StreamWrappers can grab it."""
self.bar = result.bar
|
erikrose/nose-progressive | noseprogressive/plugin.py | ProgressivePlugin.configure | python | def configure(self, options, conf):
super(ProgressivePlugin, self).configure(options, conf)
if (getattr(options, 'verbosity', 0) > 1 and
getattr(options, 'enable_plugin_id', False)):
# TODO: Can we forcibly disable the ID plugin?
print ('Using --with-id and --verbosity=2 or higher with '
'nose-progressive causes visualization errors. Remove one '
'or the other to avoid a mess.')
if options.with_bar:
options.with_styling = True | Turn style-forcing on if bar-forcing is on.
It'd be messy to position the bar but still have the rest of the
terminal capabilities emit ''. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/plugin.py#L147-L162 | null | class ProgressivePlugin(Plugin):
"""A nose plugin which has a progress bar and formats tracebacks for humans"""
name = 'progressive'
_totalTests = 0
score = 10000 # Grab stdout and stderr before the capture plugin.
def __init__(self, *args, **kwargs):
super(ProgressivePlugin, self).__init__(*args, **kwargs)
# Same wrapping pattern as the built-in capture plugin. The lists
# shouldn't be necessary, but they don't cost much, and I have to
# wonder why capture uses them.
self._stderr, self._stdout, self._set_trace, self._cmdloop = \
[], [], [], []
def begin(self):
"""Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger.
"""
# The calls to begin/finalize end up like this: a call to begin() on
# instance A of the plugin, then a paired begin/finalize for each test
# on instance B, then a final call to finalize() on instance A.
# TODO: Do only if isatty.
self._stderr.append(sys.stderr)
sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
self._stdout.append(sys.stdout)
sys.stdout = StreamWrapper(sys.stdout, self)
self._set_trace.append(pdb.set_trace)
pdb.set_trace = set_trace
self._cmdloop.append(pdb.Pdb.cmdloop)
pdb.Pdb.cmdloop = cmdloop
# nosetests changes directories to the tests dir when run from a
# distribution dir, so save the original cwd for relativizing paths.
self._cwd = '' if self.conf.options.absolute_paths else getcwd()
def finalize(self, result):
"""Put monkeypatches back as we found them."""
sys.stderr = self._stderr.pop()
sys.stdout = self._stdout.pop()
pdb.set_trace = self._set_trace.pop()
pdb.Pdb.cmdloop = self._cmdloop.pop()
def options(self, parser, env):
super(ProgressivePlugin, self).options(parser, env)
parser.add_option('--progressive-editor',
type='string',
dest='editor',
default=env.get('NOSE_PROGRESSIVE_EDITOR',
env.get('EDITOR', 'vi')),
help='The editor to use for the shortcuts in '
'tracebacks. Defaults to the value of $EDITOR '
'and then "vi". [NOSE_PROGRESSIVE_EDITOR]')
parser.add_option('--progressive-abs',
action='store_true',
dest='absolute_paths',
default=env.get('NOSE_PROGRESSIVE_ABSOLUTE_PATHS', False),
help='Display paths in traceback as absolute, '
'rather than relative to the current working '
'directory. [NOSE_PROGRESSIVE_ABSOLUTE_PATHS]')
parser.add_option('--progressive-advisories',
action='store_true',
dest='show_advisories',
default=env.get('NOSE_PROGRESSIVE_ADVISORIES', False),
help='Show skips and deprecation exceptions in '
'addition to failures and errors. '
'[NOSE_PROGRESSIVE_ADVISORIES]')
parser.add_option('--progressive-with-styling',
action='store_true',
dest='with_styling',
default=env.get('NOSE_PROGRESSIVE_WITH_STYLING', False),
help='nose-progressive automatically omits bold and '
'color formatting when its output is directed '
'to a non-terminal. Specifying '
'--progressive-with-styling forces such '
'styling to be output regardless. '
'[NOSE_PROGRESSIVE_WITH_STYLING]')
parser.add_option('--progressive-with-bar',
action='store_true',
dest='with_bar',
default=env.get('NOSE_PROGRESSIVE_WITH_BAR', False),
help='nose-progressive automatically omits the '
'progress bar when its output is directed to a '
'non-terminal. Specifying '
'--progressive-with-bar forces the bar to be '
'output regardless. This option implies '
'--progressive-with-styling. '
'[NOSE_PROGRESSIVE_WITH_BAR]')
parser.add_option('--progressive-function-color',
type='int',
dest='function_color',
default=env.get('NOSE_PROGRESSIVE_FUNCTION_COLOR', 12),
help='Color of function names in tracebacks. An '
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_FUNCTION_COLOR]')
parser.add_option('--progressive-dim-color',
type='int',
dest='dim_color',
default=env.get('NOSE_PROGRESSIVE_DIM_COLOR', 8),
help='Color of de-emphasized text (like editor '
'shortcuts) in tracebacks. An ANSI color '
'expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_DIM_COLOR]')
parser.add_option('--progressive-bar-filled-color',
type='int',
dest='bar_filled_color',
default=env.get('NOSE_PROGRESSIVE_BAR_FILLED_COLOR', 8),
help="Color of the progress bar's filled portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_FILLED_COLOR]')
parser.add_option('--progressive-bar-empty-color',
type='int',
dest='bar_empty_color',
default=env.get('NOSE_PROGRESSIVE_BAR_EMPTY_COLOR', 7),
help="Color of the progress bar's empty portion. An "
'ANSI color expressed as a number 0-15. '
'[NOSE_PROGRESSIVE_BAR_EMPTY_COLOR]')
parser.add_option('--progressive-editor-shortcut-template',
type='string',
dest='editor_shortcut_template',
default=env.get(
'NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE',
DEFAULT_EDITOR_SHORTCUT_TEMPLATE),
help='A str.format() template for the non-code lines'
' of the traceback. '
'[NOSE_PROGRESSIVE_EDITOR_SHORTCUT_TEMPLATE]')
def prepareTestLoader(self, loader):
"""Insert ourselves into loader calls to count tests.
The top-level loader call often returns lazy results, like a LazySuite.
This is a problem, as we would destroy the suite by iterating over it
to count the tests. Consequently, we monkeypatch the top-level loader
call to do the load twice: once for the actual test running and again
to yield something we can iterate over to do the count.
"""
def capture_suite(orig_method, *args, **kwargs):
"""Intercept calls to the loader before they get lazy.
Re-execute them to grab a copy of the possibly lazy suite, and
count the tests therein.
"""
self._totalTests += orig_method(*args, **kwargs).countTestCases()
# Clear out the loader's cache. Otherwise, it never finds any tests
# for the actual test run:
loader._visitedPaths = set()
return orig_method(*args, **kwargs)
# TODO: If there's ever a practical need, also patch loader.suiteClass
# or even TestProgram.createTests. createTests seems to be main top-
# level caller of loader methods, and nose.core.collector() (which
# isn't even called in nose) is an alternate one.
if hasattr(loader, 'loadTestsFromNames'):
loader.loadTestsFromNames = partial(capture_suite,
loader.loadTestsFromNames)
def prepareTestRunner(self, runner):
"""Replace TextTestRunner with something that prints fewer dots."""
return ProgressiveRunner(self._cwd,
self._totalTests,
runner.stream,
verbosity=self.conf.verbosity,
config=self.conf) # So we don't get a default
# NoPlugins manager
def prepareTestResult(self, result):
"""Hang onto the progress bar so the StreamWrappers can grab it."""
self.bar = result.bar
|
erikrose/nose-progressive | noseprogressive/bar.py | ProgressBar.update | python | def update(self, test_path, number):
# TODO: Play nicely with absurdly narrow terminals. (OS X's won't even
# go small enough to hurt us.)
# Figure out graph:
GRAPH_WIDTH = 14
# min() is in case we somehow get the total test count wrong. It's tricky.
num_filled = int(round(min(1.0, float(number) / self.max) * GRAPH_WIDTH))
graph = ''.join([self._fill_cap(' ' * num_filled),
self._empty_cap(self._empty_char * (GRAPH_WIDTH - num_filled))])
# Figure out the test identifier portion:
cols_for_path = self.cols - GRAPH_WIDTH - 2 # 2 spaces between path & graph
if len(test_path) > cols_for_path:
test_path = test_path[len(test_path) - cols_for_path:]
else:
test_path += ' ' * (cols_for_path - len(test_path))
# Put them together, and let simmer:
self.last = self._term.bold(test_path) + ' ' + graph
with self._at_last_line():
self.stream.write(self.last)
self.stream.flush() | Draw an updated progress bar.
At the moment, the graph takes a fixed width, and the test identifier
takes the rest of the row, truncated from the left to fit.
test_path -- the selector of the test being run
number -- how many tests have been run so far, including this one | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/bar.py#L41-L72 | [
"def _at_last_line(self):\n \"\"\"Return a context manager that positions the cursor at the last line, lets you write things, and then returns it to its previous position.\"\"\"\n return self._term.location(0, self.lines)\n",
"self._empty_cap = lambda s: s\n"
] | class ProgressBar(object):
_is_dodging = 0 # Like a semaphore
def __init__(self, max_value, term, filled_color=8, empty_color=7):
"""``max_value`` is the highest value I will attain. Must be >0."""
self.stream = term.stream
self.max = max_value
self._term = term
self.last = '' # The contents of the previous progress line printed
self._measure_terminal()
# Prepare formatting, dependent on whether we have terminal colors:
if term.number_of_colors > max(filled_color, empty_color):
self._fill_cap = term.on_color(filled_color)
self._empty_cap = term.on_color(empty_color)
self._empty_char = ' '
else:
self._fill_cap = term.reverse
self._empty_cap = lambda s: s
self._empty_char = '_'
signal(SIGWINCH, self._handle_winch)
def _measure_terminal(self):
self.lines, self.cols = (self._term.height or 24,
self._term.width or 80)
def _handle_winch(self, *args):
#self.erase() # Doesn't seem to help.
self._measure_terminal()
# TODO: Reprint the bar but at the new width.
def erase(self):
"""White out the progress bar."""
with self._at_last_line():
self.stream.write(self._term.clear_eol)
self.stream.flush()
def _at_last_line(self):
"""Return a context manager that positions the cursor at the last line, lets you write things, and then returns it to its previous position."""
return self._term.location(0, self.lines)
def dodging(bar):
"""Return a context manager which erases the bar, lets you output things, and then redraws the bar.
It's reentrant.
"""
class ShyProgressBar(object):
"""Context manager that implements a progress bar that gets out of the way"""
def __enter__(self):
"""Erase the progress bar so bits of disembodied progress bar don't get scrolled up the terminal."""
# My terminal has no status line, so we make one manually.
bar._is_dodging += 1 # Increment before calling erase(), which
# calls dodging() again.
if bar._is_dodging <= 1: # It *was* 0.
bar.erase()
def __exit__(self, type, value, tb):
"""Redraw the last saved state of the progress bar."""
if bar._is_dodging == 1: # Can't decrement yet; write() could
# read it.
# This is really necessary only because we monkeypatch
# stderr; the next test is about to start and will redraw
# the bar.
with bar._at_last_line():
bar.stream.write(bar.last)
bar.stream.flush()
bar._is_dodging -= 1
return ShyProgressBar()
|
erikrose/nose-progressive | noseprogressive/bar.py | ProgressBar.erase | python | def erase(self):
with self._at_last_line():
self.stream.write(self._term.clear_eol)
self.stream.flush() | White out the progress bar. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/bar.py#L74-L78 | [
"def _at_last_line(self):\n \"\"\"Return a context manager that positions the cursor at the last line, lets you write things, and then returns it to its previous position.\"\"\"\n return self._term.location(0, self.lines)\n"
] | class ProgressBar(object):
_is_dodging = 0 # Like a semaphore
def __init__(self, max_value, term, filled_color=8, empty_color=7):
"""``max_value`` is the highest value I will attain. Must be >0."""
self.stream = term.stream
self.max = max_value
self._term = term
self.last = '' # The contents of the previous progress line printed
self._measure_terminal()
# Prepare formatting, dependent on whether we have terminal colors:
if term.number_of_colors > max(filled_color, empty_color):
self._fill_cap = term.on_color(filled_color)
self._empty_cap = term.on_color(empty_color)
self._empty_char = ' '
else:
self._fill_cap = term.reverse
self._empty_cap = lambda s: s
self._empty_char = '_'
signal(SIGWINCH, self._handle_winch)
def _measure_terminal(self):
self.lines, self.cols = (self._term.height or 24,
self._term.width or 80)
def _handle_winch(self, *args):
#self.erase() # Doesn't seem to help.
self._measure_terminal()
# TODO: Reprint the bar but at the new width.
def update(self, test_path, number):
"""Draw an updated progress bar.
At the moment, the graph takes a fixed width, and the test identifier
takes the rest of the row, truncated from the left to fit.
test_path -- the selector of the test being run
number -- how many tests have been run so far, including this one
"""
# TODO: Play nicely with absurdly narrow terminals. (OS X's won't even
# go small enough to hurt us.)
# Figure out graph:
GRAPH_WIDTH = 14
# min() is in case we somehow get the total test count wrong. It's tricky.
num_filled = int(round(min(1.0, float(number) / self.max) * GRAPH_WIDTH))
graph = ''.join([self._fill_cap(' ' * num_filled),
self._empty_cap(self._empty_char * (GRAPH_WIDTH - num_filled))])
# Figure out the test identifier portion:
cols_for_path = self.cols - GRAPH_WIDTH - 2 # 2 spaces between path & graph
if len(test_path) > cols_for_path:
test_path = test_path[len(test_path) - cols_for_path:]
else:
test_path += ' ' * (cols_for_path - len(test_path))
# Put them together, and let simmer:
self.last = self._term.bold(test_path) + ' ' + graph
with self._at_last_line():
self.stream.write(self.last)
self.stream.flush()
def _at_last_line(self):
"""Return a context manager that positions the cursor at the last line, lets you write things, and then returns it to its previous position."""
return self._term.location(0, self.lines)
def dodging(bar):
"""Return a context manager which erases the bar, lets you output things, and then redraws the bar.
It's reentrant.
"""
class ShyProgressBar(object):
"""Context manager that implements a progress bar that gets out of the way"""
def __enter__(self):
"""Erase the progress bar so bits of disembodied progress bar don't get scrolled up the terminal."""
# My terminal has no status line, so we make one manually.
bar._is_dodging += 1 # Increment before calling erase(), which
# calls dodging() again.
if bar._is_dodging <= 1: # It *was* 0.
bar.erase()
def __exit__(self, type, value, tb):
"""Redraw the last saved state of the progress bar."""
if bar._is_dodging == 1: # Can't decrement yet; write() could
# read it.
# This is really necessary only because we monkeypatch
# stderr; the next test is about to start and will redraw
# the bar.
with bar._at_last_line():
bar.stream.write(bar.last)
bar.stream.flush()
bar._is_dodging -= 1
return ShyProgressBar()
|
erikrose/nose-progressive | noseprogressive/bar.py | ProgressBar.dodging | python | def dodging(bar):
class ShyProgressBar(object):
"""Context manager that implements a progress bar that gets out of the way"""
def __enter__(self):
"""Erase the progress bar so bits of disembodied progress bar don't get scrolled up the terminal."""
# My terminal has no status line, so we make one manually.
bar._is_dodging += 1 # Increment before calling erase(), which
# calls dodging() again.
if bar._is_dodging <= 1: # It *was* 0.
bar.erase()
def __exit__(self, type, value, tb):
"""Redraw the last saved state of the progress bar."""
if bar._is_dodging == 1: # Can't decrement yet; write() could
# read it.
# This is really necessary only because we monkeypatch
# stderr; the next test is about to start and will redraw
# the bar.
with bar._at_last_line():
bar.stream.write(bar.last)
bar.stream.flush()
bar._is_dodging -= 1
return ShyProgressBar() | Return a context manager which erases the bar, lets you output things, and then redraws the bar.
It's reentrant. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/bar.py#L84-L113 | null | class ProgressBar(object):
_is_dodging = 0 # Like a semaphore
def __init__(self, max_value, term, filled_color=8, empty_color=7):
"""``max_value`` is the highest value I will attain. Must be >0."""
self.stream = term.stream
self.max = max_value
self._term = term
self.last = '' # The contents of the previous progress line printed
self._measure_terminal()
# Prepare formatting, dependent on whether we have terminal colors:
if term.number_of_colors > max(filled_color, empty_color):
self._fill_cap = term.on_color(filled_color)
self._empty_cap = term.on_color(empty_color)
self._empty_char = ' '
else:
self._fill_cap = term.reverse
self._empty_cap = lambda s: s
self._empty_char = '_'
signal(SIGWINCH, self._handle_winch)
def _measure_terminal(self):
self.lines, self.cols = (self._term.height or 24,
self._term.width or 80)
def _handle_winch(self, *args):
#self.erase() # Doesn't seem to help.
self._measure_terminal()
# TODO: Reprint the bar but at the new width.
def update(self, test_path, number):
"""Draw an updated progress bar.
At the moment, the graph takes a fixed width, and the test identifier
takes the rest of the row, truncated from the left to fit.
test_path -- the selector of the test being run
number -- how many tests have been run so far, including this one
"""
# TODO: Play nicely with absurdly narrow terminals. (OS X's won't even
# go small enough to hurt us.)
# Figure out graph:
GRAPH_WIDTH = 14
# min() is in case we somehow get the total test count wrong. It's tricky.
num_filled = int(round(min(1.0, float(number) / self.max) * GRAPH_WIDTH))
graph = ''.join([self._fill_cap(' ' * num_filled),
self._empty_cap(self._empty_char * (GRAPH_WIDTH - num_filled))])
# Figure out the test identifier portion:
cols_for_path = self.cols - GRAPH_WIDTH - 2 # 2 spaces between path & graph
if len(test_path) > cols_for_path:
test_path = test_path[len(test_path) - cols_for_path:]
else:
test_path += ' ' * (cols_for_path - len(test_path))
# Put them together, and let simmer:
self.last = self._term.bold(test_path) + ' ' + graph
with self._at_last_line():
self.stream.write(self.last)
self.stream.flush()
def erase(self):
"""White out the progress bar."""
with self._at_last_line():
self.stream.write(self._term.clear_eol)
self.stream.flush()
def _at_last_line(self):
"""Return a context manager that positions the cursor at the last line, lets you write things, and then returns it to its previous position."""
return self._term.location(0, self.lines)
|
erikrose/nose-progressive | noseprogressive/runner.py | ProgressiveRunner._makeResult | python | def _makeResult(self):
return ProgressiveResult(self._cwd,
self._totalTests,
self.stream,
config=self.config) | Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/runner.py#L16-L27 | null | class ProgressiveRunner(nose.core.TextTestRunner):
"""Test runner that makes a lot less noise than TextTestRunner"""
def __init__(self, cwd, totalTests, stream, **kwargs):
super(ProgressiveRunner, self).__init__(stream, **kwargs)
self._cwd = cwd
self._totalTests = totalTests
def run(self, test):
"Run the given test case or test suite...quietly."
# These parts of Nose's pluggability are baked into
# nose.core.TextTestRunner. Reproduce them:
wrapper = self.config.plugins.prepareTest(test)
if wrapper is not None:
test = wrapper
wrapped = self.config.plugins.setOutputStream(self.stream)
if wrapped is not None:
self.stream = wrapped
result = self._makeResult()
startTime = time()
try:
test(result)
except KeyboardInterrupt:
# we need to ignore these exceptions to not
# show traceback when user intentionally
# interrupted test suite execution, and
# to output some reasonable results on
# already passed and failed tests.
pass
stopTime = time()
# We don't care to hear about errors again at the end; we take care of
# that in result.addError(), while the tests run.
# result.printErrors()
#
# However, we do need to call this one useful line from
# nose.result.TextTestResult's implementation of printErrors() to make
# sure other plugins get a chance to report:
self.config.plugins.report(self.stream)
result.printSummary(startTime, stopTime)
self.config.plugins.finalize(result)
return result
|
erikrose/nose-progressive | noseprogressive/runner.py | ProgressiveRunner.run | python | def run(self, test):
"Run the given test case or test suite...quietly."
# These parts of Nose's pluggability are baked into
# nose.core.TextTestRunner. Reproduce them:
wrapper = self.config.plugins.prepareTest(test)
if wrapper is not None:
test = wrapper
wrapped = self.config.plugins.setOutputStream(self.stream)
if wrapped is not None:
self.stream = wrapped
result = self._makeResult()
startTime = time()
try:
test(result)
except KeyboardInterrupt:
# we need to ignore these exceptions to not
# show traceback when user intentionally
# interrupted test suite execution, and
# to output some reasonable results on
# already passed and failed tests.
pass
stopTime = time()
# We don't care to hear about errors again at the end; we take care of
# that in result.addError(), while the tests run.
# result.printErrors()
#
# However, we do need to call this one useful line from
# nose.result.TextTestResult's implementation of printErrors() to make
# sure other plugins get a chance to report:
self.config.plugins.report(self.stream)
result.printSummary(startTime, stopTime)
self.config.plugins.finalize(result)
return result | Run the given test case or test suite...quietly. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/runner.py#L29-L64 | [
"def _makeResult(self):\n \"\"\"Return a Result that doesn't print dots.\n\n Nose's ResultProxy will wrap it, and other plugins can still print\n stuff---but without smashing into our progress bar, care of\n ProgressivePlugin's stderr/out wrapping.\n\n \"\"\"\n return ProgressiveResult(self._cwd,\... | class ProgressiveRunner(nose.core.TextTestRunner):
"""Test runner that makes a lot less noise than TextTestRunner"""
def __init__(self, cwd, totalTests, stream, **kwargs):
super(ProgressiveRunner, self).__init__(stream, **kwargs)
self._cwd = cwd
self._totalTests = totalTests
def _makeResult(self):
"""Return a Result that doesn't print dots.
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into our progress bar, care of
ProgressivePlugin's stderr/out wrapping.
"""
return ProgressiveResult(self._cwd,
self._totalTests,
self.stream,
config=self.config)
|
erikrose/nose-progressive | noseprogressive/result.py | ProgressiveResult._printTraceback | python | def _printTraceback(self, test, err):
# Don't bind third item to a local var; that can create
# circular refs which are expensive to collect. See the
# sys.exc_info() docs.
exception_type, exception_value = err[:2]
# TODO: In Python 3, the traceback is attached to the exception
# instance through the __traceback__ attribute. If the instance
# is saved in a local variable that persists outside the except
# block, the traceback will create a reference cycle with the
# current frame and its dictionary of local variables. This will
# delay reclaiming dead resources until the next cyclic garbage
# collection pass.
extracted_tb = extract_relevant_tb(
err[2],
exception_type,
exception_type is test.failureException)
test_frame_index = index_of_test_frame(
extracted_tb,
exception_type,
exception_value,
test)
if test_frame_index:
# We have a good guess at which frame is the test, so
# trim everything until that. We don't care to see test
# framework frames.
extracted_tb = extracted_tb[test_frame_index:]
with self.bar.dodging():
self.stream.write(''.join(
format_traceback(
extracted_tb,
exception_type,
exception_value,
self._cwd,
self._term,
self._options.function_color,
self._options.dim_color,
self._options.editor,
self._options.editor_shortcut_template))) | Print a nicely formatted traceback.
:arg err: exc_info()-style traceback triple
:arg test: the test that precipitated this call | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/result.py#L46-L91 | null | class ProgressiveResult(TextTestResult):
"""Test result which updates a progress bar instead of printing dots
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into my progress bar, care of my Plugin's
stderr/out wrapping.
"""
def __init__(self, cwd, total_tests, stream, config=None):
super(ProgressiveResult, self).__init__(stream, None, 0, config=config)
self._cwd = cwd
self._options = config.options
self._term = Terminal(stream=stream,
force_styling=config.options.with_styling)
if self._term.is_a_tty or self._options.with_bar:
# 1 in case test counting failed and returned 0
self.bar = ProgressBar(total_tests or 1,
self._term,
config.options.bar_filled_color,
config.options.bar_empty_color)
else:
self.bar = NullProgressBar()
# Declare errorclass-savviness so ErrorClassPlugins don't monkeypatch
# half my methods away:
self.errorClasses = {}
def startTest(self, test):
"""Update the progress bar."""
super(ProgressiveResult, self).startTest(test)
self.bar.update(nose_selector(test), self.testsRun)
def _printHeadline(self, kind, test, is_failure=True):
"""Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call
"""
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) # end bold
def _recordAndPrintHeadline(self, test, error_class, artifact):
"""Record that an error-like thing occurred, and print a summary.
Store ``artifact`` with the record.
Return whether the test result is any sort of failure.
"""
# We duplicate the errorclass handling from super rather than calling
# it and monkeying around with showAll flags to keep it from printing
# anything.
is_error_class = False
for cls, (storage, label, is_failure) in self.errorClasses.items():
if isclass(error_class) and issubclass(error_class, cls):
if is_failure:
test.passed = False
storage.append((test, artifact))
is_error_class = True
if not is_error_class:
self.errors.append((test, artifact))
test.passed = False
is_any_failure = not is_error_class or is_failure
self._printHeadline(label if is_error_class else 'ERROR',
test,
is_failure=is_any_failure)
return is_any_failure
def addSkip(self, test, reason):
"""Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped
"""
self._recordAndPrintHeadline(test, SkipTest, reason)
# Python 2.7 users get a little bonus: the reason the test was skipped.
if isinstance(reason, Exception):
reason = getattr(reason, 'message', None) or getattr(
reason, 'args')[0]
if reason and self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(reason)
def addError(self, test, err):
# We don't read this, but some other plugin might conceivably expect it
# to be there:
excInfo = self._exc_info_to_string(err, test)
is_failure = self._recordAndPrintHeadline(test, err[0], excInfo)
if is_failure:
self._printTraceback(test, err)
def addFailure(self, test, err):
super(ProgressiveResult, self).addFailure(test, err)
self._printHeadline('FAIL', test)
self._printTraceback(test, err)
def printSummary(self, start, stop):
"""As a final summary, print number of tests, broken down by result."""
def renderResultType(type, number, is_failure):
"""Return a rendering like '2 failures'.
:arg type: A singular label, like "failure"
:arg number: The number of tests with a result of that type
:arg is_failure: Whether that type counts as a failure
"""
# I'd rather hope for the best with plurals than totally punt on
# being Englishlike:
ret = '%s %s%s' % (number, type, 's' if number != 1 else '')
if is_failure and number:
ret = self._term.bold(ret)
return ret
# Summarize the special cases:
counts = [('test', self.testsRun, False),
('failure', len(self.failures), True),
('error', len(self.errors), True)]
# Support custom errorclasses as well as normal failures and errors.
# Lowercase any all-caps labels, but leave the rest alone in case there
# are hard-to-read camelCaseWordBreaks.
counts.extend([(label.lower() if label.isupper() else label,
len(storage),
is_failure)
for (storage, label, is_failure) in
self.errorClasses.values() if len(storage)])
summary = (', '.join(renderResultType(*a) for a in counts) +
' in %.1fs' % (stop - start))
# Erase progress bar. Bash doesn't clear the whole line when printing
# the prompt, leaving a piece of the bar. Also, the prompt may not be
# at the bottom of the terminal.
self.bar.erase()
self.stream.writeln()
if self.wasSuccessful():
self.stream.write(self._term.bold_green('OK! '))
self.stream.writeln(summary)
|
erikrose/nose-progressive | noseprogressive/result.py | ProgressiveResult._printHeadline | python | def _printHeadline(self, kind, test, is_failure=True):
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) | Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/result.py#L93-L108 | null | class ProgressiveResult(TextTestResult):
"""Test result which updates a progress bar instead of printing dots
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into my progress bar, care of my Plugin's
stderr/out wrapping.
"""
def __init__(self, cwd, total_tests, stream, config=None):
super(ProgressiveResult, self).__init__(stream, None, 0, config=config)
self._cwd = cwd
self._options = config.options
self._term = Terminal(stream=stream,
force_styling=config.options.with_styling)
if self._term.is_a_tty or self._options.with_bar:
# 1 in case test counting failed and returned 0
self.bar = ProgressBar(total_tests or 1,
self._term,
config.options.bar_filled_color,
config.options.bar_empty_color)
else:
self.bar = NullProgressBar()
# Declare errorclass-savviness so ErrorClassPlugins don't monkeypatch
# half my methods away:
self.errorClasses = {}
def startTest(self, test):
"""Update the progress bar."""
super(ProgressiveResult, self).startTest(test)
self.bar.update(nose_selector(test), self.testsRun)
def _printTraceback(self, test, err):
"""Print a nicely formatted traceback.
:arg err: exc_info()-style traceback triple
:arg test: the test that precipitated this call
"""
# Don't bind third item to a local var; that can create
# circular refs which are expensive to collect. See the
# sys.exc_info() docs.
exception_type, exception_value = err[:2]
# TODO: In Python 3, the traceback is attached to the exception
# instance through the __traceback__ attribute. If the instance
# is saved in a local variable that persists outside the except
# block, the traceback will create a reference cycle with the
# current frame and its dictionary of local variables. This will
# delay reclaiming dead resources until the next cyclic garbage
# collection pass.
extracted_tb = extract_relevant_tb(
err[2],
exception_type,
exception_type is test.failureException)
test_frame_index = index_of_test_frame(
extracted_tb,
exception_type,
exception_value,
test)
if test_frame_index:
# We have a good guess at which frame is the test, so
# trim everything until that. We don't care to see test
# framework frames.
extracted_tb = extracted_tb[test_frame_index:]
with self.bar.dodging():
self.stream.write(''.join(
format_traceback(
extracted_tb,
exception_type,
exception_value,
self._cwd,
self._term,
self._options.function_color,
self._options.dim_color,
self._options.editor,
self._options.editor_shortcut_template)))
# end bold
def _recordAndPrintHeadline(self, test, error_class, artifact):
"""Record that an error-like thing occurred, and print a summary.
Store ``artifact`` with the record.
Return whether the test result is any sort of failure.
"""
# We duplicate the errorclass handling from super rather than calling
# it and monkeying around with showAll flags to keep it from printing
# anything.
is_error_class = False
for cls, (storage, label, is_failure) in self.errorClasses.items():
if isclass(error_class) and issubclass(error_class, cls):
if is_failure:
test.passed = False
storage.append((test, artifact))
is_error_class = True
if not is_error_class:
self.errors.append((test, artifact))
test.passed = False
is_any_failure = not is_error_class or is_failure
self._printHeadline(label if is_error_class else 'ERROR',
test,
is_failure=is_any_failure)
return is_any_failure
def addSkip(self, test, reason):
"""Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped
"""
self._recordAndPrintHeadline(test, SkipTest, reason)
# Python 2.7 users get a little bonus: the reason the test was skipped.
if isinstance(reason, Exception):
reason = getattr(reason, 'message', None) or getattr(
reason, 'args')[0]
if reason and self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(reason)
def addError(self, test, err):
# We don't read this, but some other plugin might conceivably expect it
# to be there:
excInfo = self._exc_info_to_string(err, test)
is_failure = self._recordAndPrintHeadline(test, err[0], excInfo)
if is_failure:
self._printTraceback(test, err)
def addFailure(self, test, err):
super(ProgressiveResult, self).addFailure(test, err)
self._printHeadline('FAIL', test)
self._printTraceback(test, err)
def printSummary(self, start, stop):
"""As a final summary, print number of tests, broken down by result."""
def renderResultType(type, number, is_failure):
"""Return a rendering like '2 failures'.
:arg type: A singular label, like "failure"
:arg number: The number of tests with a result of that type
:arg is_failure: Whether that type counts as a failure
"""
# I'd rather hope for the best with plurals than totally punt on
# being Englishlike:
ret = '%s %s%s' % (number, type, 's' if number != 1 else '')
if is_failure and number:
ret = self._term.bold(ret)
return ret
# Summarize the special cases:
counts = [('test', self.testsRun, False),
('failure', len(self.failures), True),
('error', len(self.errors), True)]
# Support custom errorclasses as well as normal failures and errors.
# Lowercase any all-caps labels, but leave the rest alone in case there
# are hard-to-read camelCaseWordBreaks.
counts.extend([(label.lower() if label.isupper() else label,
len(storage),
is_failure)
for (storage, label, is_failure) in
self.errorClasses.values() if len(storage)])
summary = (', '.join(renderResultType(*a) for a in counts) +
' in %.1fs' % (stop - start))
# Erase progress bar. Bash doesn't clear the whole line when printing
# the prompt, leaving a piece of the bar. Also, the prompt may not be
# at the bottom of the terminal.
self.bar.erase()
self.stream.writeln()
if self.wasSuccessful():
self.stream.write(self._term.bold_green('OK! '))
self.stream.writeln(summary)
|
erikrose/nose-progressive | noseprogressive/result.py | ProgressiveResult._recordAndPrintHeadline | python | def _recordAndPrintHeadline(self, test, error_class, artifact):
# We duplicate the errorclass handling from super rather than calling
# it and monkeying around with showAll flags to keep it from printing
# anything.
is_error_class = False
for cls, (storage, label, is_failure) in self.errorClasses.items():
if isclass(error_class) and issubclass(error_class, cls):
if is_failure:
test.passed = False
storage.append((test, artifact))
is_error_class = True
if not is_error_class:
self.errors.append((test, artifact))
test.passed = False
is_any_failure = not is_error_class or is_failure
self._printHeadline(label if is_error_class else 'ERROR',
test,
is_failure=is_any_failure)
return is_any_failure | Record that an error-like thing occurred, and print a summary.
Store ``artifact`` with the record.
Return whether the test result is any sort of failure. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/result.py#L110-L136 | null | class ProgressiveResult(TextTestResult):
"""Test result which updates a progress bar instead of printing dots
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into my progress bar, care of my Plugin's
stderr/out wrapping.
"""
def __init__(self, cwd, total_tests, stream, config=None):
super(ProgressiveResult, self).__init__(stream, None, 0, config=config)
self._cwd = cwd
self._options = config.options
self._term = Terminal(stream=stream,
force_styling=config.options.with_styling)
if self._term.is_a_tty or self._options.with_bar:
# 1 in case test counting failed and returned 0
self.bar = ProgressBar(total_tests or 1,
self._term,
config.options.bar_filled_color,
config.options.bar_empty_color)
else:
self.bar = NullProgressBar()
# Declare errorclass-savviness so ErrorClassPlugins don't monkeypatch
# half my methods away:
self.errorClasses = {}
def startTest(self, test):
"""Update the progress bar."""
super(ProgressiveResult, self).startTest(test)
self.bar.update(nose_selector(test), self.testsRun)
def _printTraceback(self, test, err):
"""Print a nicely formatted traceback.
:arg err: exc_info()-style traceback triple
:arg test: the test that precipitated this call
"""
# Don't bind third item to a local var; that can create
# circular refs which are expensive to collect. See the
# sys.exc_info() docs.
exception_type, exception_value = err[:2]
# TODO: In Python 3, the traceback is attached to the exception
# instance through the __traceback__ attribute. If the instance
# is saved in a local variable that persists outside the except
# block, the traceback will create a reference cycle with the
# current frame and its dictionary of local variables. This will
# delay reclaiming dead resources until the next cyclic garbage
# collection pass.
extracted_tb = extract_relevant_tb(
err[2],
exception_type,
exception_type is test.failureException)
test_frame_index = index_of_test_frame(
extracted_tb,
exception_type,
exception_value,
test)
if test_frame_index:
# We have a good guess at which frame is the test, so
# trim everything until that. We don't care to see test
# framework frames.
extracted_tb = extracted_tb[test_frame_index:]
with self.bar.dodging():
self.stream.write(''.join(
format_traceback(
extracted_tb,
exception_type,
exception_value,
self._cwd,
self._term,
self._options.function_color,
self._options.dim_color,
self._options.editor,
self._options.editor_shortcut_template)))
def _printHeadline(self, kind, test, is_failure=True):
"""Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call
"""
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) # end bold
def addSkip(self, test, reason):
"""Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped
"""
self._recordAndPrintHeadline(test, SkipTest, reason)
# Python 2.7 users get a little bonus: the reason the test was skipped.
if isinstance(reason, Exception):
reason = getattr(reason, 'message', None) or getattr(
reason, 'args')[0]
if reason and self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(reason)
def addError(self, test, err):
# We don't read this, but some other plugin might conceivably expect it
# to be there:
excInfo = self._exc_info_to_string(err, test)
is_failure = self._recordAndPrintHeadline(test, err[0], excInfo)
if is_failure:
self._printTraceback(test, err)
def addFailure(self, test, err):
super(ProgressiveResult, self).addFailure(test, err)
self._printHeadline('FAIL', test)
self._printTraceback(test, err)
def printSummary(self, start, stop):
"""As a final summary, print number of tests, broken down by result."""
def renderResultType(type, number, is_failure):
"""Return a rendering like '2 failures'.
:arg type: A singular label, like "failure"
:arg number: The number of tests with a result of that type
:arg is_failure: Whether that type counts as a failure
"""
# I'd rather hope for the best with plurals than totally punt on
# being Englishlike:
ret = '%s %s%s' % (number, type, 's' if number != 1 else '')
if is_failure and number:
ret = self._term.bold(ret)
return ret
# Summarize the special cases:
counts = [('test', self.testsRun, False),
('failure', len(self.failures), True),
('error', len(self.errors), True)]
# Support custom errorclasses as well as normal failures and errors.
# Lowercase any all-caps labels, but leave the rest alone in case there
# are hard-to-read camelCaseWordBreaks.
counts.extend([(label.lower() if label.isupper() else label,
len(storage),
is_failure)
for (storage, label, is_failure) in
self.errorClasses.values() if len(storage)])
summary = (', '.join(renderResultType(*a) for a in counts) +
' in %.1fs' % (stop - start))
# Erase progress bar. Bash doesn't clear the whole line when printing
# the prompt, leaving a piece of the bar. Also, the prompt may not be
# at the bottom of the terminal.
self.bar.erase()
self.stream.writeln()
if self.wasSuccessful():
self.stream.write(self._term.bold_green('OK! '))
self.stream.writeln(summary)
|
erikrose/nose-progressive | noseprogressive/result.py | ProgressiveResult.addSkip | python | def addSkip(self, test, reason):
self._recordAndPrintHeadline(test, SkipTest, reason)
# Python 2.7 users get a little bonus: the reason the test was skipped.
if isinstance(reason, Exception):
reason = getattr(reason, 'message', None) or getattr(
reason, 'args')[0]
if reason and self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(reason) | Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/result.py#L138-L155 | null | class ProgressiveResult(TextTestResult):
"""Test result which updates a progress bar instead of printing dots
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into my progress bar, care of my Plugin's
stderr/out wrapping.
"""
def __init__(self, cwd, total_tests, stream, config=None):
super(ProgressiveResult, self).__init__(stream, None, 0, config=config)
self._cwd = cwd
self._options = config.options
self._term = Terminal(stream=stream,
force_styling=config.options.with_styling)
if self._term.is_a_tty or self._options.with_bar:
# 1 in case test counting failed and returned 0
self.bar = ProgressBar(total_tests or 1,
self._term,
config.options.bar_filled_color,
config.options.bar_empty_color)
else:
self.bar = NullProgressBar()
# Declare errorclass-savviness so ErrorClassPlugins don't monkeypatch
# half my methods away:
self.errorClasses = {}
def startTest(self, test):
"""Update the progress bar."""
super(ProgressiveResult, self).startTest(test)
self.bar.update(nose_selector(test), self.testsRun)
def _printTraceback(self, test, err):
"""Print a nicely formatted traceback.
:arg err: exc_info()-style traceback triple
:arg test: the test that precipitated this call
"""
# Don't bind third item to a local var; that can create
# circular refs which are expensive to collect. See the
# sys.exc_info() docs.
exception_type, exception_value = err[:2]
# TODO: In Python 3, the traceback is attached to the exception
# instance through the __traceback__ attribute. If the instance
# is saved in a local variable that persists outside the except
# block, the traceback will create a reference cycle with the
# current frame and its dictionary of local variables. This will
# delay reclaiming dead resources until the next cyclic garbage
# collection pass.
extracted_tb = extract_relevant_tb(
err[2],
exception_type,
exception_type is test.failureException)
test_frame_index = index_of_test_frame(
extracted_tb,
exception_type,
exception_value,
test)
if test_frame_index:
# We have a good guess at which frame is the test, so
# trim everything until that. We don't care to see test
# framework frames.
extracted_tb = extracted_tb[test_frame_index:]
with self.bar.dodging():
self.stream.write(''.join(
format_traceback(
extracted_tb,
exception_type,
exception_value,
self._cwd,
self._term,
self._options.function_color,
self._options.dim_color,
self._options.editor,
self._options.editor_shortcut_template)))
def _printHeadline(self, kind, test, is_failure=True):
"""Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident that precipitated this call
:arg test: The test that precipitated this call
"""
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) # end bold
def _recordAndPrintHeadline(self, test, error_class, artifact):
"""Record that an error-like thing occurred, and print a summary.
Store ``artifact`` with the record.
Return whether the test result is any sort of failure.
"""
# We duplicate the errorclass handling from super rather than calling
# it and monkeying around with showAll flags to keep it from printing
# anything.
is_error_class = False
for cls, (storage, label, is_failure) in self.errorClasses.items():
if isclass(error_class) and issubclass(error_class, cls):
if is_failure:
test.passed = False
storage.append((test, artifact))
is_error_class = True
if not is_error_class:
self.errors.append((test, artifact))
test.passed = False
is_any_failure = not is_error_class or is_failure
self._printHeadline(label if is_error_class else 'ERROR',
test,
is_failure=is_any_failure)
return is_any_failure
def addError(self, test, err):
    """Record an error and, when it counts as a failure, print its traceback.

    :arg test: the test that precipitated this call
    :arg err: exc_info()-style (type, value, traceback) triple
    """
    # We never read this ourselves, but some other plugin might conceivably
    # expect the formatted exc_info to be stored with the record.
    formatted = self._exc_info_to_string(err, test)
    exception_class = err[0]
    if self._recordAndPrintHeadline(test, exception_class, formatted):
        self._printTraceback(test, err)
def addFailure(self, test, err):
"""Record a failing test, announce it, and print its traceback.

:arg test: the test that failed
:arg err: exc_info()-style (type, value, traceback) triple
"""
# Let the base TextTestResult do the standard bookkeeping first.
super(ProgressiveResult, self).addFailure(test, err)
self._printHeadline('FAIL', test)
self._printTraceback(test, err)
def printSummary(self, start, stop):
"""As a final summary, print number of tests, broken down by result.

:arg start: time at which the run started (seconds); only the
difference ``stop - start`` is used, for the elapsed-time display
:arg stop: time at which the run finished (seconds)
"""
def renderResultType(type, number, is_failure):
"""Return a rendering like '2 failures'.
:arg type: A singular label, like "failure"
:arg number: The number of tests with a result of that type
:arg is_failure: Whether that type counts as a failure
"""
# NOTE: `type` shadows the builtin; it is local to this helper only.
# I'd rather hope for the best with plurals than totally punt on
# being Englishlike:
ret = '%s %s%s' % (number, type, 's' if number != 1 else '')
# Failure-ish types are bolded, but only when at least one occurred.
if is_failure and number:
ret = self._term.bold(ret)
return ret
# Summarize the special cases:
counts = [('test', self.testsRun, False),
('failure', len(self.failures), True),
('error', len(self.errors), True)]
# Support custom errorclasses as well as normal failures and errors.
# Lowercase any all-caps labels, but leave the rest alone in case there
# are hard-to-read camelCaseWordBreaks.
counts.extend([(label.lower() if label.isupper() else label,
len(storage),
is_failure)
for (storage, label, is_failure) in
self.errorClasses.values() if len(storage)])
summary = (', '.join(renderResultType(*a) for a in counts) +
' in %.1fs' % (stop - start))
# Erase progress bar. Bash doesn't clear the whole line when printing
# the prompt, leaving a piece of the bar. Also, the prompt may not be
# at the bottom of the terminal.
self.bar.erase()
self.stream.writeln()
if self.wasSuccessful():
self.stream.write(self._term.bold_green('OK! '))
self.stream.writeln(summary)
|
erikrose/nose-progressive | noseprogressive/result.py | ProgressiveResult.printSummary | python | def printSummary(self, start, stop):
def renderResultType(type, number, is_failure):
"""Return a rendering like '2 failures'.
:arg type: A singular label, like "failure"
:arg number: The number of tests with a result of that type
:arg is_failure: Whether that type counts as a failure
"""
# I'd rather hope for the best with plurals than totally punt on
# being Englishlike:
ret = '%s %s%s' % (number, type, 's' if number != 1 else '')
if is_failure and number:
ret = self._term.bold(ret)
return ret
# Summarize the special cases:
counts = [('test', self.testsRun, False),
('failure', len(self.failures), True),
('error', len(self.errors), True)]
# Support custom errorclasses as well as normal failures and errors.
# Lowercase any all-caps labels, but leave the rest alone in case there
# are hard-to-read camelCaseWordBreaks.
counts.extend([(label.lower() if label.isupper() else label,
len(storage),
is_failure)
for (storage, label, is_failure) in
self.errorClasses.values() if len(storage)])
summary = (', '.join(renderResultType(*a) for a in counts) +
' in %.1fs' % (stop - start))
# Erase progress bar. Bash doesn't clear the whole line when printing
# the prompt, leaving a piece of the bar. Also, the prompt may not be
# at the bottom of the terminal.
self.bar.erase()
self.stream.writeln()
if self.wasSuccessful():
self.stream.write(self._term.bold_green('OK! '))
self.stream.writeln(summary) | As a final summary, print number of tests, broken down by result. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/result.py#L170-L209 | null | class ProgressiveResult(TextTestResult):
"""Test result which updates a progress bar instead of printing dots
Nose's ResultProxy will wrap it, and other plugins can still print
stuff---but without smashing into my progress bar, care of my Plugin's
stderr/out wrapping.
"""
def __init__(self, cwd, total_tests, stream, config=None):
super(ProgressiveResult, self).__init__(stream, None, 0, config=config)
self._cwd = cwd
self._options = config.options
self._term = Terminal(stream=stream,
force_styling=config.options.with_styling)
if self._term.is_a_tty or self._options.with_bar:
# 1 in case test counting failed and returned 0
self.bar = ProgressBar(total_tests or 1,
self._term,
config.options.bar_filled_color,
config.options.bar_empty_color)
else:
self.bar = NullProgressBar()
# Declare errorclass-savviness so ErrorClassPlugins don't monkeypatch
# half my methods away:
self.errorClasses = {}
def startTest(self, test):
"""Update the progress bar."""
super(ProgressiveResult, self).startTest(test)
self.bar.update(nose_selector(test), self.testsRun)
def _printTraceback(self, test, err):
"""Print a nicely formatted traceback.
:arg err: exc_info()-style traceback triple
:arg test: the test that precipitated this call
"""
# Don't bind third item to a local var; that can create
# circular refs which are expensive to collect. See the
# sys.exc_info() docs.
exception_type, exception_value = err[:2]
# TODO: In Python 3, the traceback is attached to the exception
# instance through the __traceback__ attribute. If the instance
# is saved in a local variable that persists outside the except
# block, the traceback will create a reference cycle with the
# current frame and its dictionary of local variables. This will
# delay reclaiming dead resources until the next cyclic garbage
# collection pass.
extracted_tb = extract_relevant_tb(
err[2],
exception_type,
exception_type is test.failureException)
test_frame_index = index_of_test_frame(
extracted_tb,
exception_type,
exception_value,
test)
if test_frame_index:
# We have a good guess at which frame is the test, so
# trim everything until that. We don't care to see test
# framework frames.
extracted_tb = extracted_tb[test_frame_index:]
with self.bar.dodging():
self.stream.write(''.join(
format_traceback(
extracted_tb,
exception_type,
exception_value,
self._cwd,
self._term,
self._options.function_color,
self._options.dim_color,
self._options.editor,
self._options.editor_shortcut_template)))
def _printHeadline(self, kind, test, is_failure=True):
"""Output a 1-line error summary to the stream if appropriate.
The line contains the kind of error and the pathname of the test.
:arg kind: The (string) type of incident the precipitated this call
:arg test: The test that precipitated this call
"""
if is_failure or self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(
'\n' +
(self._term.bold if is_failure else '') +
'%s: %s' % (kind, nose_selector(test)) +
(self._term.normal if is_failure else '')) # end bold
def _recordAndPrintHeadline(self, test, error_class, artifact):
"""Record that an error-like thing occurred, and print a summary.
Store ``artifact`` with the record.
Return whether the test result is any sort of failure.
"""
# We duplicate the errorclass handling from super rather than calling
# it and monkeying around with showAll flags to keep it from printing
# anything.
is_error_class = False
for cls, (storage, label, is_failure) in self.errorClasses.items():
if isclass(error_class) and issubclass(error_class, cls):
if is_failure:
test.passed = False
storage.append((test, artifact))
is_error_class = True
if not is_error_class:
self.errors.append((test, artifact))
test.passed = False
is_any_failure = not is_error_class or is_failure
self._printHeadline(label if is_error_class else 'ERROR',
test,
is_failure=is_any_failure)
return is_any_failure
def addSkip(self, test, reason):
"""Catch skipped tests in Python 2.7 and above.
Though ``addSkip()`` is deprecated in the nose plugin API, it is very
much not deprecated as a Python 2.7 ``TestResult`` method. In Python
2.7, this will get called instead of ``addError()`` for skips.
:arg reason: Text describing why the test was skipped
"""
self._recordAndPrintHeadline(test, SkipTest, reason)
# Python 2.7 users get a little bonus: the reason the test was skipped.
if isinstance(reason, Exception):
reason = getattr(reason, 'message', None) or getattr(
reason, 'args')[0]
if reason and self._options.show_advisories:
with self.bar.dodging():
self.stream.writeln(reason)
def addError(self, test, err):
# We don't read this, but some other plugin might conceivably expect it
# to be there:
excInfo = self._exc_info_to_string(err, test)
is_failure = self._recordAndPrintHeadline(test, err[0], excInfo)
if is_failure:
self._printTraceback(test, err)
def addFailure(self, test, err):
super(ProgressiveResult, self).addFailure(test, err)
self._printHeadline('FAIL', test)
self._printTraceback(test, err)
|
erikrose/nose-progressive | noseprogressive/utils.py | nose_selector | python | def nose_selector(test):
address = test_address(test)
if address:
file, module, rest = address
if module:
if rest:
try:
return '%s:%s%s' % (module, rest, test.test.arg or '')
except AttributeError:
return '%s:%s' % (module, rest)
else:
return module
return 'Unknown test' | Return the string you can pass to nose to run `test`, including argument
values if the test was made by a test generator.
Return "Unknown test" if it can't construct a decent path. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/utils.py#L17-L36 | null | from os.path import abspath, realpath
from nose.tools import nottest
import nose.util
@nottest
def test_address(test):
    """Return nose's ``test_address()`` result, or None when it is stumped."""
    # nose.util.test_address() raises a TypeError when the function handed
    # to @with_setup on a test generator itself has an error; treat that
    # the same as "no address available".
    try:
        return nose.util.test_address(test)
    except TypeError:
        return None
class OneTrackMind(object):
    """An accurate simulation of my brain

    I can know one thing at a time, at some level of confidence. You can tell
    me other things, but if I'm not as confident of them, I'll forget them. If
    I'm more confident of them, they'll replace what I knew before.
    """
    def __init__(self):
        # Nothing is known yet: zero confidence, no best guess.
        self.confidence = 0
        self.best = None

    def know(self, what, confidence):
        """Know something with the given confidence, and return self for chaining.

        If confidence is higher than that of what we already know, replace
        what we already know with what you're telling us.
        """
        if confidence <= self.confidence:
            # Not convincing enough; keep the current belief.
            return self
        self.best = what
        self.confidence = confidence
        return self
@nottest # still needed?
def index_of_test_frame(extracted_tb, exception_type, exception_value, test):
"""Return the index of the frame that points to the failed test or None.
Sometimes this is hard. It takes its best guess. If exception_type is
SyntaxError or it has no idea, it returns None.
Args:
extracted_tb: The traceback, after having been passed through
extract_tb()
exception_type, exception_value: Needed in case this is a SyntaxError
and therefore doesn't have the whole story in extracted_tb
(NOTE(review): neither is currently read in this body)
test: The test that failed; used to look up its address/file
"""
try:
address = test_address(test)
except TypeError:
# Explodes if the function passed to @with_setup
# applied to a test generator has an error.
address = None
# address is None if the test callable couldn't be found. No sense trying
# to find the test frame if there's no such thing:
if address is None:
return None
test_file, _, test_call = address
# OneTrackMind helps us favor the latest frame, even if there's more than
# one match of equal confidence.
knower = OneTrackMind()
if test_file is not None:
test_file_path = realpath(test_file)
# TODO: Perfect. Right now, I'm just comparing by function name within
# a module. This should break only if you have two identically-named
# functions from a single module in the call stack when your test
# fails. However, it bothers me. I'd rather be finding the actual
# callables and comparing them directly, but that might not work with
# test generators.
for i, frame in enumerate(extracted_tb):
file, line, function, text = frame
if file is not None and test_file_path == realpath(file):
# TODO: Now that we're eliding until the test frame, is it
# desirable to have this confidence-2 guess when just the file
# path is matched?
knower.know(i, 2)
# A frame whose function name equals the last component of the
# test's dotted call path is a stronger (confidence-3) match;
# stop at the first such frame.
if (hasattr(test_call, 'rsplit') and # test_call can be None
function == test_call.rsplit('.')[-1]):
knower.know(i, 3)
break
return knower.best
def human_path(path, cwd):
    """Return the most human-readable representation of the given path.

    If an absolute path is given that's within the current directory, convert
    it to a relative path to shorten it. Otherwise, return the absolute path.

    :arg path: the path to prettify (made absolute and normalized)
    :arg cwd: the current working directory as an absolute path, or a
        falsy value to skip relativization
    """
    import os

    # TODO: Canonicalize the path to remove /kitsune/../kitsune nonsense.
    path = abspath(path)
    if cwd:
        # BUG FIX: compare against "cwd + separator" rather than bare
        # startswith(cwd), so a sibling directory sharing a name prefix
        # (e.g. /home/proj vs /home/projX) isn't mistaken for being
        # inside cwd and mangled into a bogus relative path.
        prefix = cwd.rstrip(os.sep) + os.sep
        if path.startswith(prefix):
            path = path[len(prefix):]  # Make path relative.
    return path
|
erikrose/nose-progressive | noseprogressive/utils.py | human_path | python | def human_path(path, cwd):
# TODO: Canonicalize the path to remove /kitsune/../kitsune nonsense.
path = abspath(path)
if cwd and path.startswith(cwd):
path = path[len(cwd) + 1:] # Make path relative. Remove leading slash.
return path | Return the most human-readable representation of the given path.
If an absolute path is given that's within the current directory, convert
it to a relative path to shorten it. Otherwise, return the absolute path. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/utils.py#L121-L132 | null | from os.path import abspath, realpath
from nose.tools import nottest
import nose.util
@nottest
def test_address(test):
"""Return the result of nose's test_address(), None if it's stumped."""
try:
return nose.util.test_address(test)
except TypeError: # Explodes if the function passed to @with_setup applied
# to a test generator has an error.
pass
def nose_selector(test):
    """Return the string you can pass to nose to run `test`, including argument
    values if the test was made by a test generator.

    Return "Unknown test" if it can't construct a decent path.
    """
    address = test_address(test)
    if not address:
        return 'Unknown test'
    _file, module, rest = address
    if not module:
        return 'Unknown test'
    if not rest:
        return module
    # Generator-made tests carry their argument values on test.test.arg;
    # plain tests raise AttributeError when we reach for it.
    try:
        arg_suffix = test.test.arg or ''
    except AttributeError:
        return '%s:%s' % (module, rest)
    return '%s:%s%s' % (module, rest, arg_suffix)
class OneTrackMind(object):
"""An accurate simulation of my brain
I can know one thing at a time, at some level of confidence. You can tell
me other things, but if I'm not as confident of them, I'll forget them. If
I'm more confident of them, they'll replace what I knew before.
"""
def __init__(self):
self.confidence = 0
self.best = None
def know(self, what, confidence):
"""Know something with the given confidence, and return self for chaining.
If confidence is higher than that of what we already know, replace
what we already know with what you're telling us.
"""
if confidence > self.confidence:
self.best = what
self.confidence = confidence
return self
@nottest # still needed?
def index_of_test_frame(extracted_tb, exception_type, exception_value, test):
"""Return the index of the frame that points to the failed test or None.
Sometimes this is hard. It takes its best guess. If exception_type is
SyntaxError or it has no idea, it returns None.
Args:
address: The result of a call to test_address(), indicating which test
failed
exception_type, exception_value: Needed in case this is a SyntaxError
and therefore doesn't have the whole story in extracted_tb
extracted_tb: The traceback, after having been passed through
extract_tb()
"""
try:
address = test_address(test)
except TypeError:
# Explodes if the function passed to @with_setup
# applied to a test generator has an error.
address = None
# address is None if the test callable couldn't be found. No sense trying
# to find the test frame if there's no such thing:
if address is None:
return None
test_file, _, test_call = address
# OneTrackMind helps us favor the latest frame, even if there's more than
# one match of equal confidence.
knower = OneTrackMind()
if test_file is not None:
test_file_path = realpath(test_file)
# TODO: Perfect. Right now, I'm just comparing by function name within
# a module. This should break only if you have two identically-named
# functions from a single module in the call stack when your test
# fails. However, it bothers me. I'd rather be finding the actual
# callables and comparing them directly, but that might not work with
# test generators.
for i, frame in enumerate(extracted_tb):
file, line, function, text = frame
if file is not None and test_file_path == realpath(file):
# TODO: Now that we're eliding until the test frame, is it
# desirable to have this confidence-2 guess when just the file
# path is matched?
knower.know(i, 2)
if (hasattr(test_call, 'rsplit') and # test_call can be None
function == test_call.rsplit('.')[-1]):
knower.know(i, 3)
break
return knower.best
|
erikrose/nose-progressive | noseprogressive/utils.py | OneTrackMind.know | python | def know(self, what, confidence):
if confidence > self.confidence:
self.best = what
self.confidence = confidence
return self | Know something with the given confidence, and return self for chaining.
If confidence is higher than that of what we already know, replace
what we already know with what you're telling us. | train | https://github.com/erikrose/nose-progressive/blob/42853f11290cfaac8aa3d204714b71e27cc4ec07/noseprogressive/utils.py#L51-L61 | null | class OneTrackMind(object):
"""An accurate simulation of my brain
I can know one thing at a time, at some level of confidence. You can tell
me other things, but if I'm not as confident of them, I'll forget them. If
I'm more confident of them, they'll replace what I knew before.
"""
def __init__(self):
self.confidence = 0
self.best = None
|
bookieio/breadability | breadability/scoring.py | check_node_attributes | python | def check_node_attributes(pattern, node, *attributes):
for attribute_name in attributes:
attribute = node.get(attribute_name)
if attribute is not None and pattern.search(attribute):
return True
return False | Searches match in attributes against given pattern and if
finds the match against any of them returns True. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/scoring.py#L43-L53 | null | # -*- coding: utf8 -*-
"""Handle dealing with scoring nodes and content for our parsing."""
from __future__ import absolute_import
from __future__ import division, print_function
import re
import logging
from hashlib import md5
from lxml.etree import tostring
from ._compat import to_bytes
from .utils import normalize_whitespace
# A series of sets of attributes we check to help in determining if a node is
# a potential candidate or not.
CLS_UNLIKELY = re.compile(
"combx|comment|community|disqus|extra|foot|header|menu|remark|rss|"
"shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|perma|popup|"
"tweet|twitter|social|breadcrumb",
re.IGNORECASE
)
CLS_MAYBE = re.compile(
"and|article|body|column|main|shadow|entry",
re.IGNORECASE
)
CLS_WEIGHT_POSITIVE = re.compile(
"article|body|content|entry|main|page|pagination|post|text|blog|story",
re.IGNORECASE
)
CLS_WEIGHT_NEGATIVE = re.compile(
"combx|comment|com-|contact|foot|footer|footnote|head|masthead|media|meta|"
"outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|"
"tool|widget",
re.IGNORECASE
)
logger = logging.getLogger("breadability")
def generate_hash_id(node):
    """
    Return a short (8 hex chars) hash identifying ``node``.

    :param node: lxml etree node
    """
    try:
        serialized = tostring(node)
    except Exception:
        # Serialization can fail on exotic nodes; log it and fall back to
        # repr() so we still produce *some* identifier.
        logger.exception("Generating of hash failed")
        serialized = to_bytes(repr(node))

    return md5(serialized).hexdigest()[:8]
def get_link_density(node, node_text=None):
    """
    Compute the ratio of link text to all text within the node.

    :parameter Element node:
        HTML element in which links density is computed.
    :parameter string node_text:
        Text content of given node if it was obtained before.
    :returns float:
        Value of computed 0 <= density <= 1, where 0 means no links and
        1 means that node contains only links.
    """
    text = node.text_content() if node_text is None else node_text
    text = normalize_whitespace(text.strip())
    if not text:
        return 0.0

    anchors_length = sum(
        _get_normalized_text_length(anchor)
        for anchor in node.findall(".//a")
    )
    # Give 50 bonus chars worth of length for each img.
    # Tweaking this 50 down a notch should help if we hit false positives.
    image_bonus = 50 * len(node.findall(".//img"))
    anchors_length = max(0, anchors_length - image_bonus)

    return anchors_length / len(text)
def _get_normalized_text_length(node):
    """Length of the node's text content after whitespace normalization."""
    stripped = node.text_content().strip()
    return len(normalize_whitespace(stripped))
def get_class_weight(node):
    """
    Compute weight of element according to its class/id.

    Negative-looking names (footer, sidebar, ...) subtract 25 points per
    attribute; positive-looking names (article, content, ...) add 25.
    """
    weight = 0
    # Check "class" first, then "id", mirroring the regex pair for each.
    for attribute in ("class", "id"):
        if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, attribute):
            weight -= 25
        if check_node_attributes(CLS_WEIGHT_POSITIVE, node, attribute):
            weight += 25
    return weight
def is_unlikely_node(node):
    """
    Short helper for checking unlikely status.

    If the class or id are in the unlikely list, and there's not also a
    class/id in the likely list then it might need to be removed. The
    <body> element itself is never considered unlikely.
    """
    if node.tag == "body":
        return False
    looks_unlikely = check_node_attributes(CLS_UNLIKELY, node, "class", "id")
    looks_possible = check_node_attributes(CLS_MAYBE, node, "class", "id")
    return bool(looks_unlikely and not looks_possible)
def score_candidates(nodes):
"""Given a list of potential nodes, find some initial scores to start.

:param nodes: candidate elements (lxml-style: must support
getparent(), text_content(), findall(), .tag and .attrib)
:returns dict: maps each scored element (the node, its parent, and its
grandparent) to a ScoredNode carrying the accumulated content_score
"""
# NOTE: the 'LENTH' typo is long-standing; it is a local constant only.
MIN_HIT_LENTH = 25
candidates = {}
for node in nodes:
logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)
# if the node has no parent it knows of then it ends up creating a
# body & html tag to parent the html fragment
parent = node.getparent()
if parent is None:
logger.debug("Skipping candidate - parent node is 'None'.")
continue
grand = parent.getparent()
if grand is None:
logger.debug("Skipping candidate - grand parent node is 'None'.")
continue
# if paragraph is < `MIN_HIT_LENTH` characters don't even count it
inner_text = node.text_content().strip()
if len(inner_text) < MIN_HIT_LENTH:
logger.debug(
"Skipping candidate - inner text < %d characters.",
MIN_HIT_LENTH)
continue
# initialize readability data for the parent
# add parent node if it isn't in the candidate list
if parent not in candidates:
candidates[parent] = ScoredNode(parent)
if grand not in candidates:
candidates[grand] = ScoredNode(grand)
# add a point for the paragraph itself as a base
content_score = 1
if inner_text:
# add 0.25 points for any commas within this paragraph
commas_count = inner_text.count(",")
content_score += commas_count * 0.25
logger.debug("Bonus points for %d commas.", commas_count)
# subtract 0.5 points for each double quote within this paragraph
double_quotes_count = inner_text.count('"')
content_score += double_quotes_count * -0.5
logger.debug(
"Penalty points for %d double-quotes.", double_quotes_count)
# for every 100 characters in this paragraph, add another point
# up to 3 points
length_points = len(inner_text) / 100
content_score += min(length_points, 3.0)
logger.debug("Bonus points for length of text: %f", length_points)
# add the score to the parent
logger.debug(
"Bonus points for parent %s %r with score %f: %f",
parent.tag, parent.attrib, candidates[parent].content_score,
content_score)
candidates[parent].content_score += content_score
# the grand node gets half
logger.debug(
"Bonus points for grand %s %r with score %f: %f",
grand.tag, grand.attrib, candidates[grand].content_score,
content_score / 2.0)
candidates[grand].content_score += content_score / 2.0
if node not in candidates:
candidates[node] = ScoredNode(node)
candidates[node].content_score += content_score
# Final pass: penalize every candidate by its link density so that
# mostly-link containers keep little of their accumulated score.
for candidate in candidates.values():
adjustment = 1.0 - get_link_density(candidate.node)
candidate.content_score *= adjustment
logger.debug(
"Link density adjustment for %s %r: %f",
candidate.node.tag, candidate.node.attrib, adjustment)
return candidates
class ScoredNode(object):
    """
    We need Scored nodes we use to track possible article matches

    We might have a bunch of these so we use __slots__ to keep memory usage
    down.
    """
    __slots__ = ('node', 'content_score')

    def __init__(self, node):
        """Given node, set an initial score and weigh based on css and id"""
        self.node = node
        self.content_score = 0

        # Base score by tag: containers likely to hold the article get a
        # bonus; list/form/heading tags start with a penalty.
        if node.tag in ('div', 'article'):
            self.content_score = 5
        if node.tag in ('pre', 'td', 'blockquote'):
            self.content_score = 3
        if node.tag in ('address', 'ol', 'ul', 'dl', 'dd', 'dt', 'li', 'form'):
            self.content_score = -3
        if node.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'th'):
            self.content_score = -5

        self.content_score += get_class_weight(node)

    @property
    def hash_id(self):
        """Short stable hash identifying the wrapped node."""
        return generate_hash_id(self.node)

    def __repr__(self):
        if self.node is None:
            # BUG FIX: the original applied %-interpolation to a
            # str.format() template ("...{2:0.1F}..." % score), which
            # raises TypeError instead of producing a repr.
            return "<NullScoredNode with score {0:0.1F}>".format(
                self.content_score)
        return "<ScoredNode {0} {1}: {2:0.1F}>".format(
            self.node.tag,
            self.node.attrib,
            self.content_score
        )
|
bookieio/breadability | breadability/scoring.py | generate_hash_id | python | def generate_hash_id(node):
try:
content = tostring(node)
except Exception:
logger.exception("Generating of hash failed")
content = to_bytes(repr(node))
hash_id = md5(content).hexdigest()
return hash_id[:8] | Generates a hash_id for the node in question.
:param node: lxml etree node | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/scoring.py#L56-L69 | [
"def to_bytes(object):\n try:\n if isinstance(object, bytes):\n return object\n elif isinstance(object, unicode):\n return object.encode(\"utf8\")\n else:\n # try encode instance to bytes\n return instance_to_bytes(object)\n except UnicodeError:... | # -*- coding: utf8 -*-
"""Handle dealing with scoring nodes and content for our parsing."""
from __future__ import absolute_import
from __future__ import division, print_function
import re
import logging
from hashlib import md5
from lxml.etree import tostring
from ._compat import to_bytes
from .utils import normalize_whitespace
# A series of sets of attributes we check to help in determining if a node is
# a potential candidate or not.
CLS_UNLIKELY = re.compile(
"combx|comment|community|disqus|extra|foot|header|menu|remark|rss|"
"shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|perma|popup|"
"tweet|twitter|social|breadcrumb",
re.IGNORECASE
)
CLS_MAYBE = re.compile(
"and|article|body|column|main|shadow|entry",
re.IGNORECASE
)
CLS_WEIGHT_POSITIVE = re.compile(
"article|body|content|entry|main|page|pagination|post|text|blog|story",
re.IGNORECASE
)
CLS_WEIGHT_NEGATIVE = re.compile(
"combx|comment|com-|contact|foot|footer|footnote|head|masthead|media|meta|"
"outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|"
"tool|widget",
re.IGNORECASE
)
logger = logging.getLogger("breadability")
def check_node_attributes(pattern, node, *attributes):
    """
    Return True if ``pattern`` matches any of the named attributes of
    ``node``; attributes that are absent are skipped.
    """
    values = (node.get(name) for name in attributes)
    return any(
        value is not None and pattern.search(value)
        for value in values
    )
def get_link_density(node, node_text=None):
"""
Computes the ratio for text in given node and text in links
contained in the node. It is computed from number of
characters in the texts.
:parameter Element node:
HTML element in which links density is computed.
:parameter string node_text:
Text content of given node if it was obtained before.
:returns float:
Returns value of computed 0 <= density <= 1, where 0 means
no links and 1 means that node contains only links.
"""
if node_text is None:
node_text = node.text_content()
node_text = normalize_whitespace(node_text.strip())
text_length = len(node_text)
if text_length == 0:
return 0.0
links_length = sum(map(_get_normalized_text_length, node.findall(".//a")))
# Give 50 bonus chars worth of length for each img.
# Tweaking this 50 down a notch should help if we hit false positives.
img_bonuses = 50 * len(node.findall(".//img"))
links_length = max(0, links_length - img_bonuses)
return links_length / text_length
def _get_normalized_text_length(node):
return len(normalize_whitespace(node.text_content().strip()))
def get_class_weight(node):
"""
Computes weight of element according to its class/id.
We're using sets to help efficiently check for existence of matches.
"""
weight = 0
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "class"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "class"):
weight += 25
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "id"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "id"):
weight += 25
return weight
def is_unlikely_node(node):
"""
Short helper for checking unlikely status.
If the class or id are in the unlikely list, and there's not also a
class/id in the likely list then it might need to be removed.
"""
unlikely = check_node_attributes(CLS_UNLIKELY, node, "class", "id")
maybe = check_node_attributes(CLS_MAYBE, node, "class", "id")
return bool(unlikely and not maybe and node.tag != "body")
def score_candidates(nodes):
"""Given a list of potential nodes, find some initial scores to start"""
MIN_HIT_LENTH = 25
candidates = {}
for node in nodes:
logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)
# if the node has no parent it knows of then it ends up creating a
# body & html tag to parent the html fragment
parent = node.getparent()
if parent is None:
logger.debug("Skipping candidate - parent node is 'None'.")
continue
grand = parent.getparent()
if grand is None:
logger.debug("Skipping candidate - grand parent node is 'None'.")
continue
# if paragraph is < `MIN_HIT_LENTH` characters don't even count it
inner_text = node.text_content().strip()
if len(inner_text) < MIN_HIT_LENTH:
logger.debug(
"Skipping candidate - inner text < %d characters.",
MIN_HIT_LENTH)
continue
# initialize readability data for the parent
# add parent node if it isn't in the candidate list
if parent not in candidates:
candidates[parent] = ScoredNode(parent)
if grand not in candidates:
candidates[grand] = ScoredNode(grand)
# add a point for the paragraph itself as a base
content_score = 1
if inner_text:
# add 0.25 points for any commas within this paragraph
commas_count = inner_text.count(",")
content_score += commas_count * 0.25
logger.debug("Bonus points for %d commas.", commas_count)
# subtract 0.5 points for each double quote within this paragraph
double_quotes_count = inner_text.count('"')
content_score += double_quotes_count * -0.5
logger.debug(
"Penalty points for %d double-quotes.", double_quotes_count)
# for every 100 characters in this paragraph, add another point
# up to 3 points
length_points = len(inner_text) / 100
content_score += min(length_points, 3.0)
logger.debug("Bonus points for length of text: %f", length_points)
# add the score to the parent
logger.debug(
"Bonus points for parent %s %r with score %f: %f",
parent.tag, parent.attrib, candidates[parent].content_score,
content_score)
candidates[parent].content_score += content_score
# the grand node gets half
logger.debug(
"Bonus points for grand %s %r with score %f: %f",
grand.tag, grand.attrib, candidates[grand].content_score,
content_score / 2.0)
candidates[grand].content_score += content_score / 2.0
if node not in candidates:
candidates[node] = ScoredNode(node)
candidates[node].content_score += content_score
for candidate in candidates.values():
adjustment = 1.0 - get_link_density(candidate.node)
candidate.content_score *= adjustment
logger.debug(
"Link density adjustment for %s %r: %f",
candidate.node.tag, candidate.node.attrib, adjustment)
return candidates
class ScoredNode(object):
    """
    Wraps a candidate DOM node together with its readability score.

    We might have a bunch of these so we use __slots__ to keep memory
    usage down.
    """
    __slots__ = ('node', 'content_score')

    def __init__(self, node):
        """Given node, set an initial score and weigh based on css and id.

        :param node: lxml element this score belongs to.
        """
        self.node = node
        self.content_score = 0
        # Container-ish tags are likely article bodies; lists, forms and
        # headings are penalized.  The checks intentionally use disjoint
        # tag sets, so at most one of them fires for a given node.
        if node.tag in ('div', 'article'):
            self.content_score = 5
        if node.tag in ('pre', 'td', 'blockquote'):
            self.content_score = 3
        if node.tag in ('address', 'ol', 'ul', 'dl', 'dd', 'dt', 'li', 'form'):
            self.content_score = -3
        if node.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'th'):
            self.content_score = -5
        # Adjust by the css class/id keyword hints.
        self.content_score += get_class_weight(node)

    @property
    def hash_id(self):
        """Short stable hash identifying the wrapped node."""
        return generate_hash_id(self.node)

    def __repr__(self):
        # BUG FIX: the original used ``"...{2:0.1F}>" % self.content_score``,
        # mixing %-interpolation with a str.format() template, which raises
        # TypeError whenever node is None.
        if self.node is None:
            return "<NullScoredNode with score {0:0.1F}>".format(
                self.content_score)
        return "<ScoredNode {0} {1}: {2:0.1F}>".format(
            self.node.tag,
            self.node.attrib,
            self.content_score
        )
|
bookieio/breadability | breadability/scoring.py | get_link_density | python | def get_link_density(node, node_text=None):
if node_text is None:
node_text = node.text_content()
node_text = normalize_whitespace(node_text.strip())
text_length = len(node_text)
if text_length == 0:
return 0.0
links_length = sum(map(_get_normalized_text_length, node.findall(".//a")))
# Give 50 bonus chars worth of length for each img.
# Tweaking this 50 down a notch should help if we hit false positives.
img_bonuses = 50 * len(node.findall(".//img"))
links_length = max(0, links_length - img_bonuses)
return links_length / text_length | Computes the ratio for text in given node and text in links
contained in the node. It is computed from number of
characters in the texts.
:parameter Element node:
HTML element in which links density is computed.
:parameter string node_text:
Text content of given node if it was obtained before.
:returns float:
Returns value of computed 0 <= density <= 1, where 0 means
no links and 1 means that node contains only links. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/scoring.py#L72-L100 | [
"def normalize_whitespace(text):\n \"\"\"\n Translates multiple whitespace into single space character.\n If there is at least one new line character chunk is replaced\n by single LF (Unix new line) character.\n \"\"\"\n return MULTIPLE_WHITESPACE_PATTERN.sub(_replace_whitespace, text)\n"
] | # -*- coding: utf8 -*-
"""Handle dealing with scoring nodes and content for our parsing."""
from __future__ import absolute_import
from __future__ import division, print_function
import re
import logging
from hashlib import md5
from lxml.etree import tostring
from ._compat import to_bytes
from .utils import normalize_whitespace
# A series of sets of attributes we check to help in determining if a node is
# a potential candidate or not.
CLS_UNLIKELY = re.compile(
"combx|comment|community|disqus|extra|foot|header|menu|remark|rss|"
"shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|perma|popup|"
"tweet|twitter|social|breadcrumb",
re.IGNORECASE
)
CLS_MAYBE = re.compile(
"and|article|body|column|main|shadow|entry",
re.IGNORECASE
)
CLS_WEIGHT_POSITIVE = re.compile(
"article|body|content|entry|main|page|pagination|post|text|blog|story",
re.IGNORECASE
)
CLS_WEIGHT_NEGATIVE = re.compile(
"combx|comment|com-|contact|foot|footer|footnote|head|masthead|media|meta|"
"outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|"
"tool|widget",
re.IGNORECASE
)
logger = logging.getLogger("breadability")
def check_node_attributes(pattern, node, *attributes):
    """
    Return True when *pattern* matches the value of any of the named
    *attributes* on *node*; attributes that are absent are skipped.
    """
    values = (node.get(name) for name in attributes)
    return any(
        value is not None and pattern.search(value)
        for value in values
    )
def generate_hash_id(node):
    """
    Generates a short (8 hex characters) hash id for the node in question.

    :param node: lxml etree node
    """
    try:
        serialized = tostring(node)
    except Exception:
        # Serialization can fail on exotic nodes; fall back to repr().
        logger.exception("Generating of hash failed")
        serialized = to_bytes(repr(node))
    return md5(serialized).hexdigest()[:8]
def _get_normalized_text_length(node):
    # Length of the node's text content with surrounding/internal
    # whitespace collapsed.
    text = node.text_content().strip()
    return len(normalize_whitespace(text))
def get_class_weight(node):
    """
    Computes weight of element according to its class/id.

    Each of the two attributes can independently subtract 25 (negative
    keyword hit) and/or add 25 (positive keyword hit).
    """
    weight = 0
    for attribute in ("class", "id"):
        if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, attribute):
            weight -= 25
        if check_node_attributes(CLS_WEIGHT_POSITIVE, node, attribute):
            weight += 25
    return weight
def is_unlikely_node(node):
    """
    Short helper for checking unlikely status.

    A node is "unlikely" when its class/id hits the unlikely keyword list
    without also hitting the maybe list, and the node is not <body>.
    """
    if node.tag == "body":
        return False
    if check_node_attributes(CLS_MAYBE, node, "class", "id"):
        return False
    return bool(check_node_attributes(CLS_UNLIKELY, node, "class", "id"))
def score_candidates(nodes):
    """Given a list of potential nodes, find some initial scores to start.

    Each candidate paragraph contributes a base score (built from comma,
    double-quote and length heuristics) to itself, the full amount to its
    parent, and half of it to its grandparent.  Every candidate's final
    score is then scaled down by its link density.

    :param nodes: iterable of candidate elements selected upstream
        (lxml elements — assumed; TODO confirm against callers).
    :returns: dict mapping element -> ScoredNode for every scored element.
    """
    # Paragraphs shorter than this are considered too small to matter.
    MIN_HIT_LENTH = 25
    candidates = {}
    for node in nodes:
        logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)
        # if the node has no parent it knows of then it ends up creating a
        # body & html tag to parent the html fragment
        parent = node.getparent()
        if parent is None:
            logger.debug("Skipping candidate - parent node is 'None'.")
            continue
        grand = parent.getparent()
        if grand is None:
            logger.debug("Skipping candidate - grand parent node is 'None'.")
            continue
        # if paragraph is < `MIN_HIT_LENTH` characters don't even count it
        inner_text = node.text_content().strip()
        if len(inner_text) < MIN_HIT_LENTH:
            logger.debug(
                "Skipping candidate - inner text < %d characters.",
                MIN_HIT_LENTH)
            continue
        # initialize readability data for the parent
        # add parent node if it isn't in the candidate list
        if parent not in candidates:
            candidates[parent] = ScoredNode(parent)
        if grand not in candidates:
            candidates[grand] = ScoredNode(grand)
        # add a point for the paragraph itself as a base
        content_score = 1
        if inner_text:
            # add 0.25 points for any commas within this paragraph
            commas_count = inner_text.count(",")
            content_score += commas_count * 0.25
            logger.debug("Bonus points for %d commas.", commas_count)
            # subtract 0.5 points for each double quote within this paragraph
            double_quotes_count = inner_text.count('"')
            content_score += double_quotes_count * -0.5
            logger.debug(
                "Penalty points for %d double-quotes.", double_quotes_count)
            # for every 100 characters in this paragraph, add another point
            # up to 3 points
            length_points = len(inner_text) / 100
            content_score += min(length_points, 3.0)
            logger.debug("Bonus points for length of text: %f", length_points)
        # add the score to the parent
        logger.debug(
            "Bonus points for parent %s %r with score %f: %f",
            parent.tag, parent.attrib, candidates[parent].content_score,
            content_score)
        candidates[parent].content_score += content_score
        # the grand node gets half
        logger.debug(
            "Bonus points for grand %s %r with score %f: %f",
            grand.tag, grand.attrib, candidates[grand].content_score,
            content_score / 2.0)
        candidates[grand].content_score += content_score / 2.0
        if node not in candidates:
            candidates[node] = ScoredNode(node)
        candidates[node].content_score += content_score
    # Penalize link-heavy candidates: scale each score by (1 - link density).
    for candidate in candidates.values():
        adjustment = 1.0 - get_link_density(candidate.node)
        candidate.content_score *= adjustment
        logger.debug(
            "Link density adjustment for %s %r: %f",
            candidate.node.tag, candidate.node.attrib, adjustment)
    return candidates
class ScoredNode(object):
    """
    Wraps a candidate DOM node together with its readability score.

    We might have a bunch of these so we use __slots__ to keep memory
    usage down.
    """
    __slots__ = ('node', 'content_score')

    def __init__(self, node):
        """Given node, set an initial score and weigh based on css and id.

        :param node: lxml element this score belongs to.
        """
        self.node = node
        self.content_score = 0
        # Container-ish tags are likely article bodies; lists, forms and
        # headings are penalized.  The checks intentionally use disjoint
        # tag sets, so at most one of them fires for a given node.
        if node.tag in ('div', 'article'):
            self.content_score = 5
        if node.tag in ('pre', 'td', 'blockquote'):
            self.content_score = 3
        if node.tag in ('address', 'ol', 'ul', 'dl', 'dd', 'dt', 'li', 'form'):
            self.content_score = -3
        if node.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'th'):
            self.content_score = -5
        # Adjust by the css class/id keyword hints.
        self.content_score += get_class_weight(node)

    @property
    def hash_id(self):
        """Short stable hash identifying the wrapped node."""
        return generate_hash_id(self.node)

    def __repr__(self):
        # BUG FIX: the original used ``"...{2:0.1F}>" % self.content_score``,
        # mixing %-interpolation with a str.format() template, which raises
        # TypeError whenever node is None.
        if self.node is None:
            return "<NullScoredNode with score {0:0.1F}>".format(
                self.content_score)
        return "<ScoredNode {0} {1}: {2:0.1F}>".format(
            self.node.tag,
            self.node.attrib,
            self.content_score
        )
|
bookieio/breadability | breadability/scoring.py | get_class_weight | python | def get_class_weight(node):
weight = 0
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "class"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "class"):
weight += 25
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "id"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "id"):
weight += 25
return weight | Computes weight of element according to its class/id.
We're using sets to help efficiently check for existence of matches. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/scoring.py#L107-L125 | [
"def check_node_attributes(pattern, node, *attributes):\n \"\"\"\n Searches match in attributes against given pattern and if\n finds the match against any of them returns True.\n \"\"\"\n for attribute_name in attributes:\n attribute = node.get(attribute_name)\n if attribute is not None... | # -*- coding: utf8 -*-
"""Handle dealing with scoring nodes and content for our parsing."""
from __future__ import absolute_import
from __future__ import division, print_function
import re
import logging
from hashlib import md5
from lxml.etree import tostring
from ._compat import to_bytes
from .utils import normalize_whitespace
# A series of sets of attributes we check to help in determining if a node is
# a potential candidate or not.
CLS_UNLIKELY = re.compile(
"combx|comment|community|disqus|extra|foot|header|menu|remark|rss|"
"shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|perma|popup|"
"tweet|twitter|social|breadcrumb",
re.IGNORECASE
)
CLS_MAYBE = re.compile(
"and|article|body|column|main|shadow|entry",
re.IGNORECASE
)
CLS_WEIGHT_POSITIVE = re.compile(
"article|body|content|entry|main|page|pagination|post|text|blog|story",
re.IGNORECASE
)
CLS_WEIGHT_NEGATIVE = re.compile(
"combx|comment|com-|contact|foot|footer|footnote|head|masthead|media|meta|"
"outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|"
"tool|widget",
re.IGNORECASE
)
logger = logging.getLogger("breadability")
def check_node_attributes(pattern, node, *attributes):
"""
Searches match in attributes against given pattern and if
finds the match against any of them returns True.
"""
for attribute_name in attributes:
attribute = node.get(attribute_name)
if attribute is not None and pattern.search(attribute):
return True
return False
def generate_hash_id(node):
"""
Generates a hash_id for the node in question.
:param node: lxml etree node
"""
try:
content = tostring(node)
except Exception:
logger.exception("Generating of hash failed")
content = to_bytes(repr(node))
hash_id = md5(content).hexdigest()
return hash_id[:8]
def get_link_density(node, node_text=None):
    """
    Computes the ratio of link text to total text inside *node*,
    measured in characters.

    :parameter Element node:
        HTML element in which links density is computed.
    :parameter string node_text:
        Text content of given node if it was obtained before.
    :returns float:
        A value 0 <= density <= 1; 0 means no links at all and 1 means
        the node consists purely of links.
    """
    text = node_text if node_text is not None else node.text_content()
    text = normalize_whitespace(text.strip())
    total_length = len(text)
    if not total_length:
        return 0.0
    anchors = node.findall(".//a")
    links_length = sum(_get_normalized_text_length(a) for a in anchors)
    # Give 50 bonus chars worth of length for each img.
    # Tweaking this 50 down a notch should help if we hit false positives.
    image_bonus = 50 * len(node.findall(".//img"))
    effective_links_length = max(0, links_length - image_bonus)
    return effective_links_length / total_length
def _get_normalized_text_length(node):
return len(normalize_whitespace(node.text_content().strip()))
def is_unlikely_node(node):
"""
Short helper for checking unlikely status.
If the class or id are in the unlikely list, and there's not also a
class/id in the likely list then it might need to be removed.
"""
unlikely = check_node_attributes(CLS_UNLIKELY, node, "class", "id")
maybe = check_node_attributes(CLS_MAYBE, node, "class", "id")
return bool(unlikely and not maybe and node.tag != "body")
def score_candidates(nodes):
"""Given a list of potential nodes, find some initial scores to start"""
MIN_HIT_LENTH = 25
candidates = {}
for node in nodes:
logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)
# if the node has no parent it knows of then it ends up creating a
# body & html tag to parent the html fragment
parent = node.getparent()
if parent is None:
logger.debug("Skipping candidate - parent node is 'None'.")
continue
grand = parent.getparent()
if grand is None:
logger.debug("Skipping candidate - grand parent node is 'None'.")
continue
# if paragraph is < `MIN_HIT_LENTH` characters don't even count it
inner_text = node.text_content().strip()
if len(inner_text) < MIN_HIT_LENTH:
logger.debug(
"Skipping candidate - inner text < %d characters.",
MIN_HIT_LENTH)
continue
# initialize readability data for the parent
# add parent node if it isn't in the candidate list
if parent not in candidates:
candidates[parent] = ScoredNode(parent)
if grand not in candidates:
candidates[grand] = ScoredNode(grand)
# add a point for the paragraph itself as a base
content_score = 1
if inner_text:
# add 0.25 points for any commas within this paragraph
commas_count = inner_text.count(",")
content_score += commas_count * 0.25
logger.debug("Bonus points for %d commas.", commas_count)
# subtract 0.5 points for each double quote within this paragraph
double_quotes_count = inner_text.count('"')
content_score += double_quotes_count * -0.5
logger.debug(
"Penalty points for %d double-quotes.", double_quotes_count)
# for every 100 characters in this paragraph, add another point
# up to 3 points
length_points = len(inner_text) / 100
content_score += min(length_points, 3.0)
logger.debug("Bonus points for length of text: %f", length_points)
# add the score to the parent
logger.debug(
"Bonus points for parent %s %r with score %f: %f",
parent.tag, parent.attrib, candidates[parent].content_score,
content_score)
candidates[parent].content_score += content_score
# the grand node gets half
logger.debug(
"Bonus points for grand %s %r with score %f: %f",
grand.tag, grand.attrib, candidates[grand].content_score,
content_score / 2.0)
candidates[grand].content_score += content_score / 2.0
if node not in candidates:
candidates[node] = ScoredNode(node)
candidates[node].content_score += content_score
for candidate in candidates.values():
adjustment = 1.0 - get_link_density(candidate.node)
candidate.content_score *= adjustment
logger.debug(
"Link density adjustment for %s %r: %f",
candidate.node.tag, candidate.node.attrib, adjustment)
return candidates
class ScoredNode(object):
    """
    Wraps a candidate DOM node together with its readability score.

    We might have a bunch of these so we use __slots__ to keep memory
    usage down.
    """
    __slots__ = ('node', 'content_score')

    def __init__(self, node):
        """Given node, set an initial score and weigh based on css and id.

        :param node: lxml element this score belongs to.
        """
        self.node = node
        self.content_score = 0
        # Container-ish tags are likely article bodies; lists, forms and
        # headings are penalized.  The checks intentionally use disjoint
        # tag sets, so at most one of them fires for a given node.
        if node.tag in ('div', 'article'):
            self.content_score = 5
        if node.tag in ('pre', 'td', 'blockquote'):
            self.content_score = 3
        if node.tag in ('address', 'ol', 'ul', 'dl', 'dd', 'dt', 'li', 'form'):
            self.content_score = -3
        if node.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'th'):
            self.content_score = -5
        # Adjust by the css class/id keyword hints.
        self.content_score += get_class_weight(node)

    @property
    def hash_id(self):
        """Short stable hash identifying the wrapped node."""
        return generate_hash_id(self.node)

    def __repr__(self):
        # BUG FIX: the original used ``"...{2:0.1F}>" % self.content_score``,
        # mixing %-interpolation with a str.format() template, which raises
        # TypeError whenever node is None.
        if self.node is None:
            return "<NullScoredNode with score {0:0.1F}>".format(
                self.content_score)
        return "<ScoredNode {0} {1}: {2:0.1F}>".format(
            self.node.tag,
            self.node.attrib,
            self.content_score
        )
|
bookieio/breadability | breadability/scoring.py | is_unlikely_node | python | def is_unlikely_node(node):
unlikely = check_node_attributes(CLS_UNLIKELY, node, "class", "id")
maybe = check_node_attributes(CLS_MAYBE, node, "class", "id")
return bool(unlikely and not maybe and node.tag != "body") | Short helper for checking unlikely status.
If the class or id are in the unlikely list, and there's not also a
class/id in the likely list then it might need to be removed. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/scoring.py#L128-L138 | [
"def check_node_attributes(pattern, node, *attributes):\n \"\"\"\n Searches match in attributes against given pattern and if\n finds the match against any of them returns True.\n \"\"\"\n for attribute_name in attributes:\n attribute = node.get(attribute_name)\n if attribute is not None... | # -*- coding: utf8 -*-
"""Handle dealing with scoring nodes and content for our parsing."""
from __future__ import absolute_import
from __future__ import division, print_function
import re
import logging
from hashlib import md5
from lxml.etree import tostring
from ._compat import to_bytes
from .utils import normalize_whitespace
# A series of sets of attributes we check to help in determining if a node is
# a potential candidate or not.
CLS_UNLIKELY = re.compile(
"combx|comment|community|disqus|extra|foot|header|menu|remark|rss|"
"shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|perma|popup|"
"tweet|twitter|social|breadcrumb",
re.IGNORECASE
)
CLS_MAYBE = re.compile(
"and|article|body|column|main|shadow|entry",
re.IGNORECASE
)
CLS_WEIGHT_POSITIVE = re.compile(
"article|body|content|entry|main|page|pagination|post|text|blog|story",
re.IGNORECASE
)
CLS_WEIGHT_NEGATIVE = re.compile(
"combx|comment|com-|contact|foot|footer|footnote|head|masthead|media|meta|"
"outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|"
"tool|widget",
re.IGNORECASE
)
logger = logging.getLogger("breadability")
def check_node_attributes(pattern, node, *attributes):
"""
Searches match in attributes against given pattern and if
finds the match against any of them returns True.
"""
for attribute_name in attributes:
attribute = node.get(attribute_name)
if attribute is not None and pattern.search(attribute):
return True
return False
def generate_hash_id(node):
"""
Generates a hash_id for the node in question.
:param node: lxml etree node
"""
try:
content = tostring(node)
except Exception:
logger.exception("Generating of hash failed")
content = to_bytes(repr(node))
hash_id = md5(content).hexdigest()
return hash_id[:8]
def get_link_density(node, node_text=None):
"""
Computes the ratio for text in given node and text in links
contained in the node. It is computed from number of
characters in the texts.
:parameter Element node:
HTML element in which links density is computed.
:parameter string node_text:
Text content of given node if it was obtained before.
:returns float:
Returns value of computed 0 <= density <= 1, where 0 means
no links and 1 means that node contains only links.
"""
if node_text is None:
node_text = node.text_content()
node_text = normalize_whitespace(node_text.strip())
text_length = len(node_text)
if text_length == 0:
return 0.0
links_length = sum(map(_get_normalized_text_length, node.findall(".//a")))
# Give 50 bonus chars worth of length for each img.
# Tweaking this 50 down a notch should help if we hit false positives.
img_bonuses = 50 * len(node.findall(".//img"))
links_length = max(0, links_length - img_bonuses)
return links_length / text_length
def _get_normalized_text_length(node):
return len(normalize_whitespace(node.text_content().strip()))
def get_class_weight(node):
"""
Computes weight of element according to its class/id.
We're using sets to help efficiently check for existence of matches.
"""
weight = 0
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "class"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "class"):
weight += 25
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "id"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "id"):
weight += 25
return weight
def score_candidates(nodes):
"""Given a list of potential nodes, find some initial scores to start"""
MIN_HIT_LENTH = 25
candidates = {}
for node in nodes:
logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)
# if the node has no parent it knows of then it ends up creating a
# body & html tag to parent the html fragment
parent = node.getparent()
if parent is None:
logger.debug("Skipping candidate - parent node is 'None'.")
continue
grand = parent.getparent()
if grand is None:
logger.debug("Skipping candidate - grand parent node is 'None'.")
continue
# if paragraph is < `MIN_HIT_LENTH` characters don't even count it
inner_text = node.text_content().strip()
if len(inner_text) < MIN_HIT_LENTH:
logger.debug(
"Skipping candidate - inner text < %d characters.",
MIN_HIT_LENTH)
continue
# initialize readability data for the parent
# add parent node if it isn't in the candidate list
if parent not in candidates:
candidates[parent] = ScoredNode(parent)
if grand not in candidates:
candidates[grand] = ScoredNode(grand)
# add a point for the paragraph itself as a base
content_score = 1
if inner_text:
# add 0.25 points for any commas within this paragraph
commas_count = inner_text.count(",")
content_score += commas_count * 0.25
logger.debug("Bonus points for %d commas.", commas_count)
# subtract 0.5 points for each double quote within this paragraph
double_quotes_count = inner_text.count('"')
content_score += double_quotes_count * -0.5
logger.debug(
"Penalty points for %d double-quotes.", double_quotes_count)
# for every 100 characters in this paragraph, add another point
# up to 3 points
length_points = len(inner_text) / 100
content_score += min(length_points, 3.0)
logger.debug("Bonus points for length of text: %f", length_points)
# add the score to the parent
logger.debug(
"Bonus points for parent %s %r with score %f: %f",
parent.tag, parent.attrib, candidates[parent].content_score,
content_score)
candidates[parent].content_score += content_score
# the grand node gets half
logger.debug(
"Bonus points for grand %s %r with score %f: %f",
grand.tag, grand.attrib, candidates[grand].content_score,
content_score / 2.0)
candidates[grand].content_score += content_score / 2.0
if node not in candidates:
candidates[node] = ScoredNode(node)
candidates[node].content_score += content_score
for candidate in candidates.values():
adjustment = 1.0 - get_link_density(candidate.node)
candidate.content_score *= adjustment
logger.debug(
"Link density adjustment for %s %r: %f",
candidate.node.tag, candidate.node.attrib, adjustment)
return candidates
class ScoredNode(object):
    """
    Wraps a candidate DOM node together with its readability score.

    We might have a bunch of these so we use __slots__ to keep memory
    usage down.
    """
    __slots__ = ('node', 'content_score')

    def __init__(self, node):
        """Given node, set an initial score and weigh based on css and id.

        :param node: lxml element this score belongs to.
        """
        self.node = node
        self.content_score = 0
        # Container-ish tags are likely article bodies; lists, forms and
        # headings are penalized.  The checks intentionally use disjoint
        # tag sets, so at most one of them fires for a given node.
        if node.tag in ('div', 'article'):
            self.content_score = 5
        if node.tag in ('pre', 'td', 'blockquote'):
            self.content_score = 3
        if node.tag in ('address', 'ol', 'ul', 'dl', 'dd', 'dt', 'li', 'form'):
            self.content_score = -3
        if node.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'th'):
            self.content_score = -5
        # Adjust by the css class/id keyword hints.
        self.content_score += get_class_weight(node)

    @property
    def hash_id(self):
        """Short stable hash identifying the wrapped node."""
        return generate_hash_id(self.node)

    def __repr__(self):
        # BUG FIX: the original used ``"...{2:0.1F}>" % self.content_score``,
        # mixing %-interpolation with a str.format() template, which raises
        # TypeError whenever node is None.
        if self.node is None:
            return "<NullScoredNode with score {0:0.1F}>".format(
                self.content_score)
        return "<ScoredNode {0} {1}: {2:0.1F}>".format(
            self.node.tag,
            self.node.attrib,
            self.content_score
        )
|
bookieio/breadability | breadability/scoring.py | score_candidates | python | def score_candidates(nodes):
MIN_HIT_LENTH = 25
candidates = {}
for node in nodes:
logger.debug("* Scoring candidate %s %r", node.tag, node.attrib)
# if the node has no parent it knows of then it ends up creating a
# body & html tag to parent the html fragment
parent = node.getparent()
if parent is None:
logger.debug("Skipping candidate - parent node is 'None'.")
continue
grand = parent.getparent()
if grand is None:
logger.debug("Skipping candidate - grand parent node is 'None'.")
continue
# if paragraph is < `MIN_HIT_LENTH` characters don't even count it
inner_text = node.text_content().strip()
if len(inner_text) < MIN_HIT_LENTH:
logger.debug(
"Skipping candidate - inner text < %d characters.",
MIN_HIT_LENTH)
continue
# initialize readability data for the parent
# add parent node if it isn't in the candidate list
if parent not in candidates:
candidates[parent] = ScoredNode(parent)
if grand not in candidates:
candidates[grand] = ScoredNode(grand)
# add a point for the paragraph itself as a base
content_score = 1
if inner_text:
# add 0.25 points for any commas within this paragraph
commas_count = inner_text.count(",")
content_score += commas_count * 0.25
logger.debug("Bonus points for %d commas.", commas_count)
# subtract 0.5 points for each double quote within this paragraph
double_quotes_count = inner_text.count('"')
content_score += double_quotes_count * -0.5
logger.debug(
"Penalty points for %d double-quotes.", double_quotes_count)
# for every 100 characters in this paragraph, add another point
# up to 3 points
length_points = len(inner_text) / 100
content_score += min(length_points, 3.0)
logger.debug("Bonus points for length of text: %f", length_points)
# add the score to the parent
logger.debug(
"Bonus points for parent %s %r with score %f: %f",
parent.tag, parent.attrib, candidates[parent].content_score,
content_score)
candidates[parent].content_score += content_score
# the grand node gets half
logger.debug(
"Bonus points for grand %s %r with score %f: %f",
grand.tag, grand.attrib, candidates[grand].content_score,
content_score / 2.0)
candidates[grand].content_score += content_score / 2.0
if node not in candidates:
candidates[node] = ScoredNode(node)
candidates[node].content_score += content_score
for candidate in candidates.values():
adjustment = 1.0 - get_link_density(candidate.node)
candidate.content_score *= adjustment
logger.debug(
"Link density adjustment for %s %r: %f",
candidate.node.tag, candidate.node.attrib, adjustment)
return candidates | Given a list of potential nodes, find some initial scores to start | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/scoring.py#L141-L222 | [
"def get_link_density(node, node_text=None):\n \"\"\"\n Computes the ratio for text in given node and text in links\n contained in the node. It is computed from number of\n characters in the texts.\n\n :parameter Element node:\n HTML element in which links density is computed.\n :parameter ... | # -*- coding: utf8 -*-
"""Handle dealing with scoring nodes and content for our parsing."""
from __future__ import absolute_import
from __future__ import division, print_function
import re
import logging
from hashlib import md5
from lxml.etree import tostring
from ._compat import to_bytes
from .utils import normalize_whitespace
# A series of sets of attributes we check to help in determining if a node is
# a potential candidate or not.
CLS_UNLIKELY = re.compile(
"combx|comment|community|disqus|extra|foot|header|menu|remark|rss|"
"shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|perma|popup|"
"tweet|twitter|social|breadcrumb",
re.IGNORECASE
)
CLS_MAYBE = re.compile(
"and|article|body|column|main|shadow|entry",
re.IGNORECASE
)
CLS_WEIGHT_POSITIVE = re.compile(
"article|body|content|entry|main|page|pagination|post|text|blog|story",
re.IGNORECASE
)
CLS_WEIGHT_NEGATIVE = re.compile(
"combx|comment|com-|contact|foot|footer|footnote|head|masthead|media|meta|"
"outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|"
"tool|widget",
re.IGNORECASE
)
logger = logging.getLogger("breadability")
def check_node_attributes(pattern, node, *attributes):
"""
Searches match in attributes against given pattern and if
finds the match against any of them returns True.
"""
for attribute_name in attributes:
attribute = node.get(attribute_name)
if attribute is not None and pattern.search(attribute):
return True
return False
def generate_hash_id(node):
"""
Generates a hash_id for the node in question.
:param node: lxml etree node
"""
try:
content = tostring(node)
except Exception:
logger.exception("Generating of hash failed")
content = to_bytes(repr(node))
hash_id = md5(content).hexdigest()
return hash_id[:8]
def get_link_density(node, node_text=None):
"""
Computes the ratio for text in given node and text in links
contained in the node. It is computed from number of
characters in the texts.
:parameter Element node:
HTML element in which links density is computed.
:parameter string node_text:
Text content of given node if it was obtained before.
:returns float:
Returns value of computed 0 <= density <= 1, where 0 means
no links and 1 means that node contains only links.
"""
if node_text is None:
node_text = node.text_content()
node_text = normalize_whitespace(node_text.strip())
text_length = len(node_text)
if text_length == 0:
return 0.0
links_length = sum(map(_get_normalized_text_length, node.findall(".//a")))
# Give 50 bonus chars worth of length for each img.
# Tweaking this 50 down a notch should help if we hit false positives.
img_bonuses = 50 * len(node.findall(".//img"))
links_length = max(0, links_length - img_bonuses)
return links_length / text_length
def _get_normalized_text_length(node):
return len(normalize_whitespace(node.text_content().strip()))
def get_class_weight(node):
"""
Computes weight of element according to its class/id.
We're using sets to help efficiently check for existence of matches.
"""
weight = 0
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "class"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "class"):
weight += 25
if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, "id"):
weight -= 25
if check_node_attributes(CLS_WEIGHT_POSITIVE, node, "id"):
weight += 25
return weight
def is_unlikely_node(node):
"""
Short helper for checking unlikely status.
If the class or id are in the unlikely list, and there's not also a
class/id in the likely list then it might need to be removed.
"""
unlikely = check_node_attributes(CLS_UNLIKELY, node, "class", "id")
maybe = check_node_attributes(CLS_MAYBE, node, "class", "id")
return bool(unlikely and not maybe and node.tag != "body")
class ScoredNode(object):
    """
    Wraps a candidate DOM node together with its readability score.

    We might have a bunch of these so we use __slots__ to keep memory
    usage down.
    """
    __slots__ = ('node', 'content_score')

    def __init__(self, node):
        """Given node, set an initial score and weigh based on css and id.

        :param node: lxml element this score belongs to.
        """
        self.node = node
        self.content_score = 0
        # Container-ish tags are likely article bodies; lists, forms and
        # headings are penalized.  The checks intentionally use disjoint
        # tag sets, so at most one of them fires for a given node.
        if node.tag in ('div', 'article'):
            self.content_score = 5
        if node.tag in ('pre', 'td', 'blockquote'):
            self.content_score = 3
        if node.tag in ('address', 'ol', 'ul', 'dl', 'dd', 'dt', 'li', 'form'):
            self.content_score = -3
        if node.tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'th'):
            self.content_score = -5
        # Adjust by the css class/id keyword hints.
        self.content_score += get_class_weight(node)

    @property
    def hash_id(self):
        """Short stable hash identifying the wrapped node."""
        return generate_hash_id(self.node)

    def __repr__(self):
        # BUG FIX: the original used ``"...{2:0.1F}>" % self.content_score``,
        # mixing %-interpolation with a str.format() template, which raises
        # TypeError whenever node is None.
        if self.node is None:
            return "<NullScoredNode with score {0:0.1F}>".format(
                self.content_score)
        return "<ScoredNode {0} {1}: {2:0.1F}>".format(
            self.node.tag,
            self.node.attrib,
            self.content_score
        )
|
bookieio/breadability | breadability/utils.py | cached_property | python | def cached_property(getter):
def decorator(self):
key = "_cached_property_" + getter.__name__
if not hasattr(self, key):
setattr(self, key, getter(self))
return getattr(self, key)
decorator.__name__ = getter.__name__
decorator.__module__ = getter.__module__
decorator.__doc__ = getter.__doc__
return property(decorator) | Decorator that converts a method into memoized property.
The decorator works as expected only for classes with
attribute '__dict__' and immutable properties. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/utils.py#L54-L72 | null | # -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import re
try:
from contextlib import ignored
except ImportError:
from contextlib import contextmanager
@contextmanager
def ignored(*exceptions):
try:
yield
except tuple(exceptions):
pass
MULTIPLE_WHITESPACE_PATTERN = re.compile(r"\s+", re.UNICODE)
def is_blank(text):
"""
Returns ``True`` if string contains only whitespace characters
or is empty. Otherwise ``False`` is returned.
"""
return not text or text.isspace()
def shrink_text(text):
return normalize_whitespace(text.strip())
def normalize_whitespace(text):
"""
Translates multiple whitespace into single space character.
If there is at least one new line character chunk is replaced
by single LF (Unix new line) character.
"""
return MULTIPLE_WHITESPACE_PATTERN.sub(_replace_whitespace, text)
def _replace_whitespace(match):
text = match.group()
if "\n" in text or "\r" in text:
return "\n"
else:
return " "
|
bookieio/breadability | breadability/readable.py | ok_embedded_video | python | def ok_embedded_video(node):
good_keywords = ('youtube', 'blip.tv', 'vimeo')
node_str = tounicode(node)
for key in good_keywords:
if key in node_str:
return True
return False | Check if this embed/video is an ok one to count. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L55-L64 | null | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def build_base_document(dom, return_fragment=True):
"""
Builds a base document with the body as root.
:param dom: Parsed lxml tree (Document Object Model).
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
body_element = dom.find(".//body")
if body_element is None:
fragment = fragment_fromstring('<div id="readabilityBody"/>')
fragment.append(dom)
else:
body_element.tag = "div"
body_element.set("id", "readabilityBody")
fragment = body_element
return document_from_fragment(fragment, return_fragment)
def build_error_document(dom, return_fragment=True):
"""
Builds an empty erorr document with the body as root.
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
fragment = fragment_fromstring(
'<div id="readabilityBody" class="parsing-error"/>')
return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
if return_fragment:
document = fragment
else:
document = fromstring(NULL_DOCUMENT)
body_element = document.find(".//body")
body_element.append(fragment)
document.doctype = "<!DOCTYPE html>"
return document
def check_siblings(candidate_node, candidate_list):
"""
Looks through siblings for content that might also be related.
Things like preambles, content split by ads that we removed, etc.
"""
candidate_css = candidate_node.node.get("class")
potential_target = candidate_node.content_score * 0.2
sibling_target_score = potential_target if potential_target > 10 else 10
parent = candidate_node.node.getparent()
siblings = parent.getchildren() if parent is not None else []
for sibling in siblings:
append = False
content_bonus = 0
if sibling is candidate_node.node:
append = True
# Give a bonus if sibling nodes and top candidates have the example
# same class name
if candidate_css and sibling.get("class") == candidate_css:
content_bonus += candidate_node.content_score * 0.2
if sibling in candidate_list:
adjusted_score = \
candidate_list[sibling].content_score + content_bonus
if adjusted_score >= sibling_target_score:
append = True
if sibling.tag == "p":
link_density = get_link_density(sibling)
content = sibling.text_content()
content_length = len(content)
if content_length > 80 and link_density < 0.25:
append = True
elif content_length < 80 and link_density == 0:
if ". " in content:
append = True
if append:
logger.debug(
"Sibling appended: %s %r", sibling.tag, sibling.attrib)
if sibling.tag not in ("div", "p"):
# We have a node that isn't a common block level element, like
# a form or td tag. Turn it into a div so it doesn't get
# filtered out later by accident.
sibling.tag = "div"
if candidate_node.node != sibling:
candidate_node.node.append(sibling)
return candidate_node
def clean_document(node):
"""Cleans up the final document we return as the readable article."""
if node is None or len(node) == 0:
return None
logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
to_drop = []
for n in node.iter():
# clean out any in-line style properties
if "style" in n.attrib:
n.set("style", "")
# remove embended objects unless it's wanted video
if n.tag in ("object", "embed") and not ok_embedded_video(n):
logger.debug("Dropping node %s %r", n.tag, n.attrib)
to_drop.append(n)
# clean headings with bad css or high link density
if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
# drop block element without content and children
if n.tag in ("div", "p"):
text_content = shrink_text(n.text_content())
if len(text_content) < 5 and not n.getchildren():
logger.debug(
"Dropping %s %r without content.", n.tag, n.attrib)
to_drop.append(n)
# finally try out the conditional cleaning of the target node
if clean_conditionally(n):
to_drop.append(n)
drop_nodes_with_parents(to_drop)
return node
def drop_nodes_with_parents(nodes):
for node in nodes:
if node.getparent() is None:
continue
node.drop_tree()
logger.debug(
"Dropped node with parent %s %r %s",
node.tag,
node.attrib,
node.text_content()[:50]
)
def clean_conditionally(node):
"""Remove the clean_el if it looks like bad content based on rules."""
if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
return # this is not the tag we are looking for
weight = get_class_weight(node)
# content_score = LOOK up the content score for this node we found
# before else default to 0
content_score = 0
if weight + content_score < 0:
logger.debug('Dropping conditional node')
logger.debug('Weight + score < 0')
return True
commas_count = node.text_content().count(',')
if commas_count < 10:
logger.debug(
"There are %d commas so we're processing more.", commas_count)
# If there are not very many commas, and the number of
# non-paragraph elements is more than paragraphs or other ominous
# signs, remove the element.
p = len(node.findall('.//p'))
img = len(node.findall('.//img'))
li = len(node.findall('.//li')) - 100
inputs = len(node.findall('.//input'))
embed = 0
embeds = node.findall('.//embed')
for e in embeds:
if ok_embedded_video(e):
embed += 1
link_density = get_link_density(node)
content_length = len(node.text_content())
remove_node = False
if li > p and node.tag != 'ul' and node.tag != 'ol':
logger.debug('Conditional drop: li > p and not ul/ol')
remove_node = True
elif inputs > p / 3.0:
logger.debug('Conditional drop: inputs > p/3.0')
remove_node = True
elif content_length < 25 and (img == 0 or img > 2):
logger.debug('Conditional drop: len < 25 and 0/>2 images')
remove_node = True
elif weight < 25 and link_density > 0.2:
logger.debug('Conditional drop: weight small (%f) and link is dense (%f)', weight, link_density)
remove_node = True
elif weight >= 25 and link_density > 0.5:
logger.debug('Conditional drop: weight big but link heavy')
remove_node = True
elif (embed == 1 and content_length < 75) or embed > 1:
logger.debug(
'Conditional drop: embed w/o much content or many embed')
remove_node = True
if remove_node:
logger.debug('Node will be removed: %s %r %s', node.tag, node.attrib, node.text_content()[:30])
return remove_node
return False # nope, don't remove anything
def prep_article(doc):
"""Once we've found our target article we want to clean it up.
Clean out:
- inline styles
- forms
- strip empty <p>
- extra tags
"""
return clean_document(doc)
def find_candidates(document):
"""
Finds cadidate nodes for the readable version of the article.
Here's we're going to remove unlikely nodes, find scores on the rest,
clean up and return the final best match.
"""
nodes_to_score = set()
should_remove = set()
for node in document.iter():
if is_unlikely_node(node):
logger.debug(
"We should drop unlikely: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif is_bad_link(node):
logger.debug(
"We should drop bad link: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif node.tag in SCORABLE_TAGS:
nodes_to_score.add(node)
return score_candidates(nodes_to_score), should_remove
def is_bad_link(node):
"""
Helper to determine if the node is link that is useless.
We've hit articles with many multiple links that should be cleaned out
because they're just there to pollute the space. See tests for examples.
"""
if node.tag != "a":
return False
name = node.get("name")
href = node.get("href")
if name and not href:
return True
if href:
href_parts = href.split("#")
if len(href_parts) == 2 and len(href_parts[1]) > 25:
return True
return False
class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
def leaf_div_elements_into_paragraphs(document):
"""
Turn some block elements that don't have children block level
elements into <p> elements.
Since we can't change the tree as we iterate over it, we must do this
before we process our document.
"""
for element in document.iter(tag="div"):
child_tags = tuple(n.tag for n in element.getchildren())
if "div" not in child_tags and "p" not in child_tags:
logger.debug(
"Changing leaf block element <%s> into <p>", element.tag)
element.tag = "p"
return document
|
bookieio/breadability | breadability/readable.py | build_base_document | python | def build_base_document(dom, return_fragment=True):
body_element = dom.find(".//body")
if body_element is None:
fragment = fragment_fromstring('<div id="readabilityBody"/>')
fragment.append(dom)
else:
body_element.tag = "div"
body_element.set("id", "readabilityBody")
fragment = body_element
return document_from_fragment(fragment, return_fragment) | Builds a base document with the body as root.
:param dom: Parsed lxml tree (Document Object Model).
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L67-L85 | [
"def document_from_fragment(fragment, return_fragment):\n if return_fragment:\n document = fragment\n else:\n document = fromstring(NULL_DOCUMENT)\n body_element = document.find(\".//body\")\n body_element.append(fragment)\n\n document.doctype = \"<!DOCTYPE html>\"\n return d... | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def ok_embedded_video(node):
"""Check if this embed/video is an ok one to count."""
good_keywords = ('youtube', 'blip.tv', 'vimeo')
node_str = tounicode(node)
for key in good_keywords:
if key in node_str:
return True
return False
def build_error_document(dom, return_fragment=True):
"""
Builds an empty erorr document with the body as root.
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
fragment = fragment_fromstring(
'<div id="readabilityBody" class="parsing-error"/>')
return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
if return_fragment:
document = fragment
else:
document = fromstring(NULL_DOCUMENT)
body_element = document.find(".//body")
body_element.append(fragment)
document.doctype = "<!DOCTYPE html>"
return document
def check_siblings(candidate_node, candidate_list):
"""
Looks through siblings for content that might also be related.
Things like preambles, content split by ads that we removed, etc.
"""
candidate_css = candidate_node.node.get("class")
potential_target = candidate_node.content_score * 0.2
sibling_target_score = potential_target if potential_target > 10 else 10
parent = candidate_node.node.getparent()
siblings = parent.getchildren() if parent is not None else []
for sibling in siblings:
append = False
content_bonus = 0
if sibling is candidate_node.node:
append = True
# Give a bonus if sibling nodes and top candidates have the example
# same class name
if candidate_css and sibling.get("class") == candidate_css:
content_bonus += candidate_node.content_score * 0.2
if sibling in candidate_list:
adjusted_score = \
candidate_list[sibling].content_score + content_bonus
if adjusted_score >= sibling_target_score:
append = True
if sibling.tag == "p":
link_density = get_link_density(sibling)
content = sibling.text_content()
content_length = len(content)
if content_length > 80 and link_density < 0.25:
append = True
elif content_length < 80 and link_density == 0:
if ". " in content:
append = True
if append:
logger.debug(
"Sibling appended: %s %r", sibling.tag, sibling.attrib)
if sibling.tag not in ("div", "p"):
# We have a node that isn't a common block level element, like
# a form or td tag. Turn it into a div so it doesn't get
# filtered out later by accident.
sibling.tag = "div"
if candidate_node.node != sibling:
candidate_node.node.append(sibling)
return candidate_node
def clean_document(node):
"""Cleans up the final document we return as the readable article."""
if node is None or len(node) == 0:
return None
logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
to_drop = []
for n in node.iter():
# clean out any in-line style properties
if "style" in n.attrib:
n.set("style", "")
# remove embended objects unless it's wanted video
if n.tag in ("object", "embed") and not ok_embedded_video(n):
logger.debug("Dropping node %s %r", n.tag, n.attrib)
to_drop.append(n)
# clean headings with bad css or high link density
if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
# drop block element without content and children
if n.tag in ("div", "p"):
text_content = shrink_text(n.text_content())
if len(text_content) < 5 and not n.getchildren():
logger.debug(
"Dropping %s %r without content.", n.tag, n.attrib)
to_drop.append(n)
# finally try out the conditional cleaning of the target node
if clean_conditionally(n):
to_drop.append(n)
drop_nodes_with_parents(to_drop)
return node
def drop_nodes_with_parents(nodes):
for node in nodes:
if node.getparent() is None:
continue
node.drop_tree()
logger.debug(
"Dropped node with parent %s %r %s",
node.tag,
node.attrib,
node.text_content()[:50]
)
def clean_conditionally(node):
"""Remove the clean_el if it looks like bad content based on rules."""
if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
return # this is not the tag we are looking for
weight = get_class_weight(node)
# content_score = LOOK up the content score for this node we found
# before else default to 0
content_score = 0
if weight + content_score < 0:
logger.debug('Dropping conditional node')
logger.debug('Weight + score < 0')
return True
commas_count = node.text_content().count(',')
if commas_count < 10:
logger.debug(
"There are %d commas so we're processing more.", commas_count)
# If there are not very many commas, and the number of
# non-paragraph elements is more than paragraphs or other ominous
# signs, remove the element.
p = len(node.findall('.//p'))
img = len(node.findall('.//img'))
li = len(node.findall('.//li')) - 100
inputs = len(node.findall('.//input'))
embed = 0
embeds = node.findall('.//embed')
for e in embeds:
if ok_embedded_video(e):
embed += 1
link_density = get_link_density(node)
content_length = len(node.text_content())
remove_node = False
if li > p and node.tag != 'ul' and node.tag != 'ol':
logger.debug('Conditional drop: li > p and not ul/ol')
remove_node = True
elif inputs > p / 3.0:
logger.debug('Conditional drop: inputs > p/3.0')
remove_node = True
elif content_length < 25 and (img == 0 or img > 2):
logger.debug('Conditional drop: len < 25 and 0/>2 images')
remove_node = True
elif weight < 25 and link_density > 0.2:
logger.debug('Conditional drop: weight small (%f) and link is dense (%f)', weight, link_density)
remove_node = True
elif weight >= 25 and link_density > 0.5:
logger.debug('Conditional drop: weight big but link heavy')
remove_node = True
elif (embed == 1 and content_length < 75) or embed > 1:
logger.debug(
'Conditional drop: embed w/o much content or many embed')
remove_node = True
if remove_node:
logger.debug('Node will be removed: %s %r %s', node.tag, node.attrib, node.text_content()[:30])
return remove_node
return False # nope, don't remove anything
def prep_article(doc):
"""Once we've found our target article we want to clean it up.
Clean out:
- inline styles
- forms
- strip empty <p>
- extra tags
"""
return clean_document(doc)
def find_candidates(document):
"""
Finds cadidate nodes for the readable version of the article.
Here's we're going to remove unlikely nodes, find scores on the rest,
clean up and return the final best match.
"""
nodes_to_score = set()
should_remove = set()
for node in document.iter():
if is_unlikely_node(node):
logger.debug(
"We should drop unlikely: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif is_bad_link(node):
logger.debug(
"We should drop bad link: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif node.tag in SCORABLE_TAGS:
nodes_to_score.add(node)
return score_candidates(nodes_to_score), should_remove
def is_bad_link(node):
"""
Helper to determine if the node is link that is useless.
We've hit articles with many multiple links that should be cleaned out
because they're just there to pollute the space. See tests for examples.
"""
if node.tag != "a":
return False
name = node.get("name")
href = node.get("href")
if name and not href:
return True
if href:
href_parts = href.split("#")
if len(href_parts) == 2 and len(href_parts[1]) > 25:
return True
return False
class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
def leaf_div_elements_into_paragraphs(document):
"""
Turn some block elements that don't have children block level
elements into <p> elements.
Since we can't change the tree as we iterate over it, we must do this
before we process our document.
"""
for element in document.iter(tag="div"):
child_tags = tuple(n.tag for n in element.getchildren())
if "div" not in child_tags and "p" not in child_tags:
logger.debug(
"Changing leaf block element <%s> into <p>", element.tag)
element.tag = "p"
return document
|
bookieio/breadability | breadability/readable.py | check_siblings | python | def check_siblings(candidate_node, candidate_list):
candidate_css = candidate_node.node.get("class")
potential_target = candidate_node.content_score * 0.2
sibling_target_score = potential_target if potential_target > 10 else 10
parent = candidate_node.node.getparent()
siblings = parent.getchildren() if parent is not None else []
for sibling in siblings:
append = False
content_bonus = 0
if sibling is candidate_node.node:
append = True
# Give a bonus if sibling nodes and top candidates have the example
# same class name
if candidate_css and sibling.get("class") == candidate_css:
content_bonus += candidate_node.content_score * 0.2
if sibling in candidate_list:
adjusted_score = \
candidate_list[sibling].content_score + content_bonus
if adjusted_score >= sibling_target_score:
append = True
if sibling.tag == "p":
link_density = get_link_density(sibling)
content = sibling.text_content()
content_length = len(content)
if content_length > 80 and link_density < 0.25:
append = True
elif content_length < 80 and link_density == 0:
if ". " in content:
append = True
if append:
logger.debug(
"Sibling appended: %s %r", sibling.tag, sibling.attrib)
if sibling.tag not in ("div", "p"):
# We have a node that isn't a common block level element, like
# a form or td tag. Turn it into a div so it doesn't get
# filtered out later by accident.
sibling.tag = "div"
if candidate_node.node != sibling:
candidate_node.node.append(sibling)
return candidate_node | Looks through siblings for content that might also be related.
Things like preambles, content split by ads that we removed, etc. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L113-L166 | [
"def get_link_density(node, node_text=None):\n \"\"\"\n Computes the ratio for text in given node and text in links\n contained in the node. It is computed from number of\n characters in the texts.\n\n :parameter Element node:\n HTML element in which links density is computed.\n :parameter ... | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def ok_embedded_video(node):
"""Check if this embed/video is an ok one to count."""
good_keywords = ('youtube', 'blip.tv', 'vimeo')
node_str = tounicode(node)
for key in good_keywords:
if key in node_str:
return True
return False
def build_base_document(dom, return_fragment=True):
"""
Builds a base document with the body as root.
:param dom: Parsed lxml tree (Document Object Model).
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
body_element = dom.find(".//body")
if body_element is None:
fragment = fragment_fromstring('<div id="readabilityBody"/>')
fragment.append(dom)
else:
body_element.tag = "div"
body_element.set("id", "readabilityBody")
fragment = body_element
return document_from_fragment(fragment, return_fragment)
def build_error_document(dom, return_fragment=True):
"""
Builds an empty erorr document with the body as root.
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
fragment = fragment_fromstring(
'<div id="readabilityBody" class="parsing-error"/>')
return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
if return_fragment:
document = fragment
else:
document = fromstring(NULL_DOCUMENT)
body_element = document.find(".//body")
body_element.append(fragment)
document.doctype = "<!DOCTYPE html>"
return document
def clean_document(node):
    """Cleans up the final document we return as the readable article.

    Strips inline styles, non-video embeds, insignificant or link-heavy
    headings and empty block elements, then applies the conditional
    cleaning rules via clean_conditionally().

    :param node: root lxml element of the candidate article.
    :returns: the cleaned node, or None when there is nothing to clean.
    """
    if node is None or len(node) == 0:
        return None
    logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
    # Collect doomed nodes first; mutating the tree while iterating it
    # would invalidate the iterator.
    to_drop = []
    for n in node.iter():
        # clean out any in-line style properties
        if "style" in n.attrib:
            n.set("style", "")
        # remove embedded objects unless it's wanted video
        if n.tag in ("object", "embed") and not ok_embedded_video(n):
            logger.debug("Dropping node %s %r", n.tag, n.attrib)
            to_drop.append(n)
        # clean headings with bad css or high link density
        if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        # drop block element without content and children
        if n.tag in ("div", "p"):
            text_content = shrink_text(n.text_content())
            if len(text_content) < 5 and not n.getchildren():
                logger.debug(
                    "Dropping %s %r without content.", n.tag, n.attrib)
                to_drop.append(n)
        # finally try out the conditional cleaning of the target node
        if clean_conditionally(n):
            to_drop.append(n)
    # NOTE: a node may be appended to to_drop more than once;
    # drop_nodes_with_parents skips nodes that are already detached.
    drop_nodes_with_parents(to_drop)
    return node
def drop_nodes_with_parents(nodes):
    """Drop every node that is still attached to a tree (keeps tail text)."""
    for doomed in nodes:
        if doomed.getparent() is None:
            # Already detached (e.g. removed via an ancestor) -- skip it.
            continue
        doomed.drop_tree()
        logger.debug(
            "Dropped node with parent %s %r %s",
            doomed.tag,
            doomed.attrib,
            doomed.text_content()[:50]
        )
def clean_conditionally(node):
    """Decide whether *node* should be dropped as probable boilerplate.

    Applies the classic readability heuristics -- class/id weight, comma
    count, link density and the ratio of images/inputs/list items to
    paragraphs -- to form, table, list and block elements.

    :param node: lxml element to evaluate.
    :returns: True when the node looks like bad content and should be
        removed, False otherwise.
    """
    if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
        # Fix: return an explicit False (was a bare ``return`` -> None) so
        # the function always honours its boolean contract.
        return False  # this is not the tag we are looking for
    weight = get_class_weight(node)
    # content_score = LOOK up the content score for this node we found
    # before else default to 0
    content_score = 0
    if weight + content_score < 0:
        logger.debug('Dropping conditional node')
        logger.debug('Weight + score < 0')
        return True
    commas_count = node.text_content().count(',')
    if commas_count < 10:
        logger.debug(
            "There are %d commas so we're processing more.", commas_count)
        # If there are not very many commas, and the number of
        # non-paragraph elements is more than paragraphs or other ominous
        # signs, remove the element.
        p = len(node.findall('.//p'))
        img = len(node.findall('.//img'))
        # NOTE(review): the -100 offset makes the "li > p" rule fire only
        # for extremely list-heavy nodes; inherited from the original
        # readability algorithm and kept for behavioural compatibility.
        li = len(node.findall('.//li')) - 100
        inputs = len(node.findall('.//input'))
        # Count only embeds that point at whitelisted video hosts.
        embed = 0
        embeds = node.findall('.//embed')
        for e in embeds:
            if ok_embedded_video(e):
                embed += 1
        link_density = get_link_density(node)
        content_length = len(node.text_content())
        remove_node = False
        if li > p and node.tag != 'ul' and node.tag != 'ol':
            logger.debug('Conditional drop: li > p and not ul/ol')
            remove_node = True
        elif inputs > p / 3.0:
            logger.debug('Conditional drop: inputs > p/3.0')
            remove_node = True
        elif content_length < 25 and (img == 0 or img > 2):
            logger.debug('Conditional drop: len < 25 and 0/>2 images')
            remove_node = True
        elif weight < 25 and link_density > 0.2:
            logger.debug(
                'Conditional drop: weight small (%f) and link is dense (%f)',
                weight, link_density)
            remove_node = True
        elif weight >= 25 and link_density > 0.5:
            logger.debug('Conditional drop: weight big but link heavy')
            remove_node = True
        elif (embed == 1 and content_length < 75) or embed > 1:
            logger.debug(
                'Conditional drop: embed w/o much content or many embed')
            remove_node = True
        if remove_node:
            logger.debug(
                'Node will be removed: %s %r %s',
                node.tag, node.attrib, node.text_content()[:30])
        return remove_node
    return False  # nope, don't remove anything
def prep_article(doc):
    """Clean the chosen article node for presentation.

    Delegates to clean_document, which strips inline styles, forms,
    empty <p> tags and other extraneous markup.
    """
    cleaned = clean_document(doc)
    return cleaned
def find_candidates(document):
    """
    Find candidate nodes for the readable version of the article.

    :returns: two-tuple of (scored candidates, set of nodes that should
        be removed before further processing).
    """
    scorable = set()
    removable = set()
    for element in document.iter():
        if is_unlikely_node(element):
            logger.debug(
                "We should drop unlikely: %s %r", element.tag, element.attrib)
            removable.add(element)
        elif is_bad_link(element):
            logger.debug(
                "We should drop bad link: %s %r", element.tag, element.attrib)
            removable.add(element)
        elif element.tag in SCORABLE_TAGS:
            scorable.add(element)
    return score_candidates(scorable), removable
def is_bad_link(node):
    """
    Helper to determine if the node is a useless link.

    We've hit articles with many multiple links that should be cleaned out
    because they're just there to pollute the space. See tests for examples.
    """
    if node.tag != "a":
        return False
    name_attr = node.get("name")
    href_attr = node.get("href")
    # A named anchor with no destination is navigation pollution.
    if name_attr and not href_attr:
        return True
    if href_attr:
        # A very long URL fragment is a strong spam/navigation signal.
        pieces = href_attr.split("#")
        if len(pieces) == 2 and len(pieces[1]) > 25:
            return True
    return False
class Article(object):
    """Parsed readable object"""
    def __init__(self, html, url=None, return_fragment=True):
        """
        Create the Article we're going to use.

        :param html: The string of HTML we're going to parse.
        :param url: The url so we can adjust the links to still work.
        :param return_fragment: Should we return a <div> fragment or
            a full <html> document.
        """
        self._original_document = OriginalDocument(html, url=url)
        self._return_fragment = return_fragment
    def __str__(self):
        """Serialized (byte-string) form of the readable article."""
        return tostring(self._readable())
    def __unicode__(self):
        """Unicode form of the readable article (Python 2 protocol)."""
        return tounicode(self._readable())
    @cached_property
    def dom(self):
        """Parsed lxml tree (Document Object Model) of the given html."""
        try:
            dom = self._original_document.dom
            # cleaning doesn't return, just wipes in place
            html_cleaner(dom)
            return leaf_div_elements_into_paragraphs(dom)
        except ValueError:
            # Source markup could not be parsed at all.
            return None
    @cached_property
    def candidates(self):
        """Generates list of candidates from the DOM."""
        dom = self.dom
        if dom is None or len(dom) == 0:
            return None
        candidates, unlikely_candidates = find_candidates(dom)
        # Drop nav/ads/comment-like nodes eagerly so they cannot leak
        # into the extracted article.
        drop_nodes_with_parents(unlikely_candidates)
        return candidates
    @cached_property
    def main_text(self):
        """Annotated plain text of the article body."""
        # Operate on a deepcopy so the cached readable_dom stays intact
        # (the handler may mutate the tree it is given).
        dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
        return AnnotatedTextHandler.parse(dom)
    @cached_property
    def readable(self):
        """Unicode HTML serialization of the readable article."""
        return tounicode(self.readable_dom)
    @cached_property
    def readable_dom(self):
        """Cached lxml tree of the readable article."""
        return self._readable()
    def _readable(self):
        """The readable parsed article"""
        if not self.candidates:
            logger.info("No candidates found in document.")
            return self._handle_no_candidates()
        # right now we return the highest scoring candidate content
        best_candidates = sorted(
            (c for c in self.candidates.values()),
            key=attrgetter("content_score"), reverse=True)
        printer = PrettyPrinter(indent=2)
        logger.debug(printer.pformat(best_candidates))
        # since we have several candidates, check the winner's siblings
        # for extra content
        winner = best_candidates[0]
        updated_winner = check_siblings(winner, self.candidates)
        updated_winner.node = prep_article(updated_winner.node)
        if updated_winner.node is not None:
            dom = build_base_document(
                updated_winner.node, self._return_fragment)
        else:
            logger.info(
                'Had candidates but failed to find a cleaned winning DOM.')
            dom = self._handle_no_candidates()
        return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
    def _remove_orphans(self, dom):
        # Unwrap elements whose single child shares their tag
        # (e.g. <div><div>...</div></div>), keeping the child's content.
        for node in dom.iterdescendants():
            if len(node) == 1 and tuple(node)[0].tag == node.tag:
                node.drop_tag()
        return dom
    def _handle_no_candidates(self):
        """
        If we fail to find a good candidate we need to find something else.
        """
        # since we've not found a good candidate we're should help this
        if self.dom is not None and len(self.dom):
            dom = prep_article(self.dom)
            dom = build_base_document(dom, self._return_fragment)
            return self._remove_orphans(
                dom.get_element_by_id("readabilityBody"))
        else:
            logger.info("No document to use.")
            return build_error_document(self._return_fragment)
def leaf_div_elements_into_paragraphs(document):
    """
    Turn some block elements that don't have children block level
    elements into <p> elements.

    Since we can't change the tree as we iterate over it, we must do this
    before we process our document.
    """
    for div in document.iter(tag="div"):
        child_tags = {child.tag for child in div.getchildren()}
        if "div" in child_tags or "p" in child_tags:
            continue
        logger.debug(
            "Changing leaf block element <%s> into <p>", div.tag)
        div.tag = "p"
    return document
|
bookieio/breadability | breadability/readable.py | clean_document | python | def clean_document(node):
if node is None or len(node) == 0:
return None
logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
to_drop = []
for n in node.iter():
# clean out any in-line style properties
if "style" in n.attrib:
n.set("style", "")
# remove embended objects unless it's wanted video
if n.tag in ("object", "embed") and not ok_embedded_video(n):
logger.debug("Dropping node %s %r", n.tag, n.attrib)
to_drop.append(n)
# clean headings with bad css or high link density
if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
# drop block element without content and children
if n.tag in ("div", "p"):
text_content = shrink_text(n.text_content())
if len(text_content) < 5 and not n.getchildren():
logger.debug(
"Dropping %s %r without content.", n.tag, n.attrib)
to_drop.append(n)
# finally try out the conditional cleaning of the target node
if clean_conditionally(n):
to_drop.append(n)
drop_nodes_with_parents(to_drop)
return node | Cleans up the final document we return as the readable article. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L169-L210 | [
"def shrink_text(text):\n return normalize_whitespace(text.strip())\n",
"def get_class_weight(node):\n \"\"\"\n Computes weight of element according to its class/id.\n\n We're using sets to help efficiently check for existence of matches.\n \"\"\"\n weight = 0\n\n if check_node_attributes(CLS... | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def ok_embedded_video(node):
"""Check if this embed/video is an ok one to count."""
good_keywords = ('youtube', 'blip.tv', 'vimeo')
node_str = tounicode(node)
for key in good_keywords:
if key in node_str:
return True
return False
def build_base_document(dom, return_fragment=True):
"""
Builds a base document with the body as root.
:param dom: Parsed lxml tree (Document Object Model).
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
body_element = dom.find(".//body")
if body_element is None:
fragment = fragment_fromstring('<div id="readabilityBody"/>')
fragment.append(dom)
else:
body_element.tag = "div"
body_element.set("id", "readabilityBody")
fragment = body_element
return document_from_fragment(fragment, return_fragment)
def build_error_document(dom, return_fragment=True):
"""
Builds an empty erorr document with the body as root.
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
fragment = fragment_fromstring(
'<div id="readabilityBody" class="parsing-error"/>')
return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
if return_fragment:
document = fragment
else:
document = fromstring(NULL_DOCUMENT)
body_element = document.find(".//body")
body_element.append(fragment)
document.doctype = "<!DOCTYPE html>"
return document
def check_siblings(candidate_node, candidate_list):
    """
    Looks through siblings for content that might also be related.

    Things like preambles, content split by ads that we removed, etc.

    :param candidate_node: winning scored candidate; accepted siblings
        are appended to its node in place.
    :param candidate_list: mapping of DOM node -> scored candidate.
    :returns: the (possibly mutated) candidate_node.
    """
    candidate_css = candidate_node.node.get("class")
    potential_target = candidate_node.content_score * 0.2
    # Siblings need at least 20% of the winner's score (minimum 10) to join.
    sibling_target_score = potential_target if potential_target > 10 else 10
    parent = candidate_node.node.getparent()
    siblings = parent.getchildren() if parent is not None else []
    for sibling in siblings:
        append = False
        content_bonus = 0
        if sibling is candidate_node.node:
            append = True
        # Give a bonus if sibling nodes and top candidates have the exact
        # same class name
        if candidate_css and sibling.get("class") == candidate_css:
            content_bonus += candidate_node.content_score * 0.2
        if sibling in candidate_list:
            adjusted_score = \
                candidate_list[sibling].content_score + content_bonus
            if adjusted_score >= sibling_target_score:
                append = True
        # Long paragraphs join when mostly link-free; short ones only when
        # they contain sentence-like prose with no links at all.
        if sibling.tag == "p":
            link_density = get_link_density(sibling)
            content = sibling.text_content()
            content_length = len(content)
            if content_length > 80 and link_density < 0.25:
                append = True
            elif content_length < 80 and link_density == 0:
                if ". " in content:
                    append = True
        if append:
            logger.debug(
                "Sibling appended: %s %r", sibling.tag, sibling.attrib)
            if sibling.tag not in ("div", "p"):
                # We have a node that isn't a common block level element, like
                # a form or td tag. Turn it into a div so it doesn't get
                # filtered out later by accident.
                sibling.tag = "div"
            if candidate_node.node != sibling:
                candidate_node.node.append(sibling)
    return candidate_node
def drop_nodes_with_parents(nodes):
    """Drop every node that is still attached to a tree (keeps tail text)."""
    for doomed in nodes:
        if doomed.getparent() is None:
            # Already detached (e.g. removed via an ancestor) -- skip it.
            continue
        doomed.drop_tree()
        logger.debug(
            "Dropped node with parent %s %r %s",
            doomed.tag,
            doomed.attrib,
            doomed.text_content()[:50]
        )
def clean_conditionally(node):
    """Decide whether *node* should be dropped as probable boilerplate.

    Applies the classic readability heuristics -- class/id weight, comma
    count, link density and the ratio of images/inputs/list items to
    paragraphs -- to form, table, list and block elements.

    :param node: lxml element to evaluate.
    :returns: True when the node looks like bad content and should be
        removed, False otherwise.
    """
    if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
        # Fix: return an explicit False (was a bare ``return`` -> None) so
        # the function always honours its boolean contract.
        return False  # this is not the tag we are looking for
    weight = get_class_weight(node)
    # content_score = LOOK up the content score for this node we found
    # before else default to 0
    content_score = 0
    if weight + content_score < 0:
        logger.debug('Dropping conditional node')
        logger.debug('Weight + score < 0')
        return True
    commas_count = node.text_content().count(',')
    if commas_count < 10:
        logger.debug(
            "There are %d commas so we're processing more.", commas_count)
        # If there are not very many commas, and the number of
        # non-paragraph elements is more than paragraphs or other ominous
        # signs, remove the element.
        p = len(node.findall('.//p'))
        img = len(node.findall('.//img'))
        # NOTE(review): the -100 offset makes the "li > p" rule fire only
        # for extremely list-heavy nodes; inherited from the original
        # readability algorithm and kept for behavioural compatibility.
        li = len(node.findall('.//li')) - 100
        inputs = len(node.findall('.//input'))
        # Count only embeds that point at whitelisted video hosts.
        embed = 0
        embeds = node.findall('.//embed')
        for e in embeds:
            if ok_embedded_video(e):
                embed += 1
        link_density = get_link_density(node)
        content_length = len(node.text_content())
        remove_node = False
        if li > p and node.tag != 'ul' and node.tag != 'ol':
            logger.debug('Conditional drop: li > p and not ul/ol')
            remove_node = True
        elif inputs > p / 3.0:
            logger.debug('Conditional drop: inputs > p/3.0')
            remove_node = True
        elif content_length < 25 and (img == 0 or img > 2):
            logger.debug('Conditional drop: len < 25 and 0/>2 images')
            remove_node = True
        elif weight < 25 and link_density > 0.2:
            logger.debug(
                'Conditional drop: weight small (%f) and link is dense (%f)',
                weight, link_density)
            remove_node = True
        elif weight >= 25 and link_density > 0.5:
            logger.debug('Conditional drop: weight big but link heavy')
            remove_node = True
        elif (embed == 1 and content_length < 75) or embed > 1:
            logger.debug(
                'Conditional drop: embed w/o much content or many embed')
            remove_node = True
        if remove_node:
            logger.debug(
                'Node will be removed: %s %r %s',
                node.tag, node.attrib, node.text_content()[:30])
        return remove_node
    return False  # nope, don't remove anything
def prep_article(doc):
"""Once we've found our target article we want to clean it up.
Clean out:
- inline styles
- forms
- strip empty <p>
- extra tags
"""
return clean_document(doc)
def find_candidates(document):
"""
Finds cadidate nodes for the readable version of the article.
Here's we're going to remove unlikely nodes, find scores on the rest,
clean up and return the final best match.
"""
nodes_to_score = set()
should_remove = set()
for node in document.iter():
if is_unlikely_node(node):
logger.debug(
"We should drop unlikely: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif is_bad_link(node):
logger.debug(
"We should drop bad link: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif node.tag in SCORABLE_TAGS:
nodes_to_score.add(node)
return score_candidates(nodes_to_score), should_remove
def is_bad_link(node):
    """
    Helper to determine if the node is a useless link.

    We've hit articles with many multiple links that should be cleaned out
    because they're just there to pollute the space. See tests for examples.
    """
    if node.tag != "a":
        return False
    name_attr = node.get("name")
    href_attr = node.get("href")
    # A named anchor with no destination is navigation pollution.
    if name_attr and not href_attr:
        return True
    if href_attr:
        # A very long URL fragment is a strong spam/navigation signal.
        pieces = href_attr.split("#")
        if len(pieces) == 2 and len(pieces[1]) > 25:
            return True
    return False
class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
def leaf_div_elements_into_paragraphs(document):
    """
    Turn some block elements that don't have children block level
    elements into <p> elements.

    Since we can't change the tree as we iterate over it, we must do this
    before we process our document.
    """
    for div in document.iter(tag="div"):
        child_tags = {child.tag for child in div.getchildren()}
        if "div" in child_tags or "p" in child_tags:
            continue
        logger.debug(
            "Changing leaf block element <%s> into <p>", div.tag)
        div.tag = "p"
    return document
|
bookieio/breadability | breadability/readable.py | clean_conditionally | python | def clean_conditionally(node):
if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
return # this is not the tag we are looking for
weight = get_class_weight(node)
# content_score = LOOK up the content score for this node we found
# before else default to 0
content_score = 0
if weight + content_score < 0:
logger.debug('Dropping conditional node')
logger.debug('Weight + score < 0')
return True
commas_count = node.text_content().count(',')
if commas_count < 10:
logger.debug(
"There are %d commas so we're processing more.", commas_count)
# If there are not very many commas, and the number of
# non-paragraph elements is more than paragraphs or other ominous
# signs, remove the element.
p = len(node.findall('.//p'))
img = len(node.findall('.//img'))
li = len(node.findall('.//li')) - 100
inputs = len(node.findall('.//input'))
embed = 0
embeds = node.findall('.//embed')
for e in embeds:
if ok_embedded_video(e):
embed += 1
link_density = get_link_density(node)
content_length = len(node.text_content())
remove_node = False
if li > p and node.tag != 'ul' and node.tag != 'ol':
logger.debug('Conditional drop: li > p and not ul/ol')
remove_node = True
elif inputs > p / 3.0:
logger.debug('Conditional drop: inputs > p/3.0')
remove_node = True
elif content_length < 25 and (img == 0 or img > 2):
logger.debug('Conditional drop: len < 25 and 0/>2 images')
remove_node = True
elif weight < 25 and link_density > 0.2:
logger.debug('Conditional drop: weight small (%f) and link is dense (%f)', weight, link_density)
remove_node = True
elif weight >= 25 and link_density > 0.5:
logger.debug('Conditional drop: weight big but link heavy')
remove_node = True
elif (embed == 1 and content_length < 75) or embed > 1:
logger.debug(
'Conditional drop: embed w/o much content or many embed')
remove_node = True
if remove_node:
logger.debug('Node will be removed: %s %r %s', node.tag, node.attrib, node.text_content()[:30])
return remove_node
return False | Remove the clean_el if it looks like bad content based on rules. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L227-L290 | [
"def get_class_weight(node):\n \"\"\"\n Computes weight of element according to its class/id.\n\n We're using sets to help efficiently check for existence of matches.\n \"\"\"\n weight = 0\n\n if check_node_attributes(CLS_WEIGHT_NEGATIVE, node, \"class\"):\n weight -= 25\n if check_node_... | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def ok_embedded_video(node):
"""Check if this embed/video is an ok one to count."""
good_keywords = ('youtube', 'blip.tv', 'vimeo')
node_str = tounicode(node)
for key in good_keywords:
if key in node_str:
return True
return False
def build_base_document(dom, return_fragment=True):
"""
Builds a base document with the body as root.
:param dom: Parsed lxml tree (Document Object Model).
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
body_element = dom.find(".//body")
if body_element is None:
fragment = fragment_fromstring('<div id="readabilityBody"/>')
fragment.append(dom)
else:
body_element.tag = "div"
body_element.set("id", "readabilityBody")
fragment = body_element
return document_from_fragment(fragment, return_fragment)
def build_error_document(dom, return_fragment=True):
"""
Builds an empty erorr document with the body as root.
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
fragment = fragment_fromstring(
'<div id="readabilityBody" class="parsing-error"/>')
return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
if return_fragment:
document = fragment
else:
document = fromstring(NULL_DOCUMENT)
body_element = document.find(".//body")
body_element.append(fragment)
document.doctype = "<!DOCTYPE html>"
return document
def check_siblings(candidate_node, candidate_list):
    """
    Looks through siblings for content that might also be related.

    Things like preambles, content split by ads that we removed, etc.

    :param candidate_node: winning scored candidate; accepted siblings
        are appended to its node in place.
    :param candidate_list: mapping of DOM node -> scored candidate.
    :returns: the (possibly mutated) candidate_node.
    """
    candidate_css = candidate_node.node.get("class")
    potential_target = candidate_node.content_score * 0.2
    # Siblings need at least 20% of the winner's score (minimum 10) to join.
    sibling_target_score = potential_target if potential_target > 10 else 10
    parent = candidate_node.node.getparent()
    siblings = parent.getchildren() if parent is not None else []
    for sibling in siblings:
        append = False
        content_bonus = 0
        if sibling is candidate_node.node:
            append = True
        # Give a bonus if sibling nodes and top candidates have the exact
        # same class name
        if candidate_css and sibling.get("class") == candidate_css:
            content_bonus += candidate_node.content_score * 0.2
        if sibling in candidate_list:
            adjusted_score = \
                candidate_list[sibling].content_score + content_bonus
            if adjusted_score >= sibling_target_score:
                append = True
        # Long paragraphs join when mostly link-free; short ones only when
        # they contain sentence-like prose with no links at all.
        if sibling.tag == "p":
            link_density = get_link_density(sibling)
            content = sibling.text_content()
            content_length = len(content)
            if content_length > 80 and link_density < 0.25:
                append = True
            elif content_length < 80 and link_density == 0:
                if ". " in content:
                    append = True
        if append:
            logger.debug(
                "Sibling appended: %s %r", sibling.tag, sibling.attrib)
            if sibling.tag not in ("div", "p"):
                # We have a node that isn't a common block level element, like
                # a form or td tag. Turn it into a div so it doesn't get
                # filtered out later by accident.
                sibling.tag = "div"
            if candidate_node.node != sibling:
                candidate_node.node.append(sibling)
    return candidate_node
def clean_document(node):
    """Cleans up the final document we return as the readable article.

    Strips inline styles, non-video embeds, insignificant or link-heavy
    headings and empty block elements, then applies the conditional
    cleaning rules via clean_conditionally().

    :param node: root lxml element of the candidate article.
    :returns: the cleaned node, or None when there is nothing to clean.
    """
    if node is None or len(node) == 0:
        return None
    logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
    # Collect doomed nodes first; mutating the tree while iterating it
    # would invalidate the iterator.
    to_drop = []
    for n in node.iter():
        # clean out any in-line style properties
        if "style" in n.attrib:
            n.set("style", "")
        # remove embedded objects unless it's wanted video
        if n.tag in ("object", "embed") and not ok_embedded_video(n):
            logger.debug("Dropping node %s %r", n.tag, n.attrib)
            to_drop.append(n)
        # clean headings with bad css or high link density
        if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        # drop block element without content and children
        if n.tag in ("div", "p"):
            text_content = shrink_text(n.text_content())
            if len(text_content) < 5 and not n.getchildren():
                logger.debug(
                    "Dropping %s %r without content.", n.tag, n.attrib)
                to_drop.append(n)
        # finally try out the conditional cleaning of the target node
        if clean_conditionally(n):
            to_drop.append(n)
    # NOTE: a node may be appended to to_drop more than once;
    # drop_nodes_with_parents skips nodes that are already detached.
    drop_nodes_with_parents(to_drop)
    return node
def drop_nodes_with_parents(nodes):
for node in nodes:
if node.getparent() is None:
continue
node.drop_tree()
logger.debug(
"Dropped node with parent %s %r %s",
node.tag,
node.attrib,
node.text_content()[:50]
)
# nope, don't remove anything
def prep_article(doc):
"""Once we've found our target article we want to clean it up.
Clean out:
- inline styles
- forms
- strip empty <p>
- extra tags
"""
return clean_document(doc)
def find_candidates(document):
"""
Finds cadidate nodes for the readable version of the article.
Here's we're going to remove unlikely nodes, find scores on the rest,
clean up and return the final best match.
"""
nodes_to_score = set()
should_remove = set()
for node in document.iter():
if is_unlikely_node(node):
logger.debug(
"We should drop unlikely: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif is_bad_link(node):
logger.debug(
"We should drop bad link: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif node.tag in SCORABLE_TAGS:
nodes_to_score.add(node)
return score_candidates(nodes_to_score), should_remove
def is_bad_link(node):
    """
    Helper to determine if the node is a useless link.

    We've hit articles with many multiple links that should be cleaned out
    because they're just there to pollute the space. See tests for examples.
    """
    if node.tag != "a":
        return False
    name_attr = node.get("name")
    href_attr = node.get("href")
    # A named anchor with no destination is navigation pollution.
    if name_attr and not href_attr:
        return True
    if href_attr:
        # A very long URL fragment is a strong spam/navigation signal.
        pieces = href_attr.split("#")
        if len(pieces) == 2 and len(pieces[1]) > 25:
            return True
    return False
class Article(object):
    """A parsed, readability-cleaned representation of an HTML document.

    Wraps the raw HTML in an ``OriginalDocument`` and lazily (via
    ``cached_property``) exposes the cleaned DOM, the scored candidate
    nodes, and the final readable markup.
    """
    def __init__(self, html, url=None, return_fragment=True):
        """
        Create the Article we're going to use.
        :param html: The string of HTML we're going to parse.
        :param url: The url so we can adjust the links to still work.
        :param return_fragment: Should we return a <div> fragment or
            a full <html> document.
        """
        self._original_document = OriginalDocument(html, url=url)
        self._return_fragment = return_fragment
    def __str__(self):
        # Byte-string serialization of the readable article.
        return tostring(self._readable())
    def __unicode__(self):
        # Text serialization hook (Python 2 compatibility).
        return tounicode(self._readable())
    @cached_property
    def dom(self):
        """Parsed lxml tree (Document Object Model) of the given html."""
        try:
            dom = self._original_document.dom
            # cleaning doesn't return, just wipes in place
            html_cleaner(dom)
            return leaf_div_elements_into_paragraphs(dom)
        except ValueError:
            # Unparseable input; callers treat a None DOM as "no document".
            return None
    @cached_property
    def candidates(self):
        """Generates list of candidates from the DOM."""
        dom = self.dom
        if dom is None or len(dom) == 0:
            return None
        candidates, unlikely_candidates = find_candidates(dom)
        # Side effect: unlikely nodes are dropped from the cached tree here.
        drop_nodes_with_parents(unlikely_candidates)
        return candidates
    @cached_property
    def main_text(self):
        # Deep-copy so annotation parsing cannot mutate the cached DOM.
        dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
        return AnnotatedTextHandler.parse(dom)
    @cached_property
    def readable(self):
        # Unicode serialization of the readable DOM.
        return tounicode(self.readable_dom)
    @cached_property
    def readable_dom(self):
        # Cached so repeated access does not redo candidate selection.
        return self._readable()
    def _readable(self):
        """The readable parsed article"""
        if not self.candidates:
            logger.info("No candidates found in document.")
            return self._handle_no_candidates()
        # right now we return the highest scoring candidate content
        best_candidates = sorted(
            (c for c in self.candidates.values()),
            key=attrgetter("content_score"), reverse=True)
        printer = PrettyPrinter(indent=2)
        logger.debug(printer.pformat(best_candidates))
        # since we have several candidates, check the winner's siblings
        # for extra content
        winner = best_candidates[0]
        updated_winner = check_siblings(winner, self.candidates)
        updated_winner.node = prep_article(updated_winner.node)
        if updated_winner.node is not None:
            dom = build_base_document(
                updated_winner.node, self._return_fragment)
        else:
            logger.info(
                'Had candidates but failed to find a cleaned winning DOM.')
            dom = self._handle_no_candidates()
        return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
    def _remove_orphans(self, dom):
        # Flatten wrappers that hold a single child of the same tag,
        # e.g. <div><div>...</div></div> collapses to one <div>.
        for node in dom.iterdescendants():
            if len(node) == 1 and tuple(node)[0].tag == node.tag:
                node.drop_tag()
        return dom
    def _handle_no_candidates(self):
        """
        If we fail to find a good candidate we need to find something else.
        """
        # since we've not found a good candidate we're should help this
        if self.dom is not None and len(self.dom):
            dom = prep_article(self.dom)
            dom = build_base_document(dom, self._return_fragment)
            return self._remove_orphans(
                dom.get_element_by_id("readabilityBody"))
        else:
            logger.info("No document to use.")
            # NOTE(review): build_error_document(dom, return_fragment) is
            # called with self._return_fragment in the *dom* slot, so
            # return_fragment always keeps its default — confirm intent.
            return build_error_document(self._return_fragment)
def leaf_div_elements_into_paragraphs(document):
    """
    Retag <div> elements without block-level children as <p> elements.

    A div that contains no nested <div> or <p> is effectively a
    paragraph, so converting it lets the scoring phase treat it as one.
    The tree is mutated in place; the same document object is returned.
    This runs up front because the tree must not change shape while the
    later passes iterate over it.
    """
    block_level = {"div", "p"}
    for div in document.iter(tag="div"):
        has_block_child = any(
            child.tag in block_level for child in div.getchildren())
        if not has_block_child:
            logger.debug(
                "Changing leaf block element <%s> into <p>", div.tag)
            div.tag = "p"
    return document
|
bookieio/breadability | breadability/readable.py | find_candidates | python | def find_candidates(document):
nodes_to_score = set()
should_remove = set()
for node in document.iter():
if is_unlikely_node(node):
logger.debug(
"We should drop unlikely: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif is_bad_link(node):
logger.debug(
"We should drop bad link: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif node.tag in SCORABLE_TAGS:
nodes_to_score.add(node)
return score_candidates(nodes_to_score), should_remove | Finds cadidate nodes for the readable version of the article.
Here's we're going to remove unlikely nodes, find scores on the rest,
clean up and return the final best match. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L305-L327 | [
"def is_unlikely_node(node):\n \"\"\"\n Short helper for checking unlikely status.\n\n If the class or id are in the unlikely list, and there's not also a\n class/id in the likely list then it might need to be removed.\n \"\"\"\n unlikely = check_node_attributes(CLS_UNLIKELY, node, \"class\", \"id... | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def ok_embedded_video(node):
    """Return True when this embed/video node points at a known host.

    Only embeds whose serialized markup mentions one of the whitelisted
    video providers count as meaningful content.
    """
    markup = tounicode(node)
    return any(host in markup for host in ('youtube', 'blip.tv', 'vimeo'))
def build_base_document(dom, return_fragment=True):
    """
    Builds a base document with the body as root.
    :param dom: Parsed lxml tree (Document Object Model).
    :param bool return_fragment: If True only <div> fragment is returned.
        Otherwise full HTML document is returned.
    :return: the readable container, per ``document_from_fragment``.
    """
    body_element = dom.find(".//body")
    if body_element is None:
        # No <body>: wrap the whole tree in our own container div.
        fragment = fragment_fromstring('<div id="readabilityBody"/>')
        fragment.append(dom)
    else:
        # Reuse the existing <body> by demoting it to the container div.
        body_element.tag = "div"
        body_element.set("id", "readabilityBody")
        fragment = body_element
    return document_from_fragment(fragment, return_fragment)
def build_error_document(dom, return_fragment=True):
    """
    Builds an empty error document with the body as root.
    :param dom: Unused; kept for signature parity with
        ``build_base_document``.
    :param bool return_fragment: If True only <div> fragment is returned.
        Otherwise full HTML document is returned.
    """
    # NOTE(review): the in-module caller passes its return_fragment flag
    # positionally into the *dom* slot, so return_fragment stays at its
    # default here — confirm whether that is intended.
    fragment = fragment_fromstring(
        '<div id="readabilityBody" class="parsing-error"/>')
    return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
    """Return the bare fragment or a full HTML page wrapping it.

    When *return_fragment* is falsy, the fragment is appended to the
    <body> of a fresh skeleton document.  Either way the result gets an
    HTML5 doctype attached before being returned.
    """
    if not return_fragment:
        page = fromstring(NULL_DOCUMENT)
        page.find(".//body").append(fragment)
        result = page
    else:
        result = fragment
    result.doctype = "<!DOCTYPE html>"
    return result
def check_siblings(candidate_node, candidate_list):
    """
    Looks through siblings for content that might also be related.
    Things like preambles, content split by ads that we removed, etc.
    :param candidate_node: the winning scored-candidate wrapper.
    :param candidate_list: mapping of node -> scored candidate for the
        whole document.
    :return: candidate_node, possibly with sibling nodes appended.
    """
    candidate_css = candidate_node.node.get("class")
    # A sibling needs at least 20% of the winner's score (minimum 10)
    # to be pulled in on score alone.
    potential_target = candidate_node.content_score * 0.2
    sibling_target_score = potential_target if potential_target > 10 else 10
    parent = candidate_node.node.getparent()
    siblings = parent.getchildren() if parent is not None else []
    for sibling in siblings:
        append = False
        content_bonus = 0
        if sibling is candidate_node.node:
            append = True
        # Give a bonus when the sibling shares the top candidate's
        # class name.
        if candidate_css and sibling.get("class") == candidate_css:
            content_bonus += candidate_node.content_score * 0.2
        if sibling in candidate_list:
            adjusted_score = \
                candidate_list[sibling].content_score + content_bonus
            if adjusted_score >= sibling_target_score:
                append = True
        if sibling.tag == "p":
            # Unscored plain paragraphs can still get in on
            # length/link-density heuristics.
            link_density = get_link_density(sibling)
            content = sibling.text_content()
            content_length = len(content)
            if content_length > 80 and link_density < 0.25:
                append = True
            elif content_length < 80 and link_density == 0:
                # Short but link-free text counts if it reads like a
                # real sentence.
                if ". " in content:
                    append = True
        if append:
            logger.debug(
                "Sibling appended: %s %r", sibling.tag, sibling.attrib)
            if sibling.tag not in ("div", "p"):
                # We have a node that isn't a common block level element, like
                # a form or td tag. Turn it into a div so it doesn't get
                # filtered out later by accident.
                sibling.tag = "div"
            if candidate_node.node != sibling:
                candidate_node.node.append(sibling)
    return candidate_node
def clean_document(node):
    """Cleans up the final document we return as the readable article.

    Strips inline styles, non-whitelisted embeds, weak headings and
    empty block elements, then applies the conditional cleaning rules.
    Mutates the tree in place and returns it, or None when there is
    nothing to clean.
    """
    if node is None or len(node) == 0:
        return None
    logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
    # Collect first, drop after the walk: the tree must not change
    # shape while node.iter() is running.
    to_drop = []
    for n in node.iter():
        # clean out any in-line style properties
        if "style" in n.attrib:
            n.set("style", "")
        # remove embedded objects unless it's a whitelisted video
        if n.tag in ("object", "embed") and not ok_embedded_video(n):
            logger.debug("Dropping node %s %r", n.tag, n.attrib)
            to_drop.append(n)
        # clean headings with bad css or high link density
        if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        # drop block element without content and children
        if n.tag in ("div", "p"):
            text_content = shrink_text(n.text_content())
            if len(text_content) < 5 and not n.getchildren():
                logger.debug(
                    "Dropping %s %r without content.", n.tag, n.attrib)
                to_drop.append(n)
        # finally try out the conditional cleaning of the target node
        if clean_conditionally(n):
            to_drop.append(n)
    drop_nodes_with_parents(to_drop)
    return node
def drop_nodes_with_parents(nodes):
    """Remove every still-attached node in *nodes* from its tree.

    Nodes that were already detached (no parent) are skipped; the rest
    are removed together with their descendants via drop_tree().
    """
    for candidate in nodes:
        if candidate.getparent() is not None:
            candidate.drop_tree()
            logger.debug(
                "Dropped node with parent %s %r %s",
                candidate.tag,
                candidate.attrib,
                candidate.text_content()[:50]
            )
def clean_conditionally(node):
    """Remove the clean_el if it looks like bad content based on rules.

    :return: True when the node should be dropped, False to keep it,
        and implicitly None for tags the rules don't cover — callers
        only test the result for truthiness.
    """
    if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
        return  # this is not the tag we are looking for
    weight = get_class_weight(node)
    # content_score = LOOK up the content score for this node we found
    # before else default to 0
    content_score = 0
    if weight + content_score < 0:
        logger.debug('Dropping conditional node')
        logger.debug('Weight + score < 0')
        return True
    commas_count = node.text_content().count(',')
    if commas_count < 10:
        logger.debug(
            "There are %d commas so we're processing more.", commas_count)
        # If there are not very many commas, and the number of
        # non-paragraph elements is more than paragraphs or other ominous
        # signs, remove the element.
        p = len(node.findall('.//p'))
        img = len(node.findall('.//img'))
        # NOTE(review): the -100 offset means the li > p rule only fires
        # for lists with 100+ more items than paragraphs — confirm this
        # fudge factor is intentional.
        li = len(node.findall('.//li')) - 100
        inputs = len(node.findall('.//input'))
        # Count only embeds that point at whitelisted video hosts.
        embed = 0
        embeds = node.findall('.//embed')
        for e in embeds:
            if ok_embedded_video(e):
                embed += 1
        link_density = get_link_density(node)
        content_length = len(node.text_content())
        remove_node = False
        if li > p and node.tag != 'ul' and node.tag != 'ol':
            logger.debug('Conditional drop: li > p and not ul/ol')
            remove_node = True
        elif inputs > p / 3.0:
            # Form-heavy relative to its prose.
            logger.debug('Conditional drop: inputs > p/3.0')
            remove_node = True
        elif content_length < 25 and (img == 0 or img > 2):
            logger.debug('Conditional drop: len < 25 and 0/>2 images')
            remove_node = True
        elif weight < 25 and link_density > 0.2:
            logger.debug('Conditional drop: weight small (%f) and link is dense (%f)', weight, link_density)
            remove_node = True
        elif weight >= 25 and link_density > 0.5:
            logger.debug('Conditional drop: weight big but link heavy')
            remove_node = True
        elif (embed == 1 and content_length < 75) or embed > 1:
            logger.debug(
                'Conditional drop: embed w/o much content or many embed')
            remove_node = True
        if remove_node:
            logger.debug('Node will be removed: %s %r %s', node.tag, node.attrib, node.text_content()[:30])
        return remove_node
    return False  # nope, don't remove anything
def prep_article(doc):
    """Once we've found our target article we want to clean it up.

    Delegates to ``clean_document``, which cleans out:
    - inline styles
    - forms
    - empty <p> elements
    - extra/unwanted tags
    """
    return clean_document(doc)
def is_bad_link(node):
"""
Helper to determine if the node is link that is useless.
We've hit articles with many multiple links that should be cleaned out
because they're just there to pollute the space. See tests for examples.
"""
if node.tag != "a":
return False
name = node.get("name")
href = node.get("href")
if name and not href:
return True
if href:
href_parts = href.split("#")
if len(href_parts) == 2 and len(href_parts[1]) > 25:
return True
return False
class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
def leaf_div_elements_into_paragraphs(document):
"""
Turn some block elements that don't have children block level
elements into <p> elements.
Since we can't change the tree as we iterate over it, we must do this
before we process our document.
"""
for element in document.iter(tag="div"):
child_tags = tuple(n.tag for n in element.getchildren())
if "div" not in child_tags and "p" not in child_tags:
logger.debug(
"Changing leaf block element <%s> into <p>", element.tag)
element.tag = "p"
return document
|
bookieio/breadability | breadability/readable.py | is_bad_link | python | def is_bad_link(node):
if node.tag != "a":
return False
name = node.get("name")
href = node.get("href")
if name and not href:
return True
if href:
href_parts = href.split("#")
if len(href_parts) == 2 and len(href_parts[1]) > 25:
return True
return False | Helper to determine if the node is link that is useless.
We've hit articles with many multiple links that should be cleaned out
because they're just there to pollute the space. See tests for examples. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L330-L350 | null | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def ok_embedded_video(node):
"""Check if this embed/video is an ok one to count."""
good_keywords = ('youtube', 'blip.tv', 'vimeo')
node_str = tounicode(node)
for key in good_keywords:
if key in node_str:
return True
return False
def build_base_document(dom, return_fragment=True):
"""
Builds a base document with the body as root.
:param dom: Parsed lxml tree (Document Object Model).
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
body_element = dom.find(".//body")
if body_element is None:
fragment = fragment_fromstring('<div id="readabilityBody"/>')
fragment.append(dom)
else:
body_element.tag = "div"
body_element.set("id", "readabilityBody")
fragment = body_element
return document_from_fragment(fragment, return_fragment)
def build_error_document(dom, return_fragment=True):
"""
Builds an empty erorr document with the body as root.
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
fragment = fragment_fromstring(
'<div id="readabilityBody" class="parsing-error"/>')
return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
if return_fragment:
document = fragment
else:
document = fromstring(NULL_DOCUMENT)
body_element = document.find(".//body")
body_element.append(fragment)
document.doctype = "<!DOCTYPE html>"
return document
def check_siblings(candidate_node, candidate_list):
"""
Looks through siblings for content that might also be related.
Things like preambles, content split by ads that we removed, etc.
"""
candidate_css = candidate_node.node.get("class")
potential_target = candidate_node.content_score * 0.2
sibling_target_score = potential_target if potential_target > 10 else 10
parent = candidate_node.node.getparent()
siblings = parent.getchildren() if parent is not None else []
for sibling in siblings:
append = False
content_bonus = 0
if sibling is candidate_node.node:
append = True
# Give a bonus if sibling nodes and top candidates have the example
# same class name
if candidate_css and sibling.get("class") == candidate_css:
content_bonus += candidate_node.content_score * 0.2
if sibling in candidate_list:
adjusted_score = \
candidate_list[sibling].content_score + content_bonus
if adjusted_score >= sibling_target_score:
append = True
if sibling.tag == "p":
link_density = get_link_density(sibling)
content = sibling.text_content()
content_length = len(content)
if content_length > 80 and link_density < 0.25:
append = True
elif content_length < 80 and link_density == 0:
if ". " in content:
append = True
if append:
logger.debug(
"Sibling appended: %s %r", sibling.tag, sibling.attrib)
if sibling.tag not in ("div", "p"):
# We have a node that isn't a common block level element, like
# a form or td tag. Turn it into a div so it doesn't get
# filtered out later by accident.
sibling.tag = "div"
if candidate_node.node != sibling:
candidate_node.node.append(sibling)
return candidate_node
def clean_document(node):
"""Cleans up the final document we return as the readable article."""
if node is None or len(node) == 0:
return None
logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
to_drop = []
for n in node.iter():
# clean out any in-line style properties
if "style" in n.attrib:
n.set("style", "")
# remove embended objects unless it's wanted video
if n.tag in ("object", "embed") and not ok_embedded_video(n):
logger.debug("Dropping node %s %r", n.tag, n.attrib)
to_drop.append(n)
# clean headings with bad css or high link density
if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
logger.debug("Dropping <%s>, it's insignificant", n.tag)
to_drop.append(n)
# drop block element without content and children
if n.tag in ("div", "p"):
text_content = shrink_text(n.text_content())
if len(text_content) < 5 and not n.getchildren():
logger.debug(
"Dropping %s %r without content.", n.tag, n.attrib)
to_drop.append(n)
# finally try out the conditional cleaning of the target node
if clean_conditionally(n):
to_drop.append(n)
drop_nodes_with_parents(to_drop)
return node
def drop_nodes_with_parents(nodes):
for node in nodes:
if node.getparent() is None:
continue
node.drop_tree()
logger.debug(
"Dropped node with parent %s %r %s",
node.tag,
node.attrib,
node.text_content()[:50]
)
def clean_conditionally(node):
"""Remove the clean_el if it looks like bad content based on rules."""
if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
return # this is not the tag we are looking for
weight = get_class_weight(node)
# content_score = LOOK up the content score for this node we found
# before else default to 0
content_score = 0
if weight + content_score < 0:
logger.debug('Dropping conditional node')
logger.debug('Weight + score < 0')
return True
commas_count = node.text_content().count(',')
if commas_count < 10:
logger.debug(
"There are %d commas so we're processing more.", commas_count)
# If there are not very many commas, and the number of
# non-paragraph elements is more than paragraphs or other ominous
# signs, remove the element.
p = len(node.findall('.//p'))
img = len(node.findall('.//img'))
li = len(node.findall('.//li')) - 100
inputs = len(node.findall('.//input'))
embed = 0
embeds = node.findall('.//embed')
for e in embeds:
if ok_embedded_video(e):
embed += 1
link_density = get_link_density(node)
content_length = len(node.text_content())
remove_node = False
if li > p and node.tag != 'ul' and node.tag != 'ol':
logger.debug('Conditional drop: li > p and not ul/ol')
remove_node = True
elif inputs > p / 3.0:
logger.debug('Conditional drop: inputs > p/3.0')
remove_node = True
elif content_length < 25 and (img == 0 or img > 2):
logger.debug('Conditional drop: len < 25 and 0/>2 images')
remove_node = True
elif weight < 25 and link_density > 0.2:
logger.debug('Conditional drop: weight small (%f) and link is dense (%f)', weight, link_density)
remove_node = True
elif weight >= 25 and link_density > 0.5:
logger.debug('Conditional drop: weight big but link heavy')
remove_node = True
elif (embed == 1 and content_length < 75) or embed > 1:
logger.debug(
'Conditional drop: embed w/o much content or many embed')
remove_node = True
if remove_node:
logger.debug('Node will be removed: %s %r %s', node.tag, node.attrib, node.text_content()[:30])
return remove_node
return False # nope, don't remove anything
def prep_article(doc):
"""Once we've found our target article we want to clean it up.
Clean out:
- inline styles
- forms
- strip empty <p>
- extra tags
"""
return clean_document(doc)
def find_candidates(document):
"""
Finds cadidate nodes for the readable version of the article.
Here's we're going to remove unlikely nodes, find scores on the rest,
clean up and return the final best match.
"""
nodes_to_score = set()
should_remove = set()
for node in document.iter():
if is_unlikely_node(node):
logger.debug(
"We should drop unlikely: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif is_bad_link(node):
logger.debug(
"We should drop bad link: %s %r", node.tag, node.attrib)
should_remove.add(node)
elif node.tag in SCORABLE_TAGS:
nodes_to_score.add(node)
return score_candidates(nodes_to_score), should_remove
class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
def leaf_div_elements_into_paragraphs(document):
"""
Turn some block elements that don't have children block level
elements into <p> elements.
Since we can't change the tree as we iterate over it, we must do this
before we process our document.
"""
for element in document.iter(tag="div"):
child_tags = tuple(n.tag for n in element.getchildren())
if "div" not in child_tags and "p" not in child_tags:
logger.debug(
"Changing leaf block element <%s> into <p>", element.tag)
element.tag = "p"
return document
|
bookieio/breadability | breadability/readable.py | leaf_div_elements_into_paragraphs | python | def leaf_div_elements_into_paragraphs(document):
for element in document.iter(tag="div"):
child_tags = tuple(n.tag for n in element.getchildren())
if "div" not in child_tags and "p" not in child_tags:
logger.debug(
"Changing leaf block element <%s> into <p>", element.tag)
element.tag = "p"
return document | Turn some block elements that don't have children block level
elements into <p> elements.
Since we can't change the tree as we iterate over it, we must do this
before we process our document. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L461-L476 | null | # -*- coding: utf8 -*-
from __future__ import absolute_import
import logging
from copy import deepcopy
from operator import attrgetter
from pprint import PrettyPrinter
from lxml.html.clean import Cleaner
from lxml.etree import tounicode, tostring
from lxml.html import fragment_fromstring, fromstring
from .document import OriginalDocument
from .annotated_text import AnnotatedTextHandler
from .scoring import (
get_class_weight,
get_link_density,
is_unlikely_node,
score_candidates,
)
from .utils import cached_property, shrink_text
html_cleaner = Cleaner(
scripts=True, javascript=True, comments=True,
style=True, links=True, meta=False, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=False, frames=False, forms=False,
annoying_tags=False, remove_tags=None, kill_tags=("noscript", "iframe"),
remove_unknown_tags=False, safe_attrs_only=False)
SCORABLE_TAGS = ("div", "p", "td", "pre", "article")
ANNOTATION_TAGS = (
"a", "abbr", "acronym", "b", "big", "blink", "blockquote", "br", "cite",
"code", "dd", "del", "dir", "dl", "dt", "em", "font", "h", "h1", "h2",
"h3", "h4", "h5", "h6", "hr", "i", "ins", "kbd", "li", "marquee", "menu",
"ol", "p", "pre", "q", "s", "samp", "span", "strike", "strong", "sub",
"sup", "tt", "u", "ul", "var",
)
NULL_DOCUMENT = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
</head>
<body>
</body>
</html>
"""
logger = logging.getLogger("breadability")
def ok_embedded_video(node):
"""Check if this embed/video is an ok one to count."""
good_keywords = ('youtube', 'blip.tv', 'vimeo')
node_str = tounicode(node)
for key in good_keywords:
if key in node_str:
return True
return False
def build_base_document(dom, return_fragment=True):
"""
Builds a base document with the body as root.
:param dom: Parsed lxml tree (Document Object Model).
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
body_element = dom.find(".//body")
if body_element is None:
fragment = fragment_fromstring('<div id="readabilityBody"/>')
fragment.append(dom)
else:
body_element.tag = "div"
body_element.set("id", "readabilityBody")
fragment = body_element
return document_from_fragment(fragment, return_fragment)
def build_error_document(dom, return_fragment=True):
"""
Builds an empty erorr document with the body as root.
:param bool return_fragment: If True only <div> fragment is returned.
Otherwise full HTML document is returned.
"""
fragment = fragment_fromstring(
'<div id="readabilityBody" class="parsing-error"/>')
return document_from_fragment(fragment, return_fragment)
def document_from_fragment(fragment, return_fragment):
    """Return the bare fragment, or a full HTML document wrapping it."""
    if not return_fragment:
        document = fromstring(NULL_DOCUMENT)
        # Splice the fragment into the skeleton's body and mark it HTML5.
        document.find(".//body").append(fragment)
        document.doctype = "<!DOCTYPE html>"
        return document
    return fragment
def check_siblings(candidate_node, candidate_list):
    """
    Looks through siblings for content that might also be related.

    Things like preambles, content split by ads that we removed, etc.

    :param candidate_node: Winning scored candidate; its node is extended
        in place with any qualifying siblings.
    :param candidate_list: Mapping of DOM node -> scored candidate used to
        look up sibling scores.
    :returns: The same ``candidate_node``, possibly with siblings appended.
    """
    candidate_css = candidate_node.node.get("class")
    # A sibling qualifies when it scores at least 20% of the winner's
    # score, with a floor of 10.
    potential_target = candidate_node.content_score * 0.2
    sibling_target_score = potential_target if potential_target > 10 else 10
    parent = candidate_node.node.getparent()
    siblings = parent.getchildren() if parent is not None else []
    for sibling in siblings:
        append = False
        content_bonus = 0
        # The winner itself always stays.
        if sibling is candidate_node.node:
            append = True
        # Give a bonus if the sibling and the top candidate share the
        # same class name.
        if candidate_css and sibling.get("class") == candidate_css:
            content_bonus += candidate_node.content_score * 0.2
        if sibling in candidate_list:
            adjusted_score = \
                candidate_list[sibling].content_score + content_bonus
            if adjusted_score >= sibling_target_score:
                append = True
        # Plain paragraphs get a second chance based on length and how
        # link-heavy they are.
        if sibling.tag == "p":
            link_density = get_link_density(sibling)
            content = sibling.text_content()
            content_length = len(content)
            if content_length > 80 and link_density < 0.25:
                append = True
            elif content_length < 80 and link_density == 0:
                # Short, link-free text containing a sentence break.
                if ". " in content:
                    append = True
        if append:
            logger.debug(
                "Sibling appended: %s %r", sibling.tag, sibling.attrib)
            if sibling.tag not in ("div", "p"):
                # We have a node that isn't a common block level element, like
                # a form or td tag. Turn it into a div so it doesn't get
                # filtered out later by accident.
                sibling.tag = "div"
            if candidate_node.node != sibling:
                candidate_node.node.append(sibling)
    return candidate_node
def clean_document(node):
    """Cleans up the final document we return as the readable article.

    :param node: Root element of the candidate article content.
    :returns: The same node, cleaned in place, or None for empty input.
    """
    if node is None or len(node) == 0:
        return None
    logger.debug("\n\n-------------- CLEANING DOCUMENT -----------------")
    to_drop = []
    for n in node.iter():
        # clean out any in-line style properties
        if "style" in n.attrib:
            n.set("style", "")
        # remove embedded objects unless it's a wanted video
        if n.tag in ("object", "embed") and not ok_embedded_video(n):
            logger.debug("Dropping node %s %r", n.tag, n.attrib)
            to_drop.append(n)
        # clean headings with bad css or high link density
        if n.tag in ("h1", "h2", "h3", "h4") and get_class_weight(n) < 0:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        if n.tag in ("h3", "h4") and get_link_density(n) > 0.33:
            logger.debug("Dropping <%s>, it's insignificant", n.tag)
            to_drop.append(n)
        # drop block element without content and children
        if n.tag in ("div", "p"):
            text_content = shrink_text(n.text_content())
            if len(text_content) < 5 and not n.getchildren():
                logger.debug(
                    "Dropping %s %r without content.", n.tag, n.attrib)
                to_drop.append(n)
        # finally try out the conditional cleaning of the target node
        if clean_conditionally(n):
            to_drop.append(n)
    # Removal is deferred so the iterator above stays valid during the walk.
    drop_nodes_with_parents(to_drop)
    return node
def drop_nodes_with_parents(nodes):
    """Detach every still-attached node in *nodes*, logging what was removed."""
    for element in nodes:
        if element.getparent() is None:
            # Already detached (or the tree root); nothing to drop.
            continue
        element.drop_tree()
        logger.debug(
            "Dropped node with parent %s %r %s",
            element.tag,
            element.attrib,
            element.text_content()[:50]
        )
def clean_conditionally(node):
    """Remove the clean_el if it looks like bad content based on rules.

    :param node: Element to evaluate.
    :returns: True when the caller should drop the node; otherwise a falsy
        value (bare ``return`` for non-candidate tags, else False).
    """
    if node.tag not in ('form', 'table', 'ul', 'div', 'p'):
        return  # this is not the tag we are looking for
    weight = get_class_weight(node)
    # content_score = LOOK up the content score for this node we found
    # before else default to 0
    content_score = 0
    if weight + content_score < 0:
        logger.debug('Dropping conditional node')
        logger.debug('Weight + score < 0')
        return True
    commas_count = node.text_content().count(',')
    if commas_count < 10:
        logger.debug(
            "There are %d commas so we're processing more.", commas_count)
        # If there are not very many commas, and the number of
        # non-paragraph elements is more than paragraphs or other ominous
        # signs, remove the element.
        p = len(node.findall('.//p'))
        img = len(node.findall('.//img'))
        # NOTE(review): the -100 offset effectively neutralizes the li > p
        # test below for all but enormous lists; it looks inherited from
        # other readability ports -- confirm it is intentional.
        li = len(node.findall('.//li')) - 100
        inputs = len(node.findall('.//input'))
        # Only whitelisted videos count as embeds here.
        embed = 0
        embeds = node.findall('.//embed')
        for e in embeds:
            if ok_embedded_video(e):
                embed += 1
        link_density = get_link_density(node)
        content_length = len(node.text_content())
        remove_node = False
        if li > p and node.tag != 'ul' and node.tag != 'ol':
            logger.debug('Conditional drop: li > p and not ul/ol')
            remove_node = True
        elif inputs > p / 3.0:
            logger.debug('Conditional drop: inputs > p/3.0')
            remove_node = True
        elif content_length < 25 and (img == 0 or img > 2):
            logger.debug('Conditional drop: len < 25 and 0/>2 images')
            remove_node = True
        elif weight < 25 and link_density > 0.2:
            logger.debug('Conditional drop: weight small (%f) and link is dense (%f)', weight, link_density)
            remove_node = True
        elif weight >= 25 and link_density > 0.5:
            logger.debug('Conditional drop: weight big but link heavy')
            remove_node = True
        elif (embed == 1 and content_length < 75) or embed > 1:
            logger.debug(
                'Conditional drop: embed w/o much content or many embed')
            remove_node = True
        if remove_node:
            logger.debug('Node will be removed: %s %r %s', node.tag, node.attrib, node.text_content()[:30])
        return remove_node
    return False  # nope, don't remove anything
def prep_article(doc):
    """Clean up the chosen article node before it is returned.

    Delegates to :func:`clean_document`, which strips inline styles,
    insignificant headings, empty blocks and other clutter.
    """
    return clean_document(doc)
def find_candidates(document):
    """
    Finds candidate nodes for the readable version of the article.

    Unlikely nodes and useless links are collected for removal, while the
    remaining scorable elements are handed off for scoring.

    :returns: ``(scored_candidates, nodes_to_remove)``
    """
    scorable = set()
    droppable = set()
    for element in document.iter():
        if is_unlikely_node(element):
            logger.debug(
                "We should drop unlikely: %s %r", element.tag, element.attrib)
            droppable.add(element)
        elif is_bad_link(element):
            logger.debug(
                "We should drop bad link: %s %r", element.tag, element.attrib)
            droppable.add(element)
        elif element.tag in SCORABLE_TAGS:
            scorable.add(element)
    return score_candidates(scorable), droppable
def is_bad_link(node):
    """
    Helper to determine if the node is a useless link.

    We've hit articles with many links that should be cleaned out because
    they're only there to pollute the space (pure named anchors, or links
    whose only purpose is a very long fragment identifier).
    """
    if node.tag != "a":
        return False
    href = node.get("href")
    # A named anchor with no destination carries no content.
    if node.get("name") and not href:
        return True
    if href:
        _, sep, anchor = href.partition("#")
        # Exactly one '#' with an overly long fragment is a bad link.
        if sep and "#" not in anchor and len(anchor) > 25:
            return True
    return False
class Article(object):
    """A parsed document exposing readable ("reader mode") views of its HTML."""

    def __init__(self, html, url=None, return_fragment=True):
        """
        Create the Article we're going to use.

        :param html: The string of HTML we're going to parse.
        :param url: The url so we can adjust the links to still work.
        :param return_fragment: Should we return a <div> fragment or
            a full <html> document.
        """
        self._original_document = OriginalDocument(html, url=url)
        self._return_fragment = return_fragment

    def __str__(self):
        return tostring(self._readable())

    def __unicode__(self):
        return tounicode(self._readable())

    @cached_property
    def dom(self):
        """Parsed lxml tree (Document Object Model) of the given html."""
        try:
            dom = self._original_document.dom
            # cleaning doesn't return, just wipes in place
            html_cleaner(dom)
            return leaf_div_elements_into_paragraphs(dom)
        except ValueError:
            # Source HTML could not be parsed at all.
            return None

    @cached_property
    def candidates(self):
        """Mapping of DOM node -> scored candidate, or None without a DOM."""
        dom = self.dom
        if dom is None or len(dom) == 0:
            return None
        candidates, unlikely_candidates = find_candidates(dom)
        drop_nodes_with_parents(unlikely_candidates)
        return candidates

    @cached_property
    def main_text(self):
        """Annotated text paragraphs extracted from the readable DOM."""
        dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
        return AnnotatedTextHandler.parse(dom)

    @cached_property
    def readable(self):
        """The readable article rendered as a unicode string."""
        return tounicode(self.readable_dom)

    @cached_property
    def readable_dom(self):
        """The readable article as a DOM tree (cached)."""
        return self._readable()

    def _readable(self):
        """Build and return the readable parsed article DOM."""
        if not self.candidates:
            logger.info("No candidates found in document.")
            return self._handle_no_candidates()

        # right now we return the highest scoring candidate content
        best_candidates = sorted(
            self.candidates.values(),
            key=attrgetter("content_score"), reverse=True)

        printer = PrettyPrinter(indent=2)
        logger.debug(printer.pformat(best_candidates))

        # since we have several candidates, check the winner's siblings
        # for extra content
        winner = best_candidates[0]
        updated_winner = check_siblings(winner, self.candidates)
        updated_winner.node = prep_article(updated_winner.node)
        if updated_winner.node is not None:
            dom = build_base_document(
                updated_winner.node, self._return_fragment)
        else:
            logger.info(
                'Had candidates but failed to find a cleaned winning DOM.')
            dom = self._handle_no_candidates()

        return self._remove_orphans(dom.get_element_by_id("readabilityBody"))

    def _remove_orphans(self, dom):
        """Collapse wrappers whose single child has the same tag as they do."""
        for node in dom.iterdescendants():
            if len(node) == 1 and tuple(node)[0].tag == node.tag:
                node.drop_tag()
        return dom

    def _handle_no_candidates(self):
        """
        If we fail to find a good candidate we need to find something else.
        """
        # With no scored candidate, fall back to cleaning the whole document.
        if self.dom is not None and len(self.dom):
            dom = prep_article(self.dom)
            dom = build_base_document(dom, self._return_fragment)
            return self._remove_orphans(
                dom.get_element_by_id("readabilityBody"))
        else:
            logger.info("No document to use.")
            return build_error_document(self._return_fragment)
|
bookieio/breadability | breadability/readable.py | Article.dom | python | def dom(self):
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None | Parsed lxml tree (Document Object Model) of the given html. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L375-L383 | [
"def leaf_div_elements_into_paragraphs(document):\n \"\"\"\n Turn some block elements that don't have children block level\n elements into <p> elements.\n\n Since we can't change the tree as we iterate over it, we must do this\n before we process our document.\n \"\"\"\n for element in document... | class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
|
bookieio/breadability | breadability/readable.py | Article.candidates | python | def candidates(self):
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates | Generates list of candidates from the DOM. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L386-L395 | [
"def drop_nodes_with_parents(nodes):\n for node in nodes:\n if node.getparent() is None:\n continue\n\n node.drop_tree()\n logger.debug(\n \"Dropped node with parent %s %r %s\",\n node.tag,\n node.attrib,\n node.text_content()[:50]\n ... | class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
|
bookieio/breadability | breadability/readable.py | Article._readable | python | def _readable(self):
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody")) | The readable parsed article | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L410-L437 | [
"def build_base_document(dom, return_fragment=True):\n \"\"\"\n Builds a base document with the body as root.\n\n :param dom: Parsed lxml tree (Document Object Model).\n :param bool return_fragment: If True only <div> fragment is returned.\n Otherwise full HTML document is returned.\n \"\"\"\n... | class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
def _handle_no_candidates(self):
"""
If we fail to find a good candidate we need to find something else.
"""
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment)
|
bookieio/breadability | breadability/readable.py | Article._handle_no_candidates | python | def _handle_no_candidates(self):
# since we've not found a good candidate we're should help this
if self.dom is not None and len(self.dom):
dom = prep_article(self.dom)
dom = build_base_document(dom, self._return_fragment)
return self._remove_orphans(
dom.get_element_by_id("readabilityBody"))
else:
logger.info("No document to use.")
return build_error_document(self._return_fragment) | If we fail to find a good candidate we need to find something else. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/readable.py#L446-L458 | null | class Article(object):
"""Parsed readable object"""
def __init__(self, html, url=None, return_fragment=True):
"""
Create the Article we're going to use.
:param html: The string of HTML we're going to parse.
:param url: The url so we can adjust the links to still work.
:param return_fragment: Should we return a <div> fragment or
a full <html> document.
"""
self._original_document = OriginalDocument(html, url=url)
self._return_fragment = return_fragment
def __str__(self):
return tostring(self._readable())
def __unicode__(self):
return tounicode(self._readable())
@cached_property
def dom(self):
"""Parsed lxml tree (Document Object Model) of the given html."""
try:
dom = self._original_document.dom
# cleaning doesn't return, just wipes in place
html_cleaner(dom)
return leaf_div_elements_into_paragraphs(dom)
except ValueError:
return None
@cached_property
def candidates(self):
"""Generates list of candidates from the DOM."""
dom = self.dom
if dom is None or len(dom) == 0:
return None
candidates, unlikely_candidates = find_candidates(dom)
drop_nodes_with_parents(unlikely_candidates)
return candidates
@cached_property
def main_text(self):
dom = deepcopy(self.readable_dom).get_element_by_id("readabilityBody")
return AnnotatedTextHandler.parse(dom)
@cached_property
def readable(self):
return tounicode(self.readable_dom)
@cached_property
def readable_dom(self):
return self._readable()
def _readable(self):
"""The readable parsed article"""
if not self.candidates:
logger.info("No candidates found in document.")
return self._handle_no_candidates()
# right now we return the highest scoring candidate content
best_candidates = sorted(
(c for c in self.candidates.values()),
key=attrgetter("content_score"), reverse=True)
printer = PrettyPrinter(indent=2)
logger.debug(printer.pformat(best_candidates))
# since we have several candidates, check the winner's siblings
# for extra content
winner = best_candidates[0]
updated_winner = check_siblings(winner, self.candidates)
updated_winner.node = prep_article(updated_winner.node)
if updated_winner.node is not None:
dom = build_base_document(
updated_winner.node, self._return_fragment)
else:
logger.info(
'Had candidates but failed to find a cleaned winning DOM.')
dom = self._handle_no_candidates()
return self._remove_orphans(dom.get_element_by_id("readabilityBody"))
def _remove_orphans(self, dom):
for node in dom.iterdescendants():
if len(node) == 1 and tuple(node)[0].tag == node.tag:
node.drop_tag()
return dom
|
bookieio/breadability | breadability/annotated_text.py | AnnotatedTextHandler.parse | python | def parse(cls, dom):
handler = cls()
saxify(dom, handler)
return handler.content | Converts DOM into paragraphs. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/annotated_text.py#L24-L28 | null | class AnnotatedTextHandler(ContentHandler):
"""A class for converting a HTML DOM into annotated text."""
@classmethod
def __init__(self):
self._content = []
self._paragraph = []
self._dom_path = []
@property
def content(self):
return self._content
def startElementNS(self, name, qname, attrs):
namespace, name = name
if name in _SEMANTIC_TAGS:
self._dom_path.append(to_unicode(name))
def endElementNS(self, name, qname):
namespace, name = name
if name == "p" and self._paragraph:
self._append_paragraph(self._paragraph)
elif name in ("ol", "ul", "pre") and self._paragraph:
self._append_paragraph(self._paragraph)
self._dom_path.pop()
elif name in _SEMANTIC_TAGS:
self._dom_path.pop()
def endDocument(self):
if self._paragraph:
self._append_paragraph(self._paragraph)
def _append_paragraph(self, paragraph):
paragraph = self._process_paragraph(paragraph)
self._content.append(paragraph)
self._paragraph = []
def _process_paragraph(self, paragraph):
current_paragraph = []
for annotation, items in groupby(paragraph, key=lambda i: i[1]):
if annotation and "li" in annotation:
for text, _ in items:
text = shrink_text(text)
current_paragraph.append((text, annotation))
else:
text = "".join(i[0] for i in items)
text = shrink_text(text)
current_paragraph.append((text, annotation))
return tuple(current_paragraph)
def characters(self, content):
if is_blank(content):
return
if self._dom_path:
pair = (content, tuple(sorted(frozenset(self._dom_path))))
else:
pair = (content, None)
self._paragraph.append(pair)
|
bookieio/breadability | breadability/document.py | decode_html | python | def decode_html(html):
if isinstance(html, unicode):
return html
match = CHARSET_META_TAG_PATTERN.search(html)
if match:
declared_encoding = match.group(1).decode("ASCII")
# proceed unknown encoding as if it wasn't found at all
with ignored(LookupError):
return html.decode(declared_encoding, "ignore")
# try to enforce UTF-8 firstly
with ignored(UnicodeDecodeError):
return html.decode("utf8")
text = TAG_MARK_PATTERN.sub(to_bytes(" "), html)
diff = text.decode("utf8", "ignore").encode("utf8")
sizes = len(diff), len(text)
# 99% of text is UTF-8
if abs(len(text) - len(diff)) < max(sizes) * 0.01:
return html.decode("utf8", "ignore")
# try detect encoding
encoding = "utf8"
encoding_detector = chardet.detect(text)
if encoding_detector["encoding"]:
encoding = encoding_detector["encoding"]
return html.decode(encoding, "ignore") | Converts bytes stream containing an HTML page into Unicode.
Tries to guess character encoding from meta tag of by "chardet" library. | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/document.py#L28-L61 | null | # -*- coding: utf8 -*-
"""Generate a clean nice starting html document to process for an article."""
from __future__ import absolute_import
import logging
import re
import chardet
from lxml.etree import ParserError, XMLSyntaxError, tounicode
from lxml.html import HTMLParser, document_fromstring
from ._compat import to_bytes, to_unicode, unicode, unicode_compatible
from .utils import cached_property, ignored
logger = logging.getLogger("breadability")
TAG_MARK_PATTERN = re.compile(to_bytes(r"</?[^>]*>\s*"))
UTF8_PARSER = HTMLParser(encoding="utf8")
CHARSET_META_TAG_PATTERN = re.compile(
br"""<meta[^>]+charset=["']?([^'"/>\s]+)""",
re.IGNORECASE
)
BREAK_TAGS_PATTERN = re.compile(
to_unicode(r"(?:<\s*[bh]r[^>]*>\s*)+"),
re.IGNORECASE
)
def convert_breaks_to_paragraphs(html):
"""
Converts <hr> tag and multiple <br> tags into paragraph.
"""
logger.debug("Converting multiple <br> & <hr> tags into <p>.")
return BREAK_TAGS_PATTERN.sub(_replace_break_tags, html)
def _replace_break_tags(match):
tags = match.group()
if to_unicode("<hr") in tags:
return to_unicode("</p><p>")
elif tags.count(to_unicode("<br")) > 1:
return to_unicode("</p><p>")
else:
return tags
def build_document(html_content, base_href=None):
"""Requires that the `html_content` not be None"""
assert html_content is not None
if isinstance(html_content, unicode):
html_content = html_content.encode("utf8", "xmlcharrefreplace")
try:
document = document_fromstring(html_content, parser=UTF8_PARSER)
except (ParserError, XMLSyntaxError):
raise ValueError("Failed to parse document contents.")
if base_href:
document.make_links_absolute(base_href, resolve_base_href=True)
else:
document.resolve_base_href()
return document
@unicode_compatible
class OriginalDocument(object):
"""The original document to process."""
def __init__(self, html, url=None):
self._html = html
self._url = url
@property
def url(self):
"""Source URL of HTML document."""
return self._url
def __unicode__(self):
"""Renders the document as a string."""
return tounicode(self.dom)
@cached_property
def dom(self):
"""Parsed HTML document from the input."""
html = self._html
if not isinstance(html, unicode):
html = decode_html(html)
html = convert_breaks_to_paragraphs(html)
document = build_document(html, self._url)
return document
@cached_property
def links(self):
"""Links within the document."""
return self.dom.findall(".//a")
@cached_property
def title(self):
"""Title attribute of the parsed document."""
title_element = self.dom.find(".//title")
if title_element is None or title_element.text is None:
return ""
else:
return title_element.text.strip()
|
bookieio/breadability | breadability/document.py | build_document | python | def build_document(html_content, base_href=None):
assert html_content is not None
if isinstance(html_content, unicode):
html_content = html_content.encode("utf8", "xmlcharrefreplace")
try:
document = document_fromstring(html_content, parser=UTF8_PARSER)
except (ParserError, XMLSyntaxError):
raise ValueError("Failed to parse document contents.")
if base_href:
document.make_links_absolute(base_href, resolve_base_href=True)
else:
document.resolve_base_href()
return document | Requires that the `html_content` not be None | train | https://github.com/bookieio/breadability/blob/95a364c43b00baf6664bea1997a7310827fb1ee9/breadability/document.py#L90-L107 | null | # -*- coding: utf8 -*-
"""Generate a clean nice starting html document to process for an article."""
from __future__ import absolute_import
import logging
import re
import chardet
from lxml.etree import ParserError, XMLSyntaxError, tounicode
from lxml.html import HTMLParser, document_fromstring
from ._compat import to_bytes, to_unicode, unicode, unicode_compatible
from .utils import cached_property, ignored
logger = logging.getLogger("breadability")
TAG_MARK_PATTERN = re.compile(to_bytes(r"</?[^>]*>\s*"))
UTF8_PARSER = HTMLParser(encoding="utf8")
CHARSET_META_TAG_PATTERN = re.compile(
br"""<meta[^>]+charset=["']?([^'"/>\s]+)""",
re.IGNORECASE
)
def decode_html(html):
"""
Converts bytes stream containing an HTML page into Unicode.
Tries to guess character encoding from meta tag of by "chardet" library.
"""
if isinstance(html, unicode):
return html
match = CHARSET_META_TAG_PATTERN.search(html)
if match:
declared_encoding = match.group(1).decode("ASCII")
# proceed unknown encoding as if it wasn't found at all
with ignored(LookupError):
return html.decode(declared_encoding, "ignore")
# try to enforce UTF-8 firstly
with ignored(UnicodeDecodeError):
return html.decode("utf8")
text = TAG_MARK_PATTERN.sub(to_bytes(" "), html)
diff = text.decode("utf8", "ignore").encode("utf8")
sizes = len(diff), len(text)
# 99% of text is UTF-8
if abs(len(text) - len(diff)) < max(sizes) * 0.01:
return html.decode("utf8", "ignore")
# try detect encoding
encoding = "utf8"
encoding_detector = chardet.detect(text)
if encoding_detector["encoding"]:
encoding = encoding_detector["encoding"]
return html.decode(encoding, "ignore")
BREAK_TAGS_PATTERN = re.compile(
to_unicode(r"(?:<\s*[bh]r[^>]*>\s*)+"),
re.IGNORECASE
)
def convert_breaks_to_paragraphs(html):
"""
Converts <hr> tag and multiple <br> tags into paragraph.
"""
logger.debug("Converting multiple <br> & <hr> tags into <p>.")
return BREAK_TAGS_PATTERN.sub(_replace_break_tags, html)
def _replace_break_tags(match):
tags = match.group()
if to_unicode("<hr") in tags:
return to_unicode("</p><p>")
elif tags.count(to_unicode("<br")) > 1:
return to_unicode("</p><p>")
else:
return tags
@unicode_compatible
class OriginalDocument(object):
    """The original document to process."""

    def __init__(self, html, url=None):
        # Raw HTML (bytes or unicode) plus the optional source URL.
        self._html = html
        self._url = url

    @property
    def url(self):
        """Source URL of HTML document."""
        return self._url

    def __unicode__(self):
        """Renders the document as a string."""
        return tounicode(self.dom)

    @cached_property
    def dom(self):
        """Parsed HTML document from the input."""
        raw = self._html
        # Decode lazily: bytes input goes through the charset heuristics.
        if isinstance(raw, unicode):
            html = raw
        else:
            html = decode_html(raw)
        html = convert_breaks_to_paragraphs(html)
        return build_document(html, self._url)

    @cached_property
    def links(self):
        """Links within the document."""
        return self.dom.findall(".//a")

    @cached_property
    def title(self):
        """Title attribute of the parsed document."""
        element = self.dom.find(".//title")
        # Missing tag or empty tag both yield an empty title.
        if element is not None and element.text is not None:
            return element.text.strip()
        return ""
|
nagius/snmp_passpersist | example/settings.py | Settings.write | python | def write(self):
'''atomic writing'''
tmp_file, tmp_fname = tempfile.mkstemp()
os.close(tmp_file)
shutil.copystat(self.file_name, tmp_fname)
self.config.write(open(tmp_fname, 'w'))
shutil.move(tmp_fname, self.file_name) | atomic writing | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/example/settings.py#L76-L82 | null | class Settings(object):
'''Base class to read and write ini files
define FNAME in child class
'''
FNAME = None
def __init__(self):
self.changed = False
self.file_name = self.FNAME
self.mtime = 0
self.config = None
self.read()
def read(self):
self.config = configparser.ConfigParser()
self.config.read(self.file_name)
def updateFromFile(self):
current_mtime = os.path.getmtime(self.file_name)
if current_mtime > self.mtime:
self.read()
self.mtime = current_mtime
return True
else:
return False
def set(self, section, option, value):
self.changed = True
self.config.set(section, option, value)
def get(self, section, option):
return self.config.get(section, option)
def items(self, section):
return self.config.items(section)
def sections(self):
return self.config.sections()
def __enter__(self):
return this
def __exit__(self, *args):
if self.changed:
self.save()
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.encode | python | def encode(string):
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result | Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>> | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L106-L117 | null | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.get | python | def get(self,oid):
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release() | Return snmp value for the given OID. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L141-L150 | null | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.get_next | python | def get_next(self,oid):
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release() | Return snmp value for the next OID. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L152-L170 | [
"def get(self,oid):\n\t\"\"\"Return snmp value for the given OID.\"\"\"\n\ttry:\n\t\tself.lock.acquire()\n\t\tif oid not in self.data:\n\t\t\treturn \"NONE\"\n\t\telse:\n\t\t\treturn self.base_oid + oid + '\\n' + self.data[oid]['type'] + '\\n' +\tstr(self.data[oid]['value'])\n\tfinally:\n\t\tself.lock.release()\n"
... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.get_first | python | def get_first(self):
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release() | Return snmp value for the first OID. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L172-L181 | [
"def get(self,oid):\n\t\"\"\"Return snmp value for the given OID.\"\"\"\n\ttry:\n\t\tself.lock.acquire()\n\t\tif oid not in self.data:\n\t\t\treturn \"NONE\"\n\t\telse:\n\t\t\treturn self.base_oid + oid + '\\n' + self.data[oid]['type'] + '\\n' +\tstr(self.data[oid]['value'])\n\tfinally:\n\t\tself.lock.release()\n"
... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
    """Short helper to add an object ID value to the MIB subtree."""
    self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
    """Short helper to add an integer value to the MIB subtree."""
    self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
    """Short helper to add an octet value to the MIB subtree."""
    self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
    """Short helper to add a string value to the MIB subtree."""
    self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
    """Short helper to add an IP address value to the MIB subtree."""
    self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
    """Short helper to add a 32 bit counter value to the MIB subtree."""
    # Truncate the integer to 32 bits, i.e. wrap modulo 2**32 (4294967296).
    self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
    """Short helper to add a gauge value to the MIB subtree."""
    # GAUGE: a value that may rise and fall (unlike counters, which only wrap).
    self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
    """Short helper to add a timeticks value to the MIB subtree."""
    # TIMETICKS: elapsed time in hundredths of a second.
    self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
    """
    Main function that handle SNMP's pass_persist protocol, called by
    the start method.
    Direct call is unnecessary.

    Reads one request from stdin, answers on stdout, and flushes so
    snmpd sees the reply immediately.  Raises EOFError when snmpd
    closes the pipe.
    """
    line = sys.stdin.readline().strip()
    if not line:
        raise EOFError()
    # NOTE: commands are matched by substring, so branch order matters:
    # 'getnext' must be tested before 'get'.
    if 'PING' in line:
        print("PONG")
    elif 'getnext' in line:
        # The request carries one extra line: the queried OID.
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        elif oid == "":
            # Fallback to the first entry
            print(self.get_first())
        else:
            print(self.get_next(oid))
    elif 'get' in line:
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        else:
            print(self.get(oid))
    elif 'set' in line:
        # A set request carries two extra lines: the OID and "TYPE value".
        oid = sys.stdin.readline().strip()
        typevalue = sys.stdin.readline().strip()
        self.set(oid, typevalue)
    elif 'DUMP' in line: # Non-standard keyword, just for debugging
        from pprint import pprint
        pprint(self.data)
    else:
        print("NONE")
    sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
    """
    Main function called by the updater thread.
    Direct call is unnecessary.

    Loops forever: runs the user's update() callback, then commits the
    staged data every self.refresh seconds.  Any exception is stored in
    self.error and re-raised, which ends this thread (and therefore the
    serving loop in start()).
    """
    # Renice updater thread to limit overload
    try:
        os.nice(1)
    except AttributeError as er:
        pass # os.nice is not available on windows
    # Let the initial tree (committed by start()) be served for one full period.
    time.sleep(self.refresh)
    try:
        while True:
            # We pick a timestamp to take in account the time used by update()
            timestamp=time.time()
            # Update data with user's defined function
            self.update()
            # We use this trick because we cannot use signals in a backoffice threads
            # and alarm() mess up with readline() in the main thread.
            delay=(timestamp+self.refresh)-time.time()
            if delay > 0:
                if delay > self.refresh:
                    # Clamp in case the clock jumped backwards.
                    time.sleep(self.refresh)
                else:
                    time.sleep(delay)
            # Commit change exactly every 'refresh' seconds, whatever update() takes long.
            # Commited values are a bit old, but for RRD, punctuals values
            # are better than fresh-but-not-time-constants values.
            self.commit()
    except Exception as e:
        self.error=e
        raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
    """Fallback write handler: reject every SNMP set request as not-writable."""
    return Error.NotWritable
def set(self, oid, typevalue):
    """Handle a pass_persist "set" request for *oid*.

    *typevalue* is the line sent by snmpd in the form "<TYPE> <value>".
    The registered (or default) setter decides the outcome; the protocol
    answer (DONE or an Error value) is printed on stdout.

    Raises RuntimeError when a user setter returns something that is
    neither a boolean, 'DONE', nor a known Error value.
    """
    # Split off the leading type token.  The previous code used
    # str.lstrip(type_), which strips a *character set* rather than a
    # prefix and could eat leading characters of the value itself.
    type_, _, value = typevalue.partition(' ')
    value = value.strip().strip('"')
    ret_value = self.get_setter(oid)(oid, type_, value)
    if not ret_value:
        # Falsy result (False, None, 0, ''): refuse the write.
        print(Error.NotWritable)
    elif ret_value in ErrorValues or ret_value == 'DONE':
        # The setter returned a protocol answer: forward it verbatim.
        print(ret_value)
    elif ret_value == True:
        print('DONE')
    else:
        raise RuntimeError("wrong return value: %s" % str(ret_value))
def start(self, user_func, refresh):
    """Start the SNMP protocol handler and the updater thread.

    user_func is called every *refresh* seconds to repopulate the tree;
    this method then serves snmpd requests on stdin until EOF or until
    the updater thread dies.
    """
    self.update=user_func
    self.refresh=refresh
    self.error=None
    # Populate the tree once so the very first request can be answered.
    self.update()
    self.commit()
    # Background refresher; daemon=True means it is reaped at process exit.
    up = threading.Thread(None,self.main_update,"Updater")
    up.daemon = True
    up.start()
    # Serve requests only while the updater is healthy.
    # is_alive() replaces isAlive(), which was removed in Python 3.9.
    while up.is_alive():
        self.main_passpersist()
    # The previous code called the private Thread._Thread__stop() before
    # re-raising; that API does not exist in Python 3 and would mask the
    # real exception with an AttributeError.  Exceptions raised by
    # main_passpersist() now propagate unchanged, and the daemon updater
    # thread terminates with the process.
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.cut_oid | python | def cut_oid(self,full_oid):
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):] | Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12' | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L183-L195 | null | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_oid_entry | python | def add_oid_entry(self, oid, type, value, label=None):
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item | General function to add an oid entry to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L197-L204 | null | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_oid | python | def add_oid(self,oid,value,label=None):
self.add_oid_entry(oid,'OBJECTID',value,label=label) | Short helper to add an object ID value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L206-L208 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
	def default_setter(self, oid, _type, value):
		# Fallback handler used when no user setter matches the OID:
		# report the OID as read-only to the SNMP master agent.
		return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_int | python | def add_int(self,oid,value,label=None):
self.add_oid_entry(oid,'INTEGER',value,label=label) | Short helper to add an integer value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L210-L212 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
	def get_next(self,oid):
		"""Return the pass_persist answer for the entry that follows *oid*
		in the (numerically sorted) subtree, or "NONE" at end of tree."""
		try: # Nested try..except because of Python 2.4
			self.lock.acquire()
			try:
				# remove trailing zeroes from the oid
				# (snmpd may probe '<oid>.0' for a scalar stored without it)
				while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
					oid = oid[:-2];
				# Exact hit: answer with the next entry in the sorted index.
				# index() raises ValueError on a miss; the +1 lookup raises
				# IndexError when oid is the last entry.
				return self.get(self.data_idx[self.data_idx.index(oid)+1])
			except ValueError:
				# Not found: try to match partial oid
				# (caller sent a prefix: answer with the first entry under it)
				for real_oid in self.data_idx:
					if real_oid.startswith(oid):
						return self.get(real_oid)
				return "NONE" # Unknown OID
			except IndexError:
				return "NONE" # End of MIB
		finally:
			self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
	def add_oid(self,oid,value,label=None):
		"""Short helper to add an object ID value to the MIB subtree."""
		# Stages an OBJECTID entry; visible after the next commit().
		self.add_oid_entry(oid,'OBJECTID',value,label=label)
	def add_oct(self,oid,value,label=None):
		"""Short helper to add an octet value to the MIB subtree."""
		# Stages an OCTET entry; visible after the next commit().
		self.add_oid_entry(oid,'OCTET',value,label=label)
	def add_str(self,oid,value,label=None):
		"""Short helper to add a string value to the MIB subtree."""
		# Stages a STRING entry; visible after the next commit().
		self.add_oid_entry(oid,'STRING',value,label=label)
	def add_ip(self,oid,value,label=None):
		"""Short helper to add an IP address value to the MIB subtree."""
		# Stages an IPADDRESS entry; visible after the next commit().
		self.add_oid_entry(oid,'IPADDRESS',value,label=label)
	def add_cnt_32bit(self,oid,value,label=None):
		"""Short helper to add a 32 bit counter value to the MIB subtree."""
		# Wrap the integer modulo 2**32, the Counter32 range.
		self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
	def add_gau(self,oid,value,label=None):
		"""Short helper to add a gauge value to the MIB subtree."""
		# Stages a GAUGE entry; visible after the next commit().
		self.add_oid_entry(oid,'GAUGE',value,label=label)
	def add_tt(self,oid,value,label=None):
		"""Short helper to add a timeticks value to the MIB subtree."""
		# Stages a TIMETICKS entry; visible after the next commit().
		self.add_oid_entry(oid,'TIMETICKS',value,label=label)
	def main_passpersist(self):
		"""
		Main function that handle SNMP's pass_persist protocol, called by
		the start method.
		Reads one command from stdin, answers on stdout, raises EOFError
		when snmpd closes the pipe.  Direct call is unnecessary.
		"""
		line = sys.stdin.readline().strip()
		if not line:
			raise EOFError()
		# NOTE: substring tests, so 'getnext' MUST be checked before 'get'.
		if 'PING' in line:
			print("PONG")
		elif 'getnext' in line:
			oid = self.cut_oid(sys.stdin.readline().strip())
			if oid is None:
				print("NONE")
			elif oid == "":
				# Fallback to the first entry
				print(self.get_first())
			else:
				print(self.get_next(oid))
		elif 'get' in line:
			oid = self.cut_oid(sys.stdin.readline().strip())
			if oid is None:
				print("NONE")
			else:
				print(self.get(oid))
		elif 'set' in line:
			# 'set' sends two more lines: the full OID and 'TYPE value'.
			oid = sys.stdin.readline().strip()
			typevalue = sys.stdin.readline().strip()
			self.set(oid, typevalue)
		elif 'DUMP' in line: # Just for debugging (not part of the protocol)
			from pprint import pprint
			pprint(self.data)
		else:
			print("NONE")
		sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_oct | python | def add_oct(self,oid,value,label=None):
self.add_oid_entry(oid,'OCTET',value,label=label) | Short helper to add an octet value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L214-L216 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_str | python | def add_str(self,oid,value,label=None):
self.add_oid_entry(oid,'STRING',value,label=label) | Short helper to add a string value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L218-L220 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_ip | python | def add_ip(self,oid,value,label=None):
self.add_oid_entry(oid,'IPADDRESS',value,label=label) | Short helper to add an IP address value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L222-L224 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_cnt_32bit | python | def add_cnt_32bit(self,oid,value,label=None):
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label) | Short helper to add a 32 bit counter value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L226-L229 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_cnt_64bit | python | def add_cnt_64bit(self,oid,value,label=None):
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label) | Short helper to add a 64 bit counter value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L231-L234 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_gau | python | def add_gau(self,oid,value,label=None):
self.add_oid_entry(oid,'GAUGE',value,label=label) | Short helper to add a gauge value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L236-L238 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
	def register_setter(self, oid, setter_func):
		"""
		Register a user-defined function to handle SET commands for this OID.
		The function receives the OID, type (see Type class) and value, and
		must return a true value on success or one of the errors in the
		Error class.
		"""
		self.setter[oid] = setter_func
	def default_setter(self, oid, _type, value):
		"""Fallback SET handler: every OID is read-only unless a setter was registered."""
		return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.add_tt | python | def add_tt(self,oid,value,label=None):
self.add_oid_entry(oid,'TIMETICKS',value,label=label) | Short helper to add a timeticks value to the MIB subtree. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L240-L242 | [
"def add_oid_entry(self, oid, type, value, label=None):\n\t\"\"\"General function to add an oid entry to the MIB subtree.\"\"\"\n\tif self.debug:\n\t\tprint('DEBUG: %s %s %s %s'%(oid,type,value,label))\n\titem={'type': str(type), 'value': str(value)}\n\tif label is not None:\n\t item['label']=str(label)\n\tself.... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.main_passpersist | python | def main_passpersist(self):
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush() | Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L244-L281 | null | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.commit | python | def commit(self):
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release() | Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L283-L301 | null | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.main_update | python | def main_update(self):
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise | Main function called by the updater thread.
Direct call is unnecessary. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L303-L339 | [
"def commit(self):\n\t\"\"\"\n\tCommit change made by the add_* methods.\n\tAll previous values with no update will be lost.\n\tThis method is automatically called by the updater thread.\n\t\"\"\"\n\n\t# Generate index before acquiring lock to keep locked section fast\n\t# Works because this thread is the only writ... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.get_setter | python | def get_setter(self, oid):
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter | Retrieve the nearest parent setter function for an OID | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L341-L350 | null | class PassPersist(object):
"""
This class presents a convenient way to create a MIB subtree and expose it to snmp via its passpersist protocol.
Two threads are used: one for talking with snmpd and a second that triggers the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the following line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
    """Return snmp value for the next OID.

    Implements the GETNEXT side of the pass_persist protocol: return
    the GET reply of the entry that follows *oid* in the sorted index,
    or "NONE" past the end of the subtree.
    """
    try: # Nested try..except because of Python 2.4
        self.lock.acquire()
        try:
            # remove trailing zeroes from the oid
            # (walk ".0" instance suffixes back up until a known entry is
            # found -- presumably matching how snmpd probes instances;
            # TODO confirm against the pass_persist protocol docs)
            while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
                oid = oid[:-2];
            # Exact match: serve the entry right after it in MIB order.
            return self.get(self.data_idx[self.data_idx.index(oid)+1])
        except ValueError:
            # Not found: try to match partial oid
            # (a prefix of the subtree; the first hit is the smallest
            # matching entry, since data_idx is kept sorted by commit())
            for real_oid in self.data_idx:
                if real_oid.startswith(oid):
                    return self.get(real_oid)
            return "NONE" # Unknown OID
        except IndexError:
            return "NONE" # End of MIB
    finally:
        self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
    """
    Main function that handle SNMP's pass_persist protocol, called by
    the start method.
    Direct call is unnecessary.

    Reads one command from snmpd on stdin ("PING", "get", "getnext",
    "set", or the debug-only "DUMP") and prints the reply on stdout.
    """
    line = sys.stdin.readline().strip()
    if not line:
        # snmpd closed the pipe: let the caller terminate the loop.
        raise EOFError()

    if 'PING' in line:
        print("PONG")
    # NOTE: 'getnext' must be tested before 'get', since "get" is a
    # substring of "getnext".
    elif 'getnext' in line:
        # The requested OID follows on the next line.
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        elif oid == "":
            # Fallback to the first entry
            print(self.get_first())
        else:
            print(self.get_next(oid))
    elif 'get' in line:
        oid = self.cut_oid(sys.stdin.readline().strip())
        if oid is None:
            print("NONE")
        else:
            print(self.get(oid))
    elif 'set' in line:
        # A set request carries two extra lines: the OID and "TYPE value".
        oid = sys.stdin.readline().strip()
        typevalue = sys.stdin.readline().strip()
        self.set(oid, typevalue)
    elif 'DUMP' in line: # Just for debugging
        from pprint import pprint
        pprint(self.data)
    else:
        print("NONE")

    # snmpd expects an immediate, unbuffered reply.
    sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
    """
    Main function called by the updater thread.
    Direct call is unnecessary.

    Runs the user update callback every self.refresh seconds and
    commits the staged values. Any exception is stored in self.error
    and re-raised, killing this thread; start() then stops serving.
    """
    # Renice updater thread to limit overload
    try:
        os.nice(1)
    except AttributeError as er:
        pass # os.nice is not available on windows
    time.sleep(self.refresh)
    try:
        while True:
            # We pick a timestamp to take in account the time used by update()
            timestamp=time.time()
            # Update data with user's defined function
            self.update()
            # We use this trick because we cannot use signals in a backoffice threads
            # and alarm() mess up with readline() in the main thread.
            delay=(timestamp+self.refresh)-time.time()
            if delay > 0:
                if delay > self.refresh:
                    # Clock moved backwards: cap the wait at one period.
                    time.sleep(self.refresh)
                else:
                    time.sleep(delay)
            # Commit change exactly every 'refresh' seconds, whatever update() takes long.
            # Commited values are a bit old, but for RRD, punctuals values
            # are better than fresh-but-not-time-constants values.
            self.commit()
    except Exception as e:
        self.error=e
        raise
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
    """Fallback SET handler: every OID is read-only by default."""
    return Error.NotWritable
def set(self, oid, typevalue):
    """Handle a passpersist SET request.

    *typevalue* is the "TYPE value" line read from snmpd. The matching
    (user-registered or default) setter is invoked and its verdict is
    printed back following the pass_persist protocol.
    """
    type_, _, rest = typevalue.partition(' ')
    # Strip the surrounding quotes snmpd puts around string values.
    # NOTE: the previous str.lstrip(type_) call removed *characters*
    # (any char belonging to the type name), not a prefix; partition()
    # performs a real prefix split.
    value = rest.strip().strip('"')
    ret_value = self.get_setter(oid)(oid, type_, value)
    if ret_value:
        if ret_value in ErrorValues or ret_value == 'DONE':
            print(ret_value)
        elif ret_value == True:
            print('DONE')
        elif ret_value == False:
            print(Error.NotWritable)
        else:
            raise RuntimeError("wrong return value: %s" % str(ret_value))
    else:
        print(Error.NotWritable)
def start(self, user_func, refresh):
    """Run the SNMP protocol handler and the background updater.

    user_func is an update callback invoked every *refresh* seconds.
    This call blocks, serving snmpd requests on stdin/stdout until
    EOF or until the updater thread dies.
    """
    self.update = user_func
    self.refresh = refresh
    self.error = None
    # First load, so data is available before serving any request.
    self.update()
    self.commit()
    # Start updater thread. daemon=True guarantees it does not outlive
    # the process, so no explicit thread-stop hook is needed.
    up = threading.Thread(None, self.main_update, "Updater")
    up.daemon = True
    up.start()
    # Main loop: do not serve data if the Updater thread has died.
    # Thread.isAlive() was removed in Python 3.9 (is_alive() is the
    # portable spelling) and the private _Thread__stop() hook no longer
    # exists in Python 3, so exceptions simply propagate to the caller.
    while up.is_alive():
        self.main_passpersist()
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.set | python | def set(self, oid, typevalue):
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable) | Call the default or user setter function if available | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L363-L381 | [
"def get_setter(self, oid):\n\t\"\"\"\n\tRetrieve the nearest parent setter function for an OID\n\t\"\"\"\n\tif hasattr(self.setter, oid):\n\t\treturn self.setter[oid]\n\tparents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]\n\tif parents:\n\t\treturn self.setter[max(parents)]\n\treturn se... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def start(self, user_func, refresh):
"""
Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds.
"""
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise
|
nagius/snmp_passpersist | snmp_passpersist.py | PassPersist.start | python | def start(self, user_func, refresh):
self.update=user_func
self.refresh=refresh
self.error=None
# First load
self.update()
self.commit()
# Start updater thread
up = threading.Thread(None,self.main_update,"Updater")
up.daemon = True
up.start()
# Main loop
while up.isAlive(): # Do not serve data if the Updater thread has died
try:
self.main_passpersist()
except:
up._Thread__stop()
raise | Start the SNMP's protocol handler and the updater thread
user_func is a reference to an update function, ran every 'refresh' seconds. | train | https://github.com/nagius/snmp_passpersist/blob/8cc584d2e90c920ae98a318164a55bde209a18f7/snmp_passpersist.py#L383-L407 | [
"def main_passpersist(self):\n\t\"\"\"\n\tMain function that handle SNMP's pass_persist protocol, called by\n\tthe start method.\n\tDirect call is unnecessary.\n\t\"\"\"\n\tline = sys.stdin.readline().strip()\n\tif not line:\n\t\traise EOFError()\n\n\tif 'PING' in line:\n\t\tprint(\"PONG\")\n\telif 'getnext' in lin... | class PassPersist(object):
"""
This class present a convenient way to creare a MIB subtree and expose it to snmp via it's passpersist protocol.
Two thread are used, one for talking with snmpd and a second that trigger the update process at a fixed interval.
The keyword 'DUMP' has been added to the protocol for testing purpose.
Usage example: in a file /path/to/your/script.py :
> #!/usr/bin/python -u
> import snmp_passpersist as snmp
>
> def update():
> pp.add_int('0.1',123)
>
> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
> pp.start(update,30) # Every 30s
With the folowing line in snmpd.conf :
pass_persist .1.3.6.1.3.53.8.0 /path/to/your/script.py
"""
@staticmethod
def encode(string):
"""
Encode the given string as an OID.
>>> import snmp_passpersist as snmp
>>> snmp.PassPersist.encode("hello")
'5.104.101.108.108.111'
>>>
"""
result=".".join([ str(ord(s)) for s in string ])
return "%s." % (len(string)) + result
def __init__(self, base_oid):
"""
Initialize internals structures.
base_oid is the OID prefix used for all entry (the root of the MIB tree).
"""
self.data=dict()
self.data_idx=list()
self.pending=dict()
self.lock=threading.RLock()
if not base_oid.endswith("."):
base_oid += "."
self.base_oid=base_oid
self.setter = dict()
self.debug = False
# The data structure is a dict that hold the unsorted MIB tree like this :
# data = {
# '1.1': { 'type':'INTEGER', 'value':4 },
# '1.3.2.1':{ 'type':'STRING', 'value':'vm1' }
# }
def get(self,oid):
"""Return snmp value for the given OID."""
try:
self.lock.acquire()
if oid not in self.data:
return "NONE"
else:
return self.base_oid + oid + '\n' + self.data[oid]['type'] + '\n' + str(self.data[oid]['value'])
finally:
self.lock.release()
def get_next(self,oid):
"""Return snmp value for the next OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
# remove trailing zeroes from the oid
while len(oid) > 0 and oid[-2:] == ".0" and oid not in self.data:
oid = oid[:-2];
return self.get(self.data_idx[self.data_idx.index(oid)+1])
except ValueError:
# Not found: try to match partial oid
for real_oid in self.data_idx:
if real_oid.startswith(oid):
return self.get(real_oid)
return "NONE" # Unknown OID
except IndexError:
return "NONE" # End of MIB
finally:
self.lock.release()
def get_first(self):
"""Return snmp value for the first OID."""
try: # Nested try..except because of Python 2.4
self.lock.acquire()
try:
return self.get(self.data_idx[0])
except (IndexError, ValueError):
return "NONE"
finally:
self.lock.release()
def cut_oid(self,full_oid):
"""
Remove the base OID from the given string.
>>> import snmp_passpersist as snmp
>>> pp=snmp.PassPersist(".1.3.6.1.3.53.8")
>>> pp.cut_oid(".1.3.6.1.3.53.8.28.12")
'28.12'
"""
if not full_oid.startswith(self.base_oid.rstrip('.')):
return None
else:
return full_oid[len(self.base_oid):]
def add_oid_entry(self, oid, type, value, label=None):
"""General function to add an oid entry to the MIB subtree."""
if self.debug:
print('DEBUG: %s %s %s %s'%(oid,type,value,label))
item={'type': str(type), 'value': str(value)}
if label is not None:
item['label']=str(label)
self.pending[oid]=item
def add_oid(self,oid,value,label=None):
"""Short helper to add an object ID value to the MIB subtree."""
self.add_oid_entry(oid,'OBJECTID',value,label=label)
def add_int(self,oid,value,label=None):
"""Short helper to add an integer value to the MIB subtree."""
self.add_oid_entry(oid,'INTEGER',value,label=label)
def add_oct(self,oid,value,label=None):
"""Short helper to add an octet value to the MIB subtree."""
self.add_oid_entry(oid,'OCTET',value,label=label)
def add_str(self,oid,value,label=None):
"""Short helper to add a string value to the MIB subtree."""
self.add_oid_entry(oid,'STRING',value,label=label)
def add_ip(self,oid,value,label=None):
"""Short helper to add an IP address value to the MIB subtree."""
self.add_oid_entry(oid,'IPADDRESS',value,label=label)
def add_cnt_32bit(self,oid,value,label=None):
"""Short helper to add a 32 bit counter value to the MIB subtree."""
# Truncate integer to 32bits ma,x
self.add_oid_entry(oid,'Counter32',int(value)%4294967296,label=label)
def add_cnt_64bit(self,oid,value,label=None):
"""Short helper to add a 64 bit counter value to the MIB subtree."""
# Truncate integer to 64bits ma,x
self.add_oid_entry(oid,'Counter64',int(value)%18446744073709551615,label=label)
def add_gau(self,oid,value,label=None):
"""Short helper to add a gauge value to the MIB subtree."""
self.add_oid_entry(oid,'GAUGE',value,label=label)
def add_tt(self,oid,value,label=None):
"""Short helper to add a timeticks value to the MIB subtree."""
self.add_oid_entry(oid,'TIMETICKS',value,label=label)
def main_passpersist(self):
"""
Main function that handle SNMP's pass_persist protocol, called by
the start method.
Direct call is unnecessary.
"""
line = sys.stdin.readline().strip()
if not line:
raise EOFError()
if 'PING' in line:
print("PONG")
elif 'getnext' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
elif oid == "":
# Fallback to the first entry
print(self.get_first())
else:
print(self.get_next(oid))
elif 'get' in line:
oid = self.cut_oid(sys.stdin.readline().strip())
if oid is None:
print("NONE")
else:
print(self.get(oid))
elif 'set' in line:
oid = sys.stdin.readline().strip()
typevalue = sys.stdin.readline().strip()
self.set(oid, typevalue)
elif 'DUMP' in line: # Just for debbuging
from pprint import pprint
pprint(self.data)
else:
print("NONE")
sys.stdout.flush()
def commit(self):
"""
Commit change made by the add_* methods.
All previous values with no update will be lost.
This method is automatically called by the updater thread.
"""
# Generate index before acquiring lock to keep locked section fast
# Works because this thread is the only writer of self.pending
pending_idx = sorted(list(self.pending.keys()), key=lambda k: tuple(int(part) for part in k.split('.')))
# Commit new data
try:
self.lock.acquire()
self.data=self.pending
self.pending=dict()
self.data_idx = pending_idx
finally:
self.lock.release()
def main_update(self):
"""
Main function called by the updater thread.
Direct call is unnecessary.
"""
# Renice updater thread to limit overload
try:
os.nice(1)
except AttributeError as er:
pass # os.nice is not available on windows
time.sleep(self.refresh)
try:
while True:
# We pick a timestamp to take in account the time used by update()
timestamp=time.time()
# Update data with user's defined function
self.update()
# We use this trick because we cannot use signals in a backoffice threads
# and alarm() mess up with readline() in the main thread.
delay=(timestamp+self.refresh)-time.time()
if delay > 0:
if delay > self.refresh:
time.sleep(self.refresh)
else:
time.sleep(delay)
# Commit change exactly every 'refresh' seconds, whatever update() takes long.
# Commited values are a bit old, but for RRD, punctuals values
# are better than fresh-but-not-time-constants values.
self.commit()
except Exception as e:
self.error=e
raise
def get_setter(self, oid):
"""
Retrieve the nearest parent setter function for an OID
"""
if hasattr(self.setter, oid):
return self.setter[oid]
parents = [ poid for poid in list(self.setter.keys()) if oid.startswith(poid) ]
if parents:
return self.setter[max(parents)]
return self.default_setter
def register_setter(self, oid, setter_func):
"""
Set reference to an user defined function for deal with set commands.
The user function receives the OID, type (see Type class) and value
and must return a true value on succes or one of errors in Error class
"""
self.setter[oid] = setter_func
def default_setter(self, oid, _type, value):
return Error.NotWritable
def set(self, oid, typevalue):
"""
Call the default or user setter function if available
"""
success = False
type_ = typevalue.split()[0]
value = typevalue.lstrip(type_).strip().strip('"')
ret_value = self.get_setter(oid)(oid, type_, value)
if ret_value:
if ret_value in ErrorValues or ret_value == 'DONE':
print(ret_value)
elif ret_value == True:
print('DONE')
elif ret_value == False:
print(Error.NotWritable)
else:
raise RuntimeError("wrong return value: %s" % str(ret_value))
else:
print(Error.NotWritable)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.