code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
'''
Created on 7 2010
@author: ivan
'''
from __future__ import with_statement
from foobnix.model.entity import CommonBean
import os
from foobnix.util.time_utils import normilize_time
from foobnix.util import LOG, file_utils
import chardet
'''
Created on 4
@author: ivan
'''
# CUE sheet command keywords recognized by the parser below.
TITLE = "TITLE"
PERFORMER = "PERFORMER"
FILE = "FILE"
INDEX = "INDEX"
class CueTrack():
    """A single track parsed from a CUE sheet.

    Holds the track's title and performer, the raw INDEX string
    (e.g. "INDEX 01 03:25:00"), a duration filled in later by the
    reader, and the path of the audio file the track belongs to.
    """
    def __init__(self, title, performer, index, path):
        self.title = title
        self.performer = performer
        self.index = index
        self.duration = 0  # seconds; computed later from the next track's start
        self.path = path

    def __str__(self):
        return "Track: " + self.title + " " + self.performer + " " + self.index

    def get_start_time_str(self):
        """Strip the leading 'INDEX 01 ' prefix and return the raw time text."""
        prefix = len("INDEX 01") + 1
        return self.index[prefix:]

    def get_start_time_sec(self):
        """Convert the 'MM:SS:FF' start stamp to whole seconds (frames ignored)."""
        stamp = self.get_start_time_str()
        minutes = int(stamp[:2])
        seconds = int(stamp[3:5])
        return minutes * 60 + seconds
class CueFile():
    """Parsed CUE sheet: album title, performer, source audio file, tracks."""
    def __init__(self):
        self.title = None
        self.performer = None
        self.file = None   # path of the audio file the sheet refers to
        self.tracks = []   # CueTrack objects, in sheet order

    def append_track(self, track):
        """Add one CueTrack to the sheet."""
        self.tracks.append(track)

    def __str__(self):
        return "CUEFILE: " + self.title + " " + self.performer + " " + self.file
class CueReader():
    """Reads a .cue sheet from disk and produces playable CommonBean entries."""

    def __init__(self, cue_file):
        self.cue_file = cue_file  # path to the .cue sheet
        self.is_valid = True      # cleared by parse() when no audio source is found

    def get_line_value(self, str):
        # Extract the value between quotes on a CUE command line.
        # NOTE(review): str.find() returns -1 when absent (truthy) while 0 is
        # falsy, so these 'or' chains misbehave for values starting at column
        # 0 or for unquoted lines (e.g. INDEX) -- confirm before reuse.
        first = str.find('"') or str.find("'")
        end = str.find('"', first + 1) or str.find("'", first + 1)
        return str[first + 1:end]

    def normalize(self, cue_file):
        # Compute each track's duration as the gap to the next track's start
        # time, and default its path to the sheet-level audio file.
        # NOTE(review): the loop runs len-1 times, so the final track is
        # dropped here -- presumably because its end time is unknown; confirm.
        duration_tracks = []
        tracks = cue_file.tracks
        for i in xrange(len(tracks) - 1):
            track = tracks[i]
            next_track = tracks[i + 1]
            duration = next_track.get_start_time_sec() - track. get_start_time_sec()
            track.duration = duration
            print "Duration", duration
            if not track.path:
                track.path = cue_file.file
            duration_tracks.append(track)
        cue_file.tracks = duration_tracks
        return cue_file

    def get_common_beans(self):
        # Parse the sheet and convert every track into a CommonBean music entry
        # carrying its start offset, duration and "performer - album" parent.
        beans = []
        cue = self.parse()
        for track in cue.tracks:
            bean = CommonBean(name=track.performer + " - " + track.title, path=track.path, type=CommonBean.TYPE_MUSIC_FILE)
            bean.start_at = track.get_start_time_sec()
            bean.duration = track.duration
            bean.time = normilize_time(track.duration)
            bean.parent = cue.performer + " - " + cue.title
            beans.append(bean)
        return beans

    def is_cue_valid(self):
        # Runs a full parse for its side effect of refreshing self.is_valid.
        self.parse()
        LOG.info("CUE VALID", self.cue_file, self.is_valid)
        return self.is_valid

    """detect file encoding"""
    def code_detecter(self, filename):
        # Sniff the sheet's character encoding with chardet.
        with open(filename) as codefile:
            data = codefile.read()
            return chardet.detect(data)['encoding']

    def parse(self):
        # Walk the sheet line by line: album-level TITLE / PERFORMER / FILE
        # are captured while is_title is True (before the first TRACK); each
        # later TRACK line flushes the previously collected track data.
        file = open(self.cue_file, "r")
        code = self.code_detecter(self.cue_file);
        LOG.debug("File encoding is", code)
        is_title = True   # True until the first TRACK entry is seen
        cue_file = CueFile()
        for line in file:
            line = unicode(line, code)
            line = str(line).strip()
            if not line:
                continue
            if line.startswith(TITLE):
                title = self.get_line_value(line)
                if is_title:
                    cue_file.title = title
            if line.startswith(PERFORMER):
                performer = self.get_line_value(line)
                if is_title:
                    cue_file.performer = performer
            if line.startswith(FILE):
                file = self.get_line_value(line)
                dir = os.path.dirname(self.cue_file)
                full_file = os.path.join(dir, file)
                LOG.debug("CUE source", full_file)
                exists = os.path.exists(full_file)
                """if there no source cue file"""
                if not exists:
                    """try to find other source"""
                    # the FILE reference is stale: look for the same base name
                    # with a known audio extension next to the sheet
                    ext = file_utils.get_file_extenstion(full_file)
                    nor = full_file[:-len(ext)]
                    LOG.info("Normilized path", nor)
                    if os.path.exists(nor + ".ape"):
                        full_file = nor + ".ape"
                    elif os.path.exists(nor + ".flac"):
                        full_file = nor + ".flac"
                    elif os.path.exists(nor + ".wav"):
                        full_file = nor + ".wav"
                    elif os.path.exists(nor + ".mp3"):
                        full_file = nor + ".mp3"
                    else:
                        # no playable source at all: give up on this sheet
                        self.is_valid = False
                        return cue_file
                if is_title:
                    cue_file.file = full_file
            if line.startswith(INDEX):
                index = self.get_line_value(line)
            if line.startswith("TRACK") and line.find("AUDIO"):
                # NOTE(review): find() returns -1 (truthy) when "AUDIO" is
                # absent, so non-audio tracks are not actually excluded.
                # A new TRACK flushes the previous title/performer/index as a
                # finished CueTrack; the final track is therefore never
                # appended after the loop -- confirm intent.
                if not is_title:
                    cue_track = CueTrack(title, performer, index, full_file)
                    cue_file.append_track(cue_track)
                is_title = False
        return self.normalize(cue_file)
| Python |
'''
Created on Mar 16, 2010
@author: ivan
'''
from foobnix.util import LOG
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.model.entity import CommonBean
class RadioListModel:
    """gtk.ListStore-backed model for the radio-station tree view.

    Column layout (indices shared with the controller):
    icon stock-id, track number, display name, stream path/url,
    row background colour, position index.
    """
    POS_ICON = 0
    POS_TRACK_NUMBER = 1
    POS_NAME = 2
    POS_PATH = 3
    POS_COLOR = 4
    POS_INDEX = 5
    def __init__(self, widget):
        # widget: the gtk.TreeView this model drives
        self.widget = widget
        self.current_list_model = gtk.ListStore(str, str, str, str, str, int)
        cellpb = gtk.CellRendererPixbuf()
        cellpb.set_property('cell-background', 'yellow')
        # columns bind by store index: stock_id/text = data column,
        # cell_background/background = POS_COLOR
        iconColumn = gtk.TreeViewColumn(_('Icon'), cellpb, stock_id=0, cell_background=4)
        numbetColumn = gtk.TreeViewColumn(_('N'), gtk.CellRendererText(), text=1, background=4)
        descriptionColumn = gtk.TreeViewColumn(_('Music List'), gtk.CellRendererText(), text=2, background=4)
        widget.append_column(iconColumn)
        widget.append_column(numbetColumn)
        widget.append_column(descriptionColumn)
        widget.set_model(self.current_list_model)
    def get_size(self):
        # Number of rows currently stored.
        return len(self.current_list_model)
    def getBeenByPosition(self, position):
        # Rebuild a CommonBean from the row stored at *position*.
        bean = CommonBean()
        bean.icon = self.current_list_model[position][ self.POS_ICON]
        bean.tracknumber = self.current_list_model[position][ self.POS_TRACK_NUMBER]
        bean.name = self.current_list_model[position][ self.POS_NAME]
        bean.path = self.current_list_model[position][ self.POS_PATH]
        bean.color = self.current_list_model[position][ self.POS_COLOR]
        bean.index = self.current_list_model[position][ self.POS_INDEX]
        return bean
    def get_selected_bean(self):
        # CommonBean for the selected row; implicitly None when nothing is selected.
        LOG.info(self.widget)
        selection = self.widget.get_selection()
        LOG.info(selection)
        model, selected = selection.get_selected()
        LOG.info(model, selected)
        if selected:
            bean = CommonBean()
            bean.icon = model.get_value(selected, self.POS_ICON)
            bean.tracknumber = model.get_value(selected, self.POS_TRACK_NUMBER)
            bean.name = model.get_value(selected, self.POS_NAME)
            bean.path = model.get_value(selected, self.POS_PATH)
            bean.color = model.get_value(selected, self.POS_COLOR)
            bean.index = model.get_value(selected, self.POS_INDEX)
            return bean
    def clear(self):
        # Remove every row from the store.
        self.current_list_model.clear()
    def append(self, playlistBean):
        # Append one bean as a new row, in column order.
        self.current_list_model.append([playlistBean.icon, playlistBean.tracknumber, playlistBean.name, playlistBean.path, playlistBean.color, playlistBean.index])
    def __del__(self, *a):
        LOG.info("del")
| Python |
'''
Created on 15 2010
@author: ivan
'''
from __future__ import with_statement
import os
from foobnix.util import LOG
# Per-user foobnix settings directory (unix $HOME or windows %USERPROFILE%)
# and the system-wide directory of bundled radio playlists.
FOOBNIX_DIR = (os.getenv("HOME") or os.getenv('USERPROFILE')) + "/.foobnix"
FOOBNIX_DIR_RADIO = "/opt/foobnix/radio"
EXTENSION = ".fpl"  # foobnix playlist file extension
class FPL():
    """Foobnix playlist (.fpl): a named collection of radio-station URL lists.

    urls_dict maps a station name to its list of stream URLs.
    """
    def __init__(self, name, urls_dict):
        self.name = name
        self.urls_dict = urls_dict

    def __str__(self):
        # BUG FIX: referenced non-existent self.lines_dict, which raised
        # AttributeError whenever an FPL was printed/logged.
        return self.name + "radios" + str(self.urls_dict)
class RadioFolder():
    """Discovers and parses foobnix radio playlists (*.fpl) in FOOBNIX_DIR_RADIO."""
    def __init__(self):
        self.results = []  # cached FPL objects, filled by get_radio_FPLs()

    def get_radio_list(self):
        """Return names of non-empty *.fpl files, or None if the dir is missing."""
        if not os.path.isdir(FOOBNIX_DIR_RADIO):
            LOG.warn("Not a folder ", FOOBNIX_DIR_RADIO)
            return None
        result = []
        for item in os.listdir(FOOBNIX_DIR_RADIO):
            path = os.path.join(FOOBNIX_DIR_RADIO, item)
            if item.endswith(EXTENSION) and os.path.isfile(path) and os.path.getsize(path) > 0:
                LOG.info("Find radio station playlist", item)
                result.append(item)
        return result

    def parse_play_list(self, list_name):
        """Parse one playlist file into {station name: [stream urls]}.

        Lines have the form 'Name = url1, url2, ...'; comment lines start
        with '#'. Returns None when the file does not exist.
        """
        path = os.path.join(FOOBNIX_DIR_RADIO, list_name)
        if not os.path.isfile(path):
            LOG.warn("Not a file ", path)
            return None
        stations_by_name = {}
        with open(path) as playlist:
            for line in playlist:
                # BUG FIX: was 'if line or not line.startswith("#") and "=" in
                # line' -- by operator precedence that accepted *every* line
                # (comments and lines without '='), creating junk entries.
                if line and not line.startswith("#") and "=" in line:
                    name_end = line.find("=")
                    name = line[:name_end].strip()
                    stations = line[name_end + 1:].split(",")
                    if stations:
                        good_stations = []
                        for url in stations:
                            good_url = url.strip()
                            # keep only direct http/file streams; skip formats
                            # the player cannot handle (wma/asx/ram)
                            if good_url and (good_url.startswith("http://") or good_url.startswith("file://")):
                                if not good_url.endswith(("wma", "asx", "ram")):
                                    good_stations.append(good_url)
                        stations_by_name[name] = good_stations
        return stations_by_name

    def get_radio_FPLs(self):
        """Build (and cache) one FPL object per playlist file found."""
        if self.results:
            LOG.info("Folder with radio already read")
            return self.results
        names = self.get_radio_list()
        if not names:
            return []
        results = []
        for play_name in names:
            content = self.parse_play_list(play_name)
            LOG.info("Create FPL", play_name)
            play_name = play_name[:-len(EXTENSION)]
            results.append(FPL(play_name, content))
        # BUG FIX: the cache in self.results was never filled, so the
        # "already read" fast path above could never trigger.
        self.results = results
        return self.results
| Python |
'''
Created on Jul 16, 2010
@author: ivan
'''
import urllib2
# Directory page listing the sky.fm stations on screamer-radio.com (genre 51).
site = "http://www.screamer-radio.com/"
site_full = site + "directory/browsegenre/51/"
def load_urls_name_page():
    """Scrape sky.fm station names/urls from screamer-radio and write SKY.FM.fpl."""
    from foobnix.util import LOG  # LOG is used below but was never imported here
    connect = urllib2.urlopen(site_full)
    data = connect.read()
    file = open("SKY.FM.fpl", "w")
    for line in data.split("\n"):
        if line.find("sky.fm") > 0:
            # slice the href target and the "sky.fm - <name>" label out of the row
            url = line[line.find('<td><a href="') + len('<td><a href="') + 1:line.find('/">')]
            name = line[line.find('sky.fm -') + len('sky.fm -') + 1:line.find('</a></td>')]
            # BUG FIX: original statement was 'LOG.info(name, url' -- an
            # unclosed call that made this whole module a SyntaxError.
            LOG.info(name, url)
            urls = get_urls(site + url)
            file.write(name.strip() + " = " + urls + "\n")
    file.close()
def get_urls(path):
    """Fetch a station page and return its stream URLs joined by ', '."""
    response = urllib2.urlopen(path)
    page = response.read()
    found = []
    for row in page.split("\n"):
        # rows of interest look like: "1) http://... <br />"
        if row.find(") http://") > 0:
            start = row.find(') ') + 2
            found.append(row[start:row.find("<br />")])
    return ", ".join(found)
# Script entry point: scrape and write the playlist when the module runs.
load_urls_name_page()
#LOG.info(get_urls("http://www.screamer-radio.com/directory/show/3825/") | Python |
# -*- coding: utf-8 -*-
'''
Created on Jul 16, 2010
@author: ivan
'''
import urllib2
import re
# NOTE(review): this module-level default appears unused -- the scraper below
# builds its own per-page url ("?p=N") in a local variable of the same name.
site = "http://guzei.com/online_radio/?search=france"
def load_urls_name_page():
    """Scrape guzei.com radio listing pages 1..33 and write GUZEI.COM.fpl."""
    from foobnix.util import LOG  # LOG is used below but was never imported here
    reg_all = "([^{</}]*)"
    # station row carrying a title attribute and a display name span
    titled = ('<a href="./listen.php\?online_radio_id=([0-9]*)"'
              ' target="guzei_online" title="' + reg_all +
              '"><span class="name">' + reg_all + '</span></a>')
    # plain companion row (no title attribute); parallel to 'titled' matches
    plain = ('<a href="./listen.php\?online_radio_id=([0-9]*)"'
             ' target="guzei_online">' + reg_all + '</a>')
    file = open("GUZEI.COM.fpl", "w")
    for j in xrange(33):
        j = j + 1
        page_url = "http://guzei.com/online_radio/?p=" + str(j)
        data = urllib2.urlopen(page_url).read()
        for line in data.split("\n"):
            # regexes are pre-built above instead of per line (hoisted)
            links = re.findall(titled, line, re.IGNORECASE | re.UNICODE)
            links1 = re.findall(plain, line, re.IGNORECASE | re.UNICODE)
            if links:
                i = 0
                for link in links:
                    id = link[0]
                    name = link[2] + " - " + str(links1[i][1])
                    i += 1
                    url = get_ulr_by_id(id)
                    res = name + " = " + url
                    # BUG FIX: 'LOG.info(j' and 'LOG.info(res' were unclosed
                    # calls (SyntaxError) in the original source.
                    LOG.info(j)
                    LOG.info(res)
                    file.write(res + "\n")
    file.close()
def get_ulr_by_id(id):
    """Fetch a station page by id and return its direct stream link (or None)."""
    page_url = "http://guzei.com/online_radio/listen.php?online_radio_id=" + id
    page = urllib2.urlopen(page_url).read()
    marker = 'Прямая ссылка на поток:'  # "Direct link to the stream:"
    for row in page.split("\n"):
        if row.find(marker) >= 0:
            raw = row[row.find(marker) + len(marker):row.find('</p>')]
            return raw.strip()
#LOG.info(get_ulr_by_id("5369")
# Script entry point: run the guzei.com scrape when the module runs.
load_urls_name_page()
'''
Created on Jul 16, 2010
@author: ivan
'''
| Python |
'''
Created on 16 2010
@author: ivan
'''
import urllib2
# Landing page scraped for "listen.di.fm/public3" channel links.
site = "http://di.fm"
def load_urls_name_page():
    """Scrape di.fm channel links from the main page and write DI_FM.fpl."""
    from foobnix.util import LOG  # LOG is used below but was never imported here
    connect = urllib2.urlopen(site)
    data = connect.read()
    result = {}
    file = open("DI_FM.fpl", "w")
    for line in data.split("\n"):
        pre = '<td><a href="http://listen.di.fm/public3/'
        if line.find(pre) > 0:
            el = "<td><a href=\""
            url = line[line.find(el) + len(el):line.find("\" rel")]
            # BUG FIX: 'LOG.info(url' and a dangling 'LOG.info(' were
            # unclosed calls (SyntaxError) in the original source.
            LOG.info(url)
            name = url[url.rfind("/") + 1:]
            name = name[:-4]  # drop the 4-char extension (presumably ".pls")
            LOG.info(name)
            file.write(name + " = " + url + "\n")
    file.close()
# Script entry point: run the di.fm scrape when the module runs.
load_urls_name_page()
| Python |
'''
Created on 16 2010
@author: ivan
'''
import urllib2
# Base url; channel paths ("chanel/...") are appended when fetching streams.
site = "http://myradio.ua/"
def load_urls_name_page():
    """Scrape channel names and stream urls from myradio.ua, write MYRADIO_UA.fpl."""
    from foobnix.util import LOG  # LOG is used below but was never imported here
    connect = urllib2.urlopen(site)
    data = connect.read()
    result = {}
    file = open("MYRADIO_UA.fpl", "w")
    for line in data.split("\n"):
        line = line.decode("cp1251")  # the site serves windows-1251 pages
        pre = "<a href=\"chanel/"
        start = line.find(pre)
        end = line.find("</a>", start + 10)
        if start > 0 and end > 0:
            # len(" < a href") == len('<a href="') == 9; odd literal kept as-is
            url = line[line.find("<a href=\"") + len(" < a href"):line.find(">", start)].replace('"', '')
            name = line[line.find(">", start) + 1:line.find("</a>")]
            result[url.strip()] = name
            urls = get_radio_ulr(url.strip()).split(",")
            entry = name.strip() + " = " + urls[1] + ", " + urls[0]
            file.write(entry + "\n")
            # BUG FIX: 'LOG.info(line' was an unclosed call (SyntaxError).
            LOG.info(entry)
            # stop after the last channel of interest
            if url.strip() == "chanel/eurovision":
                file.close()
                return result
def get_radio_ulr(chanel):
    """Return comma-joined .m3u playlist URLs for a myradio.ua channel page."""
    connect = urllib2.urlopen(site + chanel)
    data = connect.read()
    result = ""
    for line in data.rsplit("\n"):
        # sample row: <img class="roll" src=".../winamp_low_out.jpg">
        # BUG FIX: 'line.find(".m3u")' was used as a boolean, but find()
        # returns -1 (truthy) when the substring is absent, so rows without
        # any .m3u link passed the filter and produced garbage urls.
        if line.find('<img class="roll" src="') > 0 and line.find('.m3u') > 0 and line.find("window") < 0:
            pre = '<div class="but"><a href="'
            index_pre = line.find(pre)
            url = line[index_pre + len(pre): line.find('.m3u', index_pre)]
            url = site + url + ".m3u"
            result = result + url + ", "
    return result[:-2]
# BUG FIX: original line was 'LOG.info( load_urls_name_page()' -- an unclosed
# call (SyntaxError); LOG is also not imported in this script. Just run the
# scrape as the other scraper modules do.
load_urls_name_page()
| Python |
'''
Created on Mar 16, 2010
@author: ivan
'''
from foobnix.radio.radio_model import RadioListModel
from foobnix.util.plsparser import getStationPath, getPlsName, get_content
from foobnix.model.entity import CommonBean
from foobnix.util.mouse_utils import is_double_click
import urllib2
from foobnix.util import LOG
class RadioListCntr():
    """Controller for the radio tab: wires the tree view, add/remove buttons
    and the url entry, and drives playback of the station list."""
    def __init__(self, gxMain, playerCntr):
        self.widget = gxMain.get_widget("radio_list_treeview")
        addButton = gxMain.get_widget("add_radio_toolbutton")
        removeButton = gxMain.get_widget("remove_radio_toolbuton")
        self.urlText = gxMain.get_widget("radio_url_entry")
        self.widget.connect("button-press-event", self.onPlaySong)
        addButton.connect("clicked", self.onAddRadio)
        removeButton.connect("clicked", self.onRemoveRadio)
        self.current_list_model = RadioListModel(self.widget)
        self.playerCntr = playerCntr
        # NOTE(review): "button-press-event" is connected twice (also above),
        # so onPlaySong fires twice per click -- confirm if intentional.
        self.widget.connect("button-press-event", self.onPlaySong)
        self.entityBeans = []  # one CommonBean per station, parallel to the model
        self.index = self.current_list_model.get_size();
    def onAddRadio(self, *args):
        # Add the station typed into the url entry; .pls and .m3u playlists
        # are resolved to a direct stream url first.
        urlStation = self.urlText.get_text()
        if urlStation:
            nameDef = urlStation
            if urlStation.endswith(".pls"):
                getUrl = getStationPath(urlStation)
                if getUrl:
                    urlStation = getUrl
                    nameDef = getPlsName(nameDef) + " [" + urlStation + " ]"
                    LOG.info(nameDef)
            elif urlStation.endswith(".m3u"):
                content = get_content(urlStation)
                # the first non-comment line of the m3u is the stream url
                for line in content.rsplit():
                    if not line.startswith("#"):
                        urlStation = line
                        nameDef = line
                        break
            entity = CommonBean(name=nameDef, path=urlStation, type=CommonBean.TYPE_RADIO_URL, index=self.index + 1);
            self.entityBeans.append(entity)
            self.repopulate(self.entityBeans, (self.current_list_model.get_size()))
            self.urlText.set_text("")
        pass
    def onRemoveRadio(self, *args):
        # Drop the selected station from both the bean list and the view.
        model, iter = self.widget.get_selection().get_selected()
        if iter:
            playlistBean = self.current_list_model.get_selected_bean()
            for i, entity in enumerate(self.entityBeans):
                if entity.path == playlistBean.path:
                    self.index = 0
                    del self.entityBeans[i]
            model.remove(iter)
    def getState(self):
        # Serializable state: the station beans and the current position.
        return [self.entityBeans, self.index]
    def setState(self, state):
        # Restore a previously saved [beans, index] pair and resume playback.
        self.entityBeans = state[0]
        self.index = state[1]
        if self.entityBeans and self.index < len(self.entityBeans):
            self.repopulate(self.entityBeans, self.index);
            self.playerCntr.playSong(self.entityBeans[self.index])
    def clear(self):
        self.current_list_model.clear()
    def onPlaySong(self, w, e):
        # Double click starts the selected station, after checking that the
        # url serves a stream rather than an html page.
        if is_double_click(e):
            LOG.info(w, e)
            playlistBean = self.current_list_model.get_selected_bean()
            playlistBean.type = CommonBean.TYPE_RADIO_URL
            #self.repopulate(self.entityBeans, playlistBean.index);
            self.index = playlistBean.index
            if not playlistBean.path.startswith("http"):
                return None
            LOG.info(playlistBean.path)
            remotefile = urllib2.urlopen(playlistBean.path)
            LOG.info("INFO", remotefile)
            if not remotefile.info() or remotefile.info()["Content-Type"].find("text") == -1:
                self.playerCntr.playSong(playlistBean)
            else:
                LOG.error("Can't play html page")
        return None
    def getNextSong(self):
        # Advance (wrapping at the end) and return the bean at the new position.
        self.index += 1
        if self.index >= len(self.entityBeans):
            self.index = 0
        playlistBean = self.current_list_model.getBeenByPosition(self.index)
        self.repopulate(self.entityBeans, playlistBean.index);
        return playlistBean
    def getPrevSong(self):
        # Step back (wrapping at the start) and return the bean at the new position.
        self.index -= 1
        if self.index < 0:
            self.index = len(self.entityBeans) - 1
        playlistBean = self.current_list_model.getBeenByPosition(self.index)
        self.repopulate(self.entityBeans, playlistBean.index);
        return playlistBean
    def setPlaylist(self, entityBeans):
        # Replace the whole list and start playing its first entry.
        self.entityBeans = entityBeans
        index = 0
        if entityBeans:
            self.playerCntr.playSong(entityBeans[index])
            self.repopulate(entityBeans, index);
    def repopulate(self, entityBeans, index):
        # Rebuild the view: alternating row colours, playing icon on *index*.
        self.current_list_model.clear()
        for i in range(len(entityBeans)):
            songBean = entityBeans[i]
            songBean.color = self.getBackgroundColour(i)
            songBean.name = songBean.getPlayListDescription()
            songBean.index = i
            if i == index:
                songBean.setIconPlaying()
                self.current_list_model.append(songBean)
            else:
                songBean.setIconNone()
                self.current_list_model.append(songBean)
    def getBackgroundColour(self, i):
        # Zebra striping for readability.
        if i % 2 :
            return "#F2F2F2"
        else:
            return "#FFFFE5"
| Python |
'''
Created on Mar 14, 2010
@author: ivan
'''
from foobnix.base import BaseController
class PrefController(BaseController):
    """Controller of the preferences window: shows it on demand and hides
    (rather than destroys) it when the user closes it."""
    def __init__(self, gx_preferences_window):
        self.pref = gx_preferences_window.get_widget("window")
        self.pref.connect("delete-event", self.hide)
    def show(self, sender):
        # *sender* is the widget that triggered the action (ignored).
        self.pref.show()
    def hide(self, *args):
        self.pref.hide()
        # returning True stops the default delete-event handler, so the
        # window is hidden instead of destroyed
        return True
| Python |
'''
Created on 01.06.2010
@author: ivan
'''
from foobnix.online.google.search import GoogleSearch
import time
from foobnix.util import LOG
def googleHelp(query):
    """Run a Google search for *query* and return the result titles (utf-8 strings)."""
    LOG.info("Not Found, wait for results from google ...")
    ask = query.encode('utf-8')
    LOG.info(ask)
    gs = GoogleSearch(ask, True, True)
    LOG.info(gs)
    gs.results_per_page = 10
    found = gs.get_results()
    LOG.info(found)
    titles = []
    for res in found:
        title = res.title.encode('utf8')
        time.sleep(0.05)  # small pause to stay polite to the remote service
        titles.append(str(title))
    # BUG FIX: the original appended into the same 'results' list it was
    # iterating over, growing it on every step (runaway loop), and the
    # collected titles were never returned to the caller.
    return titles
#LOG.info(googleHelp("madoonna 1")
def search():
    """Demo: run a 5-result Google search and log title/desc/url of each hit."""
    LOG.info("Begin")
    engine = GoogleSearch("quick and dirty")
    engine.results_per_page = 5
    hits = engine.get_results()
    LOG.info(hits)
    for hit in hits:
        LOG.info(hit.title.encode("utf8"))
        LOG.info(hit.desc.encode("utf8"))
        LOG.info(hit.url.encode("utf8"))
# Demo entry point: executes the sample search when the module runs.
search()
| Python |
'''
Created on Jun 10, 2010
@author: ivan
'''
from foobnix.util.configuration import FConfiguration
from foobnix.util import LOG
import os
import urllib
import thread
from foobnix.online.song_resource import update_song_path
from mutagen.id3 import ID3NoHeaderError, ID3, TIT2, COMM, TPE1, TENC, TDRC, \
TALB
def dowload_song_thread(song):
    # Cache one song in the background (fire-and-forget).
    thread.start_new_thread(download_song, (song,))
def save_song_thread(songs):
    # Download a batch of songs in the background.
    thread.start_new_thread(save_song, (songs,))
    #save_song(songs)
def save_as_song_thread(songs, path):
    # Download *songs* into the user-chosen directory in the background.
    LOG.debug("Begin download songs list", songs)
    thread.start_new_thread(save_as_song, (songs, path,))
def save_song(songs):
    # Download each song into the local cache directory and tag it.
    for song in songs:
        update_song_path(song)
        file = get_file_store_path(song)
        LOG.debug("Download song start", file)
        # NOTE(review): the guard checks the .tmp marker rather than the
        # final file, so a *finished* download is fetched again -- possibly
        # intended as an "in progress" marker; confirm.
        if not os.path.exists(file + ".tmp"):
            LOG.debug("Song PATH", song.path)
            # download to .tmp first, then rename, so an interrupted
            # transfer never leaves a half-written .mp3 behind
            urllib.urlretrieve(song.path, file + ".tmp")
            os.rename(file + ".tmp", file)
            update_id3_tags(song, file)
            LOG.debug("Download song finished", file)
        else:
            LOG.debug("Found file already dowloaded", file)
def save_as_song(songs, path):
    # Download each song into the user-chosen directory *path*, ensuring a
    # .mp3 file name, then tag it.
    for song in songs:
        update_song_path(song)
        if song.name.endswith(".mp3"):
            file = path + "/" + song.name
        else:
            file = path + "/" + song.name + ".mp3"
        LOG.debug("Download song start", file)
        # NOTE(review): same .tmp-marker guard as save_song -- a completed
        # download is re-fetched; confirm intent.
        if not os.path.exists(file + ".tmp"):
            # .tmp-then-rename avoids leaving half-written files behind
            urllib.urlretrieve(song.path, file + ".tmp")
            os.rename(file + ".tmp", file)
            update_id3_tags(song, file)
            LOG.debug("Download song finished", file)
        else:
            LOG.debug("Found file already dowloaded", file)
def update_id3_tags(song, path):
    """Write basic ID3 tags (title/artist/album/year plus a foobnix comment)
    into the mp3 at *path*; missing file is only logged."""
    if os.path.exists(str(path)):
        LOG.debug("Begin update", path)
        #audio = EasyID3(str(path))
        try:
            tags = ID3(path)
        except ID3NoHeaderError:
            # file has no tag block yet: start from an empty one
            LOG.info("Adding ID3 header;")
            tags = ID3()
        tags["TIT2"] = TIT2(encoding=3, text=song.getTitle())
        tags["COMM"] = COMM(encoding=3, lang="eng", desc='desc', text='Grab by www.foobnix.com')
        tags["TENC"] = TENC(encoding=3, text='www.foobnix.com')
        tags["TPE1"] = TPE1(encoding=3, text=song.getArtist())
        tags["TDRC"] = TDRC(encoding=3, text=song.year)
        tags["TALB"] = TALB(encoding=3, text=song.album)
        try:
            tags.save(path)
        # BUG FIX: was a bare 'except:' that also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour but only catch
        # real errors.
        except Exception:
            LOG.error("Tags can't be updated")
        LOG.debug("ID3 TAGS updated")
    else:
        LOG.error("ID3 FILE not found", path)
"""Dowload song proccess"""
def download_song(song):
if not FConfiguration().is_save_online:
LOG.debug("Saving (Caching) not enable")
return None
save_song([song])
pass
"""Determine file place"""
def get_file_store_path(song):
dir = FConfiguration().onlineMusicPath
if song.getArtist():
dir = dir + "/" + song.getArtist()
make_dirs(dir)
song = dir + "/" + song.name + ".mp3"
return song
def make_dirs(path):
    """Ensure that directory *path* exists, creating parents as needed."""
    if os.path.isdir(path):
        return
    os.makedirs(path)
| Python |
# -*- coding: utf-8 -*-
#TODO: This file is under heavy refactoring, don't touch anything you think is wrong
'''
Created on Mar 16, 2010
@author: ivan
'''
import os
import gtk
from gobject import GObject #@UnresolvedImport
from foobnix.directory.directory_controller import DirectoryCntr
from foobnix.model.entity import CommonBean
from foobnix.online.information_controller import InformationController
from foobnix.online.online_model import OnlineListModel
from foobnix.online.search_panel import SearchPanel
from foobnix.player.player_controller import PlayerController
from foobnix.util import LOG
from foobnix.util.configuration import FConfiguration
from foobnix.util.mouse_utils import is_double_click, is_rigth_click, \
is_left_click
from foobnix.online.google_utils import google_search_resutls
from foobnix.online.dowload_util import get_file_store_path, \
save_as_song_thread, save_song_thread
from foobnix.online.song_resource import update_song_path
from foobnix.cue.cue_reader import CueReader
class OnlineListCntr(GObject):
    """Controller of the online-search notebook: one tab per search, with
    play/save/add context actions and next/prev navigation for the player."""
    def __init__(self, gxMain, playerCntr):
        # NOTE(review): GObject.__init__ is never called here -- confirm this
        # is safe for whatever gobject machinery this controller relies on.
        self.gx_main = gxMain
        self.directoryCntr = None
        self.playerCntr = playerCntr
        self.search_panel = SearchPanel(gxMain)
        self.count = 0   # consecutive unplayable-song retries (see playBean)
        self.index = 0   # position of the current song in the active model
        self.online_notebook = gxMain.get_widget("online_notebook")
    def register_directory_cntr(self, directoryCntr):
        # Late wiring: the directory controller is created after this one.
        self.directoryCntr = directoryCntr
        self.info = InformationController(self.gx_main, self.playerCntr, directoryCntr, self.search_panel)
    def none(self, *a):
        # No-op gtk callback placeholder.
        return False
    def create_notebook_tab(self):
        # Build a scrolled, multi-select tree view backed by a fresh
        # OnlineListModel; that model becomes the active one.
        treeview = gtk.TreeView()
        treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
        treeview.set_rubber_banding(True)
        treeview.set_reorderable(True)
        model = OnlineListModel(treeview)
        self.current_list_model = model
        treeview.connect("drag-end", self.on_drag_end)
        treeview.connect("button-press-event", self.onPlaySong, model)
        treeview.show()
        window = gtk.ScrolledWindow()
        window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        window.add_with_viewport(treeview)
        window.show()
        return window
    def append_notebook_page(self, name):
        # Prepend a new tab labelled *name* (vertical label); drop the
        # oldest tab once the configured tab limit is exceeded.
        LOG.info("append new tab")
        label = gtk.Label(name)
        label.set_angle(90)
        label.show()
        event_box = gtk.EventBox()
        event_box.add(label)
        event_box.connect('event', self.on_tab_click)
        self.online_notebook.prepend_page(self.create_notebook_tab(), event_box)
        self.online_notebook.set_current_page(0)
        if self.online_notebook.get_n_pages() > FConfiguration().count_of_tabs:
            self.online_notebook.remove_page(self.online_notebook.get_n_pages() - 1)
    def on_tab_click(self, w, e):
        # Close the current tab on a right-button double click on its label.
        if e.type == gtk.gdk._2BUTTON_PRESS and e.button == 3:
            LOG.info("Close Current TAB")
            page = self.online_notebook.get_current_page()
            self.online_notebook.remove_page(page)
    def add_selected_to_playlist(self):
        # Copy the selection into the virtual playlist: a single song goes
        # alone, a folder-like row drags all of its child beans along.
        selected = self.current_list_model.get_selected_bean()
        LOG.info("SELECTED", selected)
        self.directoryCntr.set_active_view(DirectoryCntr.VIEW_VIRTUAL_LISTS)
        if selected.type == CommonBean.TYPE_MUSIC_URL:
            selected.parent = None
            self.directoryCntr.append_virtual([selected])
        elif selected.type in [CommonBean.TYPE_FOLDER, CommonBean.TYPE_GOOGLE_HELP, CommonBean.TYPE_RADIO_URL]:
            selected.type = CommonBean.TYPE_FOLDER
            results = []
            for i in xrange(self.current_list_model.get_size()):
                searchBean = self.current_list_model.getBeenByPosition(i)
                #LOG.info("Search", searchBean
                if str(searchBean.name) == str(selected.name):
                    searchBean.parent = None
                    results.append(searchBean)
                elif str(searchBean.parent) == str(selected.name):
                    results.append(searchBean)
                else:
                    LOG.info(str(searchBean.parent) + " != " + str(selected.name))
            self.directoryCntr.append_virtual(results)
        LOG.info("drug")
        self.directoryCntr.leftNoteBook.set_current_page(0)
    def on_drag_end(self, *ars):
        self.add_selected_to_playlist()
    def show_results(self, sender, query, beans, criteria=True):
        # Open a tab for *query*; fall back to google suggestions when empty.
        self.append_notebook_page(query)
        LOG.debug("Showing search results")
        if beans:
            if criteria:
                self.append([self.SearchCriteriaBeen(query)])
            self.append(beans)
        else:
            LOG.debug("Nothing found get try google suggests")
            self.google_suggests(query)
    def google_suggests(self, query):
        # Show alternative queries from google as clickable help rows.
        self.append([self.TextBeen(query + _(" not found on last.fm, wait for google suggests ..."))])
        suggests = google_search_resutls(query, 15)
        if suggests:
            for line in suggests:
                self.append([self.TextBeen(line, color="YELLOW", type=CommonBean.TYPE_GOOGLE_HELP)])
        else :
            self.append([self.TextBeen(_("Google not found suggests"))])
    def TextBeen(self, query, color="RED", type=CommonBean.TYPE_FOLDER):
        # Plain informational row (no playable path).
        return CommonBean(name=query, path=None, color=color, type=type)
    def SearchCriteriaBeen(self, name):
        # Green header row repeating the search criteria.
        return CommonBean(name=name, path=None, color="#4DCC33", type=CommonBean.TYPE_FOLDER)
    def SearchingCriteriaBean(self, name):
        # "Searching..." progress row.
        return CommonBean(name="Searching: " + name, path=None, color="GREEN", type=CommonBean.TYPE_FOLDER)
    def _populate_model(self, beans):
        # Expand .cue sheets into their tracks first, then append everything
        # else; returns the flat list of beans actually added to the model.
        normilized = []
        """first add cue files"""
        for bean in beans:
            LOG.info("append", bean, bean.path)
            if bean.path and bean.path.endswith(".cue"):
                cues = CueReader(bean.path).get_common_beans()
                for cue in cues:
                    self.current_list_model.append(cue)
                    normilized.append(cue)
        """end big file to the end"""
        for bean in beans:
            id3 = bean.getMp3TagsName()
            if id3:
                bean.id3, bean.name = bean.name, id3
            if not bean.path or (bean.path and not bean.path.endswith(".cue")):
                self.current_list_model.append(bean)
                normilized.append(bean)
        return normilized
    def append(self, beans):
        # Add beans without changing the playing row (-1 = none playing).
        self._populate_model(beans)
        self.current_list_model.repopulate(-1)
    def append_and_play(self, beans):
        # Add beans and immediately start playing the first one.
        beans = self._populate_model(beans)
        if not beans:
            return None
        self.index = 0
        self.current_list_model.repopulate(self.index)
        song = beans[self.index]
        LOG.info("PLAY", song)
        self.playerCntr.playSong(song)
    def on_play_selected(self, similar_songs_model):
        # Route the selected row: google-help rows re-run the search,
        # everything else is handed to playBean.
        playlistBean = similar_songs_model.get_selected_bean()
        LOG.info("play", playlistBean)
        LOG.info("type", playlistBean.type)
        if playlistBean.type == CommonBean.TYPE_MUSIC_URL:
            self.playBean(playlistBean)
        elif playlistBean.type == CommonBean.TYPE_GOOGLE_HELP:
            self.search_panel.set_text(playlistBean.name)
        else:
            self.playBean(playlistBean)
    def show_save_as_dialog(self, songs):
        # Ask for a target directory and download *songs* there in background.
        LOG.debug("Show Save As Song dialog", songs)
        chooser = gtk.FileChooserDialog(title=_("Choose directory to save song"), action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons=(gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        chooser.set_default_response(gtk.RESPONSE_OK)
        response = chooser.run()
        if response == gtk.RESPONSE_OK:
            path = chooser.get_filename()
            save_as_song_thread(songs, path)
        elif response == gtk.RESPONSE_CANCEL:
            LOG.info('Closed, no files selected')
        chooser.destroy()
    def show_info(self, songs):
        # Modal dialog listing "artist - title" for every selected song.
        if not songs:
            return None
        result = ""
        for song in songs:
            result += song.getArtist() + " - " + song.getTitle() + "\n"
        md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
            gtk.BUTTONS_CLOSE, result)
        md.run()
        md.destroy()
    def changed(self, a, type=None):
        # Selection-changed hook installed while the context menu is open:
        # re-select the range the user originally had highlighted.
        if self.paths:
            a.select_range(self.paths[0], self.paths[len(self.paths) - 1])
    def onPlaySong(self, w, e, similar_songs_model):
        # Mouse dispatcher: left click records the selection, double click
        # plays, right click opens the context menu.
        self.current_list_model = similar_songs_model
        songs = similar_songs_model.get_all_selected_beans()
        self.index = similar_songs_model.get_selected_index()
        #selected rows
        treeselection = similar_songs_model.widget.get_selection()
        model, self.paths = treeselection.get_selected_rows()
        #LOG.debug("Seletected index", self.index, songs)
        if is_left_click(e):
            self.paths = None
            LOG.debug("SAVE SELECTED", self.paths)
        elif is_double_click(e):
            self.paths = None
            self.on_play_selected(similar_songs_model);
        elif is_rigth_click(e):
            treeselection.connect('changed', self.changed, True)
            menu = gtk.Menu()
            play = gtk.ImageMenuItem(gtk.STOCK_MEDIA_PLAY)
            play.connect("activate", lambda * a: self.on_play_selected(similar_songs_model))
            menu.add(play)
            save = gtk.ImageMenuItem(gtk.STOCK_SAVE)
            save.connect("activate", lambda * a: save_song_thread(songs))
            menu.add(save)
            save_as = gtk.ImageMenuItem(gtk.STOCK_SAVE_AS)
            save_as.connect("activate", lambda * a: self.show_save_as_dialog(songs))
            menu.add(save_as)
            add = gtk.ImageMenuItem(gtk.STOCK_ADD)
            add.connect("activate", lambda * a: self.add_selected_to_playlist())
            menu.add(add)
            remove = gtk.ImageMenuItem(gtk.STOCK_REMOVE)
            remove.connect("activate", lambda * a: similar_songs_model.remove_selected())
            menu.add(remove)
            info = gtk.ImageMenuItem(gtk.STOCK_INFO)
            info.connect("activate", lambda * a: self.show_info(songs))
            menu.add(info)
            menu.show_all()
            menu.popup(None, None, None, e.button, e.time)
            treeselection.select_all()
    def playBean(self, playlistBean):
        # Resolve the bean's source and play it; skip up to 5 unresolvable
        # songs in a row before giving up.
        if playlistBean.type in [CommonBean.TYPE_MUSIC_URL, CommonBean.TYPE_MUSIC_FILE]:
            self.setSongResource(playlistBean)
            LOG.info("Song source path", playlistBean.path)
            if not playlistBean.path:
                self.count += 1
                LOG.info(self.count)
                playlistBean.setIconErorr()
                if self.count < 5 :
                    return self.playBean(self.getNextSong())
                return
            self.playerCntr.set_mode(PlayerController.MODE_ONLINE_LIST)
            self.playerCntr.playSong(playlistBean)
            self.current_list_model.repopulate(self.index)
    def setSongResource(self, playlistBean, update_song_info=True):
        # Fill in a playable path: prefer the local download cache, otherwise
        # resolve the url online; optionally refresh the info panel.
        if not playlistBean.path:
            if playlistBean.type == CommonBean.TYPE_MUSIC_URL:
                file = get_file_store_path(playlistBean)
                if os.path.isfile(file) and os.path.getsize(file) > 1:
                    LOG.info("Find file dowloaded")
                    playlistBean.path = file
                    playlistBean.type = CommonBean.TYPE_MUSIC_FILE
                    return True
                else:
                    LOG.info("FILE NOT FOUND IN SYSTEM")
                    #Seach by vk engine
                    update_song_path(playlistBean)
        if update_song_info:
            """retrive images and other info"""
            self.info.show_song_info(playlistBean)
    def nextBean(self):
        # Next bean honouring random/repeat settings; returns None past the
        # end of the list when repeat is off.
        if FConfiguration().isRandom:
            return self.current_list_model.get_random_bean()
        self.index += 1
        if self.index >= self.current_list_model.get_size():
            self.index = 0
            if not FConfiguration().isRepeat:
                self.index = self.current_list_model.get_size()
                return None
        return self.current_list_model.getBeenByPosition(self.index)
    def prevBean(self):
        # Previous bean honouring random mode.
        if FConfiguration().isRandom:
            return self.current_list_model.get_random_bean()
        self.index -= 1
        # NOTE(review): 'list' is computed but unused, and wrapping to
        # get_size() (not size-1) looks off by one -- confirm upstream.
        list = self.current_list_model.get_all_beans()
        if self.index <= 0:
            self.index = self.current_list_model.get_size()
        playlistBean = self.current_list_model.getBeenByPosition(self.index)
        return playlistBean
    #TODO: This file is under heavy refactoring, don't touch anything you think is wrong
    def getNextSong(self):
        # Skip one folder row if hit, resolve the song's source and return it.
        currentSong = self.nextBean()
        if(currentSong.type == CommonBean.TYPE_FOLDER):
            currentSong = self.nextBean()
        self.setSongResource(currentSong)
        LOG.info("PATH", currentSong.path)
        self.current_list_model.repopulate(currentSong.index);
        return currentSong
    def setState(self, state):
        #TODO
        pass
    def getState(self):
        #TODO
        pass
    def getPrevSong(self):
        # Mirror of getNextSong for backwards navigation.
        playlistBean = self.prevBean()
        if(playlistBean.type == CommonBean.TYPE_FOLDER):
            self.getPrevSong()
        self.setSongResource(playlistBean)
        self.current_list_model.repopulate(playlistBean.index);
        return playlistBean
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#
class SLError(Exception):
    """ Sponsored Links Error """
    # Base error type for the sponsored-links scraper.
    pass
class SLParseError(Exception):
    """
    Raised (in debug mode only) when a Google results page cannot be parsed.

    msg -- human-readable explanation of why parsing failed
    tag -- BeautifulSoup object closest to the failing markup
    """
    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def __str__(self):
        return self.msg

    def html(self):
        # pretty-printed markup of the offending tag, for diagnostics
        return self.tag.prettify()
# Sentinel for get_all_results(): pass GET_ALL_SLEEP_FUNCTION to request the
# default randomized inter-page delay instead of a custom callable.
GET_ALL_SLEEP_FUNCTION = object()
class SponsoredLink(object):
    """One sponsored (ad) result: title, target URL, display URL, description."""

    def __init__(self, title, url, display_url, desc):
        self.title, self.url = title, url
        self.display_url, self.desc = display_url, desc
class SponsoredLinks(object):
SEARCH_URL_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en"
NEXT_PAGE_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en"
SEARCH_URL_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en"
NEXT_PAGE_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en"
def __init__(self, query, random_agent=False, debug=False):
self.query = query
self.debug = debug
self.browser = Browser(debug=debug)
self._page = 0
self.eor = False
self.results_info = None
self._results_per_page = 10
if random_agent:
self.browser.set_random_user_agent()
@property
def num_results(self):
if not self.results_info:
page = self._get_results_page()
self.results_info = self._extract_info(page)
if self.results_info['total'] == 0:
self.eor = True
return self.results_info['total']
def _get_results_per_page(self):
return self._results_per_page
def _set_results_par_page(self, rpp):
self._results_per_page = rpp
results_per_page = property(_get_results_per_page, _set_results_par_page)
def get_results(self):
if self.eor:
return []
page = self._get_results_page()
info = self._extract_info(page)
if self.results_info is None:
self.results_info = info
if info['to'] == info['total']:
self.eor = True
results = self._extract_results(page)
if not results:
self.eor = True
return []
self._page += 1
return results
def _get_all_results_sleep_fn(self):
return random.random()*5 + 1 # sleep from 1 - 6 seconds
def get_all_results(self, sleep_function=None):
if sleep_function is GET_ALL_SLEEP_FUNCTION:
sleep_function = self._get_all_results_sleep_fn
if sleep_function is None:
sleep_function = lambda: None
ret_results = []
while True:
res = self.get_results()
if not res:
return ret_results
ret_results.extend(res)
return ret_results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _extract_info(self, soup):
empty_info = { 'from': 0, 'to': 0, 'total': 0 }
stats_span = soup.find('span', id='stats')
if not stats_span:
return empty_info
txt = ''.join(stats_span.findAll(text=True))
txt = txt.replace(',', '').replace(" ", ' ')
matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
if not matches:
return empty_info
return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}
def _get_results_page(self):
if self._page == 0:
if self._results_per_page == 10:
url = SponsoredLinks.SEARCH_URL_0
else:
url = SponsoredLinks.SEARCH_URL_1
else:
if self._results_per_page == 10:
url = SponsoredLinks.NEXT_PAGE_0
else:
url = SponsoredLinks.NEXT_PAGE_1
safe_url = url % { 'query': urllib.quote_plus(self.query),
'start': self._page * self._results_per_page,
'num': self._results_per_page }
try:
page = self.browser.get_page(safe_url)
except BrowserError, e:
raise SLError, "Failed getting %s: %s" % (e.url, e.error)
return BeautifulSoup(page)
def _extract_results(self, soup):
results = soup.findAll('div', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
if eres:
ret_res.append(eres)
return ret_res
def _extract_result(self, result):
title, url = self._extract_title_url(result)
display_url = self._extract_display_url(result) # Warning: removes 'cite' from the result
desc = self._extract_description(result)
if not title or not url or not display_url or not desc:
return None
return SponsoredLink(title, url, display_url, desc)
def _extract_title_url(self, result):
title_a = result.find('a')
if not title_a:
self._maybe_raise(SLParseError, "Title tag in sponsored link was not found", result)
return None, None
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.search(r'q=(http[^&]+)&', url)
if not match:
self._maybe_raise(SLParseError, "URL inside a sponsored link was not found", result)
return None, None
url = urllib.unquote(match.group(1))
return title, url
def _extract_display_url(self, result):
cite = result.find('cite')
if not cite:
self._maybe_raise(SLParseError, "<cite> not found inside result", result)
return None
return ''.join(cite.findAll(text=True))
def _extract_description(self, result):
cite = result.find('cite')
if not cite:
return None
cite.extract()
desc_div = result.find('div', {'class': 'line23'})
if not desc_div:
self._maybe_raise(ParseError, "Description tag not found in sponsored link", result)
return None
desc_strs = desc_div.findAll(text=True)[0:-1]
desc = ''.join(desc_strs)
desc = desc.replace("\n", " ")
desc = desc.replace(" ", " ")
return self._html_unescape(desc)
def _html_unescape(self, str):
def entity_replacer(m):
entity = m.group(1)
if entity in name2codepoint:
return unichr(name2codepoint[entity])
else:
return m.group(0)
def ascii_replacer(m):
cp = int(m.group(1))
if cp <= 255:
return unichr(cp)
else:
return m.group(0)
s = re.sub(r'&#(\d+);', ascii_replacer, str, re.U)
return re.sub(r'&([^;]+);', entity_replacer, s, re.U)
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class SearchError(Exception):
    """Root of the Google Search exception hierarchy."""
class ParseError(SearchError):
    """Parse failure in Google results (raised only in debug mode).

    msg -- why parsing failed
    tag -- the BeautifulSoup tag closest to the failure
    """

    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def html(self):
        """Pretty-printed HTML of the offending tag."""
        return self.tag.prettify()

    def __str__(self):
        return self.msg
class SearchResult:
    """Container for one organic Google search hit."""

    def __init__(self, title, url, desc):
        self.title, self.url, self.desc = title, url, desc

    def __str__(self):
        return 'Google Search Result: "%s"' % self.title
class GoogleSearch(object):
SEARCH_URL_0 = "http://www.google.com/search?hl=en&q=%(query)s&btnG=Google+Search"
NEXT_PAGE_0 = "http://www.google.com/search?hl=en&q=%(query)s&start=%(start)d"
SEARCH_URL_1 = "http://www.google.com/search?hl=en&q=%(query)s&num=%(num)d&btnG=Google+Search"
NEXT_PAGE_1 = "http://www.google.com/search?hl=en&q=%(query)s&num=%(num)d&start=%(start)d"
def __init__(self, query, random_agent=False, debug=False):
self.query = query
self.debug = debug
self.browser = Browser(debug=debug)
self.results_info = None
self.eor = False # end of results
self._page = 0
self._results_per_page = 10
self._last_from = 0
if random_agent:
self.browser.set_random_user_agent()
@property
def num_results(self):
if not self.results_info:
page = self._get_results_page()
self.results_info = self._extract_info(page)
if self.results_info['total'] == 0:
self.eor = True
return self.results_info['total']
def _get_page(self):
return self._page
def _set_page(self, page):
self._page = page
page = property(_get_page, _set_page)
def _get_results_per_page(self):
return self._results_per_page
def _set_results_par_page(self, rpp):
self._results_per_page = rpp
results_per_page = property(_get_results_per_page, _set_results_par_page)
def get_results(self):
""" Gets a page of results """
#if self.eor:
# return []
page = self._get_results_page()
search_info = self._extract_info(page)
#if not self.results_info:
# self.results_info = search_info
# if self.num_results == 0:
# self.eor = True
# return []
results = self._extract_results(page)
return results
if not results:
self.eor = True
return []
if self._page > 0 and search_info['from'] == self._last_from:
self.eor = True
return []
if search_info['to'] == search_info['total']:
self.eor = True
self._page += 1
self._last_from = search_info['from']
return results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _get_results_page(self):
if self._page == 0:
if self._results_per_page == 10:
url = GoogleSearch.SEARCH_URL_0
else:
url = GoogleSearch.SEARCH_URL_1
else:
if self._results_per_page == 10:
url = GoogleSearch.NEXT_PAGE_0
else:
url = GoogleSearch.NEXT_PAGE_1
safe_url = url % { 'query': urllib.quote_plus(self.query),
'start': self._page * self._results_per_page,
'num': self._results_per_page }
try:
page = self.browser.get_page(safe_url)
except BrowserError, e:
raise SearchError, "Failed getting %s: %s" % (e.url, e.error)
return BeautifulSoup(page)
def _extract_info(self, soup):
empty_info = {'from': 0, 'to': 0, 'total': 0}
div_ssb = soup.find('div', id='ssb')
if not div_ssb:
self._maybe_raise(ParseError, "Div with number of results was not found on Google search page", soup)
return empty_info
p = div_ssb.find('p')
if not p:
self._maybe_raise(ParseError, """<p> tag within <div id="ssb"> was not found on Google search page""", soup)
return empty_info
txt = ''.join(p.findAll(text=True))
txt = txt.replace(',', '')
matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt, re.U)
if not matches:
return empty_info
return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}
def _extract_results(self, soup):
results = soup.findAll('li', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
if eres:
ret_res.append(eres)
return ret_res
def _extract_result(self, result):
title, url = self._extract_title_url(result)
desc = self._extract_description(result)
if not title or not url or not desc:
return None
return SearchResult(title, url, desc)
def _extract_title_url(self, result):
#title_a = result.find('a', {'class': re.compile(r'\bl\b')})
title_a = result.find('a')
if not title_a:
self._maybe_raise(ParseError, "Title tag in Google search result was not found", result)
return None, None
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.match(r'/url\?q=(http[^&]+)&', url)
if match:
url = urllib.unquote(match.group(1))
return title, url
def _extract_description(self, result):
desc_div = result.find('div', {'class': re.compile(r'\bs\b')})
if not desc_div:
self._maybe_raise(ParseError, "Description tag in Google search result was not found", result)
return None
desc_strs = []
def looper(tag):
if not tag: return
for t in tag:
try:
if t.name == 'br': break
except AttributeError:
pass
try:
desc_strs.append(t.string)
except AttributeError:
desc_strs.append(t)
looper(desc_div)
looper(desc_div.find('wbr')) # BeautifulSoup does not self-close <wbr>
desc = ''.join(s for s in desc_strs if s)
return self._html_unescape(desc)
def _html_unescape(self, str):
def entity_replacer(m):
entity = m.group(1)
if entity in name2codepoint:
return unichr(name2codepoint[entity])
else:
return m.group(0)
def ascii_replacer(m):
cp = int(m.group(1))
if cp <= 255:
return unichr(cp)
else:
return m.group(0)
s = re.sub(r'&#(\d+);', ascii_replacer, str, re.U)
return re.sub(r'&([^;]+);', entity_replacer, s, re.U)
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#
class SLError(Exception):
    """Raised when a sponsored-links results page cannot be fetched."""
class SLParseError(Exception):
    """Parse failure in Google sponsored-links results (debug mode only).

    msg -- human-readable explanation of why parsing failed
    tag -- the BeautifulSoup tag most relevant to the failure
    """

    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def html(self):
        """Pretty-printed HTML of the offending tag."""
        return self.tag.prettify()

    def __str__(self):
        return self.msg
# Sentinel for get_all_results(): pass GET_ALL_SLEEP_FUNCTION to request the
# default randomized inter-page delay instead of a custom callable.
GET_ALL_SLEEP_FUNCTION = object()
class SponsoredLink(object):
    """One sponsored (ad) result: title, target URL, display URL, description."""

    def __init__(self, title, url, display_url, desc):
        self.title, self.url = title, url
        self.display_url, self.desc = display_url, desc
class SponsoredLinks(object):
SEARCH_URL_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en"
NEXT_PAGE_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en"
SEARCH_URL_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en"
NEXT_PAGE_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en"
def __init__(self, query, random_agent=False, debug=False):
self.query = query
self.debug = debug
self.browser = Browser(debug=debug)
self._page = 0
self.eor = False
self.results_info = None
self._results_per_page = 10
if random_agent:
self.browser.set_random_user_agent()
@property
def num_results(self):
if not self.results_info:
page = self._get_results_page()
self.results_info = self._extract_info(page)
if self.results_info['total'] == 0:
self.eor = True
return self.results_info['total']
def _get_results_per_page(self):
return self._results_per_page
def _set_results_par_page(self, rpp):
self._results_per_page = rpp
results_per_page = property(_get_results_per_page, _set_results_par_page)
def get_results(self):
if self.eor:
return []
page = self._get_results_page()
info = self._extract_info(page)
if self.results_info is None:
self.results_info = info
if info['to'] == info['total']:
self.eor = True
results = self._extract_results(page)
if not results:
self.eor = True
return []
self._page += 1
return results
def _get_all_results_sleep_fn(self):
return random.random()*5 + 1 # sleep from 1 - 6 seconds
def get_all_results(self, sleep_function=None):
if sleep_function is GET_ALL_SLEEP_FUNCTION:
sleep_function = self._get_all_results_sleep_fn
if sleep_function is None:
sleep_function = lambda: None
ret_results = []
while True:
res = self.get_results()
if not res:
return ret_results
ret_results.extend(res)
return ret_results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _extract_info(self, soup):
empty_info = { 'from': 0, 'to': 0, 'total': 0 }
stats_span = soup.find('span', id='stats')
if not stats_span:
return empty_info
txt = ''.join(stats_span.findAll(text=True))
txt = txt.replace(',', '').replace(" ", ' ')
matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
if not matches:
return empty_info
return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}
def _get_results_page(self):
if self._page == 0:
if self._results_per_page == 10:
url = SponsoredLinks.SEARCH_URL_0
else:
url = SponsoredLinks.SEARCH_URL_1
else:
if self._results_per_page == 10:
url = SponsoredLinks.NEXT_PAGE_0
else:
url = SponsoredLinks.NEXT_PAGE_1
safe_url = url % { 'query': urllib.quote_plus(self.query),
'start': self._page * self._results_per_page,
'num': self._results_per_page }
try:
page = self.browser.get_page(safe_url)
except BrowserError, e:
raise SLError, "Failed getting %s: %s" % (e.url, e.error)
return BeautifulSoup(page)
def _extract_results(self, soup):
results = soup.findAll('div', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
if eres:
ret_res.append(eres)
return ret_res
def _extract_result(self, result):
title, url = self._extract_title_url(result)
display_url = self._extract_display_url(result) # Warning: removes 'cite' from the result
desc = self._extract_description(result)
if not title or not url or not display_url or not desc:
return None
return SponsoredLink(title, url, display_url, desc)
def _extract_title_url(self, result):
title_a = result.find('a')
if not title_a:
self._maybe_raise(SLParseError, "Title tag in sponsored link was not found", result)
return None, None
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.search(r'q=(http[^&]+)&', url)
if not match:
self._maybe_raise(SLParseError, "URL inside a sponsored link was not found", result)
return None, None
url = urllib.unquote(match.group(1))
return title, url
def _extract_display_url(self, result):
cite = result.find('cite')
if not cite:
self._maybe_raise(SLParseError, "<cite> not found inside result", result)
return None
return ''.join(cite.findAll(text=True))
def _extract_description(self, result):
cite = result.find('cite')
if not cite:
return None
cite.extract()
desc_div = result.find('div', {'class': 'line23'})
if not desc_div:
self._maybe_raise(ParseError, "Description tag not found in sponsored link", result)
return None
desc_strs = desc_div.findAll(text=True)[0:-1]
desc = ''.join(desc_strs)
desc = desc.replace("\n", " ")
desc = desc.replace(" ", " ")
return self._html_unescape(desc)
def _html_unescape(self, str):
def entity_replacer(m):
entity = m.group(1)
if entity in name2codepoint:
return unichr(name2codepoint[entity])
else:
return m.group(0)
def ascii_replacer(m):
cp = int(m.group(1))
if cp <= 255:
return unichr(cp)
else:
return m.group(0)
s = re.sub(r'&#(\d+);', ascii_replacer, str, re.U)
return re.sub(r'&([^;]+);', entity_replacer, s, re.U)
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class SearchError(Exception):
    """Root of the Google Search exception hierarchy."""
class ParseError(SearchError):
    """Parse failure in Google results (raised only in debug mode).

    msg -- why parsing failed
    tag -- the BeautifulSoup tag closest to the failure
    """

    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def html(self):
        """Pretty-printed HTML of the offending tag."""
        return self.tag.prettify()

    def __str__(self):
        return self.msg
class SearchResult:
    """Container for one organic Google search hit."""

    def __init__(self, title, url, desc):
        self.title, self.url, self.desc = title, url, desc

    def __str__(self):
        return 'Google Search Result: "%s"' % self.title
class GoogleSearch(object):
SEARCH_URL_0 = "http://www.google.com/search?hl=en&q=%(query)s&btnG=Google+Search"
NEXT_PAGE_0 = "http://www.google.com/search?hl=en&q=%(query)s&start=%(start)d"
SEARCH_URL_1 = "http://www.google.com/search?hl=en&q=%(query)s&num=%(num)d&btnG=Google+Search"
NEXT_PAGE_1 = "http://www.google.com/search?hl=en&q=%(query)s&num=%(num)d&start=%(start)d"
def __init__(self, query, random_agent=False, debug=False):
self.query = query
self.debug = debug
self.browser = Browser(debug=debug)
self.results_info = None
self.eor = False # end of results
self._page = 0
self._results_per_page = 10
self._last_from = 0
if random_agent:
self.browser.set_random_user_agent()
@property
def num_results(self):
if not self.results_info:
page = self._get_results_page()
self.results_info = self._extract_info(page)
if self.results_info['total'] == 0:
self.eor = True
return self.results_info['total']
def _get_page(self):
return self._page
def _set_page(self, page):
self._page = page
page = property(_get_page, _set_page)
def _get_results_per_page(self):
return self._results_per_page
def _set_results_par_page(self, rpp):
self._results_per_page = rpp
results_per_page = property(_get_results_per_page, _set_results_par_page)
def get_results(self):
""" Gets a page of results """
#if self.eor:
# return []
page = self._get_results_page()
search_info = self._extract_info(page)
#if not self.results_info:
# self.results_info = search_info
# if self.num_results == 0:
# self.eor = True
# return []
results = self._extract_results(page)
return results
if not results:
self.eor = True
return []
if self._page > 0 and search_info['from'] == self._last_from:
self.eor = True
return []
if search_info['to'] == search_info['total']:
self.eor = True
self._page += 1
self._last_from = search_info['from']
return results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _get_results_page(self):
if self._page == 0:
if self._results_per_page == 10:
url = GoogleSearch.SEARCH_URL_0
else:
url = GoogleSearch.SEARCH_URL_1
else:
if self._results_per_page == 10:
url = GoogleSearch.NEXT_PAGE_0
else:
url = GoogleSearch.NEXT_PAGE_1
safe_url = url % { 'query': urllib.quote_plus(self.query),
'start': self._page * self._results_per_page,
'num': self._results_per_page }
try:
page = self.browser.get_page(safe_url)
except BrowserError, e:
raise SearchError, "Failed getting %s: %s" % (e.url, e.error)
return BeautifulSoup(page)
def _extract_info(self, soup):
empty_info = {'from': 0, 'to': 0, 'total': 0}
div_ssb = soup.find('div', id='ssb')
if not div_ssb:
self._maybe_raise(ParseError, "Div with number of results was not found on Google search page", soup)
return empty_info
p = div_ssb.find('p')
if not p:
self._maybe_raise(ParseError, """<p> tag within <div id="ssb"> was not found on Google search page""", soup)
return empty_info
txt = ''.join(p.findAll(text=True))
txt = txt.replace(',', '')
matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt, re.U)
if not matches:
return empty_info
return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}
def _extract_results(self, soup):
results = soup.findAll('li', {'class': 'g'})
ret_res = []
for result in results:
eres = self._extract_result(result)
if eres:
ret_res.append(eres)
return ret_res
def _extract_result(self, result):
title, url = self._extract_title_url(result)
desc = self._extract_description(result)
if not title or not url or not desc:
return None
return SearchResult(title, url, desc)
def _extract_title_url(self, result):
#title_a = result.find('a', {'class': re.compile(r'\bl\b')})
title_a = result.find('a')
if not title_a:
self._maybe_raise(ParseError, "Title tag in Google search result was not found", result)
return None, None
title = ''.join(title_a.findAll(text=True))
title = self._html_unescape(title)
url = title_a['href']
match = re.match(r'/url\?q=(http[^&]+)&', url)
if match:
url = urllib.unquote(match.group(1))
return title, url
def _extract_description(self, result):
desc_div = result.find('div', {'class': re.compile(r'\bs\b')})
if not desc_div:
self._maybe_raise(ParseError, "Description tag in Google search result was not found", result)
return None
desc_strs = []
def looper(tag):
if not tag: return
for t in tag:
try:
if t.name == 'br': break
except AttributeError:
pass
try:
desc_strs.append(t.string)
except AttributeError:
desc_strs.append(t)
looper(desc_div)
looper(desc_div.find('wbr')) # BeautifulSoup does not self-close <wbr>
desc = ''.join(s for s in desc_strs if s)
return self._html_unescape(desc)
def _html_unescape(self, str):
def entity_replacer(m):
entity = m.group(1)
if entity in name2codepoint:
return unichr(name2codepoint[entity])
else:
return m.group(0)
def ascii_replacer(m):
cp = int(m.group(1))
if cp <= 255:
return unichr(cp)
else:
return m.group(0)
s = re.sub(r'&#(\d+);', ascii_replacer, str, re.U)
return re.sub(r'&([^;]+);', entity_replacer, s, re.U)
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import random
import socket
import urllib
import urllib2
import httplib
# User-Agent strings cycled by Browser.set_random_user_agent() so scraping
# traffic looks like ordinary desktop browsers.
BROWSERS = (
    # Top most popular browsers in my access.log on 2009.02.12
    # tail -50000 access.log |
    #  awk -F\" '{B[$6]++} END { for (b in B) { print B[b] ": " b } }' |
    #  sort -rn |
    #  head -20
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.6) Gecko/2009011912 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.48 Safari/525.19',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648)',
    'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.5) Gecko/2008121621 Ubuntu/8.04 (hardy) Firefox/3.0.5',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-us) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/3.2.1 Safari/525.27.1',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
)

# Socket timeout in seconds, applied by PoolHTTPConnection.connect().
TIMEOUT = 5 # socket timeout
class BrowserError(Exception):
    """Signals a failed page fetch; carries the URL and the underlying error."""

    def __init__(self, url, error):
        self.url, self.error = url, error
class PoolHTTPConnection(httplib.HTTPConnection):
    # httplib.HTTPConnection.connect re-implemented so a socket timeout
    # (module-level TIMEOUT) can be set before connecting.
    def connect(self):
        """Connect to the host and port specified in __init__."""
        msg = "getaddrinfo returns an empty list"
        # Try every resolved address until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.settimeout(TIMEOUT)  # the whole point of this subclass
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue  # try the next resolved address
            break
        if not self.sock:
            # every address failed; msg holds the last socket.error
            raise socket.error, msg
class PoolHTTPHandler(urllib2.HTTPHandler):
    # urllib2 handler that routes HTTP opens through PoolHTTPConnection so
    # requests pick up the module-level socket TIMEOUT.
    def http_open(self, req):
        return self.do_open(PoolHTTPConnection, req)
class Browser(object):
    """Thin urllib2 wrapper that presents real-browser request headers."""
    # NOTE(review): use_pool is accepted but never used -- confirm intent.
    def __init__(self, user_agent=BROWSERS[0], debug=False, use_pool=False):
        self.headers = {
            'User-Agent': user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5'
        }
        self.debug = debug
    def get_page(self, url, data=None):
        """GET (or POST, when *data* is given) *url* and return the body.

        All failures are normalized into BrowserError; KeyboardInterrupt
        is deliberately re-raised untouched.
        """
        handlers = [PoolHTTPHandler]
        opener = urllib2.build_opener(*handlers)
        if data: data = urllib.urlencode(data)
        request = urllib2.Request(url, data, self.headers)
        try:
            response = opener.open(request)
            return response.read()
        except (urllib2.HTTPError, urllib2.URLError), e:
            raise BrowserError(url, str(e))
        except (socket.error, socket.sslerror), msg:
            raise BrowserError(url, msg)
        except socket.timeout, e:
            raise BrowserError(url, "timeout")
        except KeyboardInterrupt:
            raise
        except:
            # deliberate catch-all: any other failure is surfaced uniformly
            raise BrowserError(url, "unknown error")
    def set_random_user_agent(self):
        """Pick a random User-Agent from BROWSERS; returns the chosen string."""
        self.headers['User-Agent'] = random.choice(BROWSERS)
        return self.headers['User-Agent']
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sets/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class GSError(Exception):
    """Raised when a Google Sets page cannot be fetched."""
class GSParseError(Exception):
    """Parse failure in Google Sets results (raised only in debug mode).

    msg -- why parsing failed
    tag -- the BeautifulSoup tag closest to the failure
    """

    def __init__(self, msg, tag):
        self.msg = msg
        self.tag = tag

    def html(self):
        """Pretty-printed HTML of the offending tag."""
        return self.tag.prettify()

    def __str__(self):
        return self.msg
# Selector values for GoogleSets.get_results(set_type=...).
LARGE_SET = 1  # "Large Set" button
SMALL_SET = 2  # "Small Set (15 items or fewer)" button
class GoogleSets(object):
URL_LARGE = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Large+Set"
URL_SMALL = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Small+Set+(15+items+or+fewer)"
def __init__(self, items, random_agent=False, debug=False):
self.items = items
self.debug = debug
self.browser = Browser(debug=debug)
if random_agent:
self.browser.set_random_user_agent()
def get_results(self, set_type=SMALL_SET):
page = self._get_results_page(set_type)
results = self._extract_results(page)
return results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _get_results_page(self, set_type):
if set_type == LARGE_SET:
url = GoogleSets.URL_LARGE
else:
url = GoogleSets.URL_SMALL
safe_items = [urllib.quote_plus(i) for i in self.items]
blank_items = 5 - len(safe_items)
if blank_items > 0:
safe_items += ['']*blank_items
safe_url = url % tuple(safe_items)
try:
page = self.browser.get_page(safe_url)
except BrowserError, e:
raise GSError, "Failed getting %s: %s" % (e.url, e.error)
return BeautifulSoup(page)
def _extract_results(self, soup):
a_links = soup.findAll('a', href=re.compile('/search'))
ret_res = [a.string for a in a_links]
return ret_res
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# A Google Python library:
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Distributed under MIT license:
#
# Copyright (c) 2009 Peteris Krumins
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import random
import socket
import urllib
import urllib2
import httplib
# User-Agent strings cycled by Browser.set_random_user_agent() so scraping
# traffic looks like ordinary desktop browsers.
BROWSERS = (
    # Top most popular browsers in my access.log on 2009.02.12
    # tail -50000 access.log |
    #  awk -F\" '{B[$6]++} END { for (b in B) { print B[b] ": " b } }' |
    #  sort -rn |
    #  head -20
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.0.6) Gecko/2009011912 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.48 Safari/525.19',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30; .NET CLR 3.0.04506.648)',
    'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.5) Gecko/2008121621 Ubuntu/8.04 (hardy) Firefox/3.0.5',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-us) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/3.2.1 Safari/525.27.1',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
)

# Socket timeout in seconds, applied by PoolHTTPConnection.connect().
TIMEOUT = 5 # socket timeout
class BrowserError(Exception):
    """Raised when fetching a URL fails for any reason.

    Attributes:
        url   -- the URL that was being fetched
        error -- a string (or exception message) describing the failure
    """
    def __init__(self, url, error):
        # Bug fix: the original never called Exception.__init__, so
        # str(exc) produced an empty string.  Pass a readable message
        # through while keeping the url/error attributes callers use.
        Exception.__init__(self, "%s: %s" % (url, error))
        self.url = url
        self.error = error
class PoolHTTPConnection(httplib.HTTPConnection):
    """HTTPConnection variant whose sockets use the module-level TIMEOUT
    instead of blocking indefinitely on a dead host."""

    def connect(self):
        """Connect to the host and port specified in __init__."""
        msg = "getaddrinfo returns an empty list"
        # Try every address resolved for the host (IPv4/IPv6) until one
        # accepts the connection; keep the last error for re-raising.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                # Bound connect/read time so a dead host fails fast.
                self.sock.settimeout(TIMEOUT)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed: re-raise the last socket
            # error (or the getaddrinfo placeholder message).
            raise socket.error, msg
class PoolHTTPHandler(urllib2.HTTPHandler):
    """urllib2 handler that routes HTTP requests through
    PoolHTTPConnection, giving every request a socket timeout."""

    def http_open(self, req):
        # Delegate to urllib2's machinery, substituting our connection
        # class for the default one.
        return self.do_open(PoolHTTPConnection, req)
class Browser(object):
    """Minimal HTTP client that impersonates a desktop web browser via
    its request headers."""

    def __init__(self, user_agent=BROWSERS[0], debug=False, use_pool=False):
        # NOTE(review): use_pool is accepted but never stored or used in
        # this class -- confirm whether callers rely on it.
        self.headers = {
            'User-Agent': user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5'
        }
        self.debug = debug

    def get_page(self, url, data=None):
        """Fetch *url* and return the response body as a string.

        If *data* is given it is form-encoded, which turns the request
        into a POST.  Every failure mode is normalized into a
        BrowserError carrying the url and a description.
        """
        handlers = [PoolHTTPHandler]
        opener = urllib2.build_opener(*handlers)
        if data: data = urllib.urlencode(data)
        request = urllib2.Request(url, data, self.headers)
        try:
            response = opener.open(request)
            return response.read()
        except (urllib2.HTTPError, urllib2.URLError), e:
            raise BrowserError(url, str(e))
        except (socket.error, socket.sslerror), msg:
            raise BrowserError(url, msg)
        except socket.timeout, e:
            raise BrowserError(url, "timeout")
        except KeyboardInterrupt:
            # Let Ctrl-C propagate instead of masking it.
            raise
        except:
            # Last-resort catch-all: surface anything unexpected as a
            # BrowserError rather than leaking internals to the caller.
            raise BrowserError(url, "unknown error")

    def set_random_user_agent(self):
        """Switch to a randomly chosen User-Agent from BROWSERS and
        return the chosen string."""
        self.headers['User-Agent'] = random.choice(BROWSERS)
        return self.headers['User-Agent']
# --------------------------------------------------------------------
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# A Google Python library:
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Distributed under MIT license:
#
# Copyright (c) 2009 Peteris Krumins
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# --------------------------------------------------------------------
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2007, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.6"
__copyright__ = "Copyright (c) 2004-2008 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
#This hack makes Beautiful Soup able to parse XML with namespaces:
#the default sgmllib tag regexp rejects ':' inside tag names, so we
#swap in one that accepts it (plus '-', '_' and '.').
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')

# Encoding used when rendering a parse tree back to a byte string.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
class PageElement:
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text).

    Every element keeps five links into the tree: parent, the
    document-order neighbours (previous/next) and the sibling
    neighbours (previousSibling/nextSibling).  The mutation methods
    below maintain all five invariants together, so statement order
    matters throughout."""

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            # We are being appended: the parent's current last child
            # becomes our previous sibling.
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        """Replace this element with *replaceWith* at the same position
        under the same parent."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        # Stitch the sibling chain closed around the hole we left.
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        """Insert *newChild* into self.contents at *position*, updating
        every navigation link on both sides of the insertion point."""
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            # Plain strings are promoted to tree-aware text nodes.
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                index = self.find(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            # document-order predecessor is the deepest descendant of
            # the new left-hand sibling.
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # The document-order successor is the next sibling of the
            # closest ancestor that has one.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        # Shared "first match or None" wrapper around the findAll-style
        # methods above.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    def nextGenerator(self):
        i = self
        while i:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i:
            i = i.parent
            yield i

    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        """Replace the %SOUP-ENCODING% placeholder with *encoding*
        (defaulting to utf-8)."""
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a byte string in some encoding, or to
        Unicode when *encoding* is None."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A text node: a unicode string that also participates in the
    parse-tree navigation provided by PageElement."""

    def __getnewargs__(self):
        # Lets copy/pickle rebuild the subclass from its encoded text.
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def __unicode__(self):
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # Returns an encoded byte string, or the unicode value itself
        # when encoding is None.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section; rendered wrapped in a <![CDATA[...]]> marker."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # Render the underlying text first, then wrap it.
        inner = NavigableString.__str__(self, encoding)
        return "<![CDATA[" + inner + "]]>"
class ProcessingInstruction(NavigableString):
    """A processing instruction (e.g. <?xml ...?>); honors the
    %SOUP-ENCODING% placeholder when rendering."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        if "%SOUP-ENCODING%" in text:
            # Substitute the declared output encoding into the PI body.
            text = self.substituteEncoding(text, encoding)
        return "<?" + self.toEncoding(text, encoding) + "?>"
class Comment(NavigableString):
    """An HTML/XML comment; rendered wrapped in <!-- -->."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<!--" + body + "-->"
class Declaration(NavigableString):
    """A declaration (e.g. a DOCTYPE); rendered wrapped in <!...>."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<!" + body + ">"
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""

    def _invert(h):
        "Cheap function to invert a hash."
        i = {}
        for k,v in h.items():
            i[v] = k
        return i

    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }

    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)

    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        x = match.group(1)
        if self.convertHTMLEntities and x in name2codepoint:
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
            if self.convertXMLEntities:
                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
            else:
                return u'&%s;' % x
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        elif self.escapeUnrecognizedEntities:
            # NOTE(review): both branches below return the same value;
            # presumably the escaped form was meant to differ (&amp;...).
            return u'&%s;' % x
        else:
            return u'&%s;' % x

    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."

        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs == None:
            attrs = []
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities

        # Convert any HTML, XML, or numeric entities in the attribute values.
        # (Python 2 tuple-parameter lambda syntax.)
        convert = lambda(k, val): (k,
                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                                          self._convertEntities,
                                          val))
        self.attrs = map(convert, self.attrs)

    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)

    def has_key(self, key):
        return self._getAttrMap().has_key(key)

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self._getAttrMap()
        self.attrMap[key] = value
        # Keep the ordered attrs list in sync with the map.
        found = False
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        self._getAttrMap()[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        for item in self.attrs:
            if item[0] == key:
                self.attrs.remove(item)
                #We don't break because bad HTML can define the same
                #attribute multiple times.
            self._getAttrMap()
            if self.attrMap.has_key(key):
                del self.attrMap[key]

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return apply(self.findAll, args, kwargs)

    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        # tag.fooTag and tag.foo are both shortcuts for tag.find('foo').
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)

    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.

        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True

    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other

    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)

    def __unicode__(self):
        return self.__str__(None)

    # Matches angle brackets, and ampersands that are not already part
    # of a recognized entity.
    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           + ")")

    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.

        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""

        encodedName = self.toEncoding(self.name, encoding)

        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isString(val):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)

                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the root) render only their contents.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s

    def decompose(self):
        """Recursively destroys the contents of this tree."""
        contents = [i for i in self.contents]
        for i in contents:
            if isinstance(i, Tag):
                i.decompose()
            else:
                i.extract()
        self.extract()

    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return self.__str__(encoding, True)

    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string."""
        s=[]
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)

    #Soup methods

    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find

    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria.  You can specify the name of the Tag and any
        attributes you want the Tag to have.

        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll

    # Pre-3.x compatibility methods
    first = find
    fetch = findAll

    def fetchText(self, text=None, recursive=True, limit=None):
        return self.findAll(text=text, recursive=recursive, limit=limit)

    def firstText(self, text=None, recursive=True):
        return self.find(text=text, recursive=recursive)

    #Private methods

    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # NOTE(review): getattr with no default would normally raise,
        # but Tag.__getattr__ intercepts the miss and returns a find()
        # result (usually None), so the truth test still works.
        if not getattr(self, 'attrMap'):
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap

    #Generator methods
    def childGenerator(self):
        # Yields direct children only, in order.
        for i in range(0, len(self.contents)):
            yield self.contents[i]
        raise StopIteration

    def recursiveChildGenerator(self):
        # Depth-first traversal of all descendants, using an explicit
        # stack of (tag, resume-index) pairs.
        stack = [(self, 0)]
        while stack:
            tag, start = stack.pop()
            if isinstance(tag, Tag):
                for i in range(start, len(tag.contents)):
                    a = tag.contents[i]
                    yield a
                    if isinstance(a, Tag) and tag.contents:
                        if i < len(tag.contents) - 1:
                            stack.append((tag, i+1))
                        stack.append((a, 0))
                        break
        raise StopIteration
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        if isString(attrs):
            # A bare string for attrs is shorthand for matching the
            # CSS class.
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Match a tag (or a tag name plus attribute list) against this
        strainer's name and attribute criteria.  Returns the matched
        object, or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable name gets the raw (name, attrs) pair instead of a
        # Tag object.
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                # Every attr criterion must match, else the tag fails.
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Match any kind of markup (list, Tag, or text) against this
        strainer.  Returns the matched element, or None."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        """Core predicate: does *markup* satisfy *matchAgainst*, which
        may be True, a callable, a regexp, a list, a map, or a string?"""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            # True matches anything that exists.
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif isList(matchAgainst):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Compare like with like: coerce the criterion to the
                # markup's string type.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Bug fix: the original called list.__init__([]), which
        # initialized a throwaway literal instead of this instance.
        list.__init__(self)
        # The SoupStrainer whose search produced these results.
        self.source = source
# Now, some helper functions.
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    # Anything exposing the iterator protocol counts...
    if hasattr(l, '__iter__'):
        return True
    # ...otherwise fall back to the classic concrete sequence types.
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        # Python 2: accept both unicode and byte strings.
        return isinstance(s, (unicode, basestring))
    except NameError:
        # Python 3 has neither name; plain str is the only string type.
        return isinstance(s, str)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A map: merge its entries in, last writer wins.
            for key, value in portion.items():
                built[key] = value
        elif isList(portion):
            # A list: every item maps to the default value.
            for item in portion:
                built[item] = default
        else:
            # A scalar: map it straight to the default value.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):

    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

      You can't close a tag without closing all the tags it encloses.
      That is, "<foo><bar></foo>" actually means
      "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}

    # Regex fixups applied to the raw markup before it is handed to
    # sgmllib (see the markupMassage argument of __init__).
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    ROOT_TAG_NAME = u'[document]'

    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

          <br/> (No space between name of closing tag and tag close)
          <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed()
        except StopParsing:
            # Raised by start_meta when a re-parse with a newly
            # discovered encoding has already completed.
            pass
        self.markup = None                 # The markup can now be GCed

    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127 : # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)

    def _feed(self, inDocumentEncoding=None):
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
        if markup:
            if self.markupMassage:
                if not isList(self.markupMassage):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)

        if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
               or methodName.find('do_') == 0:
            return SGMLParser.__getattr__(self, methodName)
        elif methodName.find('__') != 0:
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        # Re-initialize as an empty root tag and reset all parser state.
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        self.pushTag(self)

    def popTag(self):
        """Close the tag on top of the stack and return the new current
        tag."""
        tag = self.tagStack.pop()
        # Tags with just one string-owning child get the child as a
        # 'string' property, so that soup.tag.string is shorthand for
        # soup.tag.contents[0]
        if len(self.currentTag.contents) == 1 and \
           isinstance(self.currentTag.contents[0], NavigableString):
            self.currentTag.string = self.currentTag.contents[0]

        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        """Open a new tag: attach it to the current tag and make it the
        new current tag."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flush accumulated character data into the tree as a node of
        the given NavigableString subclass."""
        if self.currentData:
            currentData = ''.join(self.currentData)
            # Collapse an all-ASCII-whitespace node to a single
            # newline or space.
            if not currentData.translate(self.STRIP_ASCII_SPACES):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            return

        numPops = 0
        mostRecentTag = None
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from the innermost tag outwards.
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers != None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers == None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):
                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        """sgmllib callback for an unrecognized start tag; builds the
        Tag node (returns it), or treats the tag as literal text when
        inside a QUOTE_TAGS section."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        """sgmllib callback for an unrecognized end tag; closes the
        matching open tag, or treats the tag as literal text when
        inside a QUOTE_TAGS section for a different tag."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        # Accumulate character data; turned into a tree node by endData().
        self.currentData.append(data)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.convertXMLEntities:
                data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
                # TODO: We've got a problem here. We're told this is
                # an entity reference, but it's not an XML entity
                # reference or an HTML entity reference. Nonetheless,
                # the logical thing to do is to pass it through as an
                # unrecognized entity reference.
                #
                # Except: when the input is "&carol;" this function
                # will be called with input "carol". When the input is
                # "AT&T", this function will be called with input
                # "T". We have no way of knowing whether a semicolon
                # was present originally, so we don't know whether
                # this is an unknown entity or just a misplaced
                # ampersand.
                #
                # The more common case is a misplaced ampersand, so I
                # escape the ampersand and omit the trailing semicolon.
                data = "&%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
             k = self.rawdata.find(']]>', i)
             if k == -1:
                 k = len(self.rawdata)
             data = self.rawdata[i+9:k]
             j = k+3
             self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (ie. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurance of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurance
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup, MinimalSoup, or
BeautifulStoneSoup before writing your own subclass."""
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
QUOTE_TAGS = {'script' : None, 'textarea' : None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center']
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)")
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if getattr(self, 'declaredHTMLEncoding') or \
(self.originalEncoding == self.fromEncoding):
# This is our second pass through the document, or
# else an encoding was specified explicitly and it
# worked. Rewrite the meta tag.
newAttr = self.CHARSET_RE.sub\
(lambda(match):match.group(1) +
"%SOUP-ENCODING%", contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the new information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
class StopParsing(Exception):
    # Raised internally (see BeautifulSoup.start_meta) to abort SGML
    # parsing once a re-parse with a newly discovered encoding has
    # already completed; swallowed by BeautifulStoneSoup.__init__.
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):

    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']

    # Extend BeautifulSoup's nestable-tag map with the tags above.
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # BUGFIX: this previously read buildTagMap('noscript'), which passes
    # 'noscript' as the *default* value with no tags at all, so the map
    # came out empty and <noscript> never reset nesting.  Pass None as
    # the default and 'noscript' as the tag, as every other caller of
    # buildTagMap does.
    RESET_NESTING_TAGS = buildTagMap(None, 'noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        """Close the current tag as usual, but first copy a
        single-string child up into the parent as an attribute (unless
        the parent already has an attribute of that name)."""
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            # Ensure parent.attrMap exists before checking it below.
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    # "Enterprise-friendly" alias for BeautifulStoneSoup.
    pass
class RobustHTMLParser(BeautifulSoup):
    # "Enterprise-friendly" alias for BeautifulSoup.
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    # "Enterprise-friendly" alias for ICantBelieveItsBeautifulSoup.
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    # "Enterprise-friendly" alias for MinimalSoup.
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    # "Enterprise-friendly" alias for BeautifulSOAP.
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml'):
        # NOTE: the mutable default for overrideEncodings is safe here --
        # it is only iterated, never mutated.
        # Try encodings in priority order: caller overrides, then the
        # encoding declared in the document / sniffed from its BOM, then
        # chardet autodetection, then utf-8 and windows-1252 as a last
        # resort.
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break
        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        # Tuple entries carry (html entity name, hex codepoint); plain
        # string entries are used as-is.
        if type(sub) == types.TupleType:
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        """Try to decode self.markup with the proposed encoding; on
        success store the result and return it, on failure (or if the
        encoding was already tried) return None."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present)
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data):
        """Given a document, tries to detect its XML encoding."""
        # Sniff the byte-order mark / first bytes for the UTF family,
        # then look for an explicit encoding in the XML declaration.
        xml_encoding = sniffed_xml_encoding = None
        try:
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
            xml_encoding_match = re.compile \
                                 ('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')\
                                 .match(xml_data)
        except:
            xml_encoding_match = None
        if xml_encoding_match:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            # A generic "utf-16"/"ucs-2"-style declaration can't tell us
            # the byte order; trust the sniffed BOM-based encoding.
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        """Map a charset name to a codec Python recognizes, trying the
        alias table and hyphen-stripped/underscored variants; falls back
        on the input charset unchanged."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        """Return charset if Python has a codec for it, else None."""
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    # Translation table from EBCDIC bytes to ASCII, built lazily on
    # first use and cached on the class.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # Windows-1252 "smart" punctuation bytes mapped to
    # (html entity name, hex codepoint) pairs, or to a literal
    # replacement string where no entity exists.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
    # Command-line usage: read HTML from stdin, print it pretty-printed.
    import sys
    soup = BeautifulSoup(sys.stdin.read())
    print soup.prettify()
| Python |
import re
import sys
import urllib
import simplejson
baseUrl = "http://ajax.googleapis.com/ajax/services/language/translate"
def getSplits(text, splitLength=4500):
    """Yield successive chunks of *text*, each at most *splitLength*
    characters long.

    The Translate API limits a single request to 4500 characters, so a
    longer text has to be sent in pieces (see translate()).  Returns a
    generator; an empty text yields no chunks.
    """
    # range() instead of xrange(): identical behavior on Python 2 (one
    # entry per chunk, a tiny list) and forward-portable to Python 3.
    return (text[index:index + splitLength]
            for index in range(0, len(text), splitLength))
def translate(text, src='en', to='ru'):
    """Translate *text* from language *src* to language *to* via the
    Google AJAX Language API.

    * The API performs language detection when the given source
      language does not match the text.
    * Text longer than 4500 characters is split (see getSplits) and
      translated request by request; the pieces are concatenated.

    Returns the translated text.  Raises KeyError if a response lacks
    the expected 'responseData'/'translatedText' fields (e.g. on an
    API error).
    """
    params = {'langpair': '%s|%s' % (src, to),
              'v': '1.0'}
    retText = ''
    # Use a distinct loop variable: the original shadowed `text`,
    # which made the code needlessly confusing.
    for chunk in getSplits(text):
        params['q'] = chunk
        # The original wrapped this in `try: ... except: raise`, a no-op;
        # missing keys now raise directly.
        resp = simplejson.load(urllib.urlopen(baseUrl, data=urllib.urlencode(params)))
        retText += resp['responseData']['translatedText']
    return retText
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sets/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class GSError(Exception):
    """ Google Sets Error """
    # Raised when fetching the Google Sets results page fails; see
    # GoogleSets._get_results_page.
    pass
class GSParseError(Exception):
    """Raised (in debug mode only) when Google Sets results cannot be
    parsed.

    Attributes:
      msg -- human-readable explanation of why parsing failed
      tag -- BeautifulSoup object for the most relevant tag that
             failed to parse
    """

    def __init__(self, msg, tag):
        # Keep both the explanation and the offending soup node so a
        # caller can inspect either one.
        self.tag = tag
        self.msg = msg

    def __str__(self):
        return self.msg

    def html(self):
        """Return the offending tag pretty-printed as HTML."""
        return self.tag.prettify()
LARGE_SET = 1
SMALL_SET = 2
class GoogleSets(object):
    """Queries the Google Sets service with up to 5 seed items and
    extracts the expanded set of related items from the result page."""

    # URL templates with 5 query slots (q1..q5); unused slots are
    # filled with empty strings in _get_results_page.
    URL_LARGE = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Large+Set"
    URL_SMALL = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Small+Set+(15+items+or+fewer)"

    def __init__(self, items, random_agent=False, debug=False):
        # items: seed strings for the set expansion (at most 5 are used).
        self.items = items
        self.debug = debug
        self.browser = Browser(debug=debug)

        if random_agent:
            self.browser.set_random_user_agent()

    def get_results(self, set_type=SMALL_SET):
        """Fetch and return the list of result strings for the seed
        items, using either the small or large result set."""
        page = self._get_results_page(set_type)
        results = self._extract_results(page)
        return results

    def _maybe_raise(self, cls, *arg):
        # Raise only in debug mode; silently ignore otherwise.
        if self.debug:
            raise cls(*arg)

    def _get_results_page(self, set_type):
        """Fetch the Google Sets result page and return it parsed as a
        BeautifulSoup object.  Raises GSError on fetch failure."""
        if set_type == LARGE_SET:
            url = GoogleSets.URL_LARGE
        else:
            url = GoogleSets.URL_SMALL

        safe_items = [urllib.quote_plus(i) for i in self.items]
        # Pad the item list to exactly 5 entries for the URL template.
        blank_items = 5 - len(safe_items)
        if blank_items > 0:
            safe_items += ['']*blank_items

        safe_url = url % tuple(safe_items)

        try:
            page = self.browser.get_page(safe_url)
        except BrowserError, e:
            raise GSError, "Failed getting %s: %s" % (e.url, e.error)

        return BeautifulSoup(page)

    def _extract_results(self, soup):
        """Pull the result strings out of the parsed page: each result
        is the text of a link pointing at /search."""
        a_links = soup.findAll('a', href=re.compile('/search'))
        ret_res = [a.string for a in a_links]
        return ret_res
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 17, 2010
@author: ivan
'''
import urllib2
import urllib
import re
import time
from string import replace
from foobnix.util import LOG
from foobnix.util.configuration import FConfiguration
from foobnix.model.entity import CommonBean
from xml.sax.saxutils import unescape
from setuptools.package_index import htmldecode
class Vkontakte:
def __init__(self, email, password):
self.email = email
self.password = password
self.cookie = None
self.execute_time = time.time()
def isLive(self):
return self.get_s_value()
def get_s_value(self):
host = 'http://login.vk.com/?act=login'
#host = 'http://vkontakte.ru/login.php'
post = urllib.urlencode({'email' : self.email,
'expire' : '',
'pass' : self.password,
'vk' : ''})
headers = {'User-Agent' : 'Mozilla/5.0 (X11; U; Linux i686; uk; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 GTB7.0',
'Host' : 'login.vk.com',
'Referer' : 'http://vkontakte.ru/index.php',
'Connection' : 'close',
'Pragma' : 'no-cache',
'Cache-Control' : 'no-cache',
}
conn = urllib2.Request(host, post, headers)
data = urllib2.urlopen(conn)
result = data.read()
value = re.findall(r"name='s' value='(.*?)'", result)
"""old response format"""
if not value:
value = re.findall(r"name='s' id='s' value='(.*?)'", result)
if value:
return value[0]
return None
def get_cookie(self):
if FConfiguration().cookie:
LOG.info("Get VK cookie from cache")
return FConfiguration().cookie
if self.cookie: return self.cookie
host = 'http://vkontakte.ru/login.php?op=slogin'
post = urllib.urlencode({'s' : self.get_s_value()})
headers = {'User-Agent' : 'Mozilla/5.0 (X11; U; Linux i686; uk; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 GTB7.0',
'Host' : 'vkontakte.ru',
'Referer' : 'http://login.vk.com/?act=login',
'Connection' : 'close',
'Cookie' : 'remixchk=5; remixsid=nonenone',
'Pragma' : 'no-cache',
'Cache-Control' : 'no-cache'
}
conn = urllib2.Request(host, post, headers)
data = urllib2.urlopen(conn)
cookie_src = data.info().get('Set-Cookie')
self.cookie = re.sub(r'(expires=.*?;\s|path=\/;\s|domain=\.vkontakte\.ru(?:,\s)?)', '', cookie_src)
FConfiguration().cookie = self.cookie
return self.cookie
def get_page(self, query):
if not query:
return None
#GET /gsearch.php?section=audio&q=madonna&name=1
host = 'http://vkontakte.ru/gsearch.php?section=audio&q=vasya#c[q]=some%20id&c[section]=audio&name=1'
post = urllib.urlencode({
"c[q]" : query,
"c[section]":"audio"
})
headers = {'User-Agent' : 'Mozilla/5.0 (X11; U; Linux i686; uk; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 GTB7.0',
'Host' : 'vkontakte.ru',
'Referer' : 'http://vkontakte.ru/index.php',
'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With' : 'XMLHttpRequest',
'Connection' : 'close',
'Cookie' : 'remixlang=0; remixchk=5; audio_vol=100; %s' % self.get_cookie(),
'Pragma' : 'no-cache',
'Cache-Control' : ' no-cache'
}
conn = urllib2.Request(host, post, headers)
#Do not run to offten
cur_time = time.time()
if cur_time - self.execute_time < 0.5:
LOG.info("Sleep because to many requests...")
time.sleep(0.8)
self.execute_time = time.time()
data = urllib2.urlopen(conn);
result = data.read()
return result
def get_page_by_url(self, host_url):
if not host_url:
return host_url
host_url.replace("#", "&")
post = host_url[host_url.find("?") + 1:]
LOG.debug("post", post)
headers = {'User-Agent' : 'Mozilla/5.0 (X11; U; Linux i686; uk; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 GTB7.0',
'Host' : 'vkontakte.ru',
'Referer' : 'http://vkontakte.ru/index.php',
'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With' : 'XMLHttpRequest',
'Connection' : 'close',
'Cookie' : 'remixlang=0; remixchk=5; audio_vol=100; %s' % self.get_cookie(),
'Pragma' : 'no-cache',
'Cache-Control' : ' no-cache'
}
conn = urllib2.Request(host_url, post, headers)
#Do not run to offten
cur_time = time.time()
if cur_time - self.execute_time < 0.5:
LOG.info("Sleep because to many requests...")
time.sleep(0.8)
self.execute_time = time.time()
data = urllib2.urlopen(conn);
result = data.read()
return result
def get_name_by(self, id, result_album):
for album in result_album:
id_album = album[0]
name = album[1]
if id_album == id:
return name
return None
def find_most_relative_song(self, song_title):
vkSongs = self.find_song_urls(song_title)
if not vkSongs:
return None
times_count = {}
for song in vkSongs:
time = song.time
if time in times_count:
times_count[time] = times_count[time] + 1
else:
times_count[time] = 1
#get most relatives times time
r_count = max(times_count.values())
r_time = self.find_time_value(times_count, r_count)
LOG.info("LOG.info(Song time", r_time)
LOG.info("LOG.info(Count of congs", r_count)
for song in vkSongs:
if song.time == r_time:
return song
return vkSongs[0]
def find_time_value(self, times_count, r_count):
for i in times_count:
if times_count[i] == r_count:
return i
return None
def convert_vk_songs_to_beans(self, vk_songs):
beans = []
for vk_song in vk_songs:
bean = CommonBean(name=vk_song.album + " - " + vk_song.track, path=vk_song.path, type=CommonBean.TYPE_MUSIC_URL);
bean.time = vk_song.time
beans.append(bean)
return beans
def find_song_urls(self, song_title):
page = self.get_page(song_title)
#page = page.decode('cp1251')
#page = page.decode("cp1251")
#unicode(page, "cp1251")
#LOG.info(page
reg_all = "([^<>]*)"
resultall = re.findall("return operate\(([\w() ,']*)\);", page, re.IGNORECASE)
result_album = re.findall(u"<b id=\\\\\"performer([0-9]*)\\\\\">" + reg_all + "<", page, re.IGNORECASE | re.UNICODE)
result_track = re.findall(u"<span id=\\\\\"title([0-9]*)\\\\\">" + reg_all + "<", page, re.IGNORECASE | re.UNICODE)
result_time = re.findall("<div class=\\\\\"duration\\\\\">" + reg_all + "<", page, re.IGNORECASE)
urls = []
ids = []
vkSongs = []
for result in resultall:
result = replace(result, "'", " ")
result = replace(result, ",", " ")
result = result.split()
if len(result) > 4:
id_id = result[0]
id_server = result[1]
id_folder = result[2]
id_file = result[3]
url = "http://cs" + id_server + ".vkontakte.ru/u" + id_folder + "/audio/" + id_file + ".mp3"
urls.append(url)
ids.append(id_id)
#LOG.info(len(resultall), resultall
#LOG.info(len(urls), urls
#LOG.info(len(result_album), result_album
#LOG.info(len(result_track), result_track
LOG.info(len(result_time), result_time)
for i in xrange(len(result_time)):
id = ids[i]
path = urls[i]
album = self.get_name_by(id, result_album)
track = self.get_name_by(id, result_track)
time = result_time[i]
vkSong = VKSong(path, album, track, time)
vkSongs.append(vkSong)
return self.convert_vk_songs_to_beans(vkSongs)
def get_songs_by_url(self, url):
LOG.debug("Search By URL")
result = self.get_page_by_url(url)
try:
result = unicode(result)
except:
result = result
LOG.error("VK connectino error, try other user")
reg_all = "([^{</}]*)"
result_url = re.findall(ur"http:([\\/.0-9A-Z]*)", result, re.IGNORECASE)
result_artist = re.findall(u"q]=" + reg_all + "'", result, re.IGNORECASE | re.UNICODE)
result_title = re.findall(u"\"title([0-9]*)\\\\\">" + reg_all + "", result, re.IGNORECASE | re.UNICODE)
result_time = re.findall("duration\\\\\">" + reg_all, result, re.IGNORECASE | re.UNICODE)
result_lyr = re.findall(ur"showLyrics" + reg_all, result, re.IGNORECASE | re.UNICODE)
LOG.info("lyr:::", result_lyr)
songs = []
j = 0
for i, artist in enumerate(result_artist):
path = "http:" + result_url[i].replace("\\/", "/")
title = self.to_good_chars(result_title[i][1])
if not title:
if len(result_lyr) > j:
title = result_lyr[j]
title = title[title.find(";'>") + 3:]
j += 1
artist = self.to_good_chars(artist)
song = VKSong(path, artist, title, result_time[i]);
songs.append(song)
LOG.info(len(songs))
return self.convert_vk_songs_to_beans(songs)
def to_good_chars(self, line):
try:
return htmldecode(line)
except:
return unescape(line)
class VKSong():
    """Value object describing one scraped audio hit."""
    def __init__(self, path, album, track, time):
        self.path = path    # direct URL of the mp3
        self.album = album  # performer label as scraped (may be None)
        self.track = track  # track title (may be None)
        self.time = time    # duration string, may be empty/None

    def getTime(self):
        """Return the duration string, or a placeholder when unknown."""
        if self.time:
            # BUG FIX: previously returned the bare name `time` (the module
            # imported at file level), not this song's duration attribute.
            return self.time
        else:
            return "no time"

    def getFullDescription(self):
        """'[ album ] track time' with None fields rendered as ''."""
        return "[ " + self.s(self.album) + " ] " + self.s(self.track) + " " + self.s(self.time)

    def __str__(self):
        return "" + self.s(self.album) + " " + self.s(self.track) + " " + self.s(self.time) + " " + self.s(self.path)

    def s(self, value):
        """Return *value*, or '' when it is falsy — None-safe concatenation."""
        if value:
            return value
        else:
            return ""
def get_group_id(str):
    """Return the substring following 'gid=' in *str*, or '' when absent.

    The parameter keeps its historical (builtin-shadowing) name so keyword
    callers are not broken.
    """
    search = "gid="
    # Use the `search` constant consistently (it was declared but the
    # literal was repeated before).
    index = str.find(search)
    if index == -1:
        # Previously fell through and returned str[3:] — a nonsense tail.
        return ""
    return str[index + len(search):]
#vk = Vkontakte("qax@bigmir.net", "foobnix")
#LOG.info(vk.get_s_value()
#LOG.info(vk.get_cookie()
#line = "http://vkontakte.ru/audio.php?id=7185772"
#LOG.info(vk.find_most_relative_song("madonna")
#s = "http://vkontakte.ru/audio.php?id=2765347 < & > ' ' &"
#LOG.info(unescape(s)
| Python |
'''
Created on 24 Apr 2010
@author: Matik
'''
from foobnix.model.entity import CommonBean
from foobnix.thirdparty import pylast
from foobnix.thirdparty.pylast import WSError
from foobnix.util import LOG
from foobnix.util.configuration import FConfiguration
from foobnix.online.google.translate import translate
from foobnix.util.time_utils import normilize_time
__all__ = [
'connected',
'search_top_tracks',
'search_top_albums',
'search_tags_genre',
'search_top_similar',
'unimplemented_search',
]
# Module-level last.fm session; stays None when configuration or the
# connection fails, which search_* callers must tolerate (see connected()).
network = None
try:
    LOG.info('trying to connect to last.fm')
    API_KEY = FConfiguration().API_KEY
    API_SECRET = FConfiguration().API_SECRET
    username = FConfiguration().lfm_login
    password_hash = pylast.md5(FConfiguration().lfm_password)
    network = pylast.get_lastfm_network(api_key=API_KEY, api_secret=API_SECRET, username=username, password_hash=password_hash)
except Exception:
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    LOG.error("last.fm connection error")
def connected():
    """Report whether the module-level last.fm session was established."""
    if network is None:
        return False
    return True
def search_top_albums(query):
    """Return CommonBeans for the artist's top albums (at most 7) and their tracks.

    Returns None when the artist cannot be resolved on last.fm.
    """
    artist = network.get_artist(query)
    if not artist:
        return None
    try:
        albums = artist.get_top_albums()
    except WSError:
        LOG.info("No artist with that name")
        return None
    beans = []
    LOG.info("Albums: ", albums)
    for position, album in enumerate(albums):
        if position > 6:
            break
        # pylast results are either objects with .item or plain dicts.
        try:
            found = album.item
        except AttributeError:
            found = album['item']
        tracks = found.get_tracks()
        header = CommonBean(name=found.get_title() + " (" + found.get_release_year() + ")", path="", color="GREEN", type=CommonBean.TYPE_FOLDER, parent=query)
        beans.append(header)
        for track in tracks:
            beans.append(CommonBean(name=track, path="", type=CommonBean.TYPE_MUSIC_URL, parent=found.get_title()))
    return beans
def search_tags_genre(query):
    """Search last.fm tags for *query*; returns a flat list of CommonBeans.

    Result layout: the exact tag header plus up to 21 of its top tracks,
    then up to 3 related tags each expanded with up to 11 top tracks, then
    an "OTHER TAGS" folder listing the remaining tag names.
    Returns None when the top-tracks lookup fails.
    """
    # Tags on last.fm are English; translate a possibly-Russian query first.
    query = translate(query, src="ru", to="en")
    beans = []
    tag = network.get_tag(query)
    bean = CommonBean(name=tag.get_name(), path="", color="GREEN", type=CommonBean.TYPE_GOOGLE_HELP, parent=None)
    beans.append(bean)
    try:
        tracks = tag.get_top_tracks()
    except:
        return None
    for j, track in enumerate(tracks):
        if j > 20:
            break
        # pylast results are either objects with .item or plain dicts.
        try:
            track_item = track.item
        except AttributeError:
            track_item = track['item']
        bean = CommonBean(name=track_item.get_artist().get_name() + " - " + track_item.get_title(), path="", type=CommonBean.TYPE_MUSIC_URL, parent=tag.get_name())
        beans.append(bean)
    tags = network.search_for_tag(query)
    LOG.info("tags")
    LOG.info(tags)
    flag = True  # True until the "OTHER TAGS" folder header has been emitted
    for i, tag in enumerate(tags.get_next_page()):
        if i == 0:
            # The first search hit duplicates the tag already rendered above.
            LOG.info("we find it top", tag, query)
            continue
        if i < 4:
            # Related tags 1..3 get expanded with their own top tracks.
            bean = CommonBean(name=tag.get_name(), path="", color="GREEN", type=CommonBean.TYPE_GOOGLE_HELP, parent=None)
            beans.append(bean)
            tracks = tag.get_top_tracks()
            for j, track in enumerate(tracks):
                if j > 10:
                    break
                try:
                    track_item = track.item
                except AttributeError:
                    track_item = track['item']
                bean = CommonBean(name=track_item.get_artist().get_name() + " - " + track_item.get_title(), path="", type=CommonBean.TYPE_MUSIC_URL, parent=tag.get_name())
                beans.append(bean)
        else:
            # Everything past the third related tag is only listed by name.
            if flag:
                bean = CommonBean(name="OTHER TAGS", path="", color="#FF99FF", type=CommonBean.TYPE_FOLDER, parent=None)
                beans.append(bean)
                flag = False
            bean = CommonBean(name=tag.get_name(), path="", color="GREEN", type=CommonBean.TYPE_GOOGLE_HELP, parent=None)
            beans.append(bean)
    return beans
def search_top_tracks(query):
    """Return a CommonBean per top track of the artist named by *query*.

    Returns None when the artist cannot be resolved on last.fm.
    """
    artist = network.get_artist(query)
    if not artist:
        return None
    try:
        tracks = artist.get_top_tracks()
    except WSError:
        LOG.info("No artist with that name")
        return None
    beans = []
    LOG.info("Tracks: ", tracks)
    for track in tracks:
        # pylast results are either objects with .item or plain dicts.
        try:
            found = track.item
        except AttributeError:
            found = track['item']
        beans.append(CommonBean(name=str(found), path="", type=CommonBean.TYPE_MUSIC_URL, parent=query))
    return beans
def search_top_similar(query):
    """Return folder beans for up to 10 similar artists, each followed by
    that artist's own top tracks.

    Returns None when *query* does not resolve to an artist.
    """
    artist = network.get_artist(query)
    if not artist:
        return None
    beans = []
    for similar in artist.get_similar(10):
        # pylast results are either objects with .item or plain dicts.
        try:
            found = similar.item
        except AttributeError:
            found = similar['item']
        LOG.info(similar, found)
        title = str(found)
        beans.append(CommonBean(name=title, path="", type=CommonBean.TYPE_FOLDER, color="GREEN", parent=query))
        # Append the similar artist's top tracks right below its folder row.
        tops = search_top_tracks(title)
        for top in tops:
            beans.append(top)
    return beans
def unimplemented_search(query):
    """Find songs similar to an "Artist - Title" *query* string.

    Returns the query bean followed by a bean per similar track, [] when the
    track is unknown on last.fm, or None when the similar lookup fails.
    """
    song = CommonBean(name=query, type=CommonBean.TYPE_MUSIC_URL)
    artist = song.getArtist()
    title = song.getTitle()
    track = network.get_track(artist, title)
    LOG.debug("Search similar songs", song.getArtist(), song.getTitle())
    beans = []
    if not track:
        return []
    # Similar tracks.
    try:
        similars = track.get_similar()
    except Exception:
        # Narrowed from a bare except; still best-effort by design.
        LOG.error("Similar not found")
        return None
    beans.append(song)
    for tsong in similars:
        # pylast results are either objects with .item or plain dicts.
        try:
            tsong_item = tsong.item
        except AttributeError:
            tsong_item = tsong['item']
        beans.append(CommonBean(name=str(tsong_item), type=CommonBean.TYPE_MUSIC_URL, parent=query))
    return beans
| Python |
'''
Created on 20.04.2010
@author: ivan
'''
from foobnix.model.entity import CommonBean
import os
from foobnix.util import LOG
from foobnix.online.vk import Vkontakte
from foobnix.util.configuration import FConfiguration
# Module-level VK session; vk stays None when login/configuration fails and
# every caller in this module must tolerate that.
try:
    vk = Vkontakte(FConfiguration().vk_login, FConfiguration().vk_password)
except Exception:
    # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    vk = None
    LOG.error("Vkontakte connection error")
def get_song_path(song):
    """Resolve a playable path for a pathless online (TYPE_MUSIC_URL) bean.

    Returns None when the bean already has a path or is not an online song;
    otherwise the cached local file path or the freshly found VK URL.
    """
    if song.path:
        return None
    if song.type != CommonBean.TYPE_MUSIC_URL:
        return None
    # Prefer a previously downloaded local copy over a network lookup.
    if _check_set_local_path(song):
        return song.path
    return _get_vk_song(song).path
def get_songs_by_url(url):
    """Delegate a VK page-URL search to the module-level session."""
    return vk.get_songs_by_url(url)
def find_song_urls(title):
    """Search VK for songs matching *title*; returns CommonBeans."""
    return vk.find_song_urls(title)
def update_song_path(song):
    """Fill in song.path (and song.time) in place for a pathless online bean.

    Returns the local path when a cached copy was found, otherwise None;
    the bean is mutated when any source is located.
    """
    if song.path or song.type != CommonBean.TYPE_MUSIC_URL:
        return None
    # A previously downloaded local copy wins over a network lookup.
    if _check_set_local_path(song):
        return song.path
    found = _get_vk_song(song)
    if not found:
        LOG.error("VK song source not found")
        return None
    song.path = found.path
    song.time = found.time
    LOG.debug("Time", song.time)
    return None
def _check_set_local_path(song):
    """If a non-empty cached file exists, repoint *song* at it and return True."""
    candidate = _get_cached_song(song)
    if not (os.path.isfile(candidate) and os.path.getsize(candidate) > 1):
        return False
    song.path = candidate
    song.type = CommonBean.TYPE_MUSIC_FILE
    LOG.info("Find file on local disk", song.path)
    return True
def _get_vk_song(song):
    """Ask VK for the best-matching source of this bean; may return None."""
    LOG.info("Starting search song path in Internet", song.path)
    return vk.find_most_relative_song(song.name)
def _make_dirs(path):
    """Create *path* (with parents) if it does not exist yet; race-safe.

    The old isdir()-then-makedirs() check could crash when another
    thread/process created the directory in between (TOCTOU).
    """
    try:
        os.makedirs(path)
    except OSError:
        # Re-raise only when the directory really is absent — the error was
        # not just "already exists".
        if not os.path.isdir(path):
            raise
def _get_cached_song(song):
    """Return the cache-file path for this bean, creating the artist folder."""
    folder = FConfiguration().onlineMusicPath
    if song.getArtist():
        folder = folder + "/" + song.getArtist()
        _make_dirs(folder)
    target = folder + "/" + song.name + ".mp3"
    LOG.info("Stored dir: ", target)
    return target
| Python |
'''
Created on 18.04.2010
@author: ivan
'''
from foobnix.util import LOG
from foobnix.thirdparty import pylast
import urllib
import gtk
import thread
from foobnix.model.entity import CommonBean
from foobnix.base.base_list_controller import BaseListController
from foobnix.util.configuration import FConfiguration
from foobnix.online.song_resource import update_song_path
from foobnix.util.mouse_utils import is_double_left_click, \
is_rigth_click
from foobnix.online.dowload_util import save_song_thread, save_as_song_thread
class SimilartSongsController(BaseListController):
    """Tree-view of songs similar to the current track.

    Supports double-click playback (resolving the VK source on demand),
    drag-to-playlist, and a right-click menu with play/save/add/remove/info.
    """
    def __init__(self, gx_main, playerCntr, directoryCntr):
        self.directoryCntr = directoryCntr
        self.playerCntr = playerCntr
        widget = gx_main.get_widget("treeview_similar_songs")
        widget.get_parent().set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
        BaseListController.__init__(self, widget)
        # Display name of the track this list is "similar to";
        # overwritten by the information pane when a song starts.
        self.parent = "Similar to"
    def on_drag(self):
        """Append the whole similar-songs list to the playlist as virtual beans."""
        items = self.get_all_items()
        songs = []
        similar = _("Similar to: ") + self.parent
        song = CommonBean(name=similar, type=CommonBean.TYPE_FOLDER)
        songs.append(song)
        for item in items:
            song = CommonBean(name=item, type=CommonBean.TYPE_MUSIC_URL, parent=similar)
            songs.append(song)
        self.directoryCntr.append_virtual(songs)
    def play_selected_song(self):
        """Resolve a VK source for the selected row and start playback."""
        artist_track = self.get_selected_item()
        song = CommonBean(name=artist_track, type=CommonBean.TYPE_MUSIC_URL)
        update_song_path(song)
        self.playerCntr.playSong(song)
    def show_save_as_dialog(self, song):
        """Ask for a directory and download *song* there in the background."""
        LOG.debug("Show Save As Song dialog")
        chooser = gtk.FileChooserDialog(title=_("Choose directory to save song"), action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons=(gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        chooser.set_default_response(gtk.RESPONSE_OK)
        response = chooser.run()
        if response == gtk.RESPONSE_OK:
            path = chooser.get_filename()
            save_as_song_thread(song, path)
        elif response == gtk.RESPONSE_CANCEL:
            LOG.info('Closed, no files selected')
        chooser.destroy()
    def show_info(self, song):
        """Modal dialog showing "Artist - Title" of *song*."""
        md = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
                               gtk.BUTTONS_CLOSE, song.getArtist() + " - " + song.getTitle())
        md.run()
        md.destroy()
    def on_button_press(self, w, e):
        """Mouse handler: double-click plays, right-click opens the menu."""
        if is_double_left_click(e):
            self.play_selected_song()
        if is_rigth_click(e):
            artist_track = self.get_selected_item()
            song = CommonBean(name=artist_track, type=CommonBean.TYPE_MUSIC_URL)
            menu = gtk.Menu()
            play = gtk.ImageMenuItem(gtk.STOCK_MEDIA_PLAY)
            play.connect("activate", lambda * a: self.play_selected_song())
            menu.add(play)
            save = gtk.ImageMenuItem(gtk.STOCK_SAVE)
            save.connect("activate", lambda * a: save_song_thread([song]))
            menu.add(save)
            save_as = gtk.ImageMenuItem(gtk.STOCK_SAVE_AS)
            # NOTE(review): a list is passed here although show_save_as_dialog
            # forwards its argument as a single song — verify the intended type.
            save_as.connect("activate", lambda * a: self.show_save_as_dialog([song]))
            menu.add(save_as)
            add = gtk.ImageMenuItem(gtk.STOCK_ADD)
            add.connect("activate", lambda * a: self.on_drag())
            menu.add(add)
            remove = gtk.ImageMenuItem(gtk.STOCK_REMOVE)
            remove.connect("activate", lambda * a: self.remove_selected())
            menu.add(remove)
            info = gtk.ImageMenuItem(gtk.STOCK_INFO)
            info.connect("activate", lambda * a: self.show_info(song))
            menu.add(info)
            menu.show_all()
            menu.popup(None, None, None, e.button, e.time)
class SimilartArtistsController(BaseListController):
    """List of similar artists; double-click feeds the name to the search panel."""

    def __init__(self, gx_main, search_panel):
        self.search_panel = search_panel
        tree = gx_main.get_widget("treeview_similart_artists")
        tree.get_parent().set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
        BaseListController.__init__(self, tree)

    def on_button_press(self, w, e):
        """On double left click, push the selected artist into the search box."""
        if not is_double_left_click(e):
            return
        artist = self.get_selected_item()
        LOG.debug("Clicked Similar Artist:", artist)
        self.search_panel.set_text(artist)
class SimilartTagsController(BaseListController):
    """List of song tags; double-click feeds the tag into the search panel."""

    def __init__(self, gx_main, search_panel):
        self.search_panel = search_panel
        tree = gx_main.get_widget("treeview_song_tags")
        tree.get_parent().set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
        BaseListController.__init__(self, tree)

    def on_button_press(self, w, e):
        """On double left click, push the selected tag into the search box."""
        if not is_double_left_click(e):
            return
        tags = self.get_selected_item()
        LOG.debug("Clicked tags:", tags)
        self.search_panel.set_text(tags)
# last.fm credentials read from configuration at import time.
API_KEY = FConfiguration().API_KEY
API_SECRET = FConfiguration().API_SECRET
username = FConfiguration().lfm_login
password_hash = pylast.md5(FConfiguration().lfm_password)
#TODO: This file is under heavy refactoring, don't touch anything you think is wrong
# Module-level last.fm session; lastfm stays None on any failure.
try:
    lastfm = pylast.get_lastfm_network(api_key=API_KEY, api_secret=API_SECRET, username=username, password_hash=password_hash)
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit —
    # consider narrowing to Exception once the refactoring settles.
    lastfm = None
    LOG.error("last.fm connection error")
class InformationController():
    """Right-hand info pane: album art, current-song label, similar
    songs/artists/tags lists and external link buttons, all fed from last.fm."""
    def set_no_image_album(self):
        """Show the placeholder disc image, sized for the current screen height."""
        heigth = gtk.gdk.screen_height()
        if heigth < 800:
            image_name = "blank-disc-small.jpg"
        else:
            image_name = "blank-disc.jpg"
        # Probe the known install locations, falling back to the source tree.
        try:
            pix = gtk.gdk.pixbuf_new_from_file("/usr/local/share/pixmaps/" + image_name) #@UndefinedVariable
        except:
            try:
                pix = gtk.gdk.pixbuf_new_from_file("/usr/share/pixmaps/" + image_name) #@UndefinedVariable
            except:
                pix = gtk.gdk.pixbuf_new_from_file("foobnix/pixmaps/" + image_name) #@UndefinedVariable
        self.album_image.set_from_pixbuf(pix)
    def __init__(self, gx_main, playerCntr, directoryCntr, search_panel):
        self.album_image = gx_main.get_widget("image_widget")
        self.set_no_image_album()
        """album name"""
        self.album_name = gx_main.get_widget("label_album_name")
        self.album_name.set_use_markup(True)
        self.current_song_label = gx_main.get_widget("current_song_label")
        self.current_song_label.set_use_markup(True)
        """Similar artists"""
        self.similar_artists_cntr = SimilartArtistsController(gx_main, search_panel)
        self.similar_artists_cntr.set_title(_('Similar Artists'))
        """similar songs"""
        self.similar_songs_cntr = SimilartSongsController(gx_main, playerCntr, directoryCntr)
        self.similar_songs_cntr.set_title(_("Similar Songs"))
        """song tags"""
        self.song_tags_cntr = SimilartTagsController(gx_main, search_panel)
        self.song_tags_cntr.set_title(_("Similar Tags"))
        """link buttons"""
        self.lastfm_url = gx_main.get_widget("lastfm_linkbutton")
        self.wiki_linkbutton = gx_main.get_widget("wiki_linkbutton")
        self.mb_linkbutton = gx_main.get_widget("mb_linkbutton")
        # Cache of the last rendered album so the cover is not re-downloaded.
        self.last_album_name = None
        self.last_image = None
        self.last_fm_network = lastfm
    def show_song_info(self, song):
        """Kick off the info refresh for *song* on a background thread."""
        thread.start_new_thread(self.show_song_info_tread, (song,))
#        self.show_song_info_tread(song)
    def add_similar_song(self, song):
        # NOTE(review): self.current_list_model is never assigned on this
        # class — this method looks stale; verify before calling it.
        self.current_list_model.append([song.get_short_description(), song.path])
        #self.similar_songs_cntr.add_item(song.get_name())
    def add_similar_artist(self, artist):
        """Append one artist name to the similar-artists list."""
        self.similar_artists_cntr.add_item(artist)
    def add_tag(self, tag):
        """Append one tag name to the song-tags list."""
        self.song_tags_cntr.add_item(tag)
    def show_song_info_tread(self, song):
        """Thread body: refresh lists/links and download the album cover."""
        self.song = song
        LOG.info("Get all possible information about song")
        image_url = self.get_album_image_url(song)
#        LOG.error("Image url dowlaod error")
#        image_url = None
        if not image_url:
            LOG.info("Image not found, load empty.")
            self.set_no_image_album()
            return None
        try:
            image_pix_buf = self.create_pbuf_image_from_url(image_url)
            self.album_image.set_from_pixbuf(image_pix_buf)
        except:
            LOG.error("dowload image error")
    def set_image(self, path):
        pass
    def get_album_image_url(self, song):
        """Update link buttons, labels and the three similar lists for *song*;
        return the album cover URL (possibly cached) or None."""
        """set urls"""
        """TODO TypeError: cannot concatenate 'str' and 'NoneType' objects """
        self.lastfm_url.set_uri("http://www.lastfm.ru/search?q=" + song.getArtist() + "&type=artist")
        self.wiki_linkbutton.set_uri("http://en.wikipedia.org/w/index.php?search=" + song.getArtist())
        self.mb_linkbutton.set_uri("http://musicbrainz.org/search/textsearch.html?type=artist&query=" + song.getArtist())
        self.current_song_label.set_markup("<b>" + song.getTitle() + "</b>")
        track = self.last_fm_network.get_track(song.getArtist(), song.getTitle())
        self.similar_songs_cntr.parent = song.getArtist() + " - " + song.getTitle()
        LOG.info(track)
        if not track:
            return None
        """similar tracks"""
        try:
            similars = track.get_similar()
        except:
            LOG.error("Similar not found")
            return None
        self.similar_songs_cntr.clear()
        for tsong in similars:
            # pylast results are either objects with .item or plain dicts.
            try:
                tsong_item = tsong.item
            except AttributeError:
                tsong_item = tsong['item']
            #tsong = CommonBean(name=str(tsong_item), type=CommonBean.TYPE_MUSIC_URL)
            self.similar_songs_cntr.add_item(str(tsong_item))
        """similar tags"""
        tags = track.get_top_tags(15)
        self.song_tags_cntr.clear()
        for tag in tags:
            try:
                tag_item = tag.item
            except AttributeError:
                tag_item = tag['item']
            self.add_tag(tag_item.get_name())
        """similar artists"""
        artist = track.get_artist()
        similar_artists = artist.get_similar(15)
        self.similar_artists_cntr.clear()
        for artist in similar_artists:
            try:
                artist_item = artist.item
            except AttributeError:
                artist_item = artist['item']
            self.add_similar_artist(artist_item.get_name())
        album = track.get_album()
        if album:
            self.album_name.set_markup("<b>" + song.getArtist() + " - " + album.get_name() + " (" + album.get_release_year() + ")</b>")
        else:
            self.album_name.set_markup(u"<b>" + song.getArtist() + "</b>")
        #album = self.last_fm_network.get_album(song.getArtist(), song.getTitle())
        if not album:
            return None
        LOG.info("Find album", album)
        # Serve the cached cover when the album did not change.
        if self.last_album_name == album.get_name():
            LOG.info("Album the same, not need to dowlaod image")
            #TODO need to implement album image cache
            return self.last_image
        # NOTE(review): this second `if not album` is unreachable — album was
        # already checked above.
        if not album:
            return None
        LOG.info(album)
        try:
            self.last_album_name = album.get_name()
            heigth = gtk.gdk.screen_height()
            if heigth < 800:
                self.last_image = album.get_cover_image(size=pylast.COVER_LARGE)
            else:
                self.last_image = album.get_cover_image(size=pylast.COVER_EXTRA_LARGE)
        except:
            LOG.info("image not found for:", song)
        LOG.info("image:", self.last_image)
        return self.last_image
    def create_pbuf_image_from_url(self, url_to_image):
        """Download *url_to_image* and return it as a gtk Pixbuf."""
        f = urllib.urlopen(url_to_image)
        data = f.read()
        pbl = gtk.gdk.PixbufLoader() #@UndefinedVariable
        pbl.write(data)
        pbuf = pbl.get_pixbuf()
        pbl.close()
        return pbuf
| Python |
'''
Created on Jun 10, 2010
@author: ivan
'''
from foobnix.online.google.search import GoogleSearch
from foobnix.util import LOG
"""Get search result titles from google"""
def google_search_resutls(query, results_count=10):
    """Return up to *results_count* Google result titles for *query*.

    Best-effort: any failure is logged and the (possibly empty) list
    collected so far is returned. The historical misspelled name is kept
    for callers.
    """
    results = []
    try:
        LOG.debug("Start google search", query)
        searcher = GoogleSearch(query.encode('utf-8'))
        searcher.results_per_page = results_count
        for res in searcher.get_results():
            # Titles arrive as unicode; store them utf-8 encoded like the
            # rest of the application expects.
            results.append(res.title.encode('utf8'))
    except Exception:
        # Narrowed from a bare `except :`; still best-effort by design.
        LOG.error("Google Search Result Error")
    return results
'''
Created on Mar 16, 2010
@author: ivan
'''
from random import randint
from foobnix.util import LOG
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.model.entity import CommonBean
class OnlineListModel:
    """gtk.ListStore-backed playlist model mapping CommonBean fields to columns."""
    # Column indices inside the backing 12-column ListStore.
    POS_ICON = 0
    POS_TRACK_NUMBER = 1
    POS_NAME = 2
    POS_PATH = 3
    POS_COLOR = 4
    POS_INDEX = 5
    POS_TYPE = 6
    POS_PARENT = 7
    POS_TIME = 8
    POS_START_AT = 9
    POS_DURATION = 10
    POS_ID3 = 11
    def __init__(self, widget):
        """Attach the ListStore and the visible tree-view columns to *widget*."""
        self.widget = widget
        self.current_list_model = gtk.ListStore(str, str, str, str, str, int, str, str, str, str, str, str)
        cellpb = gtk.CellRendererPixbuf()
        cellpb.set_property('cell-background', 'yellow')
        iconColumn = gtk.TreeViewColumn(None, cellpb, stock_id=0, cell_background=4)
        iconColumn.set_fixed_width(5)
        descriptionColumn = gtk.TreeViewColumn(_('Artist - Title'), gtk.CellRendererText(), text=self.POS_NAME, background=self.POS_COLOR)
        descriptionColumn.set_sizing(gtk.TREE_VIEW_COLUMN_AUTOSIZE)
        descriptionColumn.set_resizable(True)
        timeColumn = gtk.TreeViewColumn(_('Time'), gtk.CellRendererText(), text=self.POS_TIME, background=self.POS_COLOR)
        timeColumn.set_fixed_width(5)
        timeColumn.set_min_width(5)
        # Filler column so the description column does not stretch the time one.
        empty = gtk.TreeViewColumn(None, gtk.CellRendererText(), text= -1, background=self.POS_COLOR)
        widget.append_column(iconColumn)
        widget.append_column(descriptionColumn)
        widget.append_column(timeColumn)
        widget.append_column(empty)
        widget.set_model(self.current_list_model)
    def get_size(self):
        """Number of rows in the model."""
        return len(self.current_list_model)
    def get_all_beans(self):
        """Materialise every row as a CommonBean, in display order."""
        return [self.getBeenByPosition(i) for i in xrange(self.get_size())]
    def getBeenByPosition(self, position):
        """Build a CommonBean from the row at *position* (name kept for callers)."""
        row = self.current_list_model[position]
        bean = CommonBean()
        bean.icon = row[self.POS_ICON]
        bean.tracknumber = row[self.POS_TRACK_NUMBER]
        bean.name = row[self.POS_NAME]
        bean.path = row[self.POS_PATH]
        bean.color = row[self.POS_COLOR]
        bean.index = row[self.POS_INDEX]
        bean.type = row[self.POS_TYPE]
        bean.parent = row[self.POS_PARENT]
        bean.time = row[self.POS_TIME]
        bean.start_at = row[self.POS_START_AT]
        bean.duration = row[self.POS_DURATION]
        bean.id3 = row[self.POS_ID3]
        return bean
    def get_random_bean(self):
        """Return a bean from a uniformly random row, or None when empty."""
        size = self.get_size()
        if not size:
            return None
        # BUG FIX: randint is inclusive on both ends, so the upper bound must
        # be size - 1 — randint(0, size) could index one past the end.
        return self.getBeenByPosition(randint(0, size - 1))
    def getModel(self):
        """Expose the underlying gtk.ListStore."""
        return self.current_list_model
    def get_selected_bean(self):
        """Bean for the first selected row, or None when nothing is selected."""
        selection = self.widget.get_selection()
        model, paths = selection.get_selected_rows()
        if not paths:
            return None
        return self._get_bean_by_path(paths[0])
    def get_all_selected_beans(self):
        """Beans for every selected row, or None when nothing is selected."""
        selection = self.widget.get_selection()
        model, paths = selection.get_selected_rows()
        if not paths:
            return None
        beans = []
        for path in paths:
            selection.select_path(path)
            beans.append(self._get_bean_by_path(path))
        return beans
    def _get_bean_by_path(self, path):
        """Build a CommonBean from the row at tree *path*, or None."""
        model = self.current_list_model
        iter = model.get_iter(path)
        if not iter:
            return None
        bean = CommonBean()
        bean.icon = model.get_value(iter, self.POS_ICON)
        bean.tracknumber = model.get_value(iter, self.POS_TRACK_NUMBER)
        bean.name = model.get_value(iter, self.POS_NAME)
        bean.path = model.get_value(iter, self.POS_PATH)
        bean.color = model.get_value(iter, self.POS_COLOR)
        bean.index = model.get_value(iter, self.POS_INDEX)
        bean.type = model.get_value(iter, self.POS_TYPE)
        bean.parent = model.get_value(iter, self.POS_PARENT)
        bean.time = model.get_value(iter, self.POS_TIME)
        bean.start_at = model.get_value(iter, self.POS_START_AT)
        bean.duration = model.get_value(iter, self.POS_DURATION)
        bean.id3 = model.get_value(iter, self.POS_ID3)
        return bean
    def clear(self):
        """Remove every row."""
        self.current_list_model.clear()
    def remove_selected(self):
        """Delete all selected rows (iters collected first, then removed)."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected_rows()
        iters = [model.get_iter(path) for path in selected]
        LOG.debug("REMOVE:", iters)
        for iter in iters:
            model.remove(iter)
    def append(self, bean):
        """Append one CommonBean as a new row."""
        self.current_list_model.append([bean.icon, bean.tracknumber, bean.name, bean.path, bean.color, bean.index, bean.type, bean.parent, bean.time, bean.start_at, bean.duration, bean.id3])
    def __del__(self, *a):
        LOG.info("del")
    def get_selected_index(self):
        """Flat index of the first selected row, or None."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected_rows()
        if not selected:
            return None
        iter = self.current_list_model.get_iter(selected[0])
        if not iter:
            return None
        # ListStore is flat, so the string path is always a plain integer.
        return int(model.get_string_from_iter(iter))
    def repopulate(self, played_index):
        """Re-render all rows: stripe colours, renumber, mark the playing row."""
        LOG.info("Selected index", played_index)
        beans = self.get_all_beans()
        self.clear()
        for i, songBean in enumerate(beans):
            if not songBean.color:
                songBean.color = self.get_bg_color(i)
            songBean.name = songBean.getPlayListDescription()
            songBean.index = i
            if i == played_index:
                songBean.setIconPlaying()
            else:
                songBean.setIconNone()
            self.append(songBean)
    def get_bg_color(self, i):
        """Alternating row background colours (zebra striping)."""
        if i % 2:
            return "#F2F2F2"
        return "#FFFFE5"
| Python |
'''
Created on 25 Apr 2010
@author: Matik
'''
from __future__ import with_statement
import gtk
from gobject import TYPE_NONE, TYPE_PYOBJECT, TYPE_STRING #@UnresolvedImport
from foobnix.util import LOG
import foobnix.online.integration.lastfm as lastfm
from foobnix.base import BaseController, SIGNAL_RUN_FIRST
from threading import Thread
from foobnix.online.song_resource import get_songs_by_url, find_song_urls
class SearchResults(Thread):
    """Worker thread that invokes a single search callback with a query."""

    def __init__(self, query, function):
        """Remember the query string and the callable to run."""
        Thread.__init__(self)
        self.function = function
        self.query = query

    def run(self):
        # Executed on the worker thread once start() is called.
        self.function(self.query)
class SearchPanel(BaseController):
__gsignals__ = {
'show_search_results': (SIGNAL_RUN_FIRST, TYPE_NONE, (TYPE_STRING, TYPE_PYOBJECT)),
'starting_search': (SIGNAL_RUN_FIRST, TYPE_NONE, ()),
}
def __init__(self, gx_main):
BaseController.__init__(self)
self.search_routine = lastfm.search_top_tracks
self.create_search_mode_buttons(gx_main)
self.search_text = gx_main.get_widget("search_entry")
self.search_text.connect("activate", self.on_search) # GTK manual doesn't recommend to do this
#self.search_text.connect("key-press-event", self.on_key_pressed)
search_button = gx_main.get_widget("search_button")
search_button.connect("clicked", self.on_search)
def on_key_pressed(self, w, event):
if event.type == gtk.gdk.KEY_PRESS: #@UndefinedVariable
#Enter pressed
LOG.info("keyval", event.keyval, "keycode", event.hardware_keycode)
if event.hardware_keycode == 36:
self.on_search()
def get_search_query(self):
query = self.search_text.get_text()
if query and len(query.strip()) > 0:
LOG.info(query)
return query
#Nothing found
return None
def set_text(self, text):
self.search_text.set_text(text)
def create_search_mode_buttons(self, gx_main):
mode_to_button_map = {lastfm.search_top_tracks : 'top_songs_togglebutton',
lastfm.search_top_albums : 'top_albums_togglebutton',
lastfm.search_top_similar : 'top_similar_togglebutton',
find_song_urls : 'all_search_togglebutton',
lastfm.search_tags_genre : 'tags_togglebutton',
lastfm.unimplemented_search : 'tracks_togglebutton' }
self.search_mode_buttons = {}
for mode, name in mode_to_button_map.items():
button = gx_main.get_widget(name)
button.connect('toggled', self.on_search_mode_selected, mode)
self.search_mode_buttons[mode] = button
def on_search_mode_selected(self, clicked_button, selected_mode=None):
# Look if the clicked button was the only button that was checked. If yes, then turn
# it back to checked - we don't allow all buttons to be unchecked at the same time
if all([not button.get_active() for button in self.search_mode_buttons.values()]):
clicked_button.set_active(True)
# if the button should become unchecked, then do nothing
if not clicked_button.get_active():
return
# so, the button becomes checked. Uncheck all other buttons
for button in self.search_mode_buttons.values():
if button != clicked_button:
button.set_active(False)
self.search_routine = selected_mode
def capitilize_query(self, line):
if line.startswith("http"):
return line
line = line.strip()
result = ""
for l in line.split():
result += " " + l[0].upper() + l[1:]
return result
search_thread_id = SearchResults(None, None)
def on_search(self, *args):
LOG.debug('>>>>>>> search with ' + str(self.search_routine))
query = self.get_search_query()
if query:
query = self.capitilize_query(u"" + query)
#thread.start_new_thread(self.perform_search, (query,))
if not self.search_thread_id.isAlive():
#self.search_thread_id = SearchResults(query, self.perform_search)
#self.search_thread_id.start()
self.perform_search(query)
def perform_search(self, query):
beans = None
try:
if query.lower().startswith("http"):
beans = get_songs_by_url(query)
elif self.search_routine:
beans = self.search_routine(query)
except BaseException, ex:
LOG.error('Error while search for %s: %s' % (query, ex))
self.emit('show_search_results', query, beans)
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 18, 2010
@author: ivan
'''
import urllib2
import re
from string import replace
def _engine_search(value):
    """Query en.vpleer.ru for *value* and return the raw HTML response.

    Spaces are replaced with '+' to form a naive query string.
    """
    # BUGFIX: LOG was used below but never imported in this module,
    # so every call raised NameError.
    from foobnix.util import LOG
    host = "http://en.vpleer.ru/?q=" + replace(value, " ", "+")
    LOG.info(host)
    data = urllib2.urlopen(host)
    return data.read()
def get_song_path(line):
    """Extract every href target ending in ".mp3" from an HTML page.

    Returns a list of URLs, or None when nothing matched.
    """
    # BUGFIX: the '.' before "mp3" was unescaped and matched any character
    # (e.g. "songXmp3"); it is now a literal dot.
    path = re.findall(r"href=\"([\w#!:.?+=&%@!\-\/]*\.mp3)", line)
    if path:
        return path
def get_auname(line):
    """Return all names found after class="auname" markup, or None."""
    matches = re.findall(r"class=\"auname\">(\w*)", line)
    return matches if matches else None
def get_ausong(line):
    """Return all titles enclosed as class="auname">title<, or None."""
    found = re.findall(r"class=\"auname\">(\w*)<", line)
    if not found:
        return None
    return found
def find_song_urls(song_title):
    """Search the vpleer engine for *song_title* and return matched mp3 URLs."""
    page = _engine_search(song_title)
    return get_song_path(page)
#LOG.info("Result:", find_song_urls("Ария - Антихрист")
| Python |
# -*- coding: utf-8 -*-
# lyricwiki.py
#
# Copyright 2009 Amr Hassan <amr.hassan@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import simplejson, urllib, os, hashlib, time
def _download(args):
    """
    Encode *args* as a UTF-8 query string, perform the API request and
    return the raw response body.
    """
    base = "http://lyrics.wikia.com/api.php?"
    encoded_args = {}
    for key, value in args.items():
        encoded_args[key] = value.encode("utf-8")
    query_string = urllib.urlencode(encoded_args)
    return urllib.urlopen(base + query_string).read()
def _get_page_titles(artist, title):
    """
    Return candidate wiki page titles for the artist/title pair:
    two direct "Artist:Title" guesses followed by the API search hits.
    """
    search_args = {"action": "query",
                   "list": "search",
                   "srsearch": artist + " " + title,
                   "format": "json",
                   }
    candidates = ["%s:%s" % (artist, title), "%s:%s" % (artist.title(), title.title())]
    response = simplejson.loads(_download(search_args))
    candidates.extend(hit["title"] for hit in response["query"]["search"])
    return candidates
def _get_lyrics(artist, title):
    """Fetch the raw lyrics text from lyrics.wikia.com, or None if not found.

    Tries every candidate page title in order, follows wiki redirects
    recursively and extracts the <lyrics>/<lyric> tag content.
    """
    for page_title in _get_page_titles(artist, title):
        args = {"action": "query",
                "prop": "revisions",
                "rvprop": "content",
                "titles": page_title,
                "format": "json",
                }
        # popitem() grabs the single page entry from the "pages" mapping.
        revisions = simplejson.loads(_download(args))["query"]["pages"].popitem()[1]
        if not "revisions" in revisions:
            continue
        content = revisions["revisions"][0]["*"]
        if content.startswith("#Redirect"):
            # Follow a "[[Artist:Title]]" redirect recursively.
            n_title = content[content.find("[[") + 2:content.rfind("]]")]
            return _get_lyrics(*n_title.split(":"))
        if "<lyrics>" in content:
            return content[content.find("<lyrics>") + len("<lyrics>") : content.find("</lyrics>")].strip()
        elif "<lyric>" in content:
            return content[content.find("<lyric>") + len("<lyric>") : content.find("</lyric>")].strip()
def get_lyrics(artist, title, cache_dir=None):
    """
    Get lyrics by artist and title
    set cache_dir to a valid (existing) directory
    to enable caching.
    """
    # BUGFIX: the docstring used to sit AFTER this return, so the function had
    # no docstring at all. The feature is deliberately disabled; everything
    # below the return is intentionally unreachable until it is re-enabled.
    return "Lyrics Disabled"
    path = None
    if cache_dir and os.path.exists(cache_dir):
        # Cache key: SHA1 of the lower-cased artist+title.
        digest = hashlib.sha1(artist.lower().encode("utf-8") + title.lower().encode("utf-8")).hexdigest()
        path = os.path.join(cache_dir, digest)
        if os.path.exists(path):
            fp = open(path)
            return simplejson.load(fp)["lyrics"].strip()
    lyrics = _get_lyrics(artist, title)
    if path and lyrics:
        fp = open(path, "w")
        simplejson.dump({"time": time.time(), "artist": artist, "title": title,
                         "source": "lyricwiki", "lyrics": lyrics }, fp, indent=4)
        fp.close()
    return lyrics
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 30, 2010
@author: ivan
'''
import gtk
from foobnix.util import LOG
class PrefListModel():
    """gtk.ListStore-backed model for the user's named play lists.

    prefListMap maps a play-list name to its list of beans; renames made in
    the tree view are propagated to that mapping in editRow().
    """
    POS_NAME = 0  # column index of the play-list name
    def __init__(self, widget, prefListMap):
        self.widget = widget
        self.prefListMap = prefListMap
        self.current_list_model = gtk.ListStore(str)
        renderer = gtk.CellRendererText()
        # Rows are editable in place; edits are committed via editRow.
        renderer.connect('edited', self.editRow)
        renderer.set_property('editable', True)
        column = gtk.TreeViewColumn(_("My play lists"), renderer, text=0, font=2)
        column.set_resizable(True)
        widget.append_column(column)
        widget.set_model(self.current_list_model)
    def removeSelected(self):
        """Delete the currently selected row, if any."""
        selection = self.widget.get_selection()
        selected = selection.get_selected()[1]
        if selected:
            self.current_list_model.remove(selected)
    def editRow(self, w, event, value):
        """Rename handler: update the row and re-key prefListMap.

        Row 0 (the default list) and names that already exist are left
        untouched.
        """
        beforeRename = unicode(self.getSelected())
        if value:
            i = self.getSelectedIndex()
            if i > 0 and not self.isContain(value):
                self.current_list_model[i][self.POS_NAME] = value
                """copy songs with new name"""
                LOG.info("beforeRename ", beforeRename, self.prefListMap.keys())
                datas = self.prefListMap[beforeRename]
                LOG.info(datas)
                del self.prefListMap[beforeRename]
                self.prefListMap[value] = datas
    def getSelectedIndex(self):
        """Return the selected top-level row index, or None."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        if selected:
            i = model.get_string_from_iter(selected)
            # Paths containing ":" are nested rows; only flat rows qualify.
            if i.find(":") == -1:
                LOG.info("Selected index is " , i)
                return int(i)
        return None
    def isContain(self, name):
        """True when a row with the given name already exists."""
        for i in xrange(len(self.current_list_model)):
            if str(self.current_list_model[i][self.POS_NAME]) == name:
                return True
        return False
    def getSelected(self):
        """Return the selected play-list name, or None."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        if selected:
            return model.get_value(selected, self.POS_NAME)
        else:
            return None
    def clear(self):
        # Remove every row from the model.
        self.current_list_model.clear()
    def append(self, name):
        # Add a play list entry by name.
        self.current_list_model.append([name])
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
from foobnix.model.entity import CommonBean
from foobnix.util import LOG
class VirturalLIstCntr():
    """In-memory backing list for the virtual play lists.

    Holds CommonBean items and supports removing an item together with its
    children (for folder-type items).
    """
    def __init__(self):
        self.items = []
    def get_items(self):
        """Return the raw item list."""
        return self.items
    def get_item_by_index(self, index):
        """Return the item at *index* (raises IndexError when out of range)."""
        return self.items[index]
    def append(self, item):
        """Add *item* to the end of the list."""
        self.items.append(item)
    def getState(self):
        """Return the item list (used for state persistence)."""
        return self.items
    def setState(self, items):
        """Replace the item list (used when restoring persisted state)."""
        self.items = items
    def remove(self, index):
        """Remove the item at *index*; out-of-range indexes are ignored."""
        # BUGFIX: the guard was "index > len(...)", which let index == len
        # through and raise IndexError in get_item_by_index; the old
        # "INDEX TOO BIG" was also a bare-string no-op instead of a log call.
        if index >= len(self.items):
            LOG.info("INDEX TOO BIG")
            return
        item = self.get_item_by_index(index)
        LOG.info("DELETE", item.name)
        self.items.remove(item)
    def remove_with_childrens(self, index, parent=None):
        """Remove the item at *index*; when it is a folder-type item, also
        remove the following items until one with a different parent is met."""
        type = self.get_item_by_index(index).type
        LOG.info(type)
        if type not in [CommonBean.TYPE_FOLDER, CommonBean.TYPE_GOOGLE_HELP] :
            self.remove(index)
            return
        self.remove(index)
        size = len(self.items)
        # After each removal the next child shifts into position *index*.
        for i in xrange(index, size):
            LOG.info("index" + str(i),)
            LOG.info(self.items[index].parent)
            if self.items[index].parent == parent:
                return
            else:
                self.remove(index)
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
import gobject
from foobnix.model.entity import CommonBean
from foobnix.util import LOG
class DirectoryModel():
    """gtk.TreeStore-backed model for the directory/radio/virtual tree view.

    Column layout (see POS_* constants): name, path, font, visibility flag,
    bean type, index, parent name, filtered-child count, start offset and
    duration (the last two support cue-sheet tracks).
    """
    POS_NAME = 0
    POS_PATH = 1
    POS_FONT = 2
    POS_VISIBLE = 3
    POS_TYPE = 4
    POS_INDEX = 5
    POS_PARENT = 6
    POS_FILTER_CHILD_COUNT = 7
    POS_START_AT = 8
    POS_DURATION = 9
    def __init__(self, widget):
        self.widget = widget
        self.current_list_model = gtk.TreeStore(str, str, str, gobject.TYPE_BOOLEAN, str, int, str, str, str, str)
        renderer = gtk.CellRendererText()
        #renderer.connect('edited', self.editRow)
        #renderer.set_property('editable', True)
        LOG.info("ATTTR", renderer.get_property("attributes"))
        column = gtk.TreeViewColumn(_("Title"), renderer, text=0, font=2)
        column.set_resizable(True)
        widget.append_column(column)
        # The view shows a filtered model: only rows whose POS_VISIBLE flag
        # is set are displayed (driven by filterByName).
        filter = self.current_list_model.filter_new()
        filter.set_visible_column(self.POS_VISIBLE)
        widget.set_model(filter)
    def editRow(self, w, event, value):
        """Rename handler for top-level rows (currently not connected)."""
        if value:
            selection = self.widget.get_selection()
            model, selected = selection.get_selected()
            LOG.info("VAlue", value)
            LOG.info(selected)
            i = model.get_string_from_iter(selected)
            LOG.info("I ", i)
            # Paths containing ":" are nested rows; only rename flat rows.
            if i.find(":") == -1:
                LOG.info(i)
                self.current_list_model[int(i)][self.POS_NAME] = value
    def append(self, level, bean):
        """Append *bean* under parent iter *level*; returns the new iter."""
        return self.current_list_model.append(level, [bean.name, bean.path, bean.font, bean.is_visible, bean.type, bean.index, bean.parent, bean.child_count, bean.start_at, bean.duration])
    def clear(self):
        # Remove every row from the tree store.
        self.current_list_model.clear()
    def getModel(self):
        return self.current_list_model
    def setModel(self, model):
        self.current_list_model = model
    def get_selected_bean(self):
        """Return the selected row as a CommonBean, or None."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        return self._getBeanByIter(model, selected)
    def deleteSelected(self):
        """Remove the currently selected row, if any."""
        model, iter = self.widget.get_selection().get_selected()
        if iter:
            model.remove(iter)
    def _getBeanByIter(self, model, iter):
        """Build a CommonBean from a tree iter, or None when iter is empty."""
        if iter:
            bean = CommonBean()
            bean.name = model.get_value(iter, self.POS_NAME)
            bean.path = model.get_value(iter, self.POS_PATH)
            bean.font = model.get_value(iter, self.POS_FONT)
            bean.visible = model.get_value(iter, self.POS_VISIBLE)
            bean.type = model.get_value(iter, self.POS_TYPE)
            bean.index = model.get_value(iter, self.POS_INDEX)
            bean.parent = model.get_value(iter, self.POS_PARENT)
            bean.child_count = model.get_value(iter, self.POS_FILTER_CHILD_COUNT)
            bean.start_at = model.get_value(iter, self.POS_START_AT)
            bean.duration = model.get_value(iter, self.POS_DURATION)
            return bean
        return None
    def getBeenByPosition(self, position):
        """Build a CommonBean from a top-level row index."""
        bean = CommonBean()
        bean.name = self.current_list_model[position][ self.POS_NAME]
        bean.path = self.current_list_model[position][ self.POS_PATH]
        bean.type = self.current_list_model[position][ self.POS_TYPE]
        bean.visible = self.current_list_model[position][ self.POS_VISIBLE]
        bean.font = self.current_list_model[position][ self.POS_FONT]
        bean.parent = self.current_list_model[position][self.POS_PARENT]
        bean.child_count = self.current_list_model[position][self.POS_FILTER_CHILD_COUNT]
        bean.start_at = self.current_list_model[position][self.POS_START_AT]
        bean.duration = self.current_list_model[position][self.POS_DURATION]
        return bean
    def getAllSongs(self):
        """Return every top-level row that is a playable item."""
        result = []
        for i in xrange(len(self.current_list_model)):
            been = self.getBeenByPosition(i)
            if been.type in [CommonBean.TYPE_MUSIC_FILE, CommonBean.TYPE_MUSIC_URL, CommonBean.TYPE_RADIO_URL]:
                result.append(been)
            return result
    def getChildSongBySelected(self):
        """Return the non-folder children of the selected row as beans."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        n = model.iter_n_children(selected)
        iterch = model.iter_children(selected)
        results = []
        for i in xrange(n):
            song = self._getBeanByIter(model, iterch)
            if song.type != CommonBean.TYPE_FOLDER:
                results.append(self._getBeanByIter(model, iterch))
            iterch = model.iter_next(iterch)
        return results
    def filterByName(self, query):
        """Show only rows (or parents of rows) whose name contains *query*.

        An empty query collapses the tree and makes everything visible again.
        """
        if len(query.strip()) > 0:
            query = query.strip().decode("utf-8").lower()
            for line in self.current_list_model:
                # NOTE(review): the parent name is not decode("utf-8")-ed here
                # while the child names below are - confirm this asymmetry.
                name = str(line[self.POS_NAME]).lower()
                if name.find(query) >= 0:
                    LOG.info("FIND PARENT:", name, query)
                    line[self.POS_VISIBLE] = True
                else:
                    find = False
                    child_count = 0;
                    for child in line.iterchildren():
                        name = str(child[self.POS_NAME]).decode("utf-8").lower()
                        if name.find(query) >= 0:
                            child_count += 1
                            LOG.info("FIND CHILD :", name, query)
                            child[self.POS_VISIBLE] = True
                            # A matching child keeps its parent visible too.
                            line[self.POS_VISIBLE] = True
                            line[self.POS_FILTER_CHILD_COUNT] = child_count
                            find = True
                        else:
                            child[self.POS_VISIBLE] = False
                    if not find:
                        line[self.POS_VISIBLE] = False
            else:
                self.widget.expand_all()
        else:
            self.widget.collapse_all()
            for line in self.current_list_model:
                line[self.POS_VISIBLE] = True
                for child in line.iterchildren():
                    child[self.POS_VISIBLE] = True
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 11, 2010
@author: ivan
'''
import os
from foobnix.util import LOG
from foobnix.directory.directory_model import DirectoryModel
from foobnix.model.entity import CommonBean
from foobnix.util.configuration import FConfiguration
from foobnix.util.file_utils import isDirectory, get_file_extenstion
import gtk
from foobnix.directory.pref_list_model import PrefListModel
import gettext
from foobnix.util.mouse_utils import is_double_left_click
from mutagen.mp3 import MP3
from foobnix.util.time_utils import normilize_time
from foobnix.radio.radios import RadioFolder
from foobnix.cue.cue_reader import CueReader
gettext.install("foobnix", unicode=True)
class DirectoryCntr():
    """Controller for the left-hand directory tree.

    Switches the tree between three views (local music, radio stations and
    virtual play lists), manages the named play lists (prefListMap), and
    pushes selected items into the playlist controller.
    """
    VIEW_LOCAL_MUSIC = 0
    VIEW_RADIO_STATION = 1
    VIEW_VIRTUAL_LISTS = 2
    # NOTE(review): same value as VIEW_VIRTUAL_LISTS - confirm intended.
    VIEW_CHARTS_LISTS = 2
    DEFAULT_LIST = "Default list";
    #DEFAULT_LIST_NAME = _("Default list");
    def __init__(self, gxMain, playlistCntr, radioListCntr, virtualListCntr):
        self.playlistCntr = playlistCntr
        self.radioListCntr = radioListCntr
        self.virtualListCntr = virtualListCntr
        widget = gxMain.get_widget("direcotry_treeview")
        self.current_list_model = DirectoryModel(widget)
        widget.connect("button-press-event", self.onMouseClick)
        widget.connect("key-release-event", self.onTreeViewDeleteItem)
        widget.connect("drag-end", self.on_drag_get)
        "Pref lists "
        # Name -> list of beans; the default list always exists.
        self.prefListMap = {self.DEFAULT_LIST : []}
        self.currentListMap = self.DEFAULT_LIST
        prefList = gxMain.get_widget("treeview3")
        prefList.connect("button-press-event", self.onPreflListMouseClick)
        prefList.connect("key-release-event", self.onPreflListDeleteItem)
        prefList.connect("cursor-changed", self.onPreflListSelect)
        self.prefModel = PrefListModel(prefList, self.prefListMap)
        self.mainNoteBook = gxMain.get_widget("main_notebook")
        self.mainNoteBook.set_current_page(0)
        self.leftNoteBook = gxMain.get_widget("left_notebook")
        self.filter = gxMain.get_widget("filter-entry")
        self.filter.connect("key-release-event", self.onFiltering)
        show_local = gxMain.get_widget("show_local_music_button")
        show_local.connect("clicked", self.onChangeView, self.VIEW_LOCAL_MUSIC)
        self.active_view = self.VIEW_LOCAL_MUSIC
        show_radio = gxMain.get_widget("show_radio_button")
        show_radio.connect("clicked", self.onChangeView, self.VIEW_RADIO_STATION)
        show_play_list = gxMain.get_widget("show_lists_button")
        show_play_list.connect("clicked", self.onChangeView, self.VIEW_VIRTUAL_LISTS)
        #show_charts_ = gxMain.get_widget("show_charts_button")
        #show_charts_.connect("clicked", self.onChangeView, self.VIEW_CHARTS_LISTS)
        # NOTE(review): attribute access with no call - probably meant to
        # invoke onChangeView(...); currently a no-op.
        self.onChangeView
        self.saved_model = None
        self.radio_folder = RadioFolder()
    def getState(self):
        """Return the play-list mapping (used for state persistence)."""
        return self.prefListMap
    def setState(self, preflists):
        """Restore persisted play lists and repopulate the pref-list view."""
        self.prefListMap = preflists
        self.prefModel.prefListMap = preflists
        for key in self.prefListMap:
            LOG.info("add key to virtual list", unicode(key))
            self.prefModel.append(key)
    def getPrefListBeans(self, preflist=DEFAULT_LIST):
        """Return the beans of the named play list, or None when unknown."""
        if preflist in self.prefListMap:
            return self.prefListMap[preflist]
        return None
    def appendToPrefListBeans(self, beans, preflist=DEFAULT_LIST):
        """Append *beans* to the named play list, creating it if missing."""
        if not preflist in self.prefListMap:
            LOG.info("Key not found")
            self.prefListMap[preflist] = []
        if beans:
            for bean in beans:
                self.prefListMap[preflist].append(bean)
    def clearPrefList(self, listName):
        """Empty the named play list (keeps the key)."""
        if listName in self.prefListMap:
            self.prefListMap[listName] = []
    def onPreflListSelect(self, *args):
        """Show the selected play list in the tree; clear on unknown names."""
        #self.view_list.set_active(self.VIEW_VIRTUAL_LISTS)
        self.currentListMap = self.prefModel.getSelected()
        if self.currentListMap in self.prefListMap:
            beans = self.prefListMap[self.currentListMap]
            self.display_virtual(beans)
        else:
            self.clear()
    def onPreflListMouseClick(self, w, event):
        """Right-button double click creates a fresh, empty play list."""
        if event.button == 3 and event.type == gtk.gdk._2BUTTON_PRESS: #@UndefinedVariable
            LOG.debug("Create new paly list")
            unknownListName = _("New play list")
            if not self.prefModel.isContain(unknownListName):
                self.prefModel.append(unknownListName)
                self.prefListMap[unknownListName] = []
    def onPreflListDeleteItem(self, w, event):
        """Delete key removes the selected play list (but never row 0)."""
        if event.type == gtk.gdk.KEY_RELEASE: #@UndefinedVariable
            #Enter pressed
            LOG.info(event.keyval)
            LOG.info(event.hardware_keycode)
            # 119 / 107 are Delete keycodes - TODO confirm across layouts.
            if event.hardware_keycode == 119 or event.hardware_keycode == 107:
                if self.prefModel.getSelectedIndex() > 0:
                    del self.prefListMap[unicode(self.prefModel.getSelected())]
                    self.prefModel.removeSelected()
                    self.clear()
    def all(self, *args):
        # Debug helper: log every argument.
        for arg in args:
            LOG.info(arg)
    def getModel(self):
        return self.current_list_model
    def on_drag_get(self, *args):
        # Drag-and-drop appends the dragged selection to the playlist.
        self.populate_playlist(append=True)
    "TODO: set active button state"
    def set_active_view(self, view_type):
        #self.view_list.set_active(view_type)
        pass
    def onChangeView(self, w, active_view):
        """Switch the tree content between local music, radio and play lists."""
        self.active_view = active_view
        self.leftNoteBook.set_current_page(0)
        if active_view == self.VIEW_LOCAL_MUSIC:
            self.clear()
            self.addAll()
        elif active_view == self.VIEW_RADIO_STATION:
            self.clear()
            # One bold folder row per radio play list, with its stations below.
            files = self.radio_folder.get_radio_FPLs()
            for fpl in files:
                parent = self.current_list_model.append(None, CommonBean(name=fpl.name, path=None, font="bold", is_visible=True, type=CommonBean.TYPE_FOLDER))
                for radio, urls in fpl.urls_dict.iteritems():
                    self.current_list_model.append(parent, CommonBean(name=radio, path=urls[0], font="normal", is_visible=True, type=CommonBean.TYPE_RADIO_URL, parent=fpl.name))
        elif active_view == self.VIEW_VIRTUAL_LISTS:
            items = self.getPrefListBeans(self.DEFAULT_LIST)
            self.display_virtual(items)
    def append_virtual(self, beans=None):
        """Append *beans* to the current play list and refresh the display."""
        LOG.debug("Current virtual list", self.currentListMap)
        if not self.currentListMap:
            self.currentListMap = self.DEFAULT_LIST
        self.appendToPrefListBeans(beans, self.currentListMap)
        items = self.getPrefListBeans(self.currentListMap)
        self.display_virtual(items)
    def display_virtual(self, items):
        """Render a play list: a label row first, then its items as a tree."""
        self.clear()
        "Displya list title"
        self.current_list_model.append(None, CommonBean(name="[" + self.currentListMap + "]", path=None, font="bold", is_visible=True, type=CommonBean.TYPE_LABEL, parent=None, index=0))
        if not items:
            return None
        parent = None
        i = 1
        for item in items:
            LOG.info(item)
            # Parent-less items start a new top-level group; children attach
            # to the most recent parent row.
            if item.parent == None:
                parent = self.current_list_model.append(None, CommonBean(name=item.name, path=item.path, font="normal", is_visible=True, type=item.type, parent=item.parent, index=i))
            else:
                self.current_list_model.append(parent, CommonBean(name=item.name, path=item.path, font="normal", is_visible=True, type=item.type, parent=item.parent, index=i))
            i += 1
    def onTreeViewDeleteItem(self, w, event):
        """Delete key removes the selected virtual-list item (and children)."""
        if self.active_view != self.VIEW_VIRTUAL_LISTS:
            return
        LOG.info(event)
        if event.type == gtk.gdk.KEY_RELEASE: #@UndefinedVariable
            #Enter pressed
            LOG.info(event.keyval)
            LOG.info(event.hardware_keycode)
            if event.hardware_keycode == 119 or event.hardware_keycode == 107:
                LOG.info("Delete")
                bean = self.current_list_model.get_selected_bean()
                LOG.info(bean.index)
                # Index 0 is the list-title label row; it cannot be deleted.
                if bean.index > 0:
                    self.virtualListCntr.items = self.prefListMap[self.currentListMap]
                    self.virtualListCntr.remove_with_childrens(bean.index - 1, bean.parent)
                    self.append_virtual()
    def onFiltering(self, *args):
        # Re-filter the tree on every keystroke in the filter entry.
        text = self.filter.get_text()
        LOG.info("filtering by text", text)
        self.current_list_model.filterByName(text)
    def onMouseClick(self, w, event):
        # Double left click sends the selection to the playlist.
        if is_double_left_click(event):
            self.populate_playlist()
    def update_songs_time(self, songs):
        """Fill in missing durations for mp3 files via mutagen."""
        for song in songs:
            # endswith("3") is a crude ".mp3" check - TODO confirm intent.
            if song.path and song.path.endswith("3") and not song.time:
                try:
                    audio = MP3(song.path)
                    song.time = normilize_time(audio.info.length)
                except:
                    pass
                #audio = EasyID3(song.path)
                #song.title = str(audio["title"][0])
                #song.artist =str( audio["artist"][0])
                #song.album = str(audio["album"][0])
                #song.tracknumber= str(audio["tracknumber"][0])
                #LOG.info(song.title, song.artist, song.album
    def populate_playlist(self, append=False):
        """Send the current selection to the playlist controller.

        Folders contribute their child songs, the label row contributes all
        songs, and single items are sent as-is. With append=False a new
        notebook page is opened and playback starts.
        """
        LOG.info("Drug begin")
        directoryBean = self.current_list_model.get_selected_bean()
        if not directoryBean:
            return
        LOG.info("Select: ", directoryBean.name, directoryBean.type)
        LOG.info("Drug type", directoryBean.type)
        if directoryBean.type in [CommonBean.TYPE_FOLDER, CommonBean.TYPE_GOOGLE_HELP] :
            songs = self.current_list_model.getChildSongBySelected()
            self.update_songs_time(songs)
            LOG.info("Select songs", songs)
            if not songs:
                return
            if append:
                self.playlistCntr.append(songs)
            else:
                self.playlistCntr.append_notebook_page(directoryBean.name)
                self.playlistCntr.append_and_play(songs)
        elif directoryBean.type == CommonBean.TYPE_LABEL:
            songs = self.current_list_model.getAllSongs()
            if append:
                self.playlistCntr.append(songs)
            else:
                self.playlistCntr.append_notebook_page(directoryBean.name)
                self.playlistCntr.append_and_play(songs)
        else:
            if append:
                self.playlistCntr.append([directoryBean])
            else:
                self.playlistCntr.append_notebook_page(directoryBean.name)
                self.playlistCntr.append_and_play([directoryBean])
        #LOG.info("PAGE", self.leftNoteBook.get_current_page()
        #LOG.info("SET PAGE", self.mainNoteBook.set_current_page(0)
    def getALLChildren(self, row, string):
        # NOTE(review): references self.POS_NAME / self.POS_VISIBLE, which are
        # DirectoryModel attributes, not DirectoryCntr ones - looks like code
        # moved from the model; calling this as-is would raise AttributeError.
        for child in row.iterchildren():
            name = child[self.POS_NAME].lower()
            if name.find(string) >= 0:
                LOG.info("FIND SUB :", name, string)
                child[self.POS_VISIBLE] = True
            else:
                child[self.POS_VISIBLE] = False
    def updateDirectoryByPath(self, path):
        """Point the local-music view at a new root folder and rescan."""
        LOG.info("Update path", path)
        self.musicFolder = path
        self.current_list_model.clear()
        self.addAll()
    def clear(self):
        self.current_list_model.clear()
    def getAllSongsByPath(self, path):
        """Return beans for the supported music files directly inside *path*."""
        dir = os.path.abspath(path)
        list = os.listdir(dir)
        list = sorted(list)
        result = []
        for file_name in list:
            if get_file_extenstion(file_name) not in FConfiguration().supportTypes:
                continue
            full_path = path + "/" + file_name
            if not isDirectory(full_path):
                bean = CommonBean(name=file_name, path=full_path, type=CommonBean.TYPE_MUSIC_FILE)
                result.append(bean)
        LOG.debug(result)
        return result
    # Disabled cache for the scanned model (see commented code below).
    cachModel = []
    def addAllThread(self):
        """Scan the music folder recursively into the tree model."""
        """
        if self.cachModel:
        for bean in self.cachModel:
        self.current_list_model.append(None, bean)
        return True
        """
        level = None;
        self.go_recursive(self.musicFolder, level)
        if not len(self.current_list_model.getModel()):
            # Empty scan: show a placeholder row instead of a blank tree.
            self.current_list_model.append(level, CommonBean(name=_("Music not found in ") + FConfiguration().mediaLibraryPath, path=None, font="bold", is_visible=True, type=CommonBean.TYPE_FOLDER, parent=level))
        else:
            """
            for i in xrange(len(self.current_list_model.getModel())):
            bean = self.current_list_model.getBeenByPosition(i)
            self.cachModel.append(bean)
            """
    def addAll(self):
        #thread.start_new_thread(self.addAllThread, ())
        self.addAllThread()
    def sortedDirsAndFiles(self, path, list):
        """Return *list* reordered as sorted directories, then sorted files."""
        files = []
        directories = []
        #First add dirs
        for file in list:
            full_path = path + "/" + file
            if isDirectory(full_path):
                directories.append(file)
            else:
                files.append(file)
        return sorted(directories) + sorted(files)
    def isDirectoryWithMusic(self, path):
        """True when *path* is a directory containing (recursively) any
        supported music file."""
        if isDirectory(path):
            dir = os.path.abspath(path)
            list = None
            try:
                list = os.listdir(dir)
            except OSError, e:
                LOG.info("Can'r get list of dir", e)
            if not list:
                return False
            for file in list:
                full_path = path + "/" + file
                if isDirectory(full_path):
                    if self.isDirectoryWithMusic(full_path):
                        return True
                else:
                    if get_file_extenstion(file) in FConfiguration().supportTypes:
                        return True
        return False
    def go_recursive(self, path, level):
        """Walk *path* depth-first, appending folders (bold) and supported
        music files (normal) under the tree iter *level*."""
        dir = os.path.abspath(path)
        list = os.listdir(dir)
        list = self.sortedDirsAndFiles(path, list)
        for file in list:
            full_path = path + "/" + file
            if not isDirectory(full_path) and get_file_extenstion(file) not in FConfiguration().supportTypes:
                continue
            """check cue is valid"""
            if full_path.endswith(".cue") and not CueReader(full_path).is_cue_valid():
                continue
            if self.isDirectoryWithMusic(full_path):
                #LOG.debug("directory", file)
                sub = self.current_list_model.append(level, CommonBean(name=file, path=full_path, font="bold", is_visible=True, type=CommonBean.TYPE_FOLDER, parent=level))
                self.go_recursive(full_path, sub)
            else:
                if not isDirectory(full_path):
                    self.current_list_model.append(level, CommonBean(name=file, path=full_path, font="normal", is_visible=True, type=CommonBean.TYPE_MUSIC_FILE, parent=level))
                    #LOG.debug("file", file)
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
from mutagen.easyid3 import EasyID3
from mutagen.mp3 import MP3, HeaderNotFoundError
from mutagen import File
import os
import gtk
from foobnix.util.configuration import FConfiguration
class CommonBean():
    """Common item model for everything shown in lists: local files, URLs,
    radio stations, folders and label rows.

    The name convention "Artist - Title" drives getArtist()/getTitle().
    """
    TYPE_FOLDER = "TYPE_FOLDER"
    TYPE_LABEL = "TYPE_LABEL"
    TYPE_GOOGLE_HELP = "TYPE_GOOGLE_HELP"
    TYPE_MUSIC_FILE = "TYPE_MUSIC_FILE"
    TYPE_MUSIC_URL = "TYPE_MUSIC_URL"
    TYPE_RADIO_URL = "TYPE_RADIO_URL"
    #Song attributes (class-level defaults; some are shadowed in __init__)
    album = ""
    date = ""
    genre = ""
    tracknumber = ""
    def __init__(self, name=None, path=None, type=None, is_visible=True, color=None, font="normal", index= -1, parent=None, id3=None):
        self.name = name
        self.path = path
        self.type = type
        self.icon = None
        self.color = color
        self.index = index
        # BUGFIX: self.time was assigned twice; the duplicate is removed.
        self.time = None
        self.is_visible = is_visible
        self.font = font
        self.parent = parent
        self.album = None
        self.year = None
        self.artist = None
        self.title = None
        self.child_count = None
        self.start_at = None
        self.duration = None
        # BUGFIX: the id3 argument used to be discarded (self.id3 = None).
        self.id3 = id3
    def getArtist(self):
        """Artist from the id3 field if set, else the part before " - "."""
        if self.artist:
            return self.artist
        s = self.name.split(" - ")
        if len(s) > 1:
            artist = self.name.split(" - ")[0]
            return ("" + artist).strip()
        return ""
    def getTitle(self):
        """Title from the id3 field if set, else from the name, with any
        supported file extension stripped."""
        if self.title:
            return self.title
        s = self.name.split(" - ")
        result = ""
        if len(s) > 1:
            title = self.name.split(" - ")[1]
            result = ("" + title).strip()
        else:
            result = self.name
        for ex in FConfiguration().supportTypes:
            if result.endswith(ex):
                result = result[:-len(ex)]
        return result
    def setIconPlaying(self):
        self.icon = gtk.STOCK_GO_FORWARD
    def setIconErorr(self):
        self.icon = gtk.STOCK_DIALOG_ERROR
    def setIconNone(self):
        self.icon = None
    def getTitleDescription(self):
        """Verbose description preferring full tag data, then id3, then name."""
        if self.title and self.artist and self.album:
            return self.artist + " - [" + self.album + "] #" + self.tracknumber + " " + self.title
        else:
            if self.id3:
                return "[" + self.id3 + "] " + self.name
            else:
                return self.name
    def get_short_description(self):
        if self.title:
            return self.tracknumber + " - " + self.title
        else:
            return self.name
    def getPlayListDescription(self):
        if self.title and self.album:
            # BUGFIX: self.parent may be None, which used to raise TypeError
            # on concatenation.
            return self.name + " - " + self.title + " (" + self.album + ")" + (self.parent or "")
        return self.name
    def getMp3TagsName(self):
        """Return "artist - title" read from the file's tags, or None.

        Only applies to existing local TYPE_MUSIC_FILE paths.
        """
        audio = None
        if not self.path:
            return
        if not self.type:
            return
        if self.type != self.TYPE_MUSIC_FILE:
            return
        if not os.path.exists(self.path):
            return
        try:
            audio = MP3(self.path, ID3=EasyID3)
        except HeaderNotFoundError:
            # BUGFIX: the generic File() fallback was immediately followed by
            # an unconditional "return None", so its result was never used.
            try:
                audio = File(self.path)
            except HeaderNotFoundError:
                return None
        artist = None
        title = None
        if audio and audio.has_key('artist'): artist = audio["artist"][0]
        if audio and audio.has_key('title'): title = audio["title"][0]
        if artist and title:
            return artist + " - " + title
    def __str__(self):
        return "Common Bean :" + self.__contcat(
            "name:", self.name,
            "path:", self.path,
            "type:", self.type,
            "icon:", self.icon,
            "color:", self.color,
            "index:", self.index,
            "time:", self.time,
            "is_visible:", self.is_visible,
            "font:", self.font,
            "parent", self.parent)
    def __contcat(self, *args):
        # Space-join the string form of every argument.
        result = ""
        for arg in args:
            result += " " + str(arg)
        return result
| Python |
import gtk
def is_double_click(event):
    """True when *event* is a double click of the left mouse button."""
    return event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS #@UndefinedVariable
def is_double_left_click(event):
    """True when *event* is a double click of the left mouse button."""
    return event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS #@UndefinedVariable
def is_double_rigth_click(event):
    """True when *event* is a double click of the right mouse button."""
    return event.button == 3 and event.type == gtk.gdk._2BUTTON_PRESS #@UndefinedVariable
def is_rigth_click(event):
    """True when *event* is a single click of the right mouse button."""
    return event.button == 3 and event.type == gtk.gdk.BUTTON_PRESS #@UndefinedVariable
def is_left_click(event):
    """True when *event* is a single click of the left mouse button."""
    return event.button == 1 and event.type == gtk.gdk.BUTTON_PRESS #@UndefinedVariable
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
import sys
import platform
import logging
def init():
    """Configure root logging to append to the foobnix log file.

    Called before every log call; logging.basicConfig is a no-op after the
    first successful call, so repeated calls are cheap.
    """
    # IMPROVEMENT: the LEVELS name->level dict was built here on every call
    # and never used anywhere; it has been removed.
    LOG_FILENAME = '/tmp/foobnix.log'
    logging.basicConfig(filename=LOG_FILENAME, level=logging.NOTSET)
def debug(*args):
    # Lazily (re)initialise logging, echo to stdout, then log the tuple.
    init()
    print "DEBUG:", args
    logging.debug(args)
def info(*args):
    # Lazily (re)initialise logging, echo to stdout, then log the tuple.
    init()
    print "INFO:", args
    logging.info(args)
def warn(*args):
    # Lazily (re)initialise logging, echo to stdout, then log the tuple.
    init()
    print "WARN:", args
    logging.warn(args)
def error(*args):
    # Lazily (re)initialise logging, echo to stderr, then log the tuple.
    init()
    print >> sys.stderr, "ERROR", args
    logging.error(args)
def print_debug_info():
    """Dump interpreter, platform and hardware details to the debug log."""
    init()
    debug('*************** PLATFORM INFORMATION ************************')
    debug('==Interpreter==')
    debug('Version :', platform.python_version())
    debug('Version tuple:', platform.python_version_tuple())
    debug('Compiler :', platform.python_compiler())
    debug('Build :', platform.python_build())
    debug('==Platform==')
    debug('Normal :', platform.platform())
    debug('Aliased:', platform.platform(aliased=True))
    debug('Terse :', platform.platform(terse=True))
    debug('==Operating System and Hardware Info==')
    debug('uname:', platform.uname())
    debug('system :', platform.system())
    debug('node :', platform.node())
    debug('release :', platform.release())
    debug('version :', platform.version())
    debug('machine :', platform.machine())
    debug('processor:', platform.processor())
    debug('==Executable Architecture==')
    debug('interpreter:', platform.architecture())
    debug('/bin/ls :', platform.architecture('/bin/ls'))
    debug('*******************************************************')
| Python |
'''
Created on Jul 27, 2010
@author: ivan
'''
import os
import ConfigParser
from foobnix.util import LOG
from foobnix.util.singleton import Singleton
class Config:
    """Read-only access to the user's ``foobnix.cfg`` INI file.

    Singleton (via the Singleton metaclass): every ``Config()`` call
    returns the same parsed configuration.
    """
    __metaclass__ = Singleton
    # Section name all options live under.
    FOOBNIX = "foobnix"
    SUPPORTED_AUDIO_FORMATS = 'supported_audio_formats'
    # Home directory: $HOME on unix, %USERPROFILE% on Windows.
    USER_DIR = os.getenv("HOME") or os.getenv('USERPROFILE')
    CFG_LOCAL_FILE = USER_DIR + "/foobnix.cfg"
    # NOTE(review): identical to CFG_LOCAL_FILE, so the "installed" branch
    # of get_file_path() can never be reached — the installed location was
    # probably meant to be a system path; confirm the intended value.
    CFG_INSTALLED_FILE = USER_DIR + "/foobnix.cfg"
    CFG_LOCAL_TEST_FILE = "../../foobnix.cfg"
    def __init__(self):
        # Parse the first configuration file found; self.config stays None
        # when none exists, and get() then answers None for every option.
        self.config = None
        if self.get_file_path():
            self.config = ConfigParser.RawConfigParser()
            self.config.read(self.get_file_path())
        else:
            LOG.error("Config file not found")
    def get_file_path(self):
        """Return the first existing cfg path (local, installed, test-relative) or None."""
        if os.path.isfile(self.CFG_LOCAL_FILE):
            LOG.debug("Read local cfg file", self.CFG_LOCAL_FILE)
            return self.CFG_LOCAL_FILE
        elif os.path.isfile(self.CFG_INSTALLED_FILE):
            LOG.debug("Read installed cfg file", self.CFG_INSTALLED_FILE)
            return self.CFG_INSTALLED_FILE
        elif os.path.isfile(self.CFG_LOCAL_TEST_FILE):
            LOG.debug("Read local cfg file from test", self.CFG_LOCAL_TEST_FILE)
            return self.CFG_LOCAL_TEST_FILE
        else:
            LOG.debug("Config file not found")
            return None
    def get(self, type):
        """Return option *type* from the [foobnix] section, or None when no
        configuration file was loaded."""
        if self.config:
            LOG.debug("Get cfg value for", type)
            return self.config.get(self.FOOBNIX, type)
        else:
            return None
def test():
print Config().get(Config.SUPPORTED_AUDIO_FORMATS)
print Config().get(Config.SUPPORTED_AUDIO_FORMATS)
print Config().get(Config.SUPPORTED_AUDIO_FORMATS)
test()
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
import os
def isDirectory(path):
    """True when *path* names an existing directory."""
    return os.path.isdir(path)
def get_file_extenstion(fileName):
    """Return the lower-cased extension of *fileName*, dot included
    (empty string when there is none).

    NOTE(review): "extenstion" typo is kept — the name is public API.
    """
    _root, ext = os.path.splitext(fileName)
    return ext.lower()
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
def convert_ns(time_int):
    """Format a gstreamer timestamp (nanoseconds) as "[H:]MM:SS".

    Hours are printed unpadded ("1:02:05"); minutes and seconds are always
    two digits.  Bug fix: floor division (//) replaces '/', which relied on
    Python 2's implicit integer division and silently breaks under Python 3.
    """
    time_int = time_int // 1000000000
    time_str = ""
    if time_int >= 3600:
        _hours = time_int // 3600
        time_int = time_int - (_hours * 3600)
        time_str = str(_hours) + ":"
    # >= 600 means two-digit minutes remain; < 600 needs a leading zero.
    if time_int >= 600:
        _mins = time_int // 60
        time_int = time_int - (_mins * 60)
        time_str = time_str + str(_mins) + ":"
    elif time_int >= 60:
        _mins = time_int // 60
        time_int = time_int - (_mins * 60)
        time_str = time_str + "0" + str(_mins) + ":"
    else:
        time_str = time_str + "00:"
    if time_int > 9:
        time_str = time_str + str(time_int)
    else:
        time_str = time_str + "0" + str(time_int)
    return time_str
def normilize_time(length):
    """Format a duration in seconds as "SS", "MM:SS" or "HH:MM:SS",
    each field zero-padded to two digits.

    Bug fix: the old code skipped the minutes field whenever it was zero,
    so e.g. 3605 seconds rendered as "01:05" instead of "01:00:05".
    (NOTE: "normilize" typo kept — the name is public API.)
    """
    length = int(length)
    hours, rest = divmod(length, 3600)
    mins, secs = divmod(rest, 60)
    parts = []
    if hours:
        parts.append("%02d" % hours)
    # Minutes appear whenever there are hours, even if zero.
    if hours or mins:
        parts.append("%02d" % mins)
    parts.append("%02d" % secs)
    return ":".join(parts)
'''
Created on Jul 27, 2010
@author: ivan
'''
class Singleton(type):
    """Metaclass caching the first instance of each class that uses it.

    Every later construction call returns that same cached instance.
    """
    def __init__(self, name, bases, dict):
        super(Singleton, self).__init__(name, bases, dict)
        # One cache slot per class using this metaclass.
        self.instance = None
    def __call__(self, *args, **kw):
        if self.instance is None:
            self.instance = super(Singleton, self).__call__(*args, **kw)
        return self.instance
'''
Created on Mar 3, 2010
@author: ivan
'''
import urllib2
from foobnix.util import LOG
"Get content of the url"
def get_content(url):
    """Download and return the body of *url*; None for an empty url or
    any download failure."""
    if not url:
        return None
    try:
        return urllib2.urlopen(url).read()
    except:
        LOG.error("INCORRECT URL ERROR .... ", url)
        return None
def getStationPath(url):
print "get station"
if not url:
return None
_file_url = url
urls = []
try:
connect = urllib2.urlopen(url)
data = connect.read()
urls = getStations(data, urls)
except:
print "INCORRECT URL ERROR .... ", url
if urls:
return urls[0]
def getStations(data, urls):
for line in data.rsplit():
line = line.lower()
if line.startswith("file"):
index = line.find("=")
url = line[index + 1 : ]
print url
urls.append(url)
return urls
def get_radio_source(url):
    """Resolve a playlist URL to a direct stream URL.

    .pls lists are resolved through getStationPath(); for .m3u the first
    non-comment line is returned.  Anything else (or a failed resolution)
    yields *url* unchanged.
    """
    if not url:
        return url
    if url.endswith(".pls"):
        stream = getStationPath(url)
        if stream:
            return stream
    elif url.endswith(".m3u"):
        body = get_content(url)
        for token in body.rsplit():
            if not token.startswith("#"):
                return token
    return url
def getPlsName(_file_url):
    """Return the final path segment (file name) of *_file_url*; the whole
    string when it contains no '/'."""
    return _file_url[_file_url.rfind("/") + 1:]
def getFirst(self, urls):
    """Return the first element of *urls*, or None when it is empty/None.

    NOTE(review): 'self' looks like a leftover from a method — this is a
    module-level function; confirm callers before dropping the parameter.
    """
    return urls[0] if urls else None
| Python |
# -*- coding: utf-8 -*-
'''
Created on Feb 27, 2010
@author: ivan
'''
from __future__ import with_statement
import pickle
import os
import tempfile
import ConfigParser
from foobnix.util.singleton import Singleton
from foobnix.util import LOG
# Installation data directory (unix layout) and its radio sub-folder.
FOOBNIX_TMP = "/opt/foobnix"
FOOBNIX_TMP_RADIO = os.path.join(FOOBNIX_TMP, "radio")
# File containing VERSION=/RELEASE= lines, read by get_version() below.
FOOBNIX_VERSION_FILE_NAME = "version"
"""get last version from file"""
def get_version():
    """Return "VERSION[-RELEASE]" parsed from the foobnix version file.

    Looks in /opt/foobnix first, then in the current directory.  Returns
    the default "A" when no version file exists — the old code went on to
    open None and crashed with a TypeError in that case.  Also uses
    'with open(...)' instead of the bare file() builtin so the handle is
    always closed.
    """
    result = "A"
    installed = os.path.join(FOOBNIX_TMP, FOOBNIX_VERSION_FILE_NAME)
    if os.path.exists(installed):
        version_file = installed
    elif os.path.exists(FOOBNIX_VERSION_FILE_NAME):
        version_file = FOOBNIX_VERSION_FILE_NAME
    else:
        return result
    with open(version_file, 'r') as v_file:
        for line in v_file:
            line = str(line).strip()
            if line.find("VERSION=") >= 0:
                result = line[len("VERSION="):]
            elif line.find("RELEASE=") >= 0:
                result += "-" + line[len("RELEASE="):]
    return result
VERSION = get_version()
class FConfiguration:
FOOBNIX = "foobnix"
SUPPORTED_AUDIO_FORMATS = 'supported_audio_formats'
__metaclass__ = Singleton
USER_DIR = os.getenv("HOME") or os.getenv('USERPROFILE')
CFG_FILE = USER_DIR + "/foobnix_conf.pkl"
config = ConfigParser.RawConfigParser()
config.read(os.path.join(USER_DIR, "foobnix.cfg"))
def get(self, type):
return self.config.get(self.FOOBNIX, self.SUPPORTED_AUDIO_FORMATS)
def __init__(self, is_load_file=True):
self.mediaLibraryPath = tempfile.gettempdir()
self.onlineMusicPath = tempfile.gettempdir()
self.supportTypes = [".mp3", ".ogg", ".ape", ".flac", ".wma", ".cue"]
self.isRandom = False
self.isRepeat = True
self.isPlayOnStart = False
self.savedPlayList = []
self.savedRadioList = []
self.savedSongIndex = 0
self.volumeValue = 50.0
self.vpanelPostition = 234
self.hpanelPostition = 370
self.hpanel2Postition = 521
self.playlistState = None
self.radiolistState = None
self.virtualListState = {"Default list" : []}
self.is_save_online = False
self.song_source_relevance_algorithm = 0
self.online_tab_show_by = 0
self.vk_login = "qax@bigmir.net"
self.vk_password = "foobnix"
self.lfm_login = "foobnix"
self.lfm_password = "foobnix"
self.API_KEY = "bca6866edc9bdcec8d5e8c32f709bea1"
self.API_SECRET = "800adaf46e237805a4ec2a81404b3ff2"
self.cookie = None
self.count_of_tabs = 5
instance = self._loadCfgFromFile(is_load_file)
if instance:
try:
self.virtualListState = instance.virtualListState
self.playlistState = instance.playlistState
self.radiolistState = instance.radiolistState
self.mediaLibraryPath = instance.mediaLibraryPath
self.isRandom = instance.isRandom
self.isRepeat = instance.isRepeat
self.isPlayOnStart = instance.isPlayOnStart
self.savedPlayList = instance.savedPlayList
self.savedSongIndex = instance.savedSongIndex
self.volumeValue = instance.volumeValue
self.vpanelPostition = instance.vpanelPostition
self.hpanelPostition = instance.hpanelPostition
self.hpanel2Postition = instance.hpanel2Postition
self.savedRadioList = instance.savedRadioList
self.is_save_online = instance.is_save_online
self.onlineMusicPath = instance.onlineMusicPath
self.vk_login = instance.vk_login
self.vk_password = instance.vk_password
self.lfm_login = instance.lfm_login
self.lfm_password = instance.lfm_password
self.count_of_tabs = instance.count_of_tabs
self.cookie = instance.cookie
except AttributeError:
LOG.debug("Configuraton attributes are changed")
os.remove(self.CFG_FILE)
LOG.info("LOAD CONFIGS")
self.printArttibutes()
def save(self):
LOG.info("SAVE CONFIGS")
self.printArttibutes()
FConfiguration()._saveCfgToFile()
def printArttibutes(self):
for i in dir(self):
if not i.startswith("__"):
LOG.info(i, getattr(self, i))
def _saveCfgToFile(self):
#conf = FConfiguration()
save_file = file(self.CFG_FILE, 'w')
pickle.dump(self, save_file)
save_file.close()
LOG.debug("Save configuration")
def _loadCfgFromFile(self, is_load_file):
if not is_load_file:
return
try:
with file(self.CFG_FILE, 'r') as load_file:
load_file = file(self.CFG_FILE, 'r')
pickled = load_file.read()
# fixing mistyped 'configuration' package name
if 'confguration' in pickled:
pickled = pickled.replace('confguration', 'configuration')
return pickle.loads(pickled)
except IOError:
LOG.debug('Configuration file does not exist.')
except ImportError, ex:
LOG.error('Configuration file is corrupted. Removing it...')
os.remove(self.CFG_FILE)
except BaseException, ex:
LOG.error('Unexpected exception of type %s: "%s".' % (ex.__class__.__name__, ex))
| Python |
'''
Created on Apr 19, 2010
@author: matik
'''
from gobject import GObject, type_register, signal_new #@UnresolvedImport
import gobject
# Re-exported gobject constants used by controllers' __gsignals__ tables.
SIGNAL_RUN_FIRST = gobject.SIGNAL_RUN_FIRST #@UndefinedVariable
TYPE_NONE = gobject.TYPE_NONE
TYPE_PYOBJECT = gobject.TYPE_PYOBJECT
class BaseController(GObject):
    """Base for controllers that emit gobject signals.

    Subclasses declare a __gsignals__ table; the constructor initialises
    the GObject side and registers the concrete subclass with the gobject
    type system so those signals can be emitted.
    """
    def __init__(self):
        self.__gobject_init__()
        # NOTE(review): type_register runs on every instantiation, not once
        # per class — confirm repeated registration of the same type is safe.
        type_register(self.__class__)
| Python |
'''
Created on 20.04.2010
@author: ivan
'''
import gtk
"""Base list controller for similar functional on any listview or treeview"""
class BaseListController():
    """Shared behaviour for single-column gtk list/tree views.

    Wraps a gtk widget with a one-string-column ListStore and exposes
    add/clear/select helpers; subclasses override the on_* hooks.
    """
    # Column index of the displayed string in the model.
    POSITION_NAME = 0
    def __init__(self, widget):
        self.widget = widget
        self.model = gtk.ListStore(str)
        self.title = None
        self.column = gtk.TreeViewColumn(self.title, gtk.CellRendererText(), text=0)
        self.widget.append_column(self.column)
        self.widget.set_model(self.model)
        self.widget.connect("button-press-event", self.on_button_press)
        self.widget.connect("drag-end", self.__on_drag_end)
    def set_title(self, title):
        """Set the column header shown above the list."""
        self.column.set_title(title)
    def __on_drag_end(self, *args):
        # Adapter: gtk passes extra signal args the hook does not need.
        self.on_drag()
    def on_button_press(self, w, e):
        # Hook for subclasses: mouse press on the list.
        pass
    def on_drag(self):
        # Hook for subclasses: drag-and-drop finished.
        pass
    def on_duble_click(self):
        # Hook for subclasses: double click. (NOTE: "duble" typo is part of
        # the public name.)
        pass
    def get_item_by_position(self, position):
        """Return the string stored at row *position*."""
        return self.model[position][self.POSITION_NAME]
    def get_all_items(self):
        """Return every row's string as a plain list."""
        items = []
        for item in self.model:
            item = item[self.POSITION_NAME]
            items.append(item)
        return items
    def get_selected_item(self):
        """Return the currently selected row's string, or None."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        return self._get_item_by_iter(selected)
    def _get_item_by_iter(self, iter):
        # Returns None implicitly when iter is invalid (no selection).
        if iter:
            item_value = self.model.get_value(iter, self.POSITION_NAME)
            return item_value
    def add_item(self, value):
        """Append *value* as a new row."""
        self.model.append([value])
    def clear(self):
        """Remove all rows."""
        self.model.clear()
    def remove_selected(self):
        """Delete the currently selected row (no-op guard not present:
        assumes a selection exists)."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        model.remove(selected)
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 11, 2010
@author: ivan
'''
from foobnix.lyric.lyr import get_lyrics
import thread
from foobnix.util import LOG
from foobnix.util.configuration import FConfiguration
from foobnix.online.google.translate import translate
from foobnix.util.mouse_utils import is_double_click
import time
class PlayerWidgetsCntl():
    '''Binds the player control widgets (volume, seek bar, lyrics panes,
    status bar, nav buttons) from the glade tree to playerCntr actions.
    '''
    def __init__(self, gxMain, playerCntr):
        self.playerCntr = playerCntr
        self.volume = gxMain.get_widget("volume_hscale")
        self.volume.connect("change-value", self.onVolumeChange)
        self.seek = gxMain.get_widget("seek_eventbox")
        self.seek.connect("button-press-event", self.onSeek)
        self.seekBar = gxMain.get_widget("seek_progressbar")
        # NOTE(review): timeLabel is wired to the same "seek_progressbar"
        # widget as seekBar — looks like a copy/paste slip; confirm the
        # intended widget name.
        self.timeLabel = gxMain.get_widget("seek_progressbar")
        self.vpanel = gxMain.get_widget("vpaned1")
        self.hpanel = gxMain.get_widget("hpaned1")
        #self.hpanel.connect("button-press-event", self.on_show_hide_paned);
        #self.hpanel.set_property("position-set", True)
        #self.hpanel2 = gxMain.get_widget("vpaned1")
        #print "POSITION", self.hpanel2.get_position()
        self.lyric = gxMain.get_widget("lyric_textview")
        self.textbuffer = self.lyric.get_buffer()
        self.tr_lyric = gxMain.get_widget("translate_lyric_textview")
        self.tr_textbuffer = self.tr_lyric.get_buffer()
        spinbutton1_tabs = gxMain.get_widget("spinbutton1_tabs")
        spinbutton1_tabs.set_value(FConfiguration().count_of_tabs)
        spinbutton1_tabs.connect("value-changed", self.on_chage_tabs)
        self.statusbar = gxMain.get_widget("statusbar")
        self.lyric.set_editable(False)
        # Map glade signal handler names to controller methods.
        navigationEvents = {
            "on_play_button_clicked" :self.onPlayButton,
            "on_stop_button_clicked" :self.onStopButton,
            "on_pause_button_clicked" :self.onPauseButton,
            "on_prev_button_clicked" :self.onPrevButton,
            "on_next_button_clicked": self.onNextButton,
            "on_view-full_activate":self.on_full_view,
            "on_view-compact_activate":self.on_compact_view
        }
        gxMain.signal_autoconnect(navigationEvents)
    def on_chage_tabs(self, w):
        """Persist the new online-tab count from the spin button."""
        val = w.get_value_as_int()
        FConfiguration().count_of_tabs = val
        LOG.debug("Set size of tabs", val)
    def on_full_view(self, *args):
        """Restore the saved horizontal pane position (full layout)."""
        self.hpanel.set_position(FConfiguration().hpanelPostition)
    def on_compact_view(self, *args):
        """Collapse the horizontal pane (compact layout)."""
        self.hpanel.set_position(0)
    def on_show_hide_paned(self, w, e):
        #TODO: Matik, could you view, this signal rise on any paned double click.
        # Toggle between full and compact layout on double click.
        if is_double_click(e):
            LOG.debug("double click", w)
            if w.get_position() == 0:
                self.on_full_view(w, e)
            else:
                self.on_compact_view(w, e)
            time.sleep(0.2)
    def setStatusText(self, text):
        """Push *text* onto the status bar."""
        self.statusbar.push(0, text)
    def setLiric(self, song):
        """Fetch and show lyrics for *song* in a background thread."""
        thread.start_new_thread(self._setLiricThread, (song,))
    def _setLiricThread(self, song):
        # Worker: download lyrics, display them, then show a translation.
        self.tr_textbuffer.set_text("")
        title = "" + song.getTitle()
        # Strip a known audio extension from the title before the lookup.
        for extension in FConfiguration().supportTypes:
            if title.endswith(extension):
                title = title.replace(extension, "")
                break
        LOG.info("Get lirics for:", song.getArtist(), title)
        if song.getArtist() and song.getTitle():
            try:
                text = get_lyrics(song.getArtist(), title)
            except:
                self.setStatusText(_("Connection lyrics error"))
                LOG.error("Connection lyrics error")
                return None
            if text:
                header = "*** " + song.getArtist() + " - " + title + " ***"
                self.textbuffer.set_text(header + "\n" + text)
                LOG.info("try to translate")
                text_tr = self.getTranstalted(text)
                self.tr_textbuffer.set_text("*** " + song.getArtist() + " - " + title + " ***\n" + text_tr)
            else:
                self.textbuffer.set_text("Not Found lyrics for " + song.getArtist() + " - " + title + "\n")
    def getTranstalted(self, text):
        # Join the lines with '#' markers, translate in one request, then
        # split the markers back into line breaks.
        input = ""
        result = ""
        for line in text.rsplit("\n"):
            line = line + "#";
            input += line
        res = translate(input, src="", to="ru")
        for line in res.rsplit("#"):
            result = result + line + "\n"
        return result
    def is_ascii(self, s):
        """True when every character of *s* is 7-bit ASCII."""
        return all(ord(c) < 128 for c in s)
    def onPlayButton(self, *a):
        self.playerCntr.playState()
    def onStopButton(self, *a):
        self.playerCntr.stopState()
    def onPauseButton(self, *a):
        self.playerCntr.pauseState()
    def onPrevButton(self, *a):
        self.playerCntr.prev()
    def onNextButton(self, *a):
        self.playerCntr.next()
    def onSeek(self, widget, event):
        # Left click on the progress bar: convert x position to percent.
        if event.button == 1:
            width = self.seek.allocation.width
            x = event.x
            seekValue = (x + 0.0) / width * 100
            LOG.info(seekValue)
            self.playerCntr.setSeek(seekValue);
    def onVolumeChange(self, widget, obj3, volume):
        # Scale widget reports 0..100; the player expects 0..1.
        self.playerCntr.setVolume(volume / 100)
    pass # end of class
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
import os
import gst
import gtk
import time
import urllib
import thread
from foobnix.util.time_utils import convert_ns
from foobnix.model.entity import CommonBean
from foobnix.util import LOG
from foobnix.base import BaseController
from foobnix.base import SIGNAL_RUN_FIRST, TYPE_NONE, TYPE_PYOBJECT
from foobnix.thirdparty import pylast
from foobnix.util.configuration import FConfiguration
from foobnix.online.dowload_util import dowload_song_thread
from foobnix.util.plsparser import get_radio_source
# Module-level last.fm session, created once at import time from the
# configured credentials.  On any failure both handles are None and the
# scrobbling calls in PlayerController will not work.
username = FConfiguration().lfm_login
password_hash = pylast.md5(FConfiguration().lfm_password)
try:
    lastfm = pylast.get_lastfm_network(username=username, password_hash=password_hash)
    scrobler = lastfm.get_scrobbler("fbx", "1.0")
except:
    lastfm = None
    scrobler = None
    LOG.error("last.fm or scrobler connection error")
class PlayerController(BaseController):
    """gstreamer-based playback controller.

    Owns the playbin element, drives the seek/progress widgets from a
    background thread, reports tracks to last.fm and emits
    'song_playback_started' whenever a new bean starts playing.
    """
    # Playback source modes: which controller supplies next/prev songs.
    MODE_RADIO = "RADIO"
    MODE_PLAY_LIST = "PLAY_LIST"
    MODE_ONLINE_LIST = "ONLINE_LIST"
    __gsignals__ = {
        'song_playback_started' : (SIGNAL_RUN_FIRST, TYPE_NONE, (TYPE_PYOBJECT,))
    }
    def __init__(self):
        BaseController.__init__(self)
        self.player = self.playerLocal()
        self.songs = []
        self.cIndex = 0
        self.time_format = gst.Format(gst.FORMAT_TIME)
        self.volume = 0
        self.mode = self.MODE_PLAY_LIST
        # TODO: rename playState() to play() and remove this hack
        self.play = self.playState
        self.pause = self.pauseState
        # Consecutive radio error counter (note: "erros" typo kept).
        self.erros = 0
        self.prev_title = ""
    def set_mode(self, mode):
        """Select which list (playlist/online/radio) feeds next()/prev()."""
        self.mode = mode
    def registerPlaylistCntr(self, playlistCntr):
        self.playlistCntr = playlistCntr
    def registerOnlineCntr(self, onlineCntr):
        self.onlineCntr = onlineCntr
    def registerWidgets(self, widgets):
        self.widgets = widgets
    # Consecutive "song path not found" counter; class default, shadowed
    # per instance on first `self.count += 1`.
    count = 0
    def playSong(self, song):
        """Resolve *song*'s source and start playback.

        Skips forward (up to 5 attempts) when no path can be resolved.
        Picks a local, radio-stream or remote pipeline from song.type.
        """
        self.song = song
        LOG.info("play song", song.name, song.getArtist(), song.getTitle())
        self.stopState()
        if not song:
            LOG.info("NULL song can't playing")
            return
        LOG.info("Path before", song.path)
        #Try to set resource
        if song.path == None or song.path == "":
            LOG.info("PL CNTR SET PATH")
            self.onlineCntr.setSongResource(song)
        else:
            LOG.info("GET SONG INFO", song.getArtist(), song.getTitle())
            self.onlineCntr.info.show_song_info(song)
        LOG.info("Path after", song.path)
        if song.path == None or song.path == "":
            # Still unresolved: skip to the next song, give up after 5.
            self.count += 1
            LOG.info("SONG NOT FOUND", song.name)
            LOG.info("Count is", self.count)
            if self.count > 5:
                return
            return self.next()
        self.count = 0
        self.widgets.setLiric(song)
        LOG.info("Type", song.type)
        LOG.info("MODE", self.mode)
        LOG.info("Name", song.name)
        if song.type == CommonBean.TYPE_MUSIC_FILE:
            self.player = self.playerLocal()
            uri = 'file://' + urllib.pathname2url(song.path)
            # Windows pathname2url already yields a leading slash form.
            if os.name == 'nt':
                uri = 'file:' + urllib.pathname2url(song.path)
            self.player.set_property("uri", uri)
            self.playerThreadId = thread.start_new_thread(self.playThread, (song,))
        elif song.type == CommonBean.TYPE_RADIO_URL:
            LOG.info("URL PLAYING", song.path)
            self.player = self.playerHTTP()
            # Resolve .pls/.m3u playlists to the actual stream URL.
            path = get_radio_source(song.path)
            self.player.set_property("uri", path)
            self.widgets.seekBar.set_text("Url Playing...")
        elif song.type == CommonBean.TYPE_MUSIC_URL:
            LOG.info("URL PLAYING", song.path)
            self.player = self.playerHTTP()
            self.player.set_property("uri", song.path)
            self.playerThreadId = thread.start_new_thread(self.playThread, (song,))
        else:
            self.widgets.seekBar.set_text("Error playing...")
            return
        self.playState()
        self.setVolume(self.volume)
        self.emit('song_playback_started', song)
    def pauseState(self, *args):
        self.player.set_state(gst.STATE_PAUSED)
    def playState(self, *args):
        self.player.set_state(gst.STATE_PLAYING)
    def stopState(self):
        """Stop playback, reset the seek bar and kill the progress thread."""
        if not self.player:
            self.player = self.playerLocal()
        self.setSeek(0.0)
        self.widgets.seekBar.set_fraction(0.0)
        self.widgets.seekBar.set_text("00:00 / 00:00")
        # playThread loops compare against this id and exit when it changes.
        self.playerThreadId = None
        self.player.set_state(gst.STATE_NULL)
    def volume_up(self, *args):
        self.setVolume(self.getVolume() + 0.05)
    def volume_down(self, *args):
        self.setVolume(self.getVolume() - 0.05)
    def setVolume(self, volumeValue):
        # Volume is kept in 0..1; "+ 0.0" forces a float for gst.
        self.volume = volumeValue
        self.player.set_property('volume', volumeValue + 0.0)
    def getVolume(self):
        return self.volume
    def playerHTTP(self):
        """Build a fresh "playbin" pipeline for remote streams."""
        LOG.info("Player For remote files")
        self.playerThreadId = None
        try:
            self.player.set_state(gst.STATE_NULL)
        except:
            pass
        self.player = None
        # NOTE(review): fixed 2s sleep, presumably to let gst tear down the
        # old pipeline — confirm whether a state-change wait would do.
        time.sleep(2)
        self.playbin = gst.element_factory_make("playbin", "player")
        bus = self.playbin.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.onBusMessage)
        return self.playbin
    def playerLocal(self):
        """Build a fresh "playbin2" pipeline for local files."""
        LOG.info("Player Local Files")
        self.playerThreadId = None
        try:
            self.player.set_state(gst.STATE_NULL)
        except:
            pass
        self.player = None
        time.sleep(2)
        self.playbin = gst.element_factory_make("playbin2", "player")
        bus = self.playbin.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.onBusMessage)
        return self.playbin
    def next(self, *args):
        """Play the next song from the active list controller."""
        if self.mode == self.MODE_ONLINE_LIST:
            song = self.onlineCntr.getNextSong()
        else:
            song = self.playlistCntr.getNextSong()
        if song:
            self.playSong(song)
    def prev(self, *args):
        """Play the previous song from the active list controller."""
        if self.mode == self.MODE_ONLINE_LIST:
            song = self.onlineCntr.getPrevSong()
        else:
            song = self.playlistCntr.getPrevSong()
        self.playSong(song)
    def _isStatusNull(self):
        return self.player.get_state()[1] == gst.STATE_NULL
    def _get_state(self):
        if self.player:
            return self.player.get_state()[1]
        else:
            return None
    def setSeek(self, persentValue):
        """Seek to *persentValue* percent of the song's duration.

        For cue-sheet tracks (song.duration > 0) the offset is taken
        relative to the track's start_at position within the file.
        """
        if self._isStatusNull():
            self.playerThreadId = None
            return None
        pos_max = 1
        try:
            pos_max = self.player.query_duration(self.time_format, None)[0]
        except:
            LOG.error("Seek for new position error")
        if self.song and self.song.duration > 0:
            pos_max = int(self.song.duration) * 1000000000
        seek_ns = pos_max * persentValue / 100;
        if self.song and self.song.duration > 0:
            seek_ns = seek_ns + int(self.song.start_at) * 1000000000
        LOG.info("SEC SEEK persent", seek_ns)
        self.player.seek_simple(self.time_format, gst.SEEK_FLAG_FLUSH, seek_ns)
    def set_seek_sec(self, sec):
        """Seek to an absolute position of *sec* seconds."""
        if self._isStatusNull():
            self.playerThreadId = None
            return None
        seek_ns = int(sec) * 1000000000 ;
        LOG.info("SEC SEEK SEC", seek_ns)
        self.player.seek_simple(self.time_format, gst.SEEK_FLAG_FLUSH, seek_ns)
    def playThread(self, song=None):
        """Background loop: update the seek bar, scrobble, auto-advance.

        Exits as soon as self.playerThreadId no longer matches the id this
        thread was started with.
        NOTE(review): scrobler may be None when the last.fm login failed
        at import time — the calls below would then raise inside this
        thread; confirm intended behaviour.
        """
        LOG.info("Starts playing thread")
        # flag: song not yet handed to the downloader.
        flag = True
        is_scrobled = False
        play_thread_id = self.playerThreadId
        gtk.gdk.threads_enter()#@UndefinedVariable
        self.widgets.seekBar.set_text("00:00 / 00:00")
        gtk.gdk.threads_leave() #@UndefinedVariable
        sec = 0;
        print "SONG START", song.start_at
        if song.start_at > 0:
            self.set_seek_sec(song.start_at)
        # Poll until the pipeline can report a duration.
        while play_thread_id == self.playerThreadId:
            try:
                LOG.info("Try")
                time.sleep(0.2)
                dur_int = self.player.query_duration(self.time_format, None)[0]
                if song.duration > 0:
                    dur_int = int(song.duration) * 1000000000
                duration_sec = dur_int / 1000000000
                dur_str = convert_ns(dur_int)
                gtk.gdk.threads_enter() #@UndefinedVariable
                self.widgets.seekBar.set_text("00:00 / " + dur_str)
                gtk.gdk.threads_leave() #@UndefinedVariable
                break
            except:
                LOG.info("Error")
                pass
        time.sleep(0.5)
        start_time = str(int(time.time()));
        # Main progress loop, one tick per second.
        while play_thread_id == self.playerThreadId:
            pos_int = 0
            try:
                pos_int = self.player.query_position(self.time_format, None)[0]
            except gst.QueryError:
                LOG.info("QueryError error...")
            if song.duration > 0:
                # Cue track: position is relative to the track start.
                pos_int = pos_int - int(song.start_at) * 1000000000
            pos_str = convert_ns(pos_int)
            if play_thread_id == self.playerThreadId:
                gtk.gdk.threads_enter() #@UndefinedVariable
                timeStr = pos_str + " / " + dur_str
                timePersent = (pos_int + 0.0) / (dur_int)
                self.widgets.seekBar.set_text(timeStr)
                self.widgets.seekBar.set_fraction(timePersent)
                gtk.gdk.threads_leave() #@UndefinedVariable
                """report now playing song"""
                if song.getArtist() and song.getTitle():
                    self.erros = 0
                    scrobler.report_now_playing(song.getArtist(), song.getTitle())
                # Cue track finished (2s guard before the next track starts).
                if song.duration > 0 and pos_int > (int(song.duration) - 2) * 1000000000:
                    self.next()
            time.sleep(1)
            "Download only if you listen this music"
            if flag and song.type == CommonBean.TYPE_MUSIC_URL and timePersent > 0.35:
                flag = False
                dowload_song_thread(song)
            if self._get_state() != gst.STATE_PAUSED:
                sec += 1
            #LOG.debug("Song duration", sec, timePersent);
            # Scrobble once: half the song heard, or 45s and nearly done.
            if not is_scrobled and (sec >= duration_sec / 2 or (sec >= 45 and timePersent >= 0.9)):
                is_scrobled = True
                if song.getArtist() and song.getTitle():
                    scrobler.scrobble(song.getArtist(), song.getTitle(), start_time, "P", "", duration_sec)
                    LOG.debug("Song Successfully scrobbled", song.getArtist(), song.getTitle())
    def onBusMessage(self, bus, message):
        """gstreamer bus handler: radio tags, end-of-stream, errors."""
        #LOG.info(message
        """Show radio info"""
        type = message.type
        if self.song.type == CommonBean.TYPE_RADIO_URL and type == gst.MESSAGE_TAG and message.parse_tag():
            try:
                LOG.info(message, message.structure)
                self.erros = 0
                title = message.structure['title']
                self.widgets.seekBar.set_text("Radio: " + title)
                LOG.info("show title!", title)
                self.song.name = title
                print self.prev_title, title
                # Only refresh the info pane when the stream title changes.
                if title and self.song.type == CommonBean.TYPE_RADIO_URL and self.prev_title != title:
                    self.prev_title = title
                    LOG.info("show info!", self.song.name)
                    self.onlineCntr.info.show_song_info(self.song)
                LOG.info(self.player.get_state()[1])
            except:
                LOG.warn("Messege info error appear")
                pass
            #LOG.info(message.parse_tag()['title']
        elif type == gst.MESSAGE_EOS:
            LOG.info("MESSAGE_EOS")
            self.stopState()
            self.playerThreadId = None
            self.next()
        elif type == gst.MESSAGE_ERROR:
            LOG.info("MESSAGE_ERROR")
            err, debug = message.parse_error()
            LOG.info("Error: %s" % err, debug, err.domain, err.code)
            if message.structure:
                name = message.structure.get_name()
                LOG.info("Structure name:", name)
            # name == "missing-plugin" or
            #in all cases we break playing, retry only if it paused.
            if err.code != 1:
                self.widgets.seekBar.set_text(str(err))
                self.playerThreadId = None
                self.player.set_state(gst.STATE_NULL)
                #self.player = None
                return None
            self.widgets.seekBar.set_text(str(err))
            self.playerThreadId = None
            self.player.set_state(gst.STATE_NULL)
            #self.player = None
            time.sleep(4)
            self.player.set_state(gst.STATE_NULL)
            # Retry a failed radio stream once before giving up.
            if self.song.type == CommonBean.TYPE_RADIO_URL and self.erros <= 1:
                LOG.error("Error Num", self.erros)
                self.erros = self.erros + 1;
                self.playSong(self.song)
            """Try to play next"""
        else:
            #LOG.info(message
            pass
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.model.entity import CommonBean
from foobnix.util import LOG
class PlaylistModel:
    """gtk.ListStore-backed model of a playlist tree view.

    Row layout (see POS_* constants): stock icon id, track number,
    display name, path/url, row background colour, playlist index and
    bean type — mirroring the CommonBean fields.
    """
    POS_ICON = 0
    POS_TRACK_NUMBER = 1
    POS_NAME = 2
    POS_PATH = 3
    POS_COLOR = 4
    POS_INDEX = 5
    POS_TYPE = 6
    def __init__(self, widget):
        self.widget = widget
        self.current_list_model = gtk.ListStore(str, str, str, str, str, int, str)
        cellpb = gtk.CellRendererPixbuf()
        cellpb.set_property('cell-background', 'yellow')
        # Three visible columns: icon, track number, description; the row
        # colour comes from model column 4.
        iconColumn = gtk.TreeViewColumn(_('Icon'), cellpb, stock_id=0, cell_background=4)
        numbetColumn = gtk.TreeViewColumn(_('N'), gtk.CellRendererText(), text=1, background=4)
        descriptionColumn = gtk.TreeViewColumn(_("Music List"), gtk.CellRendererText(), text=2, background=4)
        widget.append_column(iconColumn)
        widget.append_column(numbetColumn)
        widget.append_column(descriptionColumn)
        widget.set_model(self.current_list_model)
    def getBeenByPosition(self, position):
        """Rebuild and return the CommonBean stored at row *position*,
        or None (with an error log) for an out-of-range index."""
        if position >= len(self.current_list_model):
            LOG.error("Song index too much", position)
            return None
        bean = CommonBean()
        bean.icon = self.current_list_model[position][ self.POS_ICON]
        bean.tracknumber = self.current_list_model[position][ self.POS_TRACK_NUMBER]
        bean.name = self.current_list_model[position][ self.POS_NAME]
        bean.path = self.current_list_model[position][ self.POS_PATH]
        bean.color = self.current_list_model[position][ self.POS_COLOR]
        bean.index = self.current_list_model[position][ self.POS_INDEX]
        bean.type = self.current_list_model[position][ self.POS_TYPE]
        return bean
    def get_all_beans(self):
        """Return every row as a CommonBean, in display order."""
        beans = []
        for i in xrange(len(self.current_list_model)):
            beans.append(self.getBeenByPosition(i))
        return beans
    def set_all_beans(self, beans):
        """Replace the whole model content with *beans*."""
        self.clear()
        for bean in beans:
            self.append(bean)
    def append_all_beans(self, beans):
        """Append *beans* to the end of the model."""
        for bean in beans:
            self.append(bean)
    def getSelectedBean(self):
        """Return the selected row as a CommonBean, or None (implicitly)
        when nothing is selected."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        if selected:
            bean = CommonBean()
            bean.icon = model.get_value(selected, self.POS_ICON)
            bean.tracknumber = model.get_value(selected, self.POS_TRACK_NUMBER)
            bean.name = model.get_value(selected, self.POS_NAME)
            bean.path = model.get_value(selected, self.POS_PATH)
            bean.color = model.get_value(selected, self.POS_COLOR)
            bean.index = model.get_value(selected, self.POS_INDEX)
            bean.type = model.get_value(selected, self.POS_TYPE)
            return bean
    def clear(self):
        self.current_list_model.clear()
    def append(self, bean):
        # Column order must match the POS_* constants above.
        self.current_list_model.append([bean.icon, bean.tracknumber, bean.name, bean.path, bean.color, bean.index, bean.type])
    def __del__(self, *a):
        LOG.info("del")
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
from foobnix.playlist.playlist_model import PlaylistModel
from foobnix.model.entity import CommonBean
from foobnix.util.mouse_utils import is_double_click
from foobnix.player.player_controller import PlayerController
from random import randint
from foobnix.util.configuration import FConfiguration
from foobnix.directory.directory_controller import DirectoryCntr
from foobnix.util import LOG
class PlaylistCntr():
    """Controller of the main playlist view.

    Keeps the current index, feeds next/prev songs to the player
    (honouring random/repeat settings) and repaints rows with alternating
    colours and the "now playing" icon.
    """
    def __init__(self, widget, playerCntr):
        self.current_list_model = PlaylistModel(widget)
        self.playerCntr = playerCntr
        widget.connect("button-press-event", self.onPlaySong)
        self.index = 0
        widget.connect("drag-end", self.onDrugBean)
    def registerDirectoryCntr(self, directoryCntr):
        self.directoryCntr = directoryCntr
    def onDrugBean(self, *ars):
        """Drag-end handler: copy the dragged bean into the virtual list."""
        # Bug fix: PlaylistModel defines getSelectedBean, not
        # get_selected_bean — the old call raised AttributeError.
        selected = self.current_list_model.getSelectedBean()
        LOG.info("Drug song", selected, selected.type)
        self.directoryCntr.set_active_view(DirectoryCntr.VIEW_VIRTUAL_LISTS)
        if selected.type in [CommonBean.TYPE_MUSIC_URL, CommonBean.TYPE_MUSIC_FILE, CommonBean.TYPE_RADIO_URL]:
            selected.parent = None
            self.directoryCntr.append_virtual([selected])
            self.directoryCntr.leftNoteBook.set_current_page(0)
    def getState(self):
        """Return [beans, current index] for persisting."""
        return [self.get_playlist_beans(), self.index]
    def get_playlist_beans(self):
        return self.current_list_model.get_all_beans()
    def set_playlist_beans(self, beans):
        return self.current_list_model.set_all_beans(beans)
    def setState(self, state):
        """Restore a [beans, index] pair produced by getState()."""
        self.set_playlist_beans(state[0])
        self.index = state[1]
        if self.get_playlist_beans():
            self.repopulate(self.get_playlist_beans(), self.index)
            #self.playerCntr.playSong(self.get_playlist_beans()[self.index])
    def clear(self):
        self.current_list_model.clear()
    def onPlaySong(self, w, e):
        """Double click on a row: play it and mark it as current."""
        if is_double_click(e):
            # Bug fix: see onDrugBean — correct model method name.
            playlistBean = self.current_list_model.getSelectedBean()
            self.repopulate(self.get_playlist_beans(), playlistBean.index)
            self.index = playlistBean.index
            self.playerCntr.set_mode(PlayerController.MODE_PLAY_LIST)
            self.playerCntr.playSong(playlistBean)
    def getNextSong(self):
        """Advance to and return the next bean (random/repeat aware).

        Returns None at the end of a non-repeating list or when the list
        is empty.
        """
        count = len(self.get_playlist_beans())
        if FConfiguration().isRandom:
            # Bug fix: randint's upper bound is inclusive — the old
            # randint(0, count) could produce an out-of-range index.
            if count == 0:
                return None
            self.index = randint(0, count - 1)
        else:
            self.index += 1
            if self.index >= count:
                self.index = 0
                if not FConfiguration().isRepeat:
                    self.index = count
                    return None
        playlistBean = self.current_list_model.getBeenByPosition(self.index)
        if not playlistBean:
            return None
        self.repopulate(self.get_playlist_beans(), playlistBean.index)
        return playlistBean
    def getPrevSong(self):
        """Step back to and return the previous bean (wraps to the end)."""
        count = len(self.get_playlist_beans())
        if FConfiguration().isRandom:
            # Same inclusive-bound fix as in getNextSong.
            if count == 0:
                return None
            self.index = randint(0, count - 1)
        else:
            self.index -= 1
            if self.index < 0:
                self.index = count - 1
        playlistBean = self.current_list_model.getBeenByPosition(self.index)
        if not playlistBean:
            # Bug fix: the old code dereferenced .index on None for an
            # invalid position (e.g. empty playlist).
            return None
        self.repopulate(self.get_playlist_beans(), playlistBean.index)
        return playlistBean
    def setPlaylist(self, entityBeans):
        """Replace the playlist with *entityBeans* and start the first one."""
        LOG.info("Set play list")
        self.clear()
        self.set_playlist_beans(entityBeans)
        self.index = 0
        if entityBeans:
            self.playerCntr.playSong(entityBeans[0])
        self.repopulate(entityBeans, self.index)
    def appendPlaylist(self, entityBeans):
        """Append *entityBeans*, keeping the current song marker."""
        LOG.info("Append play list")
        self.current_list_model.append_all_beans(entityBeans)
        #if self.get_playlist_beans():
        #self.playerCntr.playSong(self.get_playlist_beans()[index])
        self.repopulate(self.get_playlist_beans(), self.index)
    def repopulate(self, entityBeans, index):
        """Rebuild the model rows: colours, indices and the playing icon."""
        self.current_list_model.clear()
        for i in range(len(entityBeans)):
            songBean = entityBeans[i]
            songBean.name = songBean.getPlayListDescription()
            songBean.color = self.getBackgroundColour(i)
            songBean.index = i
            if i == index:
                songBean.setIconPlaying()
            else:
                songBean.setIconNone()
            self.current_list_model.append(songBean)
    def getBackgroundColour(self, i):
        """Alternate row colours: grey for odd rows, cream for even."""
        if i % 2 :
            return "#F2F2F2"
        else:
            return "#FFFFE5"
| Python |
'''
Created on Mar 14, 2010
@author: ivan
'''
import gtk
from foobnix.window.window_controller import WindowController
from foobnix.player.player_controller import PlayerController
from foobnix.player.player_widgets_cntr import PlayerWidgetsCntl
from foobnix.directory.directory_controller import DirectoryCntr
from foobnix.trayicon import TrayIcon
from foobnix.application.app_configuration_controller import AppConfigurationCntrl
from foobnix.preferences.pref_controller import PrefController
from foobnix.radio.radio_controller import RadioListCntr
from foobnix.online.online_controller import OnlineListCntr
from foobnix.directory.virtuallist_controller import VirturalLIstCntr
from foobnix.base import BaseController
from foobnix.util.configuration import FConfiguration
from foobnix.online.search_panel import SearchPanel
import time
class AppController(BaseController):
    """Top-level application controller: builds every sub-controller,
    wires their signals together and restores/saves persistent state.

    The construction order below matters: widgets must exist before they are
    registered with the player controller, and restore_state() must run last,
    once everything is wired up.
    """

    def __init__(self, v):
        # v -- the AppView holding the glade widget trees (gxMain, gxPref, ...)
        BaseController.__init__(self)
        self.player_controller = PlayerController()
        #self.playlistCntr = PlaylistCntr(v.playlist, self.player_controller)
        # NOTE(review): the online-search controller doubles as the playlist
        # controller (the dedicated PlaylistCntr above is disabled).
        onlineCntr = OnlineListCntr(v.gxMain, self.player_controller)
        self.playlistCntr = onlineCntr
        self.virtualListCntr = VirturalLIstCntr()
        self.radioListCntr = RadioListCntr(v.gxMain, self.player_controller)
        self.playerWidgets = PlayerWidgetsCntl(v.gxMain, self.player_controller)
        self.player_controller.registerWidgets(self.playerWidgets)
        self.player_controller.registerPlaylistCntr(self.playlistCntr)
        self.directoryCntr = DirectoryCntr(v.gxMain, self.playlistCntr, self.radioListCntr, self.virtualListCntr)
        #self.playlistCntr.registerDirectoryCntr(self.directoryCntr)
        self.appConfCntr = AppConfigurationCntrl(v.gxMain, self.directoryCntr)
        onlineCntr.register_directory_cntr(self.directoryCntr)
        self.player_controller.registerOnlineCntr(onlineCntr)
        self.preferences_window_controller = PrefController(v.gxPref)
        self.main_window_controller = WindowController(v.gxMain, v.gxAbout)
        self.main_window_controller.connect('show_preferences', self.preferences_window_controller.show)
        # The tray icon drives playback and window visibility via the custom
        # signals declared on TrayIcon.
        self.tray_icon = TrayIcon(v.gxTrayIcon)
        self.tray_icon.connect('toggle_window_visibility', self.main_window_controller.toggle_visibility)
        self.tray_icon.connect('exit', self.exit)
        self.tray_icon.connect('play', self.player_controller.play)
        self.tray_icon.connect('pause', self.player_controller.pause)
        self.tray_icon.connect('prev', self.player_controller.prev)
        self.tray_icon.connect('next', self.player_controller.next)
        self.tray_icon.connect('volume_up', self.player_controller.volume_up)
        self.tray_icon.connect('volume_down', self.player_controller.volume_down)
        # Both the tray icon and the window title track the current song.
        self.player_controller.connect('song_playback_started', self.tray_icon.on_song_started)
        self.player_controller.connect('song_playback_started', self.main_window_controller.on_song_started)
        self.main_window_controller.connect('exit', self.exit)
        self.search_panel = SearchPanel(v.gxMain)
        self.search_panel.connect('show_search_results', onlineCntr.show_results)
        #self.search_panel.connect('starting_search', onlineCntr.clear)
        # Restore persisted settings last, once everything is wired up.
        self.restore_state()

    def exit(self, sender):
        """Persist state, hide the tray icon and quit the GTK main loop."""
        self.save_state()
        self.tray_icon.icon.set_visible(False)
        gtk.main_quit()

    def restore_state(self):
        """Push previously persisted FConfiguration values back into the UI."""
        if FConfiguration().playlistState:
            self.playlistCntr.setState(FConfiguration().playlistState)
        if FConfiguration().virtualListState:
            self.directoryCntr.setState(FConfiguration().virtualListState)
        if FConfiguration().volumeValue:
            self.playerWidgets.volume.set_value(FConfiguration().volumeValue)
            # Widget range is 0..100, the player expects 0..1.
            self.player_controller.setVolume(FConfiguration().volumeValue / 100)
        if FConfiguration().hpanelPostition:
            self.playerWidgets.hpanel.set_position(FConfiguration().hpanelPostition)
        # if FConfiguration().hpanel2Postition:
        #     self.playerWidgets.hpanel2.set_position(FConfiguration().hpanel2Postition)
        if FConfiguration().vpanelPostition:
            self.playerWidgets.vpanel.set_position(FConfiguration().vpanelPostition)
        if FConfiguration().mediaLibraryPath:
            self.appConfCntr.setMusicFolder(FConfiguration().mediaLibraryPath)
        if FConfiguration().radiolistState:
            self.radioListCntr.setState(FConfiguration().radiolistState)
        self.appConfCntr.setVkLoginPass(FConfiguration().vk_login, FConfiguration().vk_password)
        self.appConfCntr.setLfmLoginPass(FConfiguration().lfm_login, FConfiguration().lfm_password)
        if FConfiguration().isPlayOnStart:
            # "Play on start" simply advances to the first/next track.
            self.player_controller.next()

    def save_state(self):
        """Collect the current UI state into FConfiguration and persist it."""
        FConfiguration().playlistState = self.playlistCntr.getState()
        FConfiguration().virtualListState = self.directoryCntr.getState()
        FConfiguration().radiolistState = self.radioListCntr.getState()
        FConfiguration().volumeValue = self.playerWidgets.volume.get_value()
        # Pane positions of 0 mean "never realized"; don't persist those.
        if self.playerWidgets.vpanel.get_position() > 0:
            FConfiguration().vpanelPostition = self.playerWidgets.vpanel.get_position()
        if self.playerWidgets.hpanel.get_position() > 0:
            FConfiguration().hpanelPostition = self.playerWidgets.hpanel.get_position()
        #if self.playerWidgets.hpanel2.get_position() > 0:
        #    FConfiguration().hpanel2Postition = self.playerWidgets.hpanel2.get_position()
        FConfiguration().mediaLibraryPath = self.appConfCntr.getMusicFolder()
        # NOTE(review): account passwords are persisted via FConfiguration —
        # presumably in plain text; verify against FConfiguration.save().
        FConfiguration().vk_login = self.appConfCntr.getVkLogin()
        FConfiguration().vk_password = self.appConfCntr.getVkPassword()
        FConfiguration().lfm_login = self.appConfCntr.getLfmLogin()
        FConfiguration().lfm_password = self.appConfCntr.getLfmPassword()
        FConfiguration().save()
| Python |
'''
Created on Mar 14, 2010
@author: ivan
'''
import gtk.glade
class AppView():
    """Loads the Glade XML definitions and exposes the widget trees
    (gxMain, gxTrayIcon, gxPref, gxAbout) used by the controllers."""

    gladeMain = "foobnix/glade/foobnix.glade"
    gladePref = "foobnix/glade/preferences.glade"

    def __init__(self):
        self.gxMain = self.glade_XML(self.gladeMain, "foobnixWindow")
        self.gxTrayIcon = self.glade_XML(self.gladeMain, "popUpWindow")
        self.gxPref = self.glade_XML(self.gladePref, "window")
        self.gxAbout = self.glade_XML(self.gladeMain, "aboutdialog")
        self.about_widget = self.gxAbout.get_widget("aboutdialog")
        # Hide (rather than destroy) the about dialog on any response so it
        # can be reopened later.
        self.about_widget.connect("response", lambda * a: self.about_widget.hide())
        self.playlist = self.gxMain.get_widget("playlist_treeview")

    def close_dialog(self):
        pass

    def glade_XML(self, main, widget):
        """Load *widget* from the glade file *main*, falling back to known
        system-wide install locations when the relative path fails.

        Raises whatever gtk.glade raises when no candidate path works.
        """
        domain = "foobnix"
        try:
            return gtk.glade.XML(main, widget, domain)
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # are not swallowed while probing the fallback locations.
        except Exception:
            try:
                return gtk.glade.XML("/usr/local/lib/python2.6/dist-packages/" + main, widget, domain)
            except Exception:
                # TODO(review): interpreter versions are hard-coded in these
                # fallback paths — breaks on any other Python version.
                return gtk.glade.XML("/usr/lib/python2.5/site-packages/" + main, widget, domain)
| Python |
'''
Created on Mar 14, 2010
@author: ivan
'''
from foobnix.util.configuration import FConfiguration
from foobnix.util import LOG
class AppConfigurationCntrl():
    """Binds the preference widgets (music folders, VK/Last.FM accounts,
    playback toggles) to FConfiguration and reacts to user changes.

    Ordering note: each checkbox is connected to its handler *before*
    set_active() restores the saved value, so restoring may already fire
    the handler — keep that order intact.
    """

    def __init__(self, gxMain, directoryCntr):
        # gxMain -- glade widget tree of the main window
        # directoryCntr -- notified when the media-library folder changes
        self.directoryCntr = directoryCntr
        self.folderChoser = gxMain.get_widget("music_dir_filechooserbutton")
        self.folderChoser.connect("current-folder-changed", self.onChangeMusicFolder)
        # Account credential entry widgets (VKontakte and Last.FM).
        self.vk_entry_label = gxMain.get_widget("vk_entry_login")
        self.vk_entry_passw = gxMain.get_widget("vk_entry_password")
        self.lfm_entry_label = gxMain.get_widget("lfm_entry_login")
        self.lfm_entry_passw = gxMain.get_widget("lfm_entry_password")
        """ online music folder path """
        self.online_dir = gxMain.get_widget("online_dir_filechooserbutton")
        self.online_dir.connect("current-folder-changed", self.onChangeOnline)
        self.online_dir.set_current_folder(FConfiguration().onlineMusicPath)
        # The folder chooser is only usable when "save online music" is on.
        self.online_dir.set_sensitive(FConfiguration().is_save_online)
        """ is save online music checkbox """
        self.save_online = gxMain.get_widget("save_online_checkbutton")
        self.save_online.connect("clicked", self.on_save_online)
        self.save_online.set_active(FConfiguration().is_save_online)
        # Search-result ordering radio buttons (read elsewhere; only fetched here).
        self.by_first = gxMain.get_widget("radiobutton_by_first")
        self.by_popularity = gxMain.get_widget("radiobutton_by_popularity")
        self.by_time = gxMain.get_widget("radiobutton_by_time")
        """Random button"""
        self.randomCheckButton = gxMain.get_widget("random_checkbutton")
        self.randomCheckButton.set_active(FConfiguration().isRandom)
        self.randomCheckButton.connect("clicked", self.onRandomClicked)
        """Repeat button"""
        self.repeatCheckButton = gxMain.get_widget("repeat_checkbutton")
        self.repeatCheckButton.set_active(FConfiguration().isRepeat)
        self.repeatCheckButton.connect("clicked", self.onRepeatClicked)
        """Play on Start"""
        self.playOnStartCheckButton = gxMain.get_widget("playonstart_checkbutton")
        self.playOnStartCheckButton.set_active(FConfiguration().isPlayOnStart)
        self.playOnStartCheckButton.connect("clicked", self.onPlayOnStartClicked)

    def onPlayOnStartClicked(self, *args):
        # Persist the toggle straight into the configuration singleton.
        FConfiguration().isPlayOnStart = self.playOnStartCheckButton.get_active()

    def onRepeatClicked(self, *args):
        FConfiguration().isRepeat = self.repeatCheckButton.get_active()

    def onRandomClicked(self, *args):
        FConfiguration().isRandom = self.randomCheckButton.get_active()

    def on_save_online(self, *args):
        # Enable the target-folder chooser only while saving is requested.
        value = self.save_online.get_active()
        if value:
            self.online_dir.set_sensitive(True)
        else:
            self.online_dir.set_sensitive(False)
        FConfiguration().is_save_online = value

    def onChangeOnline(self, *args):
        path = self.online_dir.get_filename()
        LOG.info("Change music online folder", path)
        FConfiguration().onlineMusicPath = path

    """ Vkontatke"""
    def setVkLoginPass(self, login, passwrod):
        # NOTE: parameter name "passwrod" is a historical typo kept for
        # compatibility with existing callers.
        self.vk_entry_label.set_text(login)
        self.vk_entry_passw.set_text(passwrod)

    def getVkLogin(self): return self.vk_entry_label.get_text()

    def getVkPassword(self): return self.vk_entry_passw.get_text()

    """ Last.FM"""
    def setLfmLoginPass(self, value, passwrod):
        self.lfm_entry_label.set_text(value)
        self.lfm_entry_passw.set_text(passwrod)

    def getLfmLogin(self): return self.lfm_entry_label.get_text()

    def getLfmPassword(self): return self.lfm_entry_passw.get_text()

    def onChangeMusicFolder(self, path):
        # *path* is actually the emitting widget; the folder is re-read from
        # the chooser itself.
        self.musicFolder = self.folderChoser.get_filename()
        LOG.info("Change music folder", self.musicFolder)
        self.directoryCntr.updateDirectoryByPath(self.musicFolder)

    def setMusicFolder(self, path):
        LOG.info("Set Folder", path)
        self.folderChoser.set_current_folder(path)

    def getMusicFolder(self):
        return self.folderChoser.get_filename()
| Python |
'''
Created on Mar 13, 2010
@author: ivan
'''
import gtk
import os.path
from foobnix.base import BaseController
from foobnix.base import SIGNAL_RUN_FIRST, TYPE_NONE
class TrayIcon(BaseController):
    """
    A class that represents tray icon and a widget that pops up when the icon is right-clicked.

    Emits the signals declared in __gsignals__; the AppController connects
    them to the player and the main window.
    """
    # All tray signals share the same no-argument signature.
    _BASIC_SIGNAL = (SIGNAL_RUN_FIRST, TYPE_NONE, ())
    __gsignals__ = {
        'exit' : _BASIC_SIGNAL,
        'toggle_window_visibility' : _BASIC_SIGNAL,
        'play' : _BASIC_SIGNAL,
        'pause' : _BASIC_SIGNAL,
        'next' : _BASIC_SIGNAL,
        'prev' : _BASIC_SIGNAL,
        'volume_up' : _BASIC_SIGNAL,
        'volume_down' : _BASIC_SIGNAL
    }

    def __init__(self, gx_tray_icon):
        BaseController.__init__(self)
        self.popup = gx_tray_icon.get_widget("popUpWindow")
        self.text1 = gx_tray_icon.get_widget("text1")
        self.text2 = gx_tray_icon.get_widget("text2")
        self.icon = gtk.StatusIcon()
        self.icon.set_tooltip("Foobnix music playerEngine")
        # TODO: move the path to config
        icon_path = "/usr/local/share/pixmaps/foobnix.png"
        icon_path2 = "/usr/share/pixmaps/foobnix.png"
        if os.path.exists(icon_path):
            self.icon.set_from_file(icon_path)
        elif os.path.exists(icon_path2):
            self.icon.set_from_file(icon_path2)
        else:
            # No installed pixmap found — fall back to a stock GTK icon.
            self.icon.set_from_stock("gtk-media-play")
        # Left click toggles the main window; right click shows the popup menu.
        self.icon.connect("activate", lambda * a: self.emit('toggle_window_visibility'))
        self.icon.connect("popup-menu", lambda * a: self.popup.show())
        try:
            self.icon.connect("scroll-event", self.on_mouse_wheel_scrolled)
        # Was a bare `except:` — narrowed so only real connect failures are
        # swallowed; "scroll-event" is missing on older StatusIcon versions,
        # in which case wheel volume control is simply disabled.
        except Exception:
            pass
        popup_signals = {
            "on_close_clicked" : lambda * a: self.emit('exit'),
            "on_play_clicked" : lambda * a: self.emit('play'),
            "on_pause_clicked" : lambda * a: self.emit('pause'),
            "on_next_clicked" : lambda * a: self.emit('next'),
            # was `lambda * s` — unified with the sibling handlers
            "on_prev_clicked" : lambda * a: self.emit('prev'),
            "on_cancel_clicked": lambda * a: self.popup.hide()
        }
        gx_tray_icon.signal_autoconnect(popup_signals)

    def setText1(self, text):
        """Set the first popup label (used for the song name)."""
        self.text1.set_text(text)

    def setText2(self, text):
        """Set the second popup label."""
        self.text2.set_text(text)

    def on_song_started(self, sender, song):
        """Player callback: show the new song's name in the popup."""
        self.setText1(song.name)

    def on_mouse_wheel_scrolled(self, w, event):
        """Wheel over the tray icon: up raises, anything else lowers volume."""
        if event.direction == gtk.gdk.SCROLL_UP: #@UndefinedVariable
            self.emit('volume_up')
        else:
            self.emit('volume_down')
        # TODO: move next line to player_controller
        # self.playerWidgets.volume.set_value(volume * 100)
| Python |
'''
Created on Mar 13, 2010
@author: ivan
'''
import gtk
from foobnix.util.configuration import VERSION
from foobnix.base import BaseController
from foobnix.base import SIGNAL_RUN_FIRST, TYPE_NONE
class WindowController(BaseController):
    """Controls the main application window and the About dialog.

    Emits 'exit' and 'show_preferences'; closing the window only hides it
    (the tray icon keeps the application alive).
    """
    __gsignals__ = {
        'exit' : (SIGNAL_RUN_FIRST, TYPE_NONE, ()),
        'show_preferences' : (SIGNAL_RUN_FIRST, TYPE_NONE, ()),
    }

    def __init__(self, gx_main_window, gx_about):
        # gx_main_window / gx_about -- glade widget trees from AppView
        BaseController.__init__(self)
        self.decorate(gx_main_window)
        popup_signals = {
            "on_gtk-preferences_activate": lambda * a: self.emit('show_preferences'),
            "on_file_quit_activate": lambda * a: self.emit('exit'),
            "on_menu_about_activate": self.show_about_window
        }
        gx_main_window.signal_autoconnect(popup_signals)
        self.main_window = gx_main_window.get_widget("foobnixWindow")
        # delete-event is routed to hide() so the close button hides, not quits.
        self.main_window.connect("delete-event", self.hide)
        self.main_window.set_title("Foobnix " + VERSION)
        self.main_window.maximize()
        self.about_window = gx_about.get_widget("aboutdialog")
        self.about_window.connect("delete-event", self.hide_about_window)

    def on_song_started(self, sender, song):
        """Player callback: reflect the current song in the window title."""
        self.main_window.set_title(song.getTitleDescription())

    def show_about_window(self, *args):
        self.about_window.show()

    def hide_about_window(self, *args):
        self.about_window.hide()
        # Returning True stops the default delete-event handler from
        # destroying the dialog, so it can be shown again.
        return True

    def show(self):
        self.main_window.show()

    def hide(self, *args):
        self.main_window.hide()
        return True

    def toggle_visibility(self, *a):
        """Tray-icon callback: flip the window's visibility."""
        visible = self.main_window.get_property('visible')
        self.main_window.set_property('visible', not visible)

    def decorate(self, gx):
        """Strip the menubar shadow/padding via an rc snippet and recolour
        the menubar to match the current theme's label colours."""
        rc_st = '''
        style "menubar-style" {
        GtkMenuBar::shadow_type = none
        GtkMenuBar::internal-padding = 0
        }
        class "GtkMenuBar" style "menubar-style"
        '''
        gtk.rc_parse_string(rc_st)
        # Sample the theme colours from an arbitrary label widget.
        style = gx.get_widget("label31").get_style()
        background_color = style.bg[gtk.STATE_NORMAL]
        text_color = style.fg[gtk.STATE_NORMAL]
        menu_bar = gx.get_widget("menubar3")
        menu_bar.modify_bg(gtk.STATE_NORMAL, background_color)
        # making main menu look a bit better
        for item in menu_bar.get_children():
            current = item.get_children()[0]
            current.modify_fg(gtk.STATE_NORMAL, text_color)
| Python |
#!/usr/bin/env python
import os, glob, shutil
from distutils.core import setup
from foobnix.util.configuration import VERSION, FOOBNIX_TMP, FOOBNIX_TMP_RADIO
# Ensure the foobnix temp directories exist before setup() references them.
# NOTE(review): the radio dir is only created when the base dir is missing;
# if FOOBNIX_TMP exists but FOOBNIX_TMP_RADIO does not, it is never created —
# confirm this is intended.
if not os.path.exists(FOOBNIX_TMP):
    os.mkdir(FOOBNIX_TMP)
    os.mkdir(FOOBNIX_TMP_RADIO)
def capture(cmd):
    """Run *cmd* through the shell and return its stdout, stripped."""
    output = os.popen(cmd).read()
    return output.strip()
def removeall(path):
    """Recursively delete the contents of directory *path* (best effort:
    individual failures are swallowed by rmgeneric)."""
    if not os.path.isdir(path):
        return
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isfile(child):
            rmgeneric(child, os.remove)
        elif os.path.isdir(child):
            # Empty the subdirectory first, then remove it.
            removeall(child)
            rmgeneric(child, os.rmdir)
def rmgeneric(path, __func__):
    """Apply *__func__* (e.g. os.remove or os.rmdir) to *path*, silently
    ignoring OS errors such as "file not found" or "directory not empty".

    The original used the Python 2 form ``except OSError, (errno, strerror)``,
    which shadowed the ``errno`` module name and bound values that were never
    used; the plain form below is equivalent and also Python-3 compatible.
    """
    try:
        __func__(path)
    except OSError:
        pass
# Create mo files:
# Compile each gettext .po catalogue into a binary mo/<lang>/foobnix.mo so
# setup() below can install them under /usr/share/locale.
if not os.path.exists("mo/"):
    os.mkdir("mo/")
for lang in ('ru', 'uk', 'he'):
    pofile = "po/" + lang + ".po"
    mofile = "mo/" + lang + "/foobnix.mo"
    if not os.path.exists("mo/" + lang + "/"):
        os.mkdir("mo/" + lang + "/")
    print "generating", mofile
    # Relies on gettext's msgfmt being on PATH.
    os.system("msgfmt %s -o %s" % (pofile, mofile))
# Copy script "foobnix" file to foobnix dir:
# (installed later via scripts=['foobnix/foobnix'] and removed in the cleanup)
shutil.copyfile("foobnix.py", "foobnix/foobnix")
# Generate a version module so the installed package knows its own version.
# NOTE(review): uses the Python 2 builtin file(); open() would be equivalent.
versionfile = file("foobnix/version.py", "wt")
versionfile.write("""
# generated by setup.py
VERSION = %r
""" % VERSION)
versionfile.close()
# Register the package, its data files and the launcher script with distutils.
setup(name='foobnix',
      version=VERSION,
      description='GTK+ client for the Music Player Daemon (MPD).',
      author='Ivan Ivanenko',
      author_email='ivan.ivanenko@gmail.com',
      url='www.foobnix.com',
      classifiers=[
          'Development Status :: Beta',
          'Environment :: X11 Applications',
          'Intended Audience :: End Users/Desktop',
          'License :: GNU General Public License (GPL)',
          'Operating System :: Linux',
          'Programming Language :: Python',
          'Topic :: Multimedia :: Sound :: Players',
      ],
      packages=[
          "foobnix",
          "foobnix.application",
          "foobnix.base",
          "foobnix.cue",
          "foobnix.directory",
          "foobnix.glade",
          "foobnix.lyric",
          "foobnix.model",
          "foobnix.online",
          "foobnix.online.google",
          "foobnix.online.integration",
          "foobnix.player",
          "foobnix.playlist",
          "foobnix.preferences",
          "foobnix.radio",
          "foobnix.thirdparty",
          "foobnix.trayicon",
          "foobnix.util",
          "foobnix.window"
      ],
      # Ship the glade UI files and images inside the package.
      package_data={'foobnix': ['glade/*.glade', 'glade/*.png']},
      #package_dir={"src/foobnix": "foobnix/"},
      scripts=['foobnix/foobnix'],
      # Absolute targets install system-wide; relative ones go under sys.prefix.
      data_files=[('share/foobnix', ['README']),
                  (FOOBNIX_TMP, ['version']),
                  ('/usr/share/applications', ['foobnix.desktop']),
                  ('/usr/share/pixmaps', glob.glob('foobnix/pixmaps/*')),
                  (FOOBNIX_TMP_RADIO, glob.glob('radio/*')),
                  ('share/man/man1', ['foobnix.1']),
                  ('/usr/share/locale/uk/LC_MESSAGES', ['mo/uk/foobnix.mo']),
                  ('/usr/share/locale/he/LC_MESSAGES', ['mo/he/foobnix.mo']),
                  ('/usr/share/locale/ru/LC_MESSAGES', ['mo/ru/foobnix.mo'])
                 ]
      )
# Cleanup (remove /build, /mo, and *.pyc files:
print "Cleaning up..."
# NOTE(review): the bare excepts below make every cleanup step best-effort,
# but they also hide real failures — consider narrowing each to OSError.
try:
    removeall("build/")
    os.rmdir("build/")
    pass
except:
    pass
try:
    # The generated locale tree is only needed during install.
    removeall("mo/")
    os.rmdir("mo/")
except:
    pass
try:
    # Drop compiled bytecode left next to the sources.
    for f in os.listdir("."):
        if os.path.isfile(f):
            if os.path.splitext(os.path.basename(f))[1] == ".pyc":
                os.remove(f)
except:
    pass
try:
    # Remove the temporary launcher copy created before setup().
    os.remove("foobnix/foobnix")
except:
    pass
try:
    # Remove the generated version module.
    os.remove("foobnix/version.py")
except:
    pass
try:
    # Drop a stale pickled user configuration from $HOME.
    os.remove(os.getenv("HOME") + "/foobnix_conf.pkl")
except:
    pass
| Python |
#!/usr/bin/env python
'''
Created on Mar 10, 2010
@author: ivan
'''
import pygst
from foobnix.util import LOG
pygst.require('0.10')
import pygtk
pygtk.require20()
import gtk
import gobject
import gettext
from foobnix.application.app_view import AppView
from foobnix.application.app_controller import AppController
import __main__, os
def is_only_instance():
    # Determine if there are more than the current instance of the application
    # running at the current time.
    # The "[f]oobnix.py"-style bracket trick keeps the grep process itself
    # from matching its own command line; "(( ... > 1 ))" exits 0 (shell
    # success) when another copy is found, hence the `!= 0` inversion.
    # NOTE(review): relies on bash arithmetic syntax via os.system's /bin/sh —
    # confirm this works where /bin/sh is not bash (e.g. dash).
    return os.system("(( $(ps -ef | grep python | grep '[" +
        __main__.__file__[0] + "]" + __main__.__file__[1:] +
        "' | wc -l) > 1 ))") != 0
if __name__ == "__main__":
LOG.print_debug_info()
if is_only_instance():
APP_NAME = "foobnix"
gettext.install(APP_NAME, unicode=True)
gettext.textdomain(APP_NAME)
gtk.glade.textdomain(APP_NAME)
AppController(AppView())
gobject.threads_init() #@UndefinedVariable
gtk.main()
LOG.info(_("Success"))
else:
LOG.warn("Other instance of player is already running")
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
      Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple (http_status, http_reason, file_url); file_url is None
    unless the server answered 201 Created.
  """
  # Google Code wants the bare account name, so drop a "@gmail.com" suffix.
  if user_name.endswith('@gmail.com'):
    user_name = user_name[:user_name.index('@gmail.com')]

  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])
  content_type, body = encode_upload_request(form_fields, file)

  # HTTP Basic authentication against https://PROJECT.googlecode.com/files.
  headers = {
    'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (user_name, password)),
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
    }
  connection = httplib.HTTPSConnection('%s.googlecode.com' % project_name)
  connection.request('POST', '/files', body, headers)
  response = connection.getresponse()
  connection.close()

  # A 201 Created response carries the final file URL in "Location".
  location = response.getheader('Location', None) if response.status == 201 else None
  return response.status, response.reason, location
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'
  body = []
  # Add the metadata about the upload first
  for key, value in fields:
    body.extend(
      ['--' + BOUNDARY,
       'Content-Disposition: form-data; name="%s"' % key,
       '',
       value,
       ])
  # Now add the file itself
  # NOTE(review): Python 2 only — under Python 3 the 'rb' read yields bytes
  # and the final str.join below would raise TypeError on binary content.
  file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  file_content = f.read()
  f.close()
  body.extend(
    ['--' + BOUNDARY,
     'Content-Disposition: form-data; name="filename"; filename="%s"'
     % file_name,
     # The upload server determines the mime-type, no need to set it.
     'Content-Type: application/octet-stream',
     '',
     file_content,
     ])
  # Finalize the form body
  body.extend(['--' + BOUNDARY + '--', ''])
  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    config_dir: Path to Subversion configuration directory, 'none', or None.
    user_name: Your Google account name.
    tries: How many attempts to make.

  NOTE(review): if called with tries <= 0 the loop body never runs and the
  final return raises UnboundLocalError — callers must pass tries >= 1.
  """
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()
    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break
  return status, reason, url
def main():
  """Parse command-line options and upload the single requested FILE.

  Returns 0 on success, 1 on upload failure (used as the process exit code).
  """
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')

  options, args = parser.parse_args()
  # Validate required options and the single positional FILE argument;
  # parser.error() prints the message and exits with status 2.
  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')

  file_path = args[0]
  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None

  # Credentials may be None here; upload_find_auth will prompt interactively.
  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1

if __name__ == '__main__':
  sys.exit(main())
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
      Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple (http_status, http_reason, file_url); file_url is None
    unless the server answered 201 Created.
  """
  # Google Code wants the bare account name, so drop a "@gmail.com" suffix.
  if user_name.endswith('@gmail.com'):
    user_name = user_name[:user_name.index('@gmail.com')]

  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])
  content_type, body = encode_upload_request(form_fields, file)

  # HTTP Basic authentication against https://PROJECT.googlecode.com/files.
  headers = {
    'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (user_name, password)),
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
    }
  connection = httplib.HTTPSConnection('%s.googlecode.com' % project_name)
  connection.request('POST', '/files', body, headers)
  response = connection.getresponse()
  connection.close()

  # A 201 Created response carries the final file URL in "Location".
  location = response.getheader('Location', None) if response.status == 201 else None
  return response.status, response.reason, location
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'
  body = []
  # Add the metadata about the upload first
  for key, value in fields:
    body.extend(
      ['--' + BOUNDARY,
       'Content-Disposition: form-data; name="%s"' % key,
       '',
       value,
       ])
  # Now add the file itself
  # NOTE(review): Python 2 only — under Python 3 the 'rb' read yields bytes
  # and the final str.join below would raise TypeError on binary content.
  file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  file_content = f.read()
  f.close()
  body.extend(
    ['--' + BOUNDARY,
     'Content-Disposition: form-data; name="filename"; filename="%s"'
     % file_name,
     # The upload server determines the mime-type, no need to set it.
     'Content-Type: application/octet-stream',
     '',
     file_content,
     ])
  # Finalize the form body
  body.extend(['--' + BOUNDARY + '--', ''])
  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    config_dir: Path to Subversion configuration directory, 'none', or None.
    user_name: Your Google account name.
    tries: How many attempts to make.

  NOTE(review): if called with tries <= 0 the loop body never runs and the
  final return raises UnboundLocalError — callers must pass tries >= 1.
  """
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()
    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Rest for another try.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break
  return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| Python |
#!/usr/bin/env python
import os, glob, shutil
from distutils.core import setup, Extension
from foobnix.util.confguration import VERSION
def capture(cmd):
    """Run *cmd* in a shell and return its stripped standard output.

    The pipe opened by os.popen is now closed explicitly so repeated
    calls do not leak file descriptors (the original never closed it).
    """
    pipe = os.popen(cmd)
    try:
        return pipe.read().strip()
    finally:
        pipe.close()
def removeall(path):
    """Recursively delete the contents of directory *path* (best effort).

    The directory itself is left in place; callers remove it afterwards
    with os.rmdir.  Does nothing if *path* is not a directory.
    """
    if not os.path.isdir(path):
        return
    for name in os.listdir(path):
        entry = os.path.join(path, name)
        if os.path.isfile(entry):
            rmgeneric(entry, os.remove)
        elif os.path.isdir(entry):
            # Empty the subdirectory first, then remove it.
            removeall(entry)
            rmgeneric(entry, os.rmdir)
def rmgeneric(path, __func__):
    """Apply the removal function *__func__* (os.remove / os.rmdir) to *path*.

    OS-level failures are deliberately ignored: cleanup is best-effort and
    must never abort setup.  The old Python-2 form
    ``except OSError, (errno, strerror)`` bound names it never used and
    could itself raise if the exception's args did not unpack into
    exactly two values; a plain ``except OSError`` is equivalent and safe.
    """
    try:
        __func__(path)
    except OSError:
        pass
# --- Build-time generation --------------------------------------------------
# Compile gettext catalogs: po/<lang>.po -> mo/<lang>/foobnix.mo.
if not os.path.exists("mo/"):
    os.mkdir("mo/")
for lang in ('ru', 'uk'):
    pofile = "po/" + lang + ".po"
    mofile = "mo/" + lang + "/foobnix.mo"
    if not os.path.exists("mo/" + lang + "/"):
        os.mkdir("mo/" + lang + "/")
    print "generating", mofile
    # Relies on the gettext "msgfmt" tool being available on PATH.
    os.system("msgfmt %s -o %s" % (pofile, mofile))
# Copy the launcher script into the package so it can be installed via the
# "scripts" argument of setup() below.
shutil.copyfile("foobnix.py", "foobnix/foobnix")
# Freeze the current version number into foobnix/version.py for runtime use.
versionfile = file("foobnix/version.py", "wt")
versionfile.write("""
# generated by setup.py
VERSION = %r
""" % VERSION)
versionfile.close()
# Distutils package description.  NOTE(review): the two absolute
# /usr/share/locale entries in data_files bypass any --prefix the user
# passes to setup.py -- confirm that is intended.
setup(name='foobnix',
      version=VERSION,
      description='GTK+ client for the Music Player Daemon (MPD).',
      author='Ivan Ivanenko',
      author_email='ivan.ivanenko@gmail.com',
      url='www.foobnix.com',
      classifiers=[
          'Development Status :: Beta',
          'Environment :: X11 Applications',
          'Intended Audience :: End Users/Desktop',
          'License :: GNU General Public License (GPL)',
          'Operating System :: Linux',
          'Programming Language :: Python',
          'Topic :: Multimedia :: Sound :: Players',
      ],
      packages=[
          "foobnix",
          "foobnix.application",
          "foobnix.directory",
          "foobnix.glade",
          "foobnix.lyric",
          "foobnix.model",
          "foobnix.online",
          "foobnix.online.google",
          "foobnix.player",
          "foobnix.playlist",
          "foobnix.preferences",
          "foobnix.radio",
          "foobnix.tryicon",
          "foobnix.util",
          "foobnix.window",
      ],
      # Ship the glade UI definitions and images with the package.
      package_data={'foobnix': ['glade/*.glade', 'glade/*.png']},
      #package_dir={"src/foobnix": "foobnix/"},
      scripts=['foobnix/foobnix'],
      data_files=[('share/foobnix', ['README', 'CHANGELOG', 'TODO', 'TRANSLATORS']),
                  ('share/applications', ['foobnix.desktop']),
                  ('share/pixmaps', glob.glob('foobnix/pixmaps/*')),
                  ('share/man/man1', ['foobnix.1']),
                  ('/usr/share/locale/uk/LC_MESSAGES', ['mo/uk/foobnix.mo']),
                  ('/usr/share/locale/ru/LC_MESSAGES', ['mo/ru/foobnix.mo'])
                  ]
      )
# --- Post-build cleanup -----------------------------------------------------
# Remove /build, /mo, generated files and *.pyc.  Every step is
# best-effort: all errors are deliberately swallowed so cleanup can never
# fail the build.
print "Cleaning up..."
try:
    removeall("build/")
    os.rmdir("build/")
    pass
except:
    pass
try:
    removeall("mo/")
    os.rmdir("mo/")
except:
    pass
try:
    # Delete compiled bytecode left next to the sources.
    for f in os.listdir("."):
        if os.path.isfile(f):
            if os.path.splitext(os.path.basename(f))[1] == ".pyc":
                os.remove(f)
except:
    pass
try:
    # The launcher copy made before setup() is no longer needed.
    os.remove("foobnix/foobnix")
except:
    pass
try:
    os.remove("foobnix/version.py")
except:
    pass
try:
    # NOTE(review): this also wipes the user's saved configuration from
    # $HOME -- confirm that is intended for a build cleanup step.
    os.remove(os.getenv("HOME") + "/foobnix_conf.pkl")
except:
    pass
| Python |
#!/usr/bin/env python
'''
Created on Mar 10, 2010
@author: ivan
'''
import os
import gtk
import gettext
from foobnix.application.app_view import AppView
from foobnix.application.app_controller import AppController
class App():
    """Application root: builds the main view and hands it to the controller."""
    def __init__(self):
        view = AppView()
        AppController(view)
if __name__ == "__main__":
    APP_NAME = "foobnix"
    # Install gettext's _() as a builtin and point both the Python and
    # glade translation domains at the application's catalog.
    gettext.install(APP_NAME, unicode=True)
    gettext.textdomain(APP_NAME)
    gtk.glade.textdomain(APP_NAME)
    app = App()
    # NOTE(review): threads_init() is normally called before any GTK
    # widgets are created -- confirm this late call is intentional.
    gtk.gdk.threads_init() #@UndefinedVariable
    gtk.main()  # blocks until the GTK main loop quits
    print _("Success")
| Python |
'''
Created on Mar 16, 2010
@author: ivan
'''
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.model.entity import CommonBean
class RadioListModel:
POS_ICON = 0
POS_TRACK_NUMBER = 1
POS_NAME = 2
POS_PATH = 3
POS_COLOR = 4
POS_INDEX = 5
def __init__(self, widget):
self.widget = widget
self.model = gtk.ListStore(str, str, str, str, str, int)
cellpb = gtk.CellRendererPixbuf()
cellpb.set_property('cell-background', 'yellow')
iconColumn = gtk.TreeViewColumn(_('Icon'), cellpb, stock_id=0, cell_background=4)
numbetColumn = gtk.TreeViewColumn(_('N'), gtk.CellRendererText(), text=1, background=4)
descriptionColumn = gtk.TreeViewColumn(_('Music List'), gtk.CellRendererText(), text=2, background=4)
widget.append_column(iconColumn)
widget.append_column(numbetColumn)
widget.append_column(descriptionColumn)
widget.set_model(self.model)
def getSize(self):
return len(self.model)
def getBeenByPosition(self, position):
bean = CommonBean()
bean.icon = self.model[position][ self.POS_ICON]
bean.tracknumber = self.model[position][ self.POS_TRACK_NUMBER]
bean.name = self.model[position][ self.POS_NAME]
bean.path = self.model[position][ self.POS_PATH]
bean.color = self.model[position][ self.POS_COLOR]
bean.index = self.model[position][ self.POS_INDEX]
return bean
def getSelectedBean(self):
print self.widget
selection = self.widget.get_selection()
print selection
model, selected = selection.get_selected()
print model, selected
if selected:
bean = CommonBean()
bean.icon = model.get_value(selected, self.POS_ICON)
bean.tracknumber = model.get_value(selected, self.POS_TRACK_NUMBER)
bean.name = model.get_value(selected, self.POS_NAME)
bean.path = model.get_value(selected, self.POS_PATH)
bean.color = model.get_value(selected, self.POS_COLOR)
bean.index = model.get_value(selected, self.POS_INDEX)
return bean
def clear(self):
self.model.clear()
def append(self, playlistBean):
self.model.append([playlistBean.icon, playlistBean.tracknumber, playlistBean.name, playlistBean.path, playlistBean.color, playlistBean.index])
def __del__(self,*a):
print "del"
| Python |
'''
Created on Mar 16, 2010
@author: ivan
'''
from foobnix.radio.radio_model import RadioListModel
from foobnix.util.plsparser import getStationPath, getPlsName
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.playlist.playlist_model import PlaylistModel
from foobnix.model.entity import CommonBean
from foobnix.util.mouse_utils import is_double_click
class RadioListCntr():
    """Controller for the radio-stations list.

    Adds/removes stations from the toolbar buttons, starts playback on
    double-click and provides next/prev navigation for the player.
    """
    def __init__(self, gxMain, playerCntr):
        self.widget = gxMain.get_widget("radio_list_treeview")
        addButton = gxMain.get_widget("add_radio_toolbutton")
        removeButton = gxMain.get_widget("remove_radio_toolbuton")
        self.urlText = gxMain.get_widget("radio_url_entry")
        self.widget.connect("button-press-event", self.onPlaySong)
        addButton.connect("clicked", self.onAddRadio)
        removeButton.connect("clicked", self.onRemoveRadio)
        self.model = RadioListModel(self.widget)
        self.playerCntr = playerCntr
        # NOTE(review): "button-press-event" is connected to onPlaySong a
        # second time here, so the handler fires twice per click --
        # confirm whether the duplicate connect is intentional.
        self.widget.connect("button-press-event", self.onPlaySong)
        self.entityBeans = []  # CommonBean list backing the view
        self.index = self.model.getSize();  # position of the active station

    def onAddRadio(self, *args):
        """Add the station whose URL is in the entry field.

        For *.pls playlists the real stream URL is resolved first and the
        display name is decorated with it.
        """
        urlStation = self.urlText.get_text()
        if urlStation:
            nameDef = urlStation
            if urlStation.endswith(".pls"):
                getUrl = getStationPath(urlStation)
                if getUrl:
                    urlStation = getUrl
                    nameDef = getPlsName(nameDef) + " [" + urlStation + " ]"
            print nameDef
            entity = CommonBean(name=nameDef, path=urlStation, type=CommonBean.TYPE_RADIO_URL, index=self.index + 1);
            self.entityBeans.append(entity)
            self.repopulate(self.entityBeans, (self.model.getSize()))
            self.urlText.set_text("")
        pass

    def onRemoveRadio(self, *args):
        """Remove the selected station from both the model and the backing list."""
        model, iter = self.widget.get_selection().get_selected()
        if iter:
            playlistBean = self.model.getSelectedBean()
            # Match the backing bean by path (the view row holds copies).
            for i, entity in enumerate(self.entityBeans):
                if entity.path == playlistBean.path:
                    self.index = 0
                    del self.entityBeans[i]
                    model.remove(iter)

    def getState(self):
        """Snapshot [beans, active index] for persistence between sessions."""
        return [self.entityBeans, self.index]

    def setState(self, state):
        """Restore a snapshot produced by getState() and resume playback."""
        self.entityBeans = state[0]
        self.index = state[1]
        if self.entityBeans and self.index < len(self.entityBeans):
            self.repopulate(self.entityBeans, self.index);
            self.playerCntr.playSong(self.entityBeans[self.index])

    def clear(self):
        """Remove all rows from the view model."""
        self.model.clear()

    def onPlaySong(self, w, e):
        """Start the selected station when the row is double-clicked."""
        if is_double_click(e):
            print w
            print e
            playlistBean = self.model.getSelectedBean()
            playlistBean.type = CommonBean.TYPE_RADIO_URL
            #self.repopulate(self.entityBeans, playlistBean.index);
            self.index = playlistBean.index
            self.playerCntr.playSong(playlistBean)

    def getNextSong(self):
        """Advance to the next station (wrapping to 0) and return its bean."""
        self.index += 1
        if self.index >= len(self.entityBeans):
            self.index = 0
        playlistBean = self.model.getBeenByPosition(self.index)
        self.repopulate(self.entityBeans, playlistBean.index);
        return playlistBean

    def getPrevSong(self):
        """Step to the previous station (wrapping to the end) and return its bean."""
        self.index -= 1
        if self.index < 0:
            self.index = len(self.entityBeans) - 1
        playlistBean = self.model.getBeenByPosition(self.index)
        self.repopulate(self.entityBeans, playlistBean.index);
        return playlistBean

    def setPlaylist(self, entityBeans):
        """Replace the station list and start playing its first entry."""
        self.entityBeans = entityBeans
        index = 0
        if entityBeans:
            self.playerCntr.playSong(entityBeans[index])
            self.repopulate(entityBeans, index);

    def repopulate(self, entityBeans, index):
        """Rebuild the view: zebra colours, and mark row *index* as playing."""
        self.model.clear()
        for i in range(len(entityBeans)):
            songBean = entityBeans[i]
            songBean.color = self.getBackgroundColour(i)
            songBean.name = songBean.getPlayListDescription()
            songBean.index = i
            if i == index:
                songBean.setIconPlaying()
                self.model.append(songBean)
            else:
                songBean.setIconNone()
                self.model.append(songBean)

    def getBackgroundColour(self, i):
        """Alternating row colour for zebra striping."""
        if i % 2 :
            return "#F2F2F2"
        else:
            return "#FFFFE5"
| Python |
'''
Created on Mar 14, 2010
@author: ivan
'''
class PrefController():
def __init__(self, gxPref):
self.gxPref = gxPref
self.pref = gxPref.get_widget("window")
self.pref.connect("delete-event", self.hide)
def show(self):
print "show"
self.pref.show()
def hide(self, *args):
print "hide"
self.pref.hide()
return True
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 16, 2010
@author: ivanf
'''
from foobnix.radio.radio_model import RadioListModel
from foobnix.util.plsparser import getStationPath, getPlsName
from foobnix.online import pylast
import time
from foobnix.online.online_model import OnlineListModel
from foobnix.online.rupleer import find_song_urls
from foobnix.player.player_controller import PlayerController
from foobnix.online.vk import Vkontakte
from foobnix.online.search_controller import search_top_albums, \
search_top_tracks, search_top_similar, search_tags_genre
import thread
from foobnix.directory.directory_controller import DirectoryCntr
from foobnix.util.confguration import FConfiguration
from foobnix.online.google.search import GoogleSearch, SearchError
import urllib2
import os
import urllib
import threading
from foobnix.online.google.translate import Translator
from foobnix.util import LOG
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.playlist.playlist_model import PlaylistModel
from foobnix.model.entity import CommonBean
from foobnix.util.mouse_utils import is_double_click
class OnlineListCntr():
API_KEY = FConfiguration().API_KEY
API_SECRET = FConfiguration().API_SECRET
username = FConfiguration().lfm_login
password_hash = pylast.md5(FConfiguration().lfm_password)
TOP_SONGS = "TOP_SONG"
TOP_ALBUMS = "TOP_ALBUMS"
TOP_SIMILAR = "TOP_SIMILAR"
TOP_SEARCH = "TOP_SEARCH"
TOP_TAGS_GENRE = "TOP_TAGS_GENRE"
def make_dirs(self, path):
if not os.path.isdir(path):
os.makedirs(path)
def __init__(self, gxMain, playerCntr, directoryCntr, playerWidgets):
self.playerCntr = playerCntr
self.directoryCntr = directoryCntr
self.playerWidgets = playerWidgets
self.search_text = gxMain.get_widget("search_entry")
self.search_text.connect("key-press-event", self.on_key_pressed)
search_button = gxMain.get_widget("search_button")
search_button.connect("clicked", self.on_search)
self.radio_song = gxMain.get_widget("radiobutton_song")
self.radio_album = gxMain.get_widget("radiobutton_album")
self.radio_similar = gxMain.get_widget("radiobutton_similar")
self.radio_search = gxMain.get_widget("radiobutton_search")
self.radio_tags_genre = gxMain.get_widget("radiobutton_tags")
self.treeview = gxMain.get_widget("online_treeview")
self.treeview.connect("drag-end", self.on_drag_end)
self.treeview .connect("button-press-event", self.onPlaySong)
self.model = OnlineListModel(self.treeview)
self.entityBeans = []
self.index = self.model.getSize();
try:
self.network = pylast.get_lastfm_network(api_key=self.API_KEY, api_secret=self.API_SECRET, username=self.username, password_hash=self.password_hash)
#self.scrobler = self.network.get_scrobbler("tst", "1.0")
except:
self.playerWidgets.setStatusText(_("lasf.fm connection error"))
LOG.error("lasf.fm connection error")
#return None
self.vk = Vkontakte(FConfiguration().vk_login, FConfiguration().vk_password)
if not self.vk.isLive():
self.playerWidgets.setStatusText(_("Vkontakte connection error"))
LOG.error("Vkontakte connection error")
self.play_attempt = 0
self.playerThreadId = None
pass #end of init
def report_now_playing(self, song):
if song.getArtist() and song.getTitle():
print "Reporting about ... ARTIST: " + song.getArtist(), "TITLE: ", song.getTitle()
#self.scrobler.report_now_playing(song.getArtist(), song.getTitle())
else:
print _("Artist and title not correct")
def scrobble(self, artist, title, time_started, source, mode, duration, album="", track_number="", mbid=""):
self.scrobler.scrobble(artist, title, time_started, source, mode, duration, album, track_number, mbid)
def on_drag_end(self, *ars):
selected = self.model.getSelectedBean()
print "SELECTED", selected
self.directoryCntr.set_active_view(DirectoryCntr.VIEW_VIRTUAL_LISTS)
if selected.type == CommonBean.TYPE_MUSIC_URL:
selected.parent = None
self.directoryCntr.append_virtual([selected])
elif selected.type in [CommonBean.TYPE_FOLDER, CommonBean.TYPE_GOOGLE_HELP]:
selected.type = CommonBean.TYPE_FOLDER
results = []
for i in xrange(self.model.getSize()):
searchBean = self.model.getBeenByPosition(i)
#print "Search", searchBean
if str(searchBean.name) == str(selected.name):
searchBean.parent = None
results.append(searchBean)
elif str(searchBean.parent) == str(selected.name):
results.append(searchBean)
else:
print str(searchBean.parent) + " != " + str(selected.name)
self.directoryCntr.append_virtual(results)
print "drug"
self.directoryCntr.leftNoteBook.set_current_page(0)
def on_key_pressed(self, w, event):
if event.type == gtk.gdk.KEY_PRESS: #@UndefinedVariable
#Enter pressed
print "keyval", event.keyval, "keycode", event.hardware_keycode
if event.hardware_keycode == 36:
self.on_search()
def get_search_by(self):
if self.radio_song.get_active(): return self.TOP_SONGS
if self.radio_album.get_active(): return self.TOP_ALBUMS
if self.radio_similar.get_active(): return self.TOP_SIMILAR
if self.radio_search.get_active(): return self.TOP_SEARCH
if self.radio_tags_genre.get_active(): return self.TOP_TAGS_GENRE
#default is
return self.TOP_SONGS
def get_search_query(self):
query = self.search_text.get_text()
if query and len(query.strip()) > 0:
print query
return query
#Nothing found
return None
lock = threading.Lock()
def on_search(self, *args):
if self.playerThreadId:
return None
if not self.vk.isLive():
LOG.error("VK is not availiable")
LOG.error("Vkontakte connection error")
return None
self.lock.acquire()
self.clear()
query = self.get_search_query()
if query:
query = self.capitilize_query(u"" + query)
self.append([self.TextBeen("Searching... " + query + " please wait", color="GREEN")])
if self.get_search_by() == self.TOP_ALBUMS:
self.playerThreadId = thread.start_new_thread(self.search_top_albums, (query,))
#thread.start_new_thread(self.search_dots, (query,))
elif self.get_search_by() == self.TOP_SONGS:
self.playerThreadId = thread.start_new_thread(self.search_top_tracks, (query,))
#thread.start_new_thread(self.search_dots, (query,))
elif self.get_search_by() == self.TOP_SIMILAR:
self.playerThreadId = thread.start_new_thread(self.search_top_similar, (query,))
elif self.get_search_by() == self.TOP_SEARCH:
self.playerThreadId = thread.start_new_thread(self.search_vk_engine, (query,))
#thread.start_new_thread(self.search_dots, (query,))
elif self.get_search_by() == self.TOP_TAGS_GENRE:
self.playerThreadId = thread.start_new_thread(self.search_tags_genre, (query,))
#self.show_results(query, beans)
self.lock.release()
pass
def capitilize_query(self, line):
line = line.strip()
result = ""
for l in line.split():
result += " " + l[0].upper() + l[1:]
return result
def search_dots(self, query):
dots = "..."
while self.playerThreadId != None:
dots += "."
self.clear()
self.append([self.SearchingCriteriaBean(query + dots)])
time.sleep(2)
def search_top_albums(self, query):
beans = search_top_albums(self.network, query)
self.show_results(query, beans)
def search_top_tracks(self, query):
beans = search_top_tracks(self.network, query)
self.show_results(query, beans)
def is_ascii(self, s):
return all(ord(c) < 128 for c in s)
def search_tags_genre(self, query):
if not self.is_ascii(query):
translator = Translator()
query = translator.translate(query.encode(), lang_from="ru")
self.append([self.TextBeen("Translated: " + query, color="LIGHT GREEN")])
beans = search_tags_genre(self.network, query)
self.show_results(query, beans, False)
def search_top_similar(self, query):
try:
beans = search_top_similar(self.network, query)
self.show_results(query, beans)
except:
self.playerThreadId = None
self.googleHelp(query)
def search_vk_engine(self, query):
vkSongs = self.vk.find_song_urls(query)
beans = self.convertVKstoBeans(vkSongs)
self.show_results(query, beans)
def show_results(self, query, beans, criteria=True):
self.clear()
print "Show results...."
if beans:
if criteria:
self.append([self.SearchCriteriaBeen(query)])
self.append(beans)
else:
self.googleHelp(query)
self.playerThreadId = None
def googleHelp(self, query):
self.append([self.TextBeen("Not Found, wait for results from google ...")])
try:
ask = query.encode('utf-8')
gs = GoogleSearch(ask)
gs.results_per_page = 10
results = gs.get_results()
for res in results:
result = res.title.encode('utf8')
time.sleep(0.05)
self.append([self.TextBeen(str(result), color="YELLOW", type=CommonBean.TYPE_GOOGLE_HELP)])
except :
print "Search failed: %s"
def convertVKstoBeans(self, vkSongs):
beans = []
for vkSong in vkSongs:
bean = CommonBean(name=vkSong.getFullDescription(), path=vkSong.path, type=CommonBean.TYPE_MUSIC_URL);
beans.append(bean)
return beans
def TextBeen(self, query, color="RED", type=CommonBean.TYPE_FOLDER):
return CommonBean(name=query, path=None, color=color, type=type)
def SearchCriteriaBeen(self, name):
return CommonBean(name=name, path=None, color="#4DCC33", type=CommonBean.TYPE_FOLDER)
def SearchingCriteriaBean(self, name):
return CommonBean(name="Searching: " + name, path=None, color="GREEN", type=CommonBean.TYPE_FOLDER)
def append(self, beans):
for bean in beans:
self.entityBeans.append(bean)
self.repopulate(self.entityBeans, -1)
def clear(self):
self.entityBeans = []
self.model.clear()
def onPlaySong(self, w, e):
if is_double_click(e):
playlistBean = self.model.getSelectedBean()
print "play", playlistBean
print "type", playlistBean.type
if playlistBean.type == CommonBean.TYPE_MUSIC_URL:
#thread.start_new_thread(self.playBean, (playlistBean,))
self.playBean(playlistBean)
elif playlistBean.type == CommonBean.TYPE_GOOGLE_HELP:
self.search_text.set_text(playlistBean.name)
count = 0
def playBean(self, playlistBean):
if playlistBean.type == CommonBean.TYPE_MUSIC_URL:
self.setSongResource(playlistBean)
print "Find path", playlistBean.path
if not playlistBean.path:
self.count += 1
print self.count
playlistBean.setIconErorr()
if self.count < 5 :
return self.playBean(self.getNextSong())
return
count = 0
self.playerCntr.set_mode(PlayerController.MODE_ONLINE_LIST)
self.playerCntr.playSong(playlistBean)
self.index = playlistBean.index
self.repopulate(self.entityBeans, self.index)
def downloadSong(self, song):
if not FConfiguration().is_save_online:
print "Source not saved ...., please set in configuration"
return None
print "===Dowload song start"
#time.sleep(5)
file = self.get_file_store_path(song)
#remotefile = urllib2.urlopen(song.path)
#f = open(file, 'wb')
#f.write(remotefile.read())
#f.close()
#urllib.file = self.get_file_store_path(song
if not os.path.exists(file + ".tmp"):
r = urllib.urlretrieve(song.path, file + ".tmp")
os.rename(file + ".tmp", file)
print r
print "===Dowload song End ", file
else:
print "Exists ..."
def get_file_store_path(self, song):
dir = FConfiguration().onlineMusicPath
if song.getArtist():
dir = dir + "/" + song.getArtist()
self.make_dirs(dir)
song = dir + "/" + song.name + ".mp3"
print "Stored dir: ", song
return song
def setSongResource(self, playlistBean):
if not playlistBean.path:
if playlistBean.type == CommonBean.TYPE_MUSIC_URL:
file = self.get_file_store_path(playlistBean)
if os.path.isfile(file) and os.path.getsize(file) > 1:
print "Find file dowloaded"
playlistBean.path = file
playlistBean.type = CommonBean.TYPE_MUSIC_FILE
return True
else:
print "FILE NOT FOUND IN SYSTEM"
#Seach by pvleer engine
#playlistBean.path = find_song_urls(playlistBean.name)[0]
#Seach by vk engine
vkSong = self.vk.find_most_relative_song(playlistBean.name)
#print vkSongs
if vkSong:
print "GET PATH", vkSong.path
#playlistBean.name = playlistBean.name + " vk[" + str(vk.album) + " " + str(vk.track) + " " + str(vk.time) + "]"
#self.dowloadThread(playlistBean)
#self.downloadSong(playlistBean)
playlistBean.path = vkSong.path
else:
playlistBean.path = None
def dowloadThread(self, bean):
thread.start_new_thread(self.downloadSong, (bean,))
def nextBean(self):
self.index += 1
if self.index >= len(self.entityBeans):
self.index = 0
playlistBean = self.model.getBeenByPosition(self.index)
return playlistBean
def prevBean(self):
self.index -= 1
if self.index <= 0:
self.index = len(self.entityBeans)
playlistBean = self.model.getBeenByPosition(self.index)
return playlistBean
def getNextSong(self):
currentSong = self.nextBean()
if(currentSong.type == CommonBean.TYPE_FOLDER):
currentSong = self.nextBean()
self.setSongResource(currentSong)
print "PATH", currentSong.path
self.repopulate(self.entityBeans, currentSong.index);
return currentSong
def getPrevSong(self):
playlistBean = self.prevBean()
if(playlistBean.type == CommonBean.TYPE_FOLDER):
self.getPrevSong()
self.setSongResource(playlistBean)
self.repopulate(self.entityBeans, playlistBean.index);
return playlistBean
def setPlaylist(self, entityBeans):
self.entityBeans = entityBeans
index = 0
if entityBeans:
self.playerCntr.playSong(entityBeans[index])
self.repopulate(entityBeans, index);
def repopulate(self, entityBeans, index):
self.model.clear()
for i in range(len(entityBeans)):
songBean = entityBeans[i]
if not songBean.color:
songBean.color = self.getBackgroundColour(i)
songBean.name = songBean.getPlayListDescription()
songBean.index = i
if i == index:
songBean.setIconPlaying()
self.model.append(songBean)
else:
songBean.setIconNone()
self.model.append(songBean)
def getBackgroundColour(self, i):
if i % 2 :
return "#F2F2F2"
else:
return "#FFFFE5"
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#
class SLError(Exception):
    """Raised when a Sponsored Links request or parse fails."""
class SLParseError(Exception):
    """Google results could not be parsed (raised only in debug mode).

    Attributes:
      msg: explanation of why parsing failed.
      tag: BeautifulSoup object for the most relevant tag that failed to parse.
    """
    def __init__(self, msg, tag):
        self.msg, self.tag = msg, tag

    def __str__(self):
        return self.msg

    def html(self):
        """Pretty-printed HTML of the offending tag."""
        return self.tag.prettify()
# Sentinel passed to SponsoredLinks.get_all_results() to request the
# default randomised inter-page delay.
GET_ALL_SLEEP_FUNCTION = object()
class SponsoredLink(object):
    """One sponsored result: title, target URL, displayed URL, description."""
    def __init__(self, title, url, display_url, desc):
        self.title, self.url = title, url
        self.display_url, self.desc = display_url, desc
class SponsoredLinks(object):
    """Scrapes Google Sponsored Links results page by page.

    Typical use: ``sl = SponsoredLinks(query); res = sl.get_results()``;
    repeat get_results() (or use get_all_results()) until it returns [].

    Fixes vs the original:
      * _extract_description raised the undefined name ``ParseError``
        (it lives in the sibling search module) -- now ``SLParseError``;
      * _html_unescape passed ``re.U`` positionally where re.sub expects
        *count*, silently capping substitutions at 32;
      * get_all_results accepted ``sleep_function`` but never called it.
    """
    SEARCH_URL_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en"
    NEXT_PAGE_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en"
    SEARCH_URL_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en"
    NEXT_PAGE_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en"

    def __init__(self, query, random_agent=False, debug=False):
        """*query* is the search string; *debug* makes parse errors raise."""
        self.query = query
        self.debug = debug
        self.browser = Browser(debug=debug)
        self._page = 0
        self.eor = False  # end of results reached
        self.results_info = None
        self._results_per_page = 10
        if random_agent:
            self.browser.set_random_user_agent()

    @property
    def num_results(self):
        """Total number of results Google reports (fetches the first page)."""
        if not self.results_info:
            page = self._get_results_page()
            self.results_info = self._extract_info(page)
            if self.results_info['total'] == 0:
                self.eor = True
        return self.results_info['total']

    def _get_results_per_page(self):
        return self._results_per_page

    def _set_results_par_page(self, rpp):
        self._results_per_page = rpp

    results_per_page = property(_get_results_per_page, _set_results_par_page)

    def get_results(self):
        """Fetch and parse the next page of results; [] when exhausted."""
        if self.eor:
            return []
        page = self._get_results_page()
        info = self._extract_info(page)
        if self.results_info is None:
            self.results_info = info
        if info['to'] == info['total']:
            self.eor = True
        results = self._extract_results(page)
        if not results:
            self.eor = True
            return []
        self._page += 1
        return results

    def _get_all_results_sleep_fn(self):
        # Random delay of 1-6 seconds between page fetches.
        return random.random() * 5 + 1

    def get_all_results(self, sleep_function=None):
        """Fetch every result page.

        *sleep_function*: None for no delay, GET_ALL_SLEEP_FUNCTION for a
        random 1-6 s delay between pages, or any zero-arg callable
        returning a delay in seconds.  (The original accepted the
        parameter but never invoked it; the delay is now honoured.)
        """
        import time  # local: this module does not import time at top level
        if sleep_function is GET_ALL_SLEEP_FUNCTION:
            sleep_function = self._get_all_results_sleep_fn
        if sleep_function is None:
            sleep_function = lambda: None
        ret_results = []
        while True:
            res = self.get_results()
            if not res:
                return ret_results
            ret_results.extend(res)
            delay = sleep_function()
            if delay:
                time.sleep(delay)

    def _maybe_raise(self, cls, *arg):
        # Parse problems only surface as exceptions in debug mode.
        if self.debug:
            raise cls(*arg)

    def _extract_info(self, soup):
        """Parse the "Results x - y of about z" line into a dict."""
        empty_info = {'from': 0, 'to': 0, 'total': 0}
        stats_span = soup.find('span', id='stats')
        if not stats_span:
            return empty_info
        txt = ''.join(stats_span.findAll(text=True))
        # NOTE(review): the second replace looks like it targeted a
        # non-breaking space that may have been lost in transit -- verify.
        txt = txt.replace(',', '').replace(" ", ' ')
        matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
        if not matches:
            return empty_info
        return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}

    def _get_results_page(self):
        """Fetch the current results page and return it as a soup."""
        if self._page == 0:
            if self._results_per_page == 10:
                url = SponsoredLinks.SEARCH_URL_0
            else:
                url = SponsoredLinks.SEARCH_URL_1
        else:
            if self._results_per_page == 10:
                url = SponsoredLinks.NEXT_PAGE_0
            else:
                url = SponsoredLinks.NEXT_PAGE_1
        safe_url = url % {'query': urllib.quote_plus(self.query),
                          'start': self._page * self._results_per_page,
                          'num': self._results_per_page}
        try:
            page = self.browser.get_page(safe_url)
        except BrowserError as e:
            raise SLError("Failed getting %s: %s" % (e.url, e.error))
        return BeautifulSoup(page)

    def _extract_results(self, soup):
        """Collect every parseable sponsored-link div on the page."""
        results = soup.findAll('div', {'class': 'g'})
        ret_res = []
        for result in results:
            eres = self._extract_result(result)
            if eres:
                ret_res.append(eres)
        return ret_res

    def _extract_result(self, result):
        """Build a SponsoredLink from one result div, or None on failure."""
        title, url = self._extract_title_url(result)
        display_url = self._extract_display_url(result)  # Warning: removes 'cite' from the result
        desc = self._extract_description(result)
        if not title or not url or not display_url or not desc:
            return None
        return SponsoredLink(title, url, display_url, desc)

    def _extract_title_url(self, result):
        title_a = result.find('a')
        if not title_a:
            self._maybe_raise(SLParseError, "Title tag in sponsored link was not found", result)
            return None, None
        title = ''.join(title_a.findAll(text=True))
        title = self._html_unescape(title)
        url = title_a['href']
        # The href is a Google redirect; the real target is in the q= param.
        match = re.search(r'q=(http[^&]+)&', url)
        if not match:
            self._maybe_raise(SLParseError, "URL inside a sponsored link was not found", result)
            return None, None
        url = urllib.unquote(match.group(1))
        return title, url

    def _extract_display_url(self, result):
        cite = result.find('cite')
        if not cite:
            self._maybe_raise(SLParseError, "<cite> not found inside result", result)
            return None
        return ''.join(cite.findAll(text=True))

    def _extract_description(self, result):
        cite = result.find('cite')
        if not cite:
            return None
        cite.extract()
        desc_div = result.find('div', {'class': 'line23'})
        if not desc_div:
            # Fixed: was the undefined name ParseError (NameError at runtime).
            self._maybe_raise(SLParseError, "Description tag not found in sponsored link", result)
            return None
        desc_strs = desc_div.findAll(text=True)[0:-1]
        desc = ''.join(desc_strs)
        desc = desc.replace("\n", " ")
        # NOTE(review): likely intended to collapse a non-breaking space
        # lost in transit -- verify the original byte.
        desc = desc.replace(" ", " ")
        return self._html_unescape(desc)

    def _html_unescape(self, markup):
        """Replace &name; and &#NN; (NN <= 255) entities with characters.

        Fixed: the original passed ``re.U`` positionally, where re.sub
        expects *count*, silently capping substitutions at 32 per call.
        """
        def entity_replacer(m):
            entity = m.group(1)
            if entity in name2codepoint:
                return unichr(name2codepoint[entity])
            else:
                return m.group(0)

        def ascii_replacer(m):
            cp = int(m.group(1))
            if cp <= 255:
                return unichr(cp)
            else:
                return m.group(0)

        s = re.sub(r'&#(\d+);', ascii_replacer, markup, flags=re.U)
        return re.sub(r'&([^;]+);', entity_replacer, s, flags=re.U)
| Python |
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class SearchError(Exception):
    """Base class for Google Search exceptions."""
class ParseError(SearchError):
    """Google results could not be parsed (raised only in debug mode).

    Attributes:
      msg: explanation of why parsing failed.
      tag: BeautifulSoup object for the most relevant tag that failed to parse.
    """
    def __init__(self, msg, tag):
        self.msg, self.tag = msg, tag

    def __str__(self):
        return self.msg

    def html(self):
        """Pretty-printed HTML of the offending tag."""
        return self.tag.prettify()
class SearchResult:
    """A single organic search hit: title, URL and snippet description."""
    def __init__(self, title, url, desc):
        self.title, self.url, self.desc = title, url, desc

    def __str__(self):
        return 'Google Search Result: "%s"' % self.title
class GoogleSearch(object):
    """
    Paged scraper for Google web-search results.

    Each get_results() call fetches and parses one page of SearchResult
    objects and advances the internal page counter; once the end of
    results is reached it returns [] forever (self.eor is set).
    """
    # URL templates: *_0 forms are for the default 10 results per page,
    # *_1 forms carry an explicit &num= parameter for other page sizes.
    SEARCH_URL_0 = "http://www.google.com.ua/search?hl=en&q=%(query)s&btnG=Google+Search"
    #SEARCH_URL_0 = "search?hl=uk&q=%(query)&btnG=Пошук&meta=&aq=f&aqi=&aql=&oq=&gs_rfai="
    NEXT_PAGE_0 = "http://www.google.com.ua/search?hl=en&q=%(query)s&start=%(start)d"
    SEARCH_URL_1 = "http://www.google.com.ua/search?hl=en&q=%(query)s&num=%(num)d&btnG=Google+Search"
    NEXT_PAGE_1 = "http://www.google.com.ua/search?hl=en&q=%(query)s&num=%(num)d&start=%(start)d"

    def __init__(self, query, random_agent=False, debug=False):
        """
        query        -- plain (unquoted) search terms
        random_agent -- pick a random User-Agent header for the browser
        debug        -- when True, parse failures raise ParseError instead
                        of being silently skipped
        """
        self.query = query
        self.debug = debug
        self.browser = Browser(debug=debug)
        self.results_info = None
        self.eor = False # end of results
        self._page = 0
        self._results_per_page = 10
        self._last_from = 0
        if random_agent:
            self.browser.set_random_user_agent()

    @property
    def num_results(self):
        """Total hit count Google reports; fetches the first page lazily."""
        if not self.results_info:
            page = self._get_results_page()
            self.results_info = self._extract_info(page)
            if self.results_info['total'] == 0:
                self.eor = True
        return self.results_info['total']

    def _get_page(self):
        return self._page
    def _set_page(self, page):
        self._page = page
    # zero-based index of the next page get_results() will fetch
    page = property(_get_page, _set_page)

    def _get_results_per_page(self):
        return self._results_per_page
    def _set_results_per_page(self, rpp):
        # renamed from the original's typo "_set_results_par_page";
        # only referenced by the property line below, so callers are unaffected
        self._results_per_page = rpp
    results_per_page = property(_get_results_per_page, _set_results_per_page)

    def get_results(self):
        """Fetch and parse the next page; returns a list of SearchResult
        (empty once the end of results has been reached)."""
        if self.eor:
            return []
        page = self._get_results_page()
        search_info = self._extract_info(page)
        if not self.results_info:
            self.results_info = search_info
            if self.num_results == 0:
                self.eor = True
                return []
        results = self._extract_results(page)
        if not results:
            self.eor = True
            return []
        # Google sometimes re-serves the last page past the end; detect it
        # by an unchanged 'from' offset.
        if self._page > 0 and search_info['from'] == self._last_from:
            self.eor = True
            return []
        if search_info['to'] == search_info['total']:
            self.eor = True
        self._page += 1
        self._last_from = search_info['from']
        return results

    def _maybe_raise(self, cls, *arg):
        # Parse problems are fatal only in debug mode.
        if self.debug:
            raise cls(*arg)

    def _get_results_page(self):
        """Build the page URL, fetch it, and return it as BeautifulSoup."""
        if self._page == 0:
            if self._results_per_page == 10:
                url = GoogleSearch.SEARCH_URL_0
            else:
                url = GoogleSearch.SEARCH_URL_1
        else:
            if self._results_per_page == 10:
                url = GoogleSearch.NEXT_PAGE_0
            else:
                url = GoogleSearch.NEXT_PAGE_1
        safe_url = url % { 'query': urllib.quote_plus(self.query),
                           'start': self._page * self._results_per_page,
                           'num': self._results_per_page }
        try:
            page = self.browser.get_page(safe_url)
        except BrowserError as e:
            raise SearchError("Failed getting %s: %s" % (e.url, e.error))
        return BeautifulSoup(page)

    def _extract_info(self, soup):
        """Parse the 'Results X - Y of Z' line; returns zeros when absent."""
        empty_info = {'from': 0, 'to': 0, 'total': 0}
        div_ssb = soup.find('div', id='ssb')
        if not div_ssb:
            self._maybe_raise(ParseError, "Div with number of results was not found on Google search page", soup)
            return empty_info
        p = div_ssb.find('p')
        if not p:
            self._maybe_raise(ParseError, """<p> tag within <div id="ssb"> was not found on Google search page""", soup)
            return empty_info
        txt = ''.join(p.findAll(text=True))
        txt = txt.replace(',', '')
        matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt, re.U)
        if not matches:
            return empty_info
        return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}

    def _extract_results(self, soup):
        """Return a SearchResult for every parseable <li class="g"> hit."""
        results = soup.findAll('li', {'class': 'g'})
        ret_res = []
        for result in results:
            eres = self._extract_result(result)
            if eres:
                ret_res.append(eres)
        return ret_res

    def _extract_result(self, result):
        """Build one SearchResult; None when any part is missing."""
        title, url = self._extract_title_url(result)
        desc = self._extract_description(result)
        if not title or not url or not desc:
            return None
        return SearchResult(title, url, desc)

    def _extract_title_url(self, result):
        #title_a = result.find('a', {'class': re.compile(r'\bl\b')})
        title_a = result.find('a')
        if not title_a:
            self._maybe_raise(ParseError, "Title tag in Google search result was not found", result)
            return None, None
        title = ''.join(title_a.findAll(text=True))
        title = self._html_unescape(title)
        url = title_a['href']
        # Google wraps targets in /url?q=... redirects; unwrap them.
        match = re.match(r'/url\?q=(http[^&]+)&', url)
        if match:
            url = urllib.unquote(match.group(1))
        return title, url

    def _extract_description(self, result):
        desc_div = result.find('div', {'class': re.compile(r'\bs\b')})
        if not desc_div:
            self._maybe_raise(ParseError, "Description tag in Google search result was not found", result)
            return None
        desc_strs = []
        def looper(tag):
            # Collect text nodes up to the first <br>, which separates the
            # snippet from the url/cache links.
            if not tag: return
            for t in tag:
                try:
                    if t.name == 'br': break
                except AttributeError:
                    pass
                try:
                    desc_strs.append(t.string)
                except AttributeError:
                    desc_strs.append(t)
        looper(desc_div)
        looper(desc_div.find('wbr')) # BeautifulSoup does not self-close <wbr>
        desc = ''.join(s for s in desc_strs if s)
        return self._html_unescape(desc)

    def _html_unescape(self, str):
        """Decode named (&amp;) and numeric (&#NN;, NN <= 255) HTML entities."""
        def entity_replacer(m):
            entity = m.group(1)
            if entity in name2codepoint:
                return unichr(name2codepoint[entity])
            else:
                return m.group(0)
        def ascii_replacer(m):
            cp = int(m.group(1))
            if cp <= 255:
                return unichr(cp)
            else:
                return m.group(0)
        # BUG FIX: re.sub()'s fourth positional argument is *count*, not
        # flags; the original passed re.U there, silently limiting the
        # number of substitutions.  Compile the pattern with the flag.
        s = re.sub(re.compile(r'&#(\d+);', re.U), ascii_replacer, str)
        return re.sub(re.compile(r'&([^;]+);', re.U), entity_replacer, s)
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sponsored-links-search/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
#
# TODO: join GoogleSearch and SponsoredLinks classes under a single base class
#
class SLError(Exception):
    """Raised when fetching/processing Google Sponsored Links fails."""
class SLParseError(Exception):
    """
    Raised (debug mode only) when a sponsored-links page cannot be parsed.

    self.msg -- explanation of why parsing failed
    self.tag -- BeautifulSoup node closest to the failure
    """
    def __init__(self, msg, tag):
        self.tag = tag
        self.msg = msg
    def __str__(self):
        return self.msg
    def html(self):
        """Pretty-printed HTML of the offending tag, for debugging."""
        return self.tag.prettify()
# Sentinel value: pass as sleep_function to SponsoredLinks.get_all_results()
# to request the default randomized delay between page fetches.
GET_ALL_SLEEP_FUNCTION = object()
class SponsoredLink(object):
    """A single sponsored (ad) hit: title, target url, display url, snippet."""
    def __init__(self, title, url, display_url, desc):
        self.desc = desc
        self.display_url = display_url
        self.url = url
        self.title = title
class SponsoredLinks(object):
    """
    Paged scraper for Google sponsored-links results.

    get_results() returns one page of SponsoredLink objects per call until
    the end of results (then []); get_all_results() collects every page,
    optionally sleeping between fetches.
    """
    # URL templates: *_0 forms use the default 10 results per page,
    # *_1 forms carry an explicit &num= parameter.
    SEARCH_URL_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&btnG=Search+Sponsored+Links&hl=en"
    NEXT_PAGE_0 = "http://www.google.com/sponsoredlinks?q=%(query)s&sa=N&start=%(start)d&hl=en"
    SEARCH_URL_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&btnG=Search+Sponsored+Links&hl=en"
    NEXT_PAGE_1 = "http://www.google.com/sponsoredlinks?q=%(query)s&num=%(num)d&sa=N&start=%(start)d&hl=en"

    def __init__(self, query, random_agent=False, debug=False):
        """
        query        -- plain (unquoted) search terms
        random_agent -- pick a random User-Agent header for the browser
        debug        -- when True, parse failures raise SLParseError
        """
        self.query = query
        self.debug = debug
        self.browser = Browser(debug=debug)
        self._page = 0
        self.eor = False
        self.results_info = None
        self._results_per_page = 10
        if random_agent:
            self.browser.set_random_user_agent()

    @property
    def num_results(self):
        """Total number of sponsored links reported (first page fetched lazily)."""
        if not self.results_info:
            page = self._get_results_page()
            self.results_info = self._extract_info(page)
            if self.results_info['total'] == 0:
                self.eor = True
        return self.results_info['total']

    def _get_results_per_page(self):
        return self._results_per_page
    def _set_results_per_page(self, rpp):
        # renamed from the original's typo "_set_results_par_page";
        # only referenced by the property line below, so callers are unaffected
        self._results_per_page = rpp
    results_per_page = property(_get_results_per_page, _set_results_per_page)

    def get_results(self):
        """Fetch and parse the next page; returns a list of SponsoredLink."""
        if self.eor:
            return []
        page = self._get_results_page()
        info = self._extract_info(page)
        if self.results_info is None:
            self.results_info = info
        if info['to'] == info['total']:
            self.eor = True
        results = self._extract_results(page)
        if not results:
            self.eor = True
            return []
        self._page += 1
        return results

    def _get_all_results_sleep_fn(self):
        # default politeness delay between page fetches
        return random.random()*5 + 1 # sleep from 1 - 6 seconds

    def get_all_results(self, sleep_function=None):
        """
        Fetch every page of results and return them as one list.

        sleep_function -- None (no delay), GET_ALL_SLEEP_FUNCTION (random
        1-6 s delay), or any callable returning the delay in seconds.
        """
        import time  # local import: only needed for the inter-page delay
        if sleep_function is GET_ALL_SLEEP_FUNCTION:
            sleep_function = self._get_all_results_sleep_fn
        if sleep_function is None:
            sleep_function = lambda: None
        ret_results = []
        while True:
            res = self.get_results()
            if not res:
                return ret_results
            ret_results.extend(res)
            # BUG FIX: the original configured sleep_function but never
            # invoked it; actually pause between page fetches as intended.
            delay = sleep_function()
            if delay:
                time.sleep(delay)

    def _maybe_raise(self, cls, *arg):
        # Parse problems are fatal only in debug mode.
        if self.debug:
            raise cls(*arg)

    def _extract_info(self, soup):
        """Parse the 'Results X - Y of Z' stats line; zeros when absent."""
        empty_info = { 'from': 0, 'to': 0, 'total': 0 }
        stats_span = soup.find('span', id='stats')
        if not stats_span:
            return empty_info
        txt = ''.join(stats_span.findAll(text=True))
        txt = txt.replace(',', '').replace(" ", ' ')
        matches = re.search(r'Results (\d+) - (\d+) of (?:about )?(\d+)', txt)
        if not matches:
            return empty_info
        return {'from': int(matches.group(1)), 'to': int(matches.group(2)), 'total': int(matches.group(3))}

    def _get_results_page(self):
        """Build the page URL, fetch it, and return it as BeautifulSoup."""
        if self._page == 0:
            if self._results_per_page == 10:
                url = SponsoredLinks.SEARCH_URL_0
            else:
                url = SponsoredLinks.SEARCH_URL_1
        else:
            if self._results_per_page == 10:
                url = SponsoredLinks.NEXT_PAGE_0
            else:
                url = SponsoredLinks.NEXT_PAGE_1
        safe_url = url % { 'query': urllib.quote_plus(self.query),
                           'start': self._page * self._results_per_page,
                           'num': self._results_per_page }
        try:
            page = self.browser.get_page(safe_url)
        except BrowserError as e:
            raise SLError("Failed getting %s: %s" % (e.url, e.error))
        return BeautifulSoup(page)

    def _extract_results(self, soup):
        """Return a SponsoredLink for every parseable <div class="g"> ad."""
        results = soup.findAll('div', {'class': 'g'})
        ret_res = []
        for result in results:
            eres = self._extract_result(result)
            if eres:
                ret_res.append(eres)
        return ret_res

    def _extract_result(self, result):
        """Build one SponsoredLink; None when any part is missing."""
        title, url = self._extract_title_url(result)
        display_url = self._extract_display_url(result) # Warning: removes 'cite' from the result
        desc = self._extract_description(result)
        if not title or not url or not display_url or not desc:
            return None
        return SponsoredLink(title, url, display_url, desc)

    def _extract_title_url(self, result):
        title_a = result.find('a')
        if not title_a:
            self._maybe_raise(SLParseError, "Title tag in sponsored link was not found", result)
            return None, None
        title = ''.join(title_a.findAll(text=True))
        title = self._html_unescape(title)
        url = title_a['href']
        match = re.search(r'q=(http[^&]+)&', url)
        if not match:
            self._maybe_raise(SLParseError, "URL inside a sponsored link was not found", result)
            return None, None
        url = urllib.unquote(match.group(1))
        return title, url

    def _extract_display_url(self, result):
        cite = result.find('cite')
        if not cite:
            self._maybe_raise(SLParseError, "<cite> not found inside result", result)
            return None
        return ''.join(cite.findAll(text=True))

    def _extract_description(self, result):
        cite = result.find('cite')
        if not cite:
            return None
        cite.extract()
        desc_div = result.find('div', {'class': 'line23'})
        if not desc_div:
            # BUG FIX: the original raised the undefined name ParseError
            # here (a NameError at runtime); use this module's SLParseError.
            self._maybe_raise(SLParseError, "Description tag not found in sponsored link", result)
            return None
        desc_strs = desc_div.findAll(text=True)[0:-1]
        desc = ''.join(desc_strs)
        desc = desc.replace("\n", " ")
        desc = desc.replace(" ", " ")
        return self._html_unescape(desc)

    def _html_unescape(self, str):
        """Decode named (&amp;) and numeric (&#NN;, NN <= 255) HTML entities."""
        def entity_replacer(m):
            entity = m.group(1)
            if entity in name2codepoint:
                return unichr(name2codepoint[entity])
            else:
                return m.group(0)
        def ascii_replacer(m):
            cp = int(m.group(1))
            if cp <= 255:
                return unichr(cp)
            else:
                return m.group(0)
        # BUG FIX: re.sub()'s fourth positional argument is *count*, not
        # flags; the original passed re.U there, silently limiting the
        # number of substitutions.  Compile the pattern with the flag.
        s = re.sub(re.compile(r'&#(\d+);', re.U), ascii_replacer, str)
        return re.sub(re.compile(r'&([^;]+);', re.U), entity_replacer, s)
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-translate/
#
# Code is licensed under MIT license.
#
from browser import Browser, BrowserError
from urllib import quote_plus
import simplejson as json
class TranslationError(Exception):
    """Raised when a Google translate call fails or returns a bad response."""
class Translator(object):
    """Thin client for the Google AJAX language translate API (v1.0)."""
    translate_url = "http://ajax.googleapis.com/ajax/services/language/translate?v=1.0&q=%(message)s&langpair=%(from)s%%7C%(to)s"

    def __init__(self):
        self.browser = Browser()

    def translate(self, message, lang_to='en', lang_from=''):
        """
        Given a 'message' translate it from 'lang_from' to 'lang_to'.
        If 'lang_from' is empty, auto-detects the language.
        Returns the translated message.
        Raises TranslationError for unsupported languages, transport
        failures, or malformed API responses.
        """
        if lang_to not in _languages:
            raise TranslationError("Language %s is not supported as lang_to." % lang_to)
        if lang_from not in _languages and lang_from != '':
            raise TranslationError("Language %s is not supported as lang_from." % lang_from)
        message = quote_plus(message)
        real_url = Translator.translate_url % { 'message': message,
                                                'from': lang_from,
                                                'to': lang_to }
        try:
            translation = self.browser.get_page(real_url)
            data = json.loads(translation)
            if data['responseStatus'] != 200:
                raise TranslationError("Failed translating: %s" % data['responseDetails'])
            return data['responseData']['translatedText']
        except BrowserError as e:
            raise TranslationError("Failed translating (getting %s failed): %s" % (e.url, e.error))
        except ValueError as e:
            # json decode failure; str(e) replaces the py2-only e.message
            raise TranslationError("Failed translating (json failed): %s" % e)
        except KeyError:
            # response parsed but lacked the expected keys
            raise TranslationError("Failed translating, response didn't contain the translation")
        # (the original ended with an unreachable "return None"; removed)
class DetectionError(Exception):
    """Raised when a Google language-detection call fails."""
class Language(object):
    """A detected language: ISO code, readable name, detection confidence."""
    def __init__(self, lang, confidence, is_reliable):
        self.is_reliable = is_reliable
        self.confidence = confidence
        self.lang_code = lang
        self.lang = _languages[lang]  # human-readable name for the code
    def __repr__(self):
        return '<Language: ' + self.lang_code + ' (' + self.lang + ')>'
class LanguageDetector(object):
    """Thin client for the Google AJAX language-detect API (v1.0)."""
    detect_url = "http://ajax.googleapis.com/ajax/services/language/detect?v=1.0&q=%(message)s"

    def __init__(self):
        self.browser = Browser()

    def detect(self, message):
        """
        Given a 'message' detects its language.
        Returns Language object.
        Raises DetectionError for transport failures or malformed responses.
        """
        message = quote_plus(message)
        real_url = LanguageDetector.detect_url % { 'message': message }
        try:
            detection = self.browser.get_page(real_url)
            data = json.loads(detection)
            if data['responseStatus'] != 200:
                # BUG FIX: the original raised the undefined names
                # DetectError / DetectErrro throughout this method (a
                # NameError at runtime); the class is DetectionError.
                raise DetectionError("Failed detecting language: %s" % data['responseDetails'])
            rd = data['responseData']
            return Language(rd['language'], rd['confidence'], rd['isReliable'])
        except BrowserError as e:
            raise DetectionError("Failed detecting language (getting %s failed): %s" % (e.url, e.error))
        except ValueError as e:
            # json decode failure; str(e) replaces the py2-only e.message
            raise DetectionError("Failed detecting language (json failed): %s" % e)
        except KeyError:
            raise DetectionError("Failed detecting language, response didn't contain the necessary data")
        # (the original ended with an unreachable "return None"; removed)
_languages = {
'af': 'Afrikaans',
'sq': 'Albanian',
'am': 'Amharic',
'ar': 'Arabic',
'hy': 'Armenian',
'az': 'Azerbaijani',
'eu': 'Basque',
'be': 'Belarusian',
'bn': 'Bengali',
'bh': 'Bihari',
'bg': 'Bulgarian',
'my': 'Burmese',
'ca': 'Catalan',
'chr': 'Cherokee',
'zh': 'Chinese',
'zh-CN': 'Chinese_simplified',
'zh-TW': 'Chinese_traditional',
'hr': 'Croatian',
'cs': 'Czech',
'da': 'Danish',
'dv': 'Dhivehi',
'nl': 'Dutch',
'en': 'English',
'eo': 'Esperanto',
'et': 'Estonian',
'tl': 'Filipino',
'fi': 'Finnish',
'fr': 'French',
'gl': 'Galician',
'ka': 'Georgian',
'de': 'German',
'el': 'Greek',
'gn': 'Guarani',
'gu': 'Gujarati',
'iw': 'Hebrew',
'hi': 'Hindi',
'hu': 'Hungarian',
'is': 'Icelandic',
'id': 'Indonesian',
'iu': 'Inuktitut',
'ga': 'Irish',
'it': 'Italian',
'ja': 'Japanese',
'kn': 'Kannada',
'kk': 'Kazakh',
'km': 'Khmer',
'ko': 'Korean',
'ku': 'Kurdish',
'ky': 'Kyrgyz',
'lo': 'Laothian',
'lv': 'Latvian',
'lt': 'Lithuanian',
'mk': 'Macedonian',
'ms': 'Malay',
'ml': 'Malayalam',
'mt': 'Maltese',
'mr': 'Marathi',
'mn': 'Mongolian',
'ne': 'Nepali',
'no': 'Norwegian',
'or': 'Oriya',
'ps': 'Pashto',
'fa': 'Persian',
'pl': 'Polish',
'pt-PT': 'Portuguese',
'pa': 'Punjabi',
'ro': 'Romanian',
'ru': 'Russian',
'sa': 'Sanskrit',
'sr': 'Serbian',
'sd': 'Sindhi',
'si': 'Sinhalese',
'sk': 'Slovak',
'sl': 'Slovenian',
'es': 'Spanish',
'sw': 'Swahili',
'sv': 'Swedish',
'tg': 'Tajik',
'ta': 'Tamil',
'tl': 'Tagalog',
'te': 'Telugu',
'th': 'Thai',
'bo': 'Tibetan',
'tr': 'Turkish',
'uk': 'Ukrainian',
'ur': 'Urdu',
'uz': 'Uzbek',
'ug': 'Uighur',
'vi': 'Vietnamese',
'cy': 'Welsh',
'yi': 'Yiddish'
};
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import random
import socket
import urllib
import urllib2
import httplib
# Pool of real-world User-Agent strings; one is chosen at random by
# Browser.set_random_user_agent() to make the scraper look like a browser.
BROWSERS = (
    # Top most popular browsers in my access.log on 2009.02.12
    # tail -50000 access.log |
    #  awk -F\" '{B[$6]++} END { for (b in B) { print B[b] ": " b } }' |
    #  sort -rn |
    #  head -20
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; ru-RU; rv:1.9.0.6) Gecko/2009011912 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (X11; U; Linux i686; ru-RU; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.48 Safari/525.19'
)
TIMEOUT = 5 # socket timeout in seconds, applied in PoolHTTPConnection.connect
class BrowserError(Exception):
    """Raised by Browser.get_page(); carries the failing url and error text."""
    def __init__(self, url, error):
        self.error = error
        self.url = url
class PoolHTTPConnection(httplib.HTTPConnection):
    # HTTPConnection subclass whose only change from the stock connect()
    # is applying the module-level TIMEOUT to the socket.
    def connect(self):
        """Connect to the host and port specified in __init__."""
        msg = "getaddrinfo returns an empty list"
        # try every resolved address until one connects
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                # unlike the stock implementation, bound the connect/read
                # time so a dead server cannot hang the scraper
                self.sock.settimeout(TIMEOUT)
                self.sock.connect(sa)
            except socket.error, msg:
                # remember the error (bound to msg) and try the next address
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # every address failed; re-raise the last socket error
            raise socket.error, msg
class PoolHTTPHandler(urllib2.HTTPHandler):
    # urllib2 handler that routes plain http requests through
    # PoolHTTPConnection, thereby picking up its socket timeout.
    def http_open(self, req):
        return self.do_open(PoolHTTPConnection, req)
class Browser(object):
    # Minimal urllib2-based HTTP client that sends browser-like headers
    # and converts every failure into BrowserError.
    def __init__(self, user_agent=BROWSERS[0], debug=False, use_pool=False):
        # NOTE(review): use_pool is accepted but never read — confirm intent.
        self.headers = {
            'User-Agent': user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5'
        }
        self.debug = debug
    def get_page(self, url, data=None):
        """Fetch url (GET, or POST when a data dict is given) and return
        the response body.  Raises BrowserError on any failure."""
        handlers = [PoolHTTPHandler]
        opener = urllib2.build_opener(*handlers)
        if data: data = urllib.urlencode(data)
        request = urllib2.Request(url, data, self.headers)
        try:
            response = opener.open(request)
            return response.read()
        except (urllib2.HTTPError, urllib2.URLError), e:
            raise BrowserError(url, str(e))
        except (socket.error, socket.sslerror), msg:
            raise BrowserError(url, msg)
        except socket.timeout, e:
            raise BrowserError(url, "timeout")
        except KeyboardInterrupt:
            # let Ctrl-C propagate instead of being swallowed below
            raise
        except:
            # deliberate catch-all: callers only ever see BrowserError
            raise BrowserError(url, "unknown error")
    def set_random_user_agent(self):
        """Pick a random User-Agent from BROWSERS; returns the chosen string."""
        self.headers['User-Agent'] = random.choice(BROWSERS)
        return self.headers['User-Agent']
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sets/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class GSError(Exception):
    """Raised when fetching Google Sets results fails."""
class GSParseError(Exception):
    """
    Raised (debug mode only) when a Google Sets page cannot be parsed.

    self.msg -- explanation of why parsing failed
    self.tag -- BeautifulSoup node closest to the failure
    """
    def __init__(self, msg, tag):
        self.tag = tag
        self.msg = msg
    def __str__(self):
        return self.msg
    def html(self):
        """Pretty-printed HTML of the offending tag, for debugging."""
        return self.tag.prettify()
# Result-set size selectors accepted by GoogleSets.get_results().
LARGE_SET = 1
SMALL_SET = 2
class GoogleSets(object):
    """
    Scraper for the Google Sets (labs) service: given up to five seed
    items, returns a list of related items.
    """
    URL_LARGE = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Large+Set"
    URL_SMALL = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Small+Set+(15+items+or+fewer)"

    def __init__(self, items, random_agent=False, debug=False):
        """
        items        -- seed items (at most five are used by the form)
        random_agent -- pick a random User-Agent header for the browser
        debug        -- when True, parse failures raise GSParseError
        """
        self.items = items
        self.debug = debug
        self.browser = Browser(debug=debug)
        if random_agent:
            self.browser.set_random_user_agent()

    def get_results(self, set_type=SMALL_SET):
        """Fetch one Sets page and return the related items as strings."""
        return self._extract_results(self._get_results_page(set_type))

    def _maybe_raise(self, cls, *arg):
        # Parse problems are fatal only in debug mode.
        if self.debug:
            raise cls(*arg)

    def _get_results_page(self, set_type):
        url = GoogleSets.URL_LARGE if set_type == LARGE_SET else GoogleSets.URL_SMALL
        safe_items = [urllib.quote_plus(i) for i in self.items]
        # The form always carries five q-parameters; pad with blanks.
        while len(safe_items) < 5:
            safe_items.append('')
        safe_url = url % tuple(safe_items)
        try:
            page = self.browser.get_page(safe_url)
        except BrowserError as e:
            raise GSError("Failed getting %s: %s" % (e.url, e.error))
        return BeautifulSoup(page)

    def _extract_results(self, soup):
        anchors = soup.findAll('a', href=re.compile('/search'))
        return [a.string for a in anchors]
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-search/
#
# Code is licensed under MIT license.
#
import random
import socket
import urllib
import urllib2
import httplib
# Pool of real-world User-Agent strings; one is chosen at random by
# Browser.set_random_user_agent() to make the scraper look like a browser.
BROWSERS = (
    # Top most popular browsers in my access.log on 2009.02.12
    # tail -50000 access.log |
    #  awk -F\" '{B[$6]++} END { for (b in B) { print B[b] ": " b } }' |
    #  sort -rn |
    #  head -20
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; ru-RU; rv:1.9.0.6) Gecko/2009011912 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (X11; U; Linux i686; ru-RU; rv:1.9.0.6) Gecko/2009020911 Ubuntu/8.10 (intrepid) Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6',
    'Mozilla/5.0 (Windows; U; Windows NT 6.0; ru-RU; rv:1.9.0.6) Gecko/2009011913 Firefox/3.0.6 (.NET CLR 3.5.30729)',
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.48 Safari/525.19'
)
TIMEOUT = 5 # socket timeout in seconds, applied in PoolHTTPConnection.connect
class BrowserError(Exception):
    """Wraps any fetch failure from Browser.get_page(): url plus error text."""
    def __init__(self, url, error):
        self.url = url
        self.error = error  # string or original exception payload
class PoolHTTPConnection(httplib.HTTPConnection):
    # HTTPConnection subclass whose only change from the stock connect()
    # is applying the module-level TIMEOUT to the socket.
    def connect(self):
        """Connect to the host and port specified in __init__."""
        msg = "getaddrinfo returns an empty list"
        # try every resolved address until one connects
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                # unlike the stock implementation, bound the connect/read
                # time so a dead server cannot hang the scraper
                self.sock.settimeout(TIMEOUT)
                self.sock.connect(sa)
            except socket.error, msg:
                # remember the error (bound to msg) and try the next address
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # every address failed; re-raise the last socket error
            raise socket.error, msg
class PoolHTTPHandler(urllib2.HTTPHandler):
    # urllib2 handler that routes plain http requests through
    # PoolHTTPConnection, thereby picking up its socket timeout.
    def http_open(self, req):
        return self.do_open(PoolHTTPConnection, req)
class Browser(object):
    # Minimal urllib2-based HTTP client that sends browser-like headers
    # and converts every failure into BrowserError.
    def __init__(self, user_agent=BROWSERS[0], debug=False, use_pool=False):
        # NOTE(review): use_pool is accepted but never read — confirm intent.
        self.headers = {
            'User-Agent': user_agent,
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-us,en;q=0.5'
        }
        self.debug = debug
    def get_page(self, url, data=None):
        """Fetch url (GET, or POST when a data dict is given) and return
        the response body.  Raises BrowserError on any failure."""
        handlers = [PoolHTTPHandler]
        opener = urllib2.build_opener(*handlers)
        if data: data = urllib.urlencode(data)
        request = urllib2.Request(url, data, self.headers)
        try:
            response = opener.open(request)
            return response.read()
        except (urllib2.HTTPError, urllib2.URLError), e:
            raise BrowserError(url, str(e))
        except (socket.error, socket.sslerror), msg:
            raise BrowserError(url, msg)
        except socket.timeout, e:
            raise BrowserError(url, "timeout")
        except KeyboardInterrupt:
            # let Ctrl-C propagate instead of being swallowed below
            raise
        except:
            # deliberate catch-all: callers only ever see BrowserError
            raise BrowserError(url, "unknown error")
    def set_random_user_agent(self):
        """Pick a random User-Agent from BROWSERS; returns the chosen string."""
        self.headers['User-Agent'] = random.choice(BROWSERS)
        return self.headers['User-Agent']
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2007, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.6"
__copyright__ = "Copyright (c) 2004-2008 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
#This hack makes Beautiful Soup able to parse XML with namespaces
# (sgmllib's stock tagfind regex rejects ':' and '.' inside tag names).
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
# Encoding used by default when rendering a parse tree back to bytes.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        # If the parent already has children, this element becomes the new
        # last child; wire the sibling pointers in both directions.
        if self.parent and self.parent.contents:
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self
    def replaceWith(self, replaceWith):
        """Replaces this element in the tree with replaceWith (tag or string),
        by extracting self and inserting replaceWith at the same index."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)
    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                # already absent from the parent's contents; nothing to do
                pass
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next
        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None
        self.parent = None
        # Also unhook the sibling links on both sides of this element.
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        # Returns self so callers can keep using the detached subtree.
        return self
    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        # Descend into the last child repeatedly until a leaf is reached.
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild
    def insert(self, position, newChild):
        """Inserts newChild at the given index of this tag's contents,
        rewiring the parent/sibling/next/previous pointers of everything
        involved.  Plain strings are wrapped in NavigableString first."""
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)
        # clamp so that an out-of-range position appends
        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                index = self.find(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()
        newChild.parent = self
        previousChild = None
        if position == 0:
            # becomes the first child: its 'previous' in parse order is self
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild
        newChildsLastElement = newChild._lastRecursiveChild()
        if position >= len(self.contents):
            # appended at the end: the next element in parse order is the
            # next sibling of the closest ancestor that has one
            newChild.nextSibling = None
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild
        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)
    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        # convenience wrapper over insert() at the end position
        self.insert(len(self.contents), tag)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
    """Returns the first item that matches the given criteria and
    appears before this Tag in the document."""
    finder = self.findAllPrevious
    return self._findOne(finder, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
    """Returns all items that match the given criteria and appear
    before this Tag in the document."""
    source = self.previousGenerator
    return self._findAll(name, attrs, text, limit, source, **kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
    """Returns the closest sibling to this Tag that matches the
    given criteria and appears before this Tag in the document."""
    finder = self.findPreviousSiblings
    return self._findOne(finder, name, attrs, text, **kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
                         limit=None, **kwargs):
    """Returns the siblings of this Tag that match the given
    criteria and appear before this Tag in the document."""
    source = self.previousSiblingGenerator
    return self._findAll(name, attrs, text, limit, source, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
    """Returns the closest parent of this Tag that matches the given
    criteria."""
    # NOTE: We can't use _findOne because findParents takes a different
    # set of arguments.
    matches = self.findParents(name, attrs, 1)
    if not matches:
        return None
    return matches[0]
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
    """Returns the parents of this Tag that match the given
    criteria."""
    source = self.parentGenerator
    return self._findAll(name, attrs, None, limit, source, **kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
    """Iterates over a generator looking for things that match."""
    if isinstance(name, SoupStrainer):
        strainer = name
    else:
        # Build a SoupStrainer out of the loose criteria.
        strainer = SoupStrainer(name, attrs, text, **kwargs)
    results = ResultSet(strainer)
    candidates = generator()
    while True:
        try:
            candidate = candidates.next()
        except StopIteration:
            break
        if not candidate:
            continue
        match = strainer.search(candidate)
        if match:
            results.append(match)
            if limit and len(results) >= limit:
                break
    return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
    """Yields each element after this one in document order.
    Note: also yields the terminating None."""
    node = self
    while node:
        node = node.next
        yield node
def nextSiblingGenerator(self):
    """Yields each later sibling of this element in order.
    Note: also yields the terminating None."""
    node = self
    while node:
        node = node.nextSibling
        yield node
def previousGenerator(self):
    """Yields each element before this one in document order.
    Note: also yields the terminating None."""
    node = self
    while node:
        node = node.previous
        yield node
def previousSiblingGenerator(self):
    """Yields each earlier sibling of this element in order.
    Note: also yields the terminating None."""
    node = self
    while node:
        node = node.previousSibling
        yield node
def parentGenerator(self):
    """Yields each ancestor of this element, innermost first.
    Note: also yields the terminating None."""
    node = self
    while node:
        node = node.parent
        yield node
# Utility methods
def substituteEncoding(self, str, encoding=None):
    """Replaces the %SOUP-ENCODING% placeholder in *str* with the
    given encoding name (defaulting to utf-8)."""
    if not encoding:
        encoding = "utf-8"
    return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
    """Encodes an object to a byte string in some encoding, or to
    Unicode when *encoding* is None."""
    if isinstance(s, unicode):
        # Unicode in, bytes out only when an encoding was requested.
        if encoding:
            s = s.encode(encoding)
    elif isinstance(s, str):
        if encoding:
            # Py2 byte string: re-encode into the requested encoding.
            s = s.encode(encoding)
        else:
            s = unicode(s)
    else:
        # Not a string at all: stringify it, then recurse so the
        # str branch above applies the encoding.
        if encoding:
            s = self.toEncoding(str(s), encoding)
        else:
            s = unicode(s)
    return s
class NavigableString(unicode, PageElement):
    """A text node: a unicode string that also takes part in the parse
    tree through the PageElement navigation attributes."""
    def __getnewargs__(self):
        # Support copy/pickle: a unicode subclass is recreated from its
        # encoded string value.
        return (NavigableString.__str__(self),)
    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
    def __unicode__(self):
        # Round-trip through __str__ so subclasses' wrappers are kept.
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # With an encoding, return an encoded byte string; with
        # encoding=None, return self (it is already unicode).
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section; renders wrapped in <![CDATA[...]]>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
    """A processing instruction; renders wrapped in <?...?>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        output = self
        # An XML declaration may carry a %SOUP-ENCODING% placeholder
        # that gets substituted with the output encoding at render time.
        if "%SOUP-ENCODING%" in output:
            output = self.substituteEncoding(output, encoding)
        return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
    """An HTML/XML comment; renders wrapped in <!--...-->."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
    """An SGML declaration (e.g. a DOCTYPE); renders wrapped in <!...>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
    """Represents a found HTML tag with its attributes and contents."""
    def _invert(h):
        "Cheap function to invert a hash."
        i = {}
        for k,v in h.items():
            i[v] = k
        return i
    # The five XML named entities and their literal characters, plus the
    # reverse mapping used when escaping output.
    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }
    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        x = match.group(1)
        if self.convertHTMLEntities and x in name2codepoint:
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
            if self.convertXMLEntities:
                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
            else:
                return u'&%s;' % x
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        elif self.escapeUnrecognizedEntities:
            # NOTE(review): this branch returns the same text as the
            # final else; upstream BeautifulSoup escapes the ampersand
            # here (u'&amp;%s;') -- verify against the original source.
            return u'&%s;' % x
        else:
            return u'&%s;' % x
    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."
        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs == None:
            attrs = []
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
        # Convert any HTML, XML, or numeric entities in the attribute values.
        convert = lambda(k, val): (k,
                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                                          self._convertEntities,
                                          val))
        self.attrs = map(convert, self.attrs)
    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)
    def has_key(self, key):
        return self._getAttrMap().has_key(key)
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]
    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)
    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)
    def __contains__(self, x):
        return x in self.contents
    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        # Update every (key, value) pair with this key in the attr list.
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        self._getAttrMap()[key] = value
    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        for item in self.attrs:
            if item[0] == key:
                self.attrs.remove(item)
                #We don't break because bad HTML can define the same
                #attribute multiple times.
                # NOTE(review): removing from self.attrs while iterating
                # it can skip an adjacent duplicate -- verify.
        self._getAttrMap()
        if self.attrMap.has_key(key):
            del self.attrMap[key]
    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return apply(self.findAll, args, kwargs)
    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        # tag.fooTag and tag.foo are both shorthand for tag.find('foo');
        # dunder names are never treated as tag lookups.
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.
        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True
    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other
    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)
    def __unicode__(self):
        return self.__str__(None)
    # Matches angle brackets, and ampersands that are not already part
    # of a numeric or named entity.
    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           + ")")
    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.
        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""
        encodedName = self.toEncoding(self.name, encoding)
        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isString(val):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)
                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")
                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName
        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the document root) render only their
            # contents.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s
    def decompose(self):
        """Recursively destroys the contents of this tree."""
        contents = [i for i in self.contents]
        for i in contents:
            if isinstance(i, Tag):
                i.decompose()
            else:
                i.extract()
        self.extract()
    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return self.__str__(encoding, True)
    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string.."""
        s = []
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)
    #Soup methods
    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find
    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria. You can specify the name of the Tag and any
        attributes you want the Tag to have.
        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll
    # Pre-3.x compatibility methods
    first = find
    fetch = findAll
    def fetchText(self, text=None, recursive=True, limit=None):
        return self.findAll(text=text, recursive=recursive, limit=limit)
    def firstText(self, text=None, recursive=True):
        return self.find(text=text, recursive=recursive)
    #Private methods
    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # NOTE(review): getattr without a default normally raises
        # AttributeError for a missing name; here the class's own
        # __getattr__ intercepts the lookup -- verify that a falsy
        # value is always returned when attrMap is not yet set.
        if not getattr(self, 'attrMap'):
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap
    #Generator methods
    def childGenerator(self):
        # Yields each direct child in order.
        for i in range(0, len(self.contents)):
            yield self.contents[i]
        raise StopIteration
    def recursiveChildGenerator(self):
        # Depth-first walk over all descendants, using an explicit
        # stack of (tag, resume-index) pairs instead of recursion.
        stack = [(self, 0)]
        while stack:
            tag, start = stack.pop()
            if isinstance(tag, Tag):
                for i in range(start, len(tag.contents)):
                    a = tag.contents[i]
                    yield a
                    if isinstance(a, Tag) and tag.contents:
                        if i < len(tag.contents) - 1:
                            # Remember where to resume in this tag
                            # before descending into the child.
                            stack.append((tag, i+1))
                        stack.append((a, 0))
                        break
        raise StopIteration
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for attrs is shorthand for matching the CSS
        # 'class' attribute.
        if isString(attrs):
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text
    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)
    def searchTag(self, markupName=None, markupAttrs={}):
        """Matches a tag -- given either as a Tag object or as a
        (name, attrs) pair -- against this strainer's criteria.
        Returns the matched object, or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable name receives the raw (name, attrs) pair rather
        # than being matched against the tag name.
        callFunctionWithTagData = callable(self.name) \
                                  and not isinstance(markupName, Tag)
        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        # Lazily build a dict view of the tag's
                        # attributes on the first criterion checked.
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found
    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found
    def _matches(self, markup, matchAgainst):
        """Matches a single value against a single criterion, which may
        be True, a callable, a regexp, a list, a dict, or a string."""
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            # The criterion True matches anything that exists.
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif isList(matchAgainst):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): markup is a string here, and strings
                # have no has_key -- verify this branch is reachable.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                # Compare in the same string type as the markup.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)
            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        """Creates an empty result list that remembers *source*, the
        SoupStrainer that produced it."""
        # Bug fix: the original called list.__init__([]), which
        # initialized a throwaway list instead of this instance.
        list.__init__(self)
        self.source = source
# Now, some helper functions.
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike."""
    if hasattr(l, '__iter__'):
        return True
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike."""
    try:
        text_types = (unicode, basestring)
    except NameError:
        # Python 3: no unicode/basestring; plain str is the only
        # text type.
        return isinstance(s, str)
    return isinstance(s, text_types)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            #It's a map. Merge it.
            for key, value in portion.items():
                built[key] = value
        elif isList(portion):
            #It's a list. Map each item to the default.
            for key in portion:
                built[key] = default
        else:
            #It's a scalar. Map it to the default.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed()
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127 : # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
def _feed(self, inDocumentEncoding=None):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
if markup:
if self.markupMassage:
if not isList(self.markupMassage):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del(self.markupMassage)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
elif methodName.find('__') != 0:
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
# Tags with just one string-owning child get the child as a
# 'string' property, so that soup.tag.string is shorthand for
# soup.tag.contents[0]
if len(self.currentTag.contents) == 1 and \
isinstance(self.currentTag.contents[0], NavigableString):
self.currentTag.string = self.currentTag.contents[0]
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = ''.join(self.currentData)
if not currentData.translate(self.STRIP_ASCII_SPACES):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
<p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers != None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurance.
popTo = name
break
if (nestingResetTriggers != None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers == None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
self.handle_data(data)
    def handle_decl(self, data):
        """Wrap a DOCTYPE (or similar SGML declaration) in a Declaration
        node and push it into the parse tree."""
        self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:
    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.
    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.
    * Tag nesting rules:
      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.
       <p>Para1<p>Para2
      should be transformed into:
       <p>Para1</p><p>Para2
      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.
       Alice said: <blockquote>Bob said: <blockquote>Blah
      should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.
       <table><tr>Blah<tr>Blah
      should be transformed into:
       <table><tr>Blah</tr><tr>Blah
      but,
       <tr>Blah<table><tr>Blah
      should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah
    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""
    def __init__(self, *args, **kwargs):
        # HTML documents get smart quotes converted to HTML entities
        # by default (BeautifulStoneSoup defaults to XML entities).
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        BeautifulStoneSoup.__init__(self, *args, **kwargs)
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base'])
    QUOTE_TAGS = {'script' : None, 'textarea' : None}
    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']
    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }
    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }
    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)
    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)")
    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False
        # Collect the http-equiv and content attributes (if present).
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i
        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if getattr(self, 'declaredHTMLEncoding') or \
                       (self.originalEncoding == self.fromEncoding):
                    # This is our second pass through the document, or
                    # else an encoding was specified explicitly and it
                    # worked. Rewrite the meta tag.
                    newAttr = self.CHARSET_RE.sub\
                              (lambda(match):match.group(1) +
                               "%SOUP-ENCODING%", contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the new information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        # Abort this parse; _feed restarted from scratch.
                        raise StopParsing
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised to abort the current parse so the document can be re-fed
    with a newly discovered encoding (see BeautifulSoup.start_meta)."""
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:
     <b>Foo<b>Bar</b></b>
    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.
    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""
    # NOTE(review): 'strong' and 'big' each appear twice in this list;
    # harmless since buildTagMap produces a map, but worth confirming.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big']
    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.
    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""
    # Only <noscript> resets nesting; no tags are considered nestable.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:
    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>
    You can then access fooTag['bar'] instead of fooTag.barTag.string.
    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.
    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""
    def popTag(self):
        # When closing a tag whose only child is a string, mirror that
        # string onto the parent as an attribute (unless the parent
        # already has an attribute of that name).
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents.

    After construction, self.unicode holds the converted document (or
    None if every conversion attempt failed) and self.originalEncoding
    the codec that worked.
    """
    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }
    # NOTE(review): overrideEncodings uses a mutable default argument;
    # harmless here because it is only iterated, never mutated.
    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml'):
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        # Already-Unicode (or empty) input needs no conversion.
        if markup == '' or isinstance(markup, unicode):
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return
        u = None
        # Try caller-supplied encodings first, ...
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        # ... then what the document itself declares / what the BOM says, ...
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break
        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])
        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break
        self.unicode = u
        if not u: self.originalEncoding = None
    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        # Tuple entries carry (html-entity-name, hex-codepoint); plain
        # string entries (like '?') are substituted as-is.
        if type(sub) == types.TupleType:
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub
    def _convertFrom(self, proposed):
        """Try to decode self.markup with the proposed encoding.

        Returns the Unicode document on success (also recording the
        encoding in self.originalEncoding), or None on failure. Each
        codec is attempted at most once (self.triedEncodings)."""
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup
        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)
        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup
    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''
        # strip Byte Order Mark (if present); a BOM also overrides the
        # proposed encoding with the one it implies.
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata
    def _detectEncoding(self, xml_data):
        """Given a document, tries to detect its XML encoding.

        Returns (possibly re-encoded data, declared XML encoding or
        None, encoding sniffed from the BOM/byte pattern or None)."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
            # Look for an explicit encoding in the XML declaration.
            xml_encoding_match = re.compile \
                                 ('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')\
                                 .match(xml_data)
        except:
            xml_encoding_match = None
        if xml_encoding_match:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            # A generic UTF-16/32 declaration defers to the BOM-sniffed
            # byte order.
            if sniffed_xml_encoding and \
               (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                                 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                                 'utf-16', 'utf-32', 'utf_16', 'utf_32',
                                 'utf16', 'u16')):
                xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding
    def find_codec(self, charset):
        """Map a charset name to a codec Python recognizes, trying the
        alias table and common punctuation variants; falls back to the
        input unchanged."""
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset
    def _codec(self, charset):
        """Return charset if Python has a codec for it, else None."""
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec
    # Lazily built translation table for EBCDIC input; see below.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate an EBCDIC byte string to its ASCII equivalent."""
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)
    # Windows-1252 "smart" punctuation -> (html entity name, hex codepoint),
    # or a plain replacement string where no entity exists.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
# When run as a script, read HTML on stdin and pretty-print it.
if __name__ == '__main__':
    import sys
    soup = BeautifulSoup(sys.stdin.read())
    print soup.prettify()
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-translate/
#
# Code is licensed under MIT license.
#
from browser import Browser, BrowserError
from urllib import quote_plus
import simplejson as json
class TranslationError(Exception):
    """Raised by Translator.translate for unsupported languages, HTTP
    failures, or malformed API responses."""
    pass
class Translator(object):
    """Thin client for the Google AJAX Language API translate endpoint."""
    translate_url = "http://ajax.googleapis.com/ajax/services/language/translate?v=1.0&q=%(message)s&langpair=%(from)s%%7C%(to)s"
    def __init__(self):
        self.browser = Browser()
    def translate(self, message, lang_to='en', lang_from=''):
        """
        Given a 'message' translate it from 'lang_from' to 'lang_to'.
        If 'lang_from' is empty, auto-detects the language.
        Returns the translated message.
        Raises TranslationError on any failure.
        """
        # Validate language codes against the module-level table.
        if lang_to not in _languages:
            raise TranslationError, "Language %s is not supported as lang_to." % lang_to
        if lang_from not in _languages and lang_from != '':
            raise TranslationError, "Language %s is not supported as lang_from." % lang_from
        message = quote_plus(message)
        real_url = Translator.translate_url % { 'message': message,
                                                'from': lang_from,
                                                'to': lang_to }
        try:
            translation = self.browser.get_page(real_url)
            data = json.loads(translation)
            if data['responseStatus'] != 200:
                raise TranslationError, "Failed translating: %s" % data['responseDetails']
            return data['responseData']['translatedText']
        except BrowserError, e:
            raise TranslationError, "Failed translating (getting %s failed): %s" % (e.url, e.error)
        except ValueError, e:
            raise TranslationError, "Failed translating (json failed): %s" % e.message
        except KeyError, e:
            raise TranslationError, "Failed translating, response didn't contain the translation"
        # NOTE(review): unreachable — every path above returns or raises.
        return None
class DetectionError(Exception):
    """Raised by LanguageDetector.detect on HTTP failures or malformed
    API responses."""
    pass
class Language(object):
    """Result of a language-detection call.

    lang_code is the code returned by the API; lang is its
    human-readable name looked up in the module-level _languages table
    (an unknown code raises KeyError); confidence and is_reliable come
    straight from the API response.
    """
    def __init__(self, lang, confidence, is_reliable):
        self.lang_code = lang
        self.lang = _languages[lang]
        self.confidence = confidence
        self.is_reliable = is_reliable
    def __repr__(self):
        return '<Language: %s (%s)>' % (self.lang_code, self.lang)
class LanguageDetector(object):
detect_url = "http://ajax.googleapis.com/ajax/services/language/detect?v=1.0&q=%(message)s"
def __init__(self):
self.browser = Browser()
def detect(self, message):
"""
Given a 'message' detects its language.
Returns Language object.
"""
message = quote_plus(message)
real_url = LanguageDetector.detect_url % { 'message': message }
try:
detection = self.browser.get_page(real_url)
data = json.loads(detection)
if data['responseStatus'] != 200:
raise DetectError, "Failed detecting language: %s" % data['responseDetails']
rd = data['responseData']
return Language(rd['language'], rd['confidence'], rd['isReliable'])
except BrowserError, e:
raise DetectError, "Failed detecting language (getting %s failed): %s" % (e.url, e.error)
except ValueError, e:
raise DetectErrro, "Failed detecting language (json failed): %s" % e.message
except KeyError, e:
raise DetectError, "Failed detecting language, response didn't contain the necessary data"
return None
_languages = {
'af': 'Afrikaans',
'sq': 'Albanian',
'am': 'Amharic',
'ar': 'Arabic',
'hy': 'Armenian',
'az': 'Azerbaijani',
'eu': 'Basque',
'be': 'Belarusian',
'bn': 'Bengali',
'bh': 'Bihari',
'bg': 'Bulgarian',
'my': 'Burmese',
'ca': 'Catalan',
'chr': 'Cherokee',
'zh': 'Chinese',
'zh-CN': 'Chinese_simplified',
'zh-TW': 'Chinese_traditional',
'hr': 'Croatian',
'cs': 'Czech',
'da': 'Danish',
'dv': 'Dhivehi',
'nl': 'Dutch',
'en': 'English',
'eo': 'Esperanto',
'et': 'Estonian',
'tl': 'Filipino',
'fi': 'Finnish',
'fr': 'French',
'gl': 'Galician',
'ka': 'Georgian',
'de': 'German',
'el': 'Greek',
'gn': 'Guarani',
'gu': 'Gujarati',
'iw': 'Hebrew',
'hi': 'Hindi',
'hu': 'Hungarian',
'is': 'Icelandic',
'id': 'Indonesian',
'iu': 'Inuktitut',
'ga': 'Irish',
'it': 'Italian',
'ja': 'Japanese',
'kn': 'Kannada',
'kk': 'Kazakh',
'km': 'Khmer',
'ko': 'Korean',
'ku': 'Kurdish',
'ky': 'Kyrgyz',
'lo': 'Laothian',
'lv': 'Latvian',
'lt': 'Lithuanian',
'mk': 'Macedonian',
'ms': 'Malay',
'ml': 'Malayalam',
'mt': 'Maltese',
'mr': 'Marathi',
'mn': 'Mongolian',
'ne': 'Nepali',
'no': 'Norwegian',
'or': 'Oriya',
'ps': 'Pashto',
'fa': 'Persian',
'pl': 'Polish',
'pt-PT': 'Portuguese',
'pa': 'Punjabi',
'ro': 'Romanian',
'ru': 'Russian',
'sa': 'Sanskrit',
'sr': 'Serbian',
'sd': 'Sindhi',
'si': 'Sinhalese',
'sk': 'Slovak',
'sl': 'Slovenian',
'es': 'Spanish',
'sw': 'Swahili',
'sv': 'Swedish',
'tg': 'Tajik',
'ta': 'Tamil',
'tl': 'Tagalog',
'te': 'Telugu',
'th': 'Thai',
'bo': 'Tibetan',
'tr': 'Turkish',
'uk': 'Ukrainian',
'ur': 'Urdu',
'uz': 'Uzbek',
'ug': 'Uighur',
'vi': 'Vietnamese',
'cy': 'Welsh',
'yi': 'Yiddish'
};
| Python |
#!/usr/bin/python
#
# Peteris Krumins (peter@catonmat.net)
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sets/
#
# Code is licensed under MIT license.
#
import re
import urllib
import random
from htmlentitydefs import name2codepoint
from BeautifulSoup import BeautifulSoup
from browser import Browser, BrowserError
class GSError(Exception):
    """Google Sets error: raised when fetching the results page fails."""
    pass
class GSParseError(Exception):
    """Raised (in debug mode only) when Google Sets results fail to parse.

    Attributes:
        msg: human-readable explanation of why parsing failed.
        tag: BeautifulSoup object holding the most relevant tag that
            failed to parse.
    """

    def __init__(self, msg, tag):
        self.msg, self.tag = msg, tag

    def __str__(self):
        return self.msg

    def html(self):
        """Return a pretty-printed rendering of the offending tag."""
        return self.tag.prettify()
# Result-set size selectors accepted by GoogleSets.get_results().
LARGE_SET = 1
SMALL_SET = 2
class GoogleSets(object):
    """Scrapes Google Sets (labs.google.com/sets) for items related to a
    seed list of up to five terms."""
    URL_LARGE = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Large+Set"
    URL_SMALL = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Small+Set+(15+items+or+fewer)"
    def __init__(self, items, random_agent=False, debug=False):
        # items: seed strings; the query URLs use at most five of them.
        self.items = items
        self.debug = debug
        self.browser = Browser(debug=debug)
        if random_agent:
            self.browser.set_random_user_agent()
    def get_results(self, set_type=SMALL_SET):
        """Fetch and return the list of related items for self.items."""
        page = self._get_results_page(set_type)
        results = self._extract_results(page)
        return results
    def _maybe_raise(self, cls, *arg):
        # Raise only in debug mode; otherwise fail silently.
        if self.debug:
            raise cls(*arg)
    def _get_results_page(self, set_type):
        """Fetch the results page and return it as a BeautifulSoup tree."""
        if set_type == LARGE_SET:
            url = GoogleSets.URL_LARGE
        else:
            url = GoogleSets.URL_SMALL
        safe_items = [urllib.quote_plus(i) for i in self.items]
        # The URL templates always expect exactly five query slots; pad
        # with empty strings as needed.
        blank_items = 5 - len(safe_items)
        if blank_items > 0:
            safe_items += ['']*blank_items
        safe_url = url % tuple(safe_items)
        try:
            page = self.browser.get_page(safe_url)
        except BrowserError, e:
            raise GSError, "Failed getting %s: %s" % (e.url, e.error)
        return BeautifulSoup(page)
    def _extract_results(self, soup):
        # Result items are rendered as anchors pointing at /search URLs.
        a_links = soup.findAll('a', href=re.compile('/search'))
        ret_res = [a.string for a in a_links]
        return ret_res
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 17, 2010
@author: ivan
'''
# -*- coding: utf-8 -*-
import urllib2
import urllib
import re
import gst
import time
from string import replace
from base64 import encode
import sys
# -*- coding: utf-8 -*-
class Vkontakte:
    """Minimal Vkontakte (vk.com) client: logs in with email/password and
    scrapes the audio search pages for downloadable track URLs."""
    def __init__(self, email, password):
        self.email = email
        self.password = password
        # Session cookie, lazily obtained and cached by get_cookie().
        self.cookie = None
        # Timestamp of the last search request, used for throttling.
        self.execute_time = time.time()
    def isLive(self):
        # Credentials are considered valid iff login yields an 's' token.
        return self.get_s_value()
    def get_s_value(self):
        """Log in and return the 's' session token, or None on failure."""
        host = 'http://login.vk.com/?act=login'
        post = urllib.urlencode({'email' : self.email,
                                 'expire' : '',
                                 'pass' : self.password,
                                 'vk' : ''})
        headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13 (.NET CLR 3.5.30729)',
                   'Host' : 'login.vk.com',
                   'Referer' : 'http://vkontakte.ru/index.php',
                   'Connection' : 'close',
                   'Pragma' : 'no-cache',
                   'Cache-Control' : 'no-cache',
                   }
        conn = urllib2.Request(host, post, headers)
        data = urllib2.urlopen(conn)
        result = data.read()
        # The token is embedded in a hidden form field named 's'.
        value = re.findall(r"name='s' id='s' value='(.*?)'", result)
        if value:
            return value[0]
        return None
    def get_cookie(self):
        """Return (and cache) the session cookie string for this login."""
        if self.cookie: return self.cookie
        host = 'http://vkontakte.ru/login.php?op=slogin'
        post = urllib.urlencode({'s' : self.get_s_value()})
        headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13',
                   'Host' : 'vkontakte.ru',
                   'Referer' : 'http://login.vk.com/?act=login',
                   'Connection' : 'close',
                   'Cookie' : 'remixchk=5; remixsid=nonenone',
                   'Pragma' : 'no-cache',
                   'Cache-Control' : 'no-cache'
                   }
        conn = urllib2.Request(host, post, headers)
        data = urllib2.urlopen(conn)
        cookie_src = data.info().get('Set-Cookie')
        # Strip cookie attributes (expires/path/domain), keeping only
        # the name=value pairs.
        self.cookie = re.sub(r'(expires=.*?;\s|path=\/;\s|domain=\.vkontakte\.ru(?:,\s)?)', '', cookie_src)
        return self.cookie
    def get_page(self, query):
        """POST an audio search for 'query' and return the raw response
        body (bytes, cp1251-encoded), or None for an empty query."""
        if not query:
            return None
        host = 'http://vkontakte.ru/gsearch.php?section=audio&q=vasya#c[q]=some%20id&c[section]=audio'
        post = urllib.urlencode({
                                 "c[q]" : query,
                                 "c[section]":"audio"
                                 })
        headers = {'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13',
                   'Host' : 'vkontakte.ru',
                   'Referer' : 'http://vkontakte.ru/index.php',
                   'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
                   'X-Requested-With' : 'XMLHttpRequest',
                   'Connection' : 'close',
                   'Cookie' : 'remixlang=0; remixchk=5; audio_vol=100; %s' % self.get_cookie(),
                   'Pragma' : 'no-cache',
                   'Cache-Control' : ' no-cache'
                   }
        conn = urllib2.Request(host, post, headers)
        # Throttle: avoid hammering the server with rapid requests.
        cur_time = time.time()
        if cur_time - self.execute_time < 0.5:
            print "Sleep because to many requests..."
            time.sleep(0.8)
        self.execute_time = time.time()
        data = urllib2.urlopen(conn);
        result = data.read()
        return result
    def get_name_by(self, id, result_album):
        """Look up the name paired with 'id' in a list of (id, name)
        tuples; returns None when absent."""
        for album in result_album:
            id_album = album[0]
            name = album[1]
            if id_album == id:
                return name
        return None
    def find_most_relative_song(self, song_title):
        """Return the search hit judged most relevant for song_title.

        Heuristic: the duration shared by the largest number of hits is
        assumed to be the "real" track; the first hit with that duration
        wins, falling back to the first hit overall.
        """
        vkSongs = self.find_song_urls(song_title)
        if not vkSongs:
            return None
        # Histogram of durations across all hits.
        times_count = {}
        for song in vkSongs:
            # NOTE(review): 'time' here shadows the imported time module
            # within this loop.
            time = song.time
            if time in times_count:
                times_count[time] = times_count[time] + 1
            else:
                times_count[time] = 1
        # Pick the most frequent duration.
        r_count = max(times_count.values())
        r_time = self.find_time_value(times_count, r_count)
        print "Print Song time", r_time
        print "Print Count of congs", r_count
        for song in vkSongs:
            if song.time == r_time:
                return song
        return vkSongs[0]
    def find_time_value(self, times_count, r_count):
        """Return the first duration key whose count equals r_count."""
        for i in times_count:
            if times_count[i] == r_count:
                return i
        return None
    def find_song_urls(self, song_title):
        """Search for song_title and return a list of VKSong objects
        scraped from the result page's embedded JavaScript/HTML."""
        page = self.get_page(song_title)
        page = page.decode('cp1251')
        #page = page.decode("cp1251")
        #unicode(page, "cp1251")
        #print page
        reg_all = "([А-ЯA-Z0-9_ #!:;.?+=&%@!\-\/'()]*)"
        # operate(...) calls carry the numeric ids used to build the
        # direct mp3 URL; performer/title/duration come from markup.
        resultall = re.findall("return operate\(([\w() ,']*)\);", page, re.IGNORECASE)
        result_album = re.findall(u"<b id=\\\\\"performer([0-9]*)\\\\\">" + reg_all + "<", page, re.IGNORECASE | re.UNICODE)
        result_track = re.findall(u"<span id=\\\\\"title([0-9]*)\\\\\">" + reg_all + "<", page, re.IGNORECASE | re.UNICODE)
        result_time = re.findall("<div class=\\\\\"duration\\\\\">" + reg_all + "<", page, re.IGNORECASE)
        urls = []
        ids = []
        vkSongs = []
        for result in resultall:
            result = replace(result, "'", " ")
            result = replace(result, ",", " ")
            result = result.split()
            if len(result) > 4:
                id_id = result[0]
                id_server = result[1]
                id_folder = result[2]
                id_file = result[3]
                id_un2 = result[3]
                url = "http://cs" + id_server + ".vkontakte.ru/u" + id_folder + "/audio/" + id_file + ".mp3"
                urls.append(url)
                ids.append(id_id)
        #print len(resultall), resultall
        #print len(urls), urls
        #print len(result_album), result_album
        #print len(result_track), result_track
        #print len(result_time), result_time
        # Join the parallel result lists by position / id.
        for i in xrange(len(result_time)):
            id = ids[i]
            path = urls[i]
            album = self.get_name_by(id, result_album)
            track = self.get_name_by(id, result_track)
            time = result_time[i]
            vkSong = VKSong(path, album, track, time)
            vkSongs.append(vkSong)
        return vkSongs
class VKSong():
    """Value object describing a single audio search hit from Vkontakte."""
    def __init__(self, path, album, track, time):
        self.path = path
        self.album = album
        self.track = track
        self.time = time
    def getTime(self):
        """Return the track duration, or "no time" when unknown."""
        # BUG FIX: the original returned the bare name 'time' (which
        # resolved to the imported time module), not the attribute.
        if self.time:
            return self.time
        else:
            return "no time"
    def getFullDescription(self):
        """Return "[ album ] track duration" with Nones rendered empty."""
        return "[ " + self.s(self.album) + " ] " + self.s(self.track) + " " + self.s(self.time)
    def __str__(self):
        return "" + self.s(self.album) + " " + self.s(self.track) + " " + self.s(self.time) + " " + self.s(self.path)
    def s(self, value):
        """Null-safe string helper: falsy values become the empty string."""
        if value:
            return value
        else:
            return ""
#vk = Vkontakte("qax@bigmir.net", "foobnix")
#vkSongs = vk.find_song_urls("rammstein du hast")
#vkSongs = vk.find_song_urls("rammstein du hast1")
#vkSongs = vk.find_song_urls("rammstein du hast1")
#vkSongs = vk.find_song_urls("rammstein du hast3")
#print "RESULT ", vk.find_more_relative_song("rammstein du hast")
#for vkSong in vkSongs:
# print vkSong
| Python |
'''
Created on Mar 18, 2010
@author: ivan
'''
from foobnix.model.entity import CommonBean
from foobnix.online.pylast import WSError
def search_top_albums(network, query):
    """Build CommonBean entries for the top albums (and their tracks)
    of the artist named by 'query', using the Last.fm 'network' client.
    Returns None when the artist is unknown; at most 7 albums are used.
    """
    #unicode(query, "utf-8")
    artist = network.get_artist(query)
    if not artist:
        return None
    try:
        albums = artist.get_top_albums()
    except WSError:
        print "No artist with that name"
        return None
    beans = []
    print "Albums: ", albums
    for i, album in enumerate(albums):
        if i > 6:
            break;
        # Library versions differ: results are either objects with an
        # .item attribute or dicts keyed by 'item'.
        try:
            album_txt = album.item
        except AttributeError:
            album_txt = album['item']
        tracks = album_txt.get_tracks()
        # One folder bean per album ...
        bean = CommonBean(name=album_txt.get_title(), path="", color="GREEN", type=CommonBean.TYPE_FOLDER, parent=query);
        beans.append(bean)
        # ... followed by one bean per track, parented to the album.
        for track in tracks:
            bean = CommonBean(name=track, path="", type=CommonBean.TYPE_MUSIC_URL, parent=album_txt.get_title());
            beans.append(bean)
    return beans
def search_tags_genre(network, query):
beans = []
tag = network.get_tag(query)
bean = CommonBean(name=tag.get_name(), path="", color="GREEN", type=CommonBean.TYPE_GOOGLE_HELP, parent=None)
beans.append(bean)
try:
tracks = tag.get_top_tracks()
except:
return None
for j, track in enumerate(tracks):
if j > 20:
break
try:
track_item = track.item
except AttributeError:
track_item = track['item']
bean = CommonBean(name=track_item.get_artist().get_name() + " - " + track_item.get_title(), path="", type=CommonBean.TYPE_MUSIC_URL, parent=tag.get_name())
beans.append(bean)
tags = network.search_for_tag(query)
print "tags"
print tags
flag = True
for i, tag in enumerate(tags.get_next_page()):
if i == 0:
print "we find it top", tag, query
continue
if i < 4:
bean = CommonBean(name=tag.get_name(), path="", color="GREEN", type=CommonBean.TYPE_GOOGLE_HELP, parent=None)
beans.append(bean)
tracks = tag.get_top_tracks()
for j, track in enumerate(tracks):
if j > 10:
break
try:
track_item = track.item
except AttributeError:
track_item = track['item']
bean = CommonBean(name=track_item.get_artist().get_name() + " - " + track_item.get_title(), path="", type=CommonBean.TYPE_MUSIC_URL, parent=tag.get_name())
beans.append(bean)
else:
if flag:
bean = CommonBean(name="OTHER TAGS", path="", color="#FF99FF", type=CommonBean.TYPE_FOLDER, parent=None)
beans.append(bean)
flag = False
bean = CommonBean(name=tag.get_name(), path="", color="GREEN", type=CommonBean.TYPE_GOOGLE_HELP, parent=None)
beans.append(bean)
return beans
def search_top_tracks(network, query):
#unicode(query, "utf-8")
artist = network.get_artist(query)
if not artist:
return None
try:
tracks = artist.get_top_tracks()
except WSError:
print "No artist with that name"
return None
beans = []
print "Tracks: ", tracks
for track in tracks:
try:
track_item = track.item
except AttributeError:
track_item = track['item']
#print track.get_duration()
bean = CommonBean(name=str(track_item), path="", type=CommonBean.TYPE_MUSIC_URL, parent=query);
beans.append(bean)
return beans
def search_top_similar(network, query):
#unicode(query, "utf-8")
artist = network.get_artist(query)
if not artist:
return None
artists = artist.get_similar(10)
beans = []
for artist in artists:
try:
artist_txt = artist.item
except AttributeError:
artist_txt = artist['item']
print artist, artist_txt
title = str(artist_txt)
bean = CommonBean(name=title, path="", type=CommonBean.TYPE_FOLDER, color="GREEN", parent=query);
beans.append(bean)
tops = search_top_tracks(network, title)
for top in tops:
beans.append(top)
return beans
| Python |
'''
Created on Mar 16, 2010
@author: ivan
'''
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.model.entity import CommonBean
class OnlineListModel:
POS_ICON = 0
POS_TRACK_NUMBER = 1
POS_NAME = 2
POS_PATH = 3
POS_COLOR = 4
POS_INDEX = 5
POS_TYPE = 6
POS_PARENT = 7
def __init__(self, widget):
self.widget = widget
self.model = gtk.ListStore(str, str, str, str, str, int, str, str)
cellpb = gtk.CellRendererPixbuf()
cellpb.set_property('cell-background', 'yellow')
iconColumn = gtk.TreeViewColumn(_('Icon'), cellpb, stock_id=0, cell_background=4)
numbetColumn = gtk.TreeViewColumn(_('N'), gtk.CellRendererText(), text=1, background=4)
descriptionColumn = gtk.TreeViewColumn(_('Music List'), gtk.CellRendererText(), text=2, background=4)
widget.append_column(iconColumn)
widget.append_column(numbetColumn)
widget.append_column(descriptionColumn)
widget.set_model(self.model)
def getSize(self):
return len(self.model)
def getBeenByPosition(self, position):
bean = CommonBean()
bean.icon = self.model[position][ self.POS_ICON]
bean.tracknumber = self.model[position][ self.POS_TRACK_NUMBER]
bean.name = self.model[position][ self.POS_NAME]
bean.path = self.model[position][ self.POS_PATH]
bean.color = self.model[position][ self.POS_COLOR]
bean.index = self.model[position][ self.POS_INDEX]
bean.type = self.model[position][ self.POS_TYPE]
bean.parent = self.model[position][ self.POS_PARENT]
return bean
def getModel(self):
return self.model
def getSelectedBean(self):
print self.widget
selection = self.widget.get_selection()
print selection
model, selected = selection.get_selected()
print model, selected
if selected:
bean = CommonBean()
bean.icon = model.get_value(selected, self.POS_ICON)
bean.tracknumber = model.get_value(selected, self.POS_TRACK_NUMBER)
bean.name = model.get_value(selected, self.POS_NAME)
bean.path = model.get_value(selected, self.POS_PATH)
bean.color = model.get_value(selected, self.POS_COLOR)
bean.index = model.get_value(selected, self.POS_INDEX)
bean.type = model.get_value(selected, self.POS_TYPE)
bean.parent = model.get_value(selected, self.POS_PARENT)
return bean
def clear(self):
self.model.clear()
def append(self, bean):
print bean
self.model.append([bean.icon, bean.tracknumber, bean.name, bean.path, bean.color, bean.index, bean.type, bean.parent])
def __del__(self, *a):
print "del"
| Python |
# -*- coding: utf-8 -*-
#
# pylast - A Python interface to Last.fm (and other API compatible social networks)
# Copyright (C) 2008-2009 Amr Hassan
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# http://code.google.com/p/pylast/
__version__ = '0.4'
__author__ = 'Amr Hassan'
__copyright__ = "Copyright (C) 2008-2009 Amr Hassan"
__license__ = "gpl"
__email__ = 'amr.hassan@gmail.com'
import hashlib
import httplib
import urllib
import threading
from xml.dom import minidom
import xml.dom
import time
import shelve
import tempfile
import sys
import htmlentitydefs
try:
import collections
except ImportError:
pass
# Web service error codes returned by the API (surfaced via WSError.get_id()).
STATUS_INVALID_SERVICE = 2
STATUS_INVALID_METHOD = 3
STATUS_AUTH_FAILED = 4
STATUS_INVALID_FORMAT = 5
STATUS_INVALID_PARAMS = 6
STATUS_INVALID_RESOURCE = 7
STATUS_TOKEN_ERROR = 8
STATUS_INVALID_SK = 9
STATUS_INVALID_API_KEY = 10
STATUS_OFFLINE = 11
STATUS_SUBSCRIBERS_ONLY = 12
STATUS_INVALID_SIGNATURE = 13
STATUS_TOKEN_UNAUTHORIZED = 14
STATUS_TOKEN_EXPIRED = 15

# Event attendance statuses (string values as sent over the wire).
EVENT_ATTENDING = '0'
EVENT_MAYBE_ATTENDING = '1'
EVENT_NOT_ATTENDING = '2'

# Chart periods accepted by the getTop* webservices.
PERIOD_OVERALL = 'overall'
PERIOD_3MONTHS = '3month'
PERIOD_6MONTHS = '6month'
PERIOD_12MONTHS = '12month'

# Website language domains; each Network maps these to concrete host names
# via its domain_names dict (see Network._get_language_domain).
DOMAIN_ENGLISH = 0
DOMAIN_GERMAN = 1
DOMAIN_SPANISH = 2
DOMAIN_FRENCH = 3
DOMAIN_ITALIAN = 4
DOMAIN_POLISH = 5
DOMAIN_PORTUGUESE = 6
DOMAIN_SWEDISH = 7
DOMAIN_TURKISH = 8
DOMAIN_RUSSIAN = 9
DOMAIN_JAPANESE = 10
DOMAIN_CHINESE = 11

# Cover-art image sizes, in increasing resolution.
COVER_SMALL = 0
COVER_MEDIUM = 1
COVER_LARGE = 2
COVER_EXTRA_LARGE = 3
COVER_MEGA = 4

# Sort orders for artist image listings.
IMAGES_ORDER_POPULARITY = "popularity"
IMAGES_ORDER_DATE = "dateadded"

# User gender values as returned by user.getInfo.
USER_MALE = 'Male'
USER_FEMALE = 'Female'

# Scrobble submission source codes (see the get_scrobbler docstring for the
# submissions protocol reference).
SCROBBLE_SOURCE_USER = "P"
SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST = "R"
SCROBBLE_SOURCE_PERSONALIZED_BROADCAST = "E"
SCROBBLE_SOURCE_LASTFM = "L"
SCROBBLE_SOURCE_UNKNOWN = "U"

# Scrobble rating/mode flags.
SCROBBLE_MODE_PLAYED = ""
SCROBBLE_MODE_LOVED = "L"
SCROBBLE_MODE_BANNED = "B"
SCROBBLE_MODE_SKIPPED = "S"
"""
A list of the implemented webservices (from http://www.last.fm/api/intro)
=====================================
# Album
* album.addTags DONE
* album.getInfo DONE
* album.getTags DONE
* album.removeTag DONE
* album.search DONE
# Artist
* artist.addTags DONE
* artist.getEvents DONE
* artist.getImages DONE
* artist.getInfo DONE
* artist.getPodcast TODO
* artist.getShouts DONE
* artist.getSimilar DONE
* artist.getTags DONE
* artist.getTopAlbums DONE
* artist.getTopFans DONE
* artist.getTopTags DONE
* artist.getTopTracks DONE
* artist.removeTag DONE
* artist.search DONE
* artist.share DONE
* artist.shout DONE
# Auth
* auth.getMobileSession DONE
* auth.getSession DONE
* auth.getToken DONE
# Event
* event.attend DONE
* event.getAttendees DONE
* event.getInfo DONE
* event.getShouts DONE
* event.share DONE
* event.shout DONE
# Geo
* geo.getEvents
* geo.getTopArtists
* geo.getTopTracks
# Group
* group.getMembers DONE
* group.getWeeklyAlbumChart DONE
* group.getWeeklyArtistChart DONE
* group.getWeeklyChartList DONE
* group.getWeeklyTrackChart DONE
# Library
* library.addAlbum DONE
* library.addArtist DONE
* library.addTrack DONE
* library.getAlbums DONE
* library.getArtists DONE
* library.getTracks DONE
# Playlist
* playlist.addTrack DONE
* playlist.create DONE
* playlist.fetch DONE
# Radio
* radio.getPlaylist
* radio.tune
# Tag
* tag.getSimilar DONE
* tag.getTopAlbums DONE
* tag.getTopArtists DONE
* tag.getTopTags DONE
* tag.getTopTracks DONE
* tag.getWeeklyArtistChart DONE
* tag.getWeeklyChartList DONE
* tag.search DONE
# Tasteometer
* tasteometer.compare DONE
# Track
* track.addTags DONE
* track.ban DONE
* track.getInfo DONE
* track.getSimilar DONE
* track.getTags DONE
* track.getTopFans DONE
* track.getTopTags DONE
* track.love DONE
* track.removeTag DONE
* track.search DONE
* track.share DONE
# User
* user.getEvents DONE
* user.getFriends DONE
* user.getInfo DONE
* user.getLovedTracks DONE
* user.getNeighbours DONE
* user.getPastEvents DONE
* user.getPlaylists DONE
* user.getRecentStations TODO
* user.getRecentTracks DONE
* user.getRecommendedArtists DONE
* user.getRecommendedEvents DONE
* user.getShouts DONE
* user.getTopAlbums DONE
* user.getTopArtists DONE
* user.getTopTags DONE
* user.getTopTracks DONE
* user.getWeeklyAlbumChart DONE
* user.getWeeklyArtistChart DONE
* user.getWeeklyChartList DONE
* user.getWeeklyTrackChart DONE
* user.shout DONE
# Venue
* venue.getEvents DONE
* venue.getPastEvents DONE
* venue.search DONE
"""
class Network(object):
    """
    A music social network website that is Last.fm or one exposing a Last.fm compatible API
    """

    def __init__(self, name, homepage, ws_server, api_key, api_secret, session_key, submission_server, username, password_hash,
        domain_names, urls):
        """
        name: the name of the network
        homepage: the homepage url
        ws_server: a (host, path) tuple for the webservices server
        api_key: a provided API_KEY
        api_secret: a provided API_SECRET
        session_key: a generated session_key or None
        submission_server: the url of the server to which tracks are submitted (scrobbled)
        username: a username of a valid user
        password_hash: the output of pylast.md5(password) where password is the user's password
        domain_names: a dict mapping each DOMAIN_* value to a string domain name
        urls: a dict mapping types to urls

        if username and password_hash were provided and not session_key, session_key will be
        generated automatically when needed.

        Either a valid session_key or a combination of username and password_hash must be present for scrobbling.

        You should use a preconfigured network object through a get_*_network(...) method instead of creating an object
        of this class, unless you know what you're doing.
        """
        self.ws_server = ws_server
        self.submission_server = submission_server
        self.name = name
        self.homepage = homepage
        self.api_key = api_key
        self.api_secret = api_secret
        self.session_key = session_key
        self.username = username
        self.password_hash = password_hash
        self.domain_names = domain_names
        self.urls = urls
        self.cache_backend = None
        self.proxy_enabled = False
        self.proxy = None
        self.last_call_time = 0

        # Generate a session_key eagerly when full credentials were supplied
        # but no session key (needed for scrobbling / authenticated calls).
        if (self.api_key and self.api_secret) and not self.session_key and (self.username and self.password_hash):
            sk_gen = SessionKeyGenerator(self)
            self.session_key = sk_gen.get_session_key(self.username, self.password_hash)

    def get_artist(self, artist_name):
        """
        Return an Artist object
        """
        return Artist(artist_name, self)

    def get_track(self, artist, title):
        """
        Return a Track object
        """
        return Track(artist, title, self)

    def get_album(self, artist, title):
        """
        Return an Album object
        """
        return Album(artist, title, self)

    def get_authenticated_user(self):
        """
        Returns the authenticated user
        """
        return AuthenticatedUser(self)

    def get_country(self, country_name):
        """
        Returns a country object
        """
        return Country(country_name, self)

    def get_group(self, name):
        """
        Returns a Group object
        """
        return Group(name, self)

    def get_user(self, username):
        """
        Returns a user object
        """
        return User(username, self)

    def get_tag(self, name):
        """
        Returns a tag object
        """
        return Tag(name, self)

    def get_scrobbler(self, client_id, client_version):
        """
        Returns a Scrobbler object used for submitting tracks to the server

        Quote from http://www.last.fm/api/submissions:
        ========
        Client identifiers are used to provide a centrally managed database of
        the client versions, allowing clients to be banned if they are found to
        be behaving undesirably. The client ID is associated with a version
        number on the server, however these are only incremented if a client is
        banned and do not have to reflect the version of the actual client application.

        During development, clients which have not been allocated an identifier should
        use the identifier tst, with a version number of 1.0. Do not distribute code or
        client implementations which use this test identifier. Do not use the identifiers
        used by other clients.
        =========

        To obtain a new client identifier please contact:
            * Last.fm: submissions@last.fm
            * # TODO: list others

        ...and provide us with the name of your client and its homepage address.
        """
        return Scrobbler(self, client_id, client_version)

    def _get_language_domain(self, domain_language):
        """
        Returns the mapped domain name of the network to a DOMAIN_* value
        """
        if domain_language in self.domain_names:
            return self.domain_names[domain_language]

    def _get_url(self, domain, type):
        return "http://%s/%s" %(self._get_language_domain(domain), self.urls[type])

    def _get_ws_auth(self):
        """
        Returns a (API_KEY, API_SECRET, SESSION_KEY) tuple.
        """
        return (self.api_key, self.api_secret, self.session_key)

    def _delay_call(self):
        """
        Makes sure that web service calls are at least a second apart
        """
        # delay time in seconds
        DELAY_TIME = 1.0
        now = time.time()

        elapsed = now - self.last_call_time
        if elapsed < DELAY_TIME:
            # Bug fix: sleep only the remaining fraction of the window
            # instead of a full extra second.
            time.sleep(DELAY_TIME - elapsed)

        self.last_call_time = now

    def create_new_playlist(self, title, description):
        """
        Creates a playlist for the authenticated user and returns it
            title: The title of the new playlist.
            description: The description of the new playlist.
        """
        params = {}
        params['title'] = _unicode(title)
        params['description'] = _unicode(description)

        doc = _Request(self, 'playlist.create', params).execute(False)

        e_id = doc.getElementsByTagName("id")[0].firstChild.data
        user = doc.getElementsByTagName('playlists')[0].getAttribute('user')

        return Playlist(user, e_id, self)

    def get_top_tags(self, limit=None):
        """Returns a sequence of the most used tags as a sequence of TopItem objects.
        limit: maximum number of tags to return; None means no limit."""
        doc = _Request(self, "tag.getTopTags").execute(True)
        seq = []
        for node in doc.getElementsByTagName("tag"):
            tag = Tag(_extract(node, "name"), self)
            weight = _number(_extract(node, "count"))
            # Bug fix: "len(seq) < limit" was always False for the default
            # limit=None (int < None is False in Python 2), so the default
            # call returned an empty list. Treat None as "unlimited".
            if limit is None or len(seq) < limit:
                seq.append(TopItem(tag, weight))
        return seq

    def enable_proxy(self, host, port):
        """Enable a default web proxy"""
        self.proxy = [host, _number(port)]
        self.proxy_enabled = True

    def disable_proxy(self):
        """Disable using the web proxy"""
        self.proxy_enabled = False

    def is_proxy_enabled(self):
        """Returns True if a web proxy is enabled."""
        return self.proxy_enabled

    def _get_proxy(self):
        """Returns proxy details."""
        return self.proxy

    def enable_caching(self, file_path = None):
        """Enables caching request-wide for all cachable calls.
        In choosing the backend used for caching, it will try _SqliteCacheBackend first if
        the module sqlite3 is present. If not, it will fallback to _ShelfCacheBackend which uses shelve.Shelf objects.

        * file_path: A file path for the backend storage file. If
        None set, a temp file would probably be created, according the backend.
        """
        if not file_path:
            # NOTE(review): tempfile.mktemp is race-prone; kept for behavior
            # compatibility — consider tempfile.mkstemp.
            file_path = tempfile.mktemp(prefix="pylast_tmp_")

        self.cache_backend = _ShelfCacheBackend(file_path)

    def disable_caching(self):
        """Disables all caching features."""
        self.cache_backend = None

    def is_caching_enabled(self):
        """Returns True if caching is enabled."""
        return self.cache_backend is not None

    def _get_cache_backend(self):
        return self.cache_backend

    def search_for_album(self, album_name):
        """Searches for an album by its name. Returns a AlbumSearch object.
        Use get_next_page() to retrieve sequences of results."""
        return AlbumSearch(album_name, self)

    def search_for_artist(self, artist_name):
        """Searches of an artist by its name. Returns a ArtistSearch object.
        Use get_next_page() to retrieve sequences of results."""
        return ArtistSearch(artist_name, self)

    def search_for_tag(self, tag_name):
        """Searches of a tag by its name. Returns a TagSearch object.
        Use get_next_page() to retrieve sequences of results."""
        return TagSearch(tag_name, self)

    def search_for_track(self, artist_name, track_name):
        """Searches of a track by its name and its artist. Set artist to an empty string if not available.
        Returns a TrackSearch object.
        Use get_next_page() to retrieve sequences of results."""
        return TrackSearch(artist_name, track_name, self)

    def search_for_venue(self, venue_name, country_name):
        """Searches of a venue by its name and its country. Set country_name to an empty string if not available.
        Returns a VenueSearch object.
        Use get_next_page() to retrieve sequences of results."""
        return VenueSearch(venue_name, country_name, self)

    def get_track_by_mbid(self, mbid):
        """Looks up a track by its MusicBrainz ID"""
        params = {"mbid": _unicode(mbid)}
        doc = _Request(self, "track.getInfo", params).execute(True)
        return Track(_extract(doc, "name", 1), _extract(doc, "name"), self)

    def get_artist_by_mbid(self, mbid):
        """Looks up an artist by its MusicBrainz ID"""
        params = {"mbid": _unicode(mbid)}
        doc = _Request(self, "artist.getInfo", params).execute(True)
        return Artist(_extract(doc, "name"), self)

    def get_album_by_mbid(self, mbid):
        """Looks up an album by its MusicBrainz ID"""
        params = {"mbid": _unicode(mbid)}
        doc = _Request(self, "album.getInfo", params).execute(True)
        return Album(_extract(doc, "artist"), _extract(doc, "name"), self)
def get_lastfm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
    """
    Returns a preconfigured Network object for Last.fm

    api_key: a provided API_KEY
    api_secret: a provided API_SECRET
    session_key: a generated session_key or None
    username: a username of a valid user
    password_hash: the output of pylast.md5(password) where password is the user's password

    if username and password_hash were provided and not session_key, session_key will be
    generated automatically when needed.

    Either a valid session_key or a combination of username and password_hash must be present for scrobbling.

    Most read-only webservices only require an api_key and an api_secret, see about obtaining them from:
    http://www.last.fm/api/account
    """
    domain_names = {
        DOMAIN_ENGLISH: 'www.last.fm',
        DOMAIN_GERMAN: 'www.lastfm.de',
        DOMAIN_SPANISH: 'www.lastfm.es',
        DOMAIN_FRENCH: 'www.lastfm.fr',
        DOMAIN_ITALIAN: 'www.lastfm.it',
        DOMAIN_POLISH: 'www.lastfm.pl',
        DOMAIN_PORTUGUESE: 'www.lastfm.com.br',
        DOMAIN_SWEDISH: 'www.lastfm.se',
        DOMAIN_TURKISH: 'www.lastfm.com.tr',
        DOMAIN_RUSSIAN: 'www.lastfm.ru',
        DOMAIN_JAPANESE: 'www.lastfm.jp',
        DOMAIN_CHINESE: 'cn.last.fm',
    }
    urls = {
        "album": "music/%(artist)s/%(album)s",
        "artist": "music/%(artist)s",
        "event": "event/%(id)s",
        "country": "place/%(country_name)s",
        "playlist": "user/%(user)s/library/playlists/%(appendix)s",
        "tag": "tag/%(name)s",
        "track": "music/%(artist)s/_/%(title)s",
        "group": "group/%(name)s",
        "user": "user/%(name)s",
    }
    return Network(
        name="Last.fm",
        homepage="http://last.fm",
        ws_server=("ws.audioscrobbler.com", "/2.0/"),
        api_key=api_key,
        api_secret=api_secret,
        session_key=session_key,
        submission_server="http://post.audioscrobbler.com:80/",
        username=username,
        password_hash=password_hash,
        domain_names=domain_names,
        urls=urls,
    )
def get_librefm_network(api_key="", api_secret="", session_key = "", username = "", password_hash = ""):
    """
    Returns a preconfigured Network object for Libre.fm

    api_key: a provided API_KEY
    api_secret: a provided API_SECRET
    session_key: a generated session_key or None
    username: a username of a valid user
    password_hash: the output of pylast.md5(password) where password is the user's password

    if username and password_hash were provided and not session_key, session_key will be
    generated automatically when needed.
    """
    # Libre.fm serves every language from the same host.
    libre_host = "alpha.dev.libre.fm"
    domain_names = dict((domain, libre_host) for domain in (
        DOMAIN_ENGLISH, DOMAIN_GERMAN, DOMAIN_SPANISH, DOMAIN_FRENCH,
        DOMAIN_ITALIAN, DOMAIN_POLISH, DOMAIN_PORTUGUESE, DOMAIN_SWEDISH,
        DOMAIN_TURKISH, DOMAIN_RUSSIAN, DOMAIN_JAPANESE, DOMAIN_CHINESE,
    ))
    urls = {
        "album": "artist/%(artist)s/album/%(album)s",
        "artist": "artist/%(artist)s",
        "event": "event/%(id)s",
        "country": "place/%(country_name)s",
        "playlist": "user/%(user)s/library/playlists/%(appendix)s",
        "tag": "tag/%(name)s",
        "track": "music/%(artist)s/_/%(title)s",
        "group": "group/%(name)s",
        "user": "user/%(name)s",
    }
    return Network(
        name="Libre.fm",
        homepage="http://alpha.dev.libre.fm",
        ws_server=(libre_host, "/2.0/"),
        api_key=api_key,
        api_secret=api_secret,
        session_key=session_key,
        submission_server="http://turtle.libre.fm:80/",
        username=username,
        password_hash=password_hash,
        domain_names=domain_names,
        urls=urls,
    )
class _ShelfCacheBackend(object):
"""Used as a backend for caching cacheable requests."""
def __init__(self, file_path = None):
self.shelf = shelve.open(file_path)
def get_xml(self, key):
return self.shelf[key]
def set_xml(self, key, xml_string):
self.shelf[key] = xml_string
def has_key(self, key):
return key in self.shelf.keys()
class _ThreadedCall(threading.Thread):
"""Facilitates calling a function on another thread."""
def __init__(self, sender, funct, funct_args, callback, callback_args):
threading.Thread.__init__(self)
self.funct = funct
self.funct_args = funct_args
self.callback = callback
self.callback_args = callback_args
self.sender = sender
def run(self):
output = []
if self.funct:
if self.funct_args:
output = self.funct(*self.funct_args)
else:
output = self.funct()
if self.callback:
if self.callback_args:
self.callback(self.sender, output, *self.callback_args)
else:
self.callback(self.sender, output)
class _Request(object):
"""Representing an abstract web service operation."""
def __init__(self, network, method_name, params = {}):
self.params = params
self.network = network
(self.api_key, self.api_secret, self.session_key) = network._get_ws_auth()
self.params["api_key"] = self.api_key
self.params["method"] = method_name
if network.is_caching_enabled():
self.cache = network._get_cache_backend()
if self.session_key:
self.params["sk"] = self.session_key
self.sign_it()
def sign_it(self):
"""Sign this request."""
if not "api_sig" in self.params.keys():
self.params['api_sig'] = self._get_signature()
def _get_signature(self):
"""Returns a 32-character hexadecimal md5 hash of the signature string."""
keys = self.params.keys()[:]
keys.sort()
string = ""
for name in keys:
string += name
string += self.params[name]
string += self.api_secret
return md5(string)
def _get_cache_key(self):
"""The cache key is a string of concatenated sorted names and values."""
keys = self.params.keys()
keys.sort()
cache_key = str()
for key in keys:
if key != "api_sig" and key != "api_key" and key != "sk":
cache_key += key + _string(self.params[key])
return hashlib.sha1(cache_key).hexdigest()
def _get_cached_response(self):
"""Returns a file object of the cached response."""
if not self._is_cached():
response = self._download_response()
self.cache.set_xml(self._get_cache_key(), response)
return self.cache.get_xml(self._get_cache_key())
def _is_cached(self):
"""Returns True if the request is already in cache."""
return self.cache.has_key(self._get_cache_key())
def _download_response(self):
"""Returns a response body string from the server."""
# Delay the call if necessary
#self.network._delay_call() # enable it if you want.
data = []
for name in self.params.keys():
data.append('='.join((name, urllib.quote_plus(_string(self.params[name])))))
data = '&'.join(data)
headers = {
"Content-type": "application/x-www-form-urlencoded",
'Accept-Charset': 'utf-8',
'User-Agent': "pylast" + '/' + __version__
}
(HOST_NAME, HOST_SUBDIR) = self.network.ws_server
if self.network.is_proxy_enabled():
conn = httplib.HTTPConnection(host = self._get_proxy()[0], port = self._get_proxy()[1])
conn.request(method='POST', url="http://" + HOST_NAME + HOST_SUBDIR,
body=data, headers=headers)
else:
conn = httplib.HTTPConnection(host=HOST_NAME)
conn.request(method='POST', url=HOST_SUBDIR, body=data, headers=headers)
response = conn.getresponse()
response_text = _unicode(response.read())
self._check_response_for_errors(response_text)
return response_text
def execute(self, cacheable = False):
"""Returns the XML DOM response of the POST Request from the server"""
if self.network.is_caching_enabled() and cacheable:
response = self._get_cached_response()
else:
response = self._download_response()
return minidom.parseString(_string(response))
def _check_response_for_errors(self, response):
"""Checks the response for errors and raises one if any exists."""
doc = minidom.parseString(_string(response))
e = doc.getElementsByTagName('lfm')[0]
if e.getAttribute('status') != "ok":
e = doc.getElementsByTagName('error')[0]
status = e.getAttribute('code')
details = e.firstChild.data.strip()
raise WSError(self.network, status, details)
class SessionKeyGenerator(object):
    """Methods of generating a session key:
    1) Web Authentication:
        a. network = get_*_network(API_KEY, API_SECRET)
        b. sg = SessionKeyGenerator(network)
        c. url = sg.get_web_auth_url()
        d. Ask the user to open the url and authorize you, and wait for it.
        e. session_key = sg.get_web_auth_session_key(url)
    2) Username and Password Authentication:
        a. network = get_*_network(API_KEY, API_SECRET)
        b. username = raw_input("Please enter your username: ")
        c. password_hash = pylast.md5(raw_input("Please enter your password: "))
        d. session_key = SessionKeyGenerator(network).get_session_key(username, password_hash)

    A session key's lifetime is infinite, unless the user revokes the rights of the given API Key.

    If you create a Network object with just a API_KEY and API_SECRET and a username and a password_hash, a
    SESSION_KEY will be automatically generated for that network and stored in it so you don't have to do this
    manually, unless you want to.
    """

    def __init__(self, network):
        self.network = network
        # Maps each auth url handed to the user -> the token embedded in it,
        # so get_web_auth_session_key(url) can find the matching token.
        self.web_auth_tokens = {}

    def _get_web_auth_token(self):
        """Retrieves a token from the network for web authentication.
        The token then has to be authorized from getAuthURL before creating session.
        """
        request = _Request(self.network, 'auth.getToken')

        # default action is that a request is signed only when
        # a session key is provided.
        request.sign_it()

        doc = request.execute()

        e = doc.getElementsByTagName('token')[0]
        return e.firstChild.data

    def get_web_auth_url(self):
        """Return the URL the user must open to authorize this token;
        afterwards call get_web_auth_session_key(url)."""
        token = self._get_web_auth_token()

        url = '%(homepage)s/api/auth/?api_key=%(api)s&token=%(token)s' % \
            {"homepage": self.network.homepage, "api": self.network.api_key, "token": token}

        self.web_auth_tokens[url] = token

        return url

    def get_web_auth_session_key(self, url):
        """Retrieves the session key of a web authorization process by its url."""
        if url in self.web_auth_tokens.keys():
            token = self.web_auth_tokens[url]
        else:
            # An unknown url yields an empty token, which deliberately makes
            # the request raise a WSError for an unauthorized token.
            token = "" #that's gonna raise a WSError of an unauthorized token when the request is executed.

        request = _Request(self.network, 'auth.getSession', {'token': token})

        # default action is that a request is signed only when
        # a session key is provided.
        request.sign_it()

        doc = request.execute()

        return doc.getElementsByTagName('key')[0].firstChild.data

    def get_session_key(self, username, password_hash):
        """Retrieve a session key with a username and a md5 hash of the user's password."""
        params = {"username": username, "authToken": md5(username + password_hash)}
        request = _Request(self.network, "auth.getMobileSession", params)

        # default action is that a request is signed only when
        # a session key is provided.
        request.sign_it()

        doc = request.execute()

        return _extract(doc, "key")
def _namedtuple(name, children):
"""
collections.namedtuple is available in (python >= 2.6)
"""
v = sys.version_info
if v[1] >= 6 and v[0] < 3:
return collections.namedtuple(name, children)
else:
def fancydict(*args):
d = {}
i = 0
for child in children:
d[child.strip()] = args[i]
i += 1
return d
return fancydict
# Lightweight record types returned by the API wrappers. On python >= 2.6
# these are real namedtuples (attribute access); otherwise _namedtuple falls
# back to a dict factory, so callers may also subscript by field name.
TopItem = _namedtuple("TopItem", ["item", "weight"])
SimilarItem = _namedtuple("SimilarItem", ["item", "match"])
LibraryItem = _namedtuple("LibraryItem", ["item", "playcount", "tagcount"])
PlayedTrack = _namedtuple("PlayedTrack", ["track", "playback_date", "timestamp"])
LovedTrack = _namedtuple("LovedTrack", ["track", "date", "timestamp"])
ImageSizes = _namedtuple("ImageSizes", ["original", "large", "largesquare", "medium", "small", "extralarge"])
Image = _namedtuple("Image", ["title", "url", "dateadded", "format", "owner", "sizes", "votes"])
Shout = _namedtuple("Shout", ["body", "author", "date"])
def _string_output(funct):
    """Decorator: coerce the wrapped function's return value through _string()."""
    def wrapper(*args):
        result = funct(*args)
        return _string(result)
    return wrapper
class _BaseObject(object):
    """An abstract webservices object."""

    # The Network this object belongs to; overwritten per-instance in __init__.
    network = None

    def __init__(self, network):
        self.network = network

    def _request(self, method_name, cacheable = False, params = None):
        # Fall back to the object's own identifying parameters when the
        # caller supplies none (or an empty dict).
        if not params:
            params = self._get_params()

        return _Request(self.network, method_name, params).execute(cacheable)

    def _get_params(self):
        """Returns the most common set of parameters between all objects."""
        return {}

    def __hash__(self):
        # NOTE(review): relies on Python 2 dict.keys()/values() returning
        # concatenable lists, and assumes every param value is a string —
        # would need rework for Python 3.
        return hash(self.network) + \
            hash(str(type(self)) + "".join(self._get_params().keys() + self._get_params().values()).lower())
class _Taggable(object):
    """Common functions for classes with tags."""
    # NOTE(review): expects to be mixed into a _BaseObject subclass that
    # provides _request(), _get_params() and .network (e.g. Album below).

    def __init__(self, ws_prefix):
        # Remote method-name prefix, e.g. "artist", "album" or "track".
        self.ws_prefix = ws_prefix

    def add_tags(self, *tags):
        """Adds one or several tags.
        * *tags: Any number of tag names or Tag objects.
        """
        for tag in tags:
            self._add_tag(tag)

    def _add_tag(self, tag):
        """Adds one or several tags.
        * tag: one tag name or a Tag object.
        """
        if isinstance(tag, Tag):
            tag = tag.get_name()

        params = self._get_params()
        params['tags'] = _unicode(tag)

        self._request(self.ws_prefix + '.addTags', False, params)

    def _remove_tag(self, single_tag):
        """Remove a user's tag from this object."""
        if isinstance(single_tag, Tag):
            single_tag = single_tag.get_name()

        params = self._get_params()
        params['tag'] = _unicode(single_tag)

        self._request(self.ws_prefix + '.removeTag', False, params)

    def get_tags(self):
        """Returns a list of the tags set by the user to this object."""
        # Uncacheable because it can be dynamically changed by the user.
        params = self._get_params()
        doc = self._request(self.ws_prefix + '.getTags', False, params)
        tag_names = _extract_all(doc, 'name')
        tags = []
        for tag in tag_names:
            tags.append(Tag(tag, self.network))
        return tags

    def remove_tags(self, *tags):
        """Removes one or several tags from this object.
        * *tags: Any number of tag names or Tag objects.
        """
        for tag in tags:
            self._remove_tag(tag)

    def clear_tags(self):
        """Clears all the user-set tags. """
        self.remove_tags(*(self.get_tags()))

    def set_tags(self, *tags):
        """Sets this object's tags to only those tags.
        * *tags: any number of tag names.
        """
        # Diff the desired tags against the server's, case-insensitively,
        # then issue only the removals and additions actually needed.
        c_old_tags = []   # lowercased server tags (comparison keys)
        old_tags = []     # server tags in their original case
        c_new_tags = []   # lowercased requested tags
        new_tags = []     # requested tags in their original case
        to_remove = []
        to_add = []
        tags_on_server = self.get_tags()
        for tag in tags_on_server:
            c_old_tags.append(tag.get_name().lower())
            old_tags.append(tag.get_name())
        for tag in tags:
            c_new_tags.append(tag.lower())
            new_tags.append(tag)
        for i in range(0, len(old_tags)):
            if not c_old_tags[i] in c_new_tags:
                to_remove.append(old_tags[i])
        for i in range(0, len(new_tags)):
            if not c_new_tags[i] in c_old_tags:
                to_add.append(new_tags[i])
        # Removals first, then additions, each as one batched call sequence.
        self.remove_tags(*to_remove)
        self.add_tags(*to_add)

    def get_top_tags(self, limit = None):
        """Returns a list of the most frequently used Tags on this object.
        limit: maximum number of TopItems to return; None means no limit."""
        doc = self._request(self.ws_prefix + '.getTopTags', True)
        elements = doc.getElementsByTagName('tag')
        seq = []
        for element in elements:
            if limit and len(seq) >= limit:
                break
            tag_name = _extract(element, 'name')
            # NOTE(review): tagcount is kept as the raw extracted string, not
            # converted to a number — confirm callers expect that.
            tagcount = _extract(element, 'count')
            seq.append(TopItem(Tag(tag_name, self.network), tagcount))
        return seq
class WSError(Exception):
    """Exception related to the Network web service"""
    def __init__(self, network, status, details):
        # status: numeric error code returned by the service (see get_id).
        # details: human-readable error message from the service.
        self.status = status
        self.details = details
        self.network = network
    @_string_output
    def __str__(self):
        return self.details
    def get_id(self):
        """Returns the exception ID, from one of the following:
            STATUS_INVALID_SERVICE = 2
            STATUS_INVALID_METHOD = 3
            STATUS_AUTH_FAILED = 4
            STATUS_INVALID_FORMAT = 5
            STATUS_INVALID_PARAMS = 6
            STATUS_INVALID_RESOURCE = 7
            STATUS_TOKEN_ERROR = 8
            STATUS_INVALID_SK = 9
            STATUS_INVALID_API_KEY = 10
            STATUS_OFFLINE = 11
            STATUS_SUBSCRIBERS_ONLY = 12
            STATUS_TOKEN_UNAUTHORIZED = 14
            STATUS_TOKEN_EXPIRED = 15
        """
        return self.status
class Album(_BaseObject, _Taggable):
    """An album."""
    title = None
    artist = None
    def __init__(self, artist, title, network):
        """
        Create an album instance.
        # Parameters:
            * artist: An artist name or an Artist object.
            * title: The album title.
        """
        _BaseObject.__init__(self, network)
        _Taggable.__init__(self, 'album')
        if isinstance(artist, Artist):
            self.artist = artist
        else:
            self.artist = Artist(artist, self.network)
        self.title = title
    @_string_output
    def __repr__(self):
        return u"%s - %s" %(self.get_artist().get_name(), self.get_title())
    def __eq__(self, other):
        # Case-insensitive on both the title and the artist name.
        return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())
    def __ne__(self, other):
        return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())
    def _get_params(self):
        return {'artist': self.get_artist().get_name(), 'album': self.get_title(), }
    def get_artist(self):
        """Returns the associated Artist object."""
        return self.artist
    def get_title(self):
        """Returns the album title."""
        return self.title
    def get_name(self):
        """Returns the album title (alias to Album.get_title)."""
        return self.get_title()
    def get_release_date(self):
        """Returns the release date of the album."""
        return _extract(self._request("album.getInfo", cacheable = True), "releasedate")
    def get_cover_image(self, size = COVER_EXTRA_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        return _extract_all(self._request("album.getInfo", cacheable = True), 'image')[size]
    def get_id(self):
        """Returns the ID"""
        return _extract(self._request("album.getInfo", cacheable = True), "id")
    def get_playcount(self):
        """Returns the number of plays on the network"""
        return _number(_extract(self._request("album.getInfo", cacheable = True), "playcount"))
    def get_listener_count(self):
        """Returns the number of listeners on the network"""
        return _number(_extract(self._request("album.getInfo", cacheable = True), "listeners"))
    def get_top_tags(self, limit=None):
        """Returns a list of the most-applied tags to this album.
        * limit: maximum number of tags to return; None means no limit.
        """
        doc = self._request("album.getInfo", True)
        e = doc.getElementsByTagName("toptags")[0]
        seq = []
        for name in _extract_all(e, "name"):
            # Fixed: the old test "len(seq) < limit" is always False when
            # limit is None (Python 2 int/None comparison), so the default
            # call returned an empty list. None now means "no limit".
            if limit is None or len(seq) < limit:
                seq.append(Tag(name, self.network))
        return seq
    def get_tracks(self):
        """Returns the list of Tracks on this album."""
        uri = 'lastfm://playlist/album/%s' %self.get_id()
        return XSPF(uri, self.network).get_tracks()
    def get_mbid(self):
        """Returns the MusicBrainz id of the album."""
        return _extract(self._request("album.getInfo", cacheable = True), "mbid")
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the album page on the network.
        # Parameters:
            * domain_name str: The network's language domain. Possible values:
                o DOMAIN_ENGLISH
                o DOMAIN_GERMAN
                o DOMAIN_SPANISH
                o DOMAIN_FRENCH
                o DOMAIN_ITALIAN
                o DOMAIN_POLISH
                o DOMAIN_PORTUGUESE
                o DOMAIN_SWEDISH
                o DOMAIN_TURKISH
                o DOMAIN_RUSSIAN
                o DOMAIN_JAPANESE
                o DOMAIN_CHINESE
        """
        artist = _url_safe(self.get_artist().get_name())
        album = _url_safe(self.get_title())
        return self.network._get_url(domain_name, "album") %{'artist': artist, 'album': album}
    def get_wiki_published_date(self):
        """Returns the date of publishing this version of the wiki."""
        doc = self._request("album.getInfo", True)
        if len(doc.getElementsByTagName("wiki")) == 0:
            return
        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "published")
    def get_wiki_summary(self):
        """Returns the summary of the wiki."""
        doc = self._request("album.getInfo", True)
        if len(doc.getElementsByTagName("wiki")) == 0:
            return
        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "summary")
    def get_wiki_content(self):
        """Returns the content of the wiki."""
        doc = self._request("album.getInfo", True)
        if len(doc.getElementsByTagName("wiki")) == 0:
            return
        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "content")
class Artist(_BaseObject, _Taggable):
    """An artist."""
    name = None
    def __init__(self, name, network):
        """Create an artist object.
        # Parameters:
            * name str: The artist's name.
        """
        _BaseObject.__init__(self, network)
        _Taggable.__init__(self, 'artist')
        self.name = name
    @_string_output
    def __repr__(self):
        return self.get_name()
    def __eq__(self, other):
        # Artist names compare case-insensitively.
        return self.get_name().lower() == other.get_name().lower()
    def __ne__(self, other):
        return self.get_name().lower() != other.get_name().lower()
    def _get_params(self):
        return {'artist': self.get_name()}
    def get_name(self):
        """Returns the name of the artist."""
        return self.name
    def get_cover_image(self, size = COVER_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        return _extract_all(self._request("artist.getInfo", True), "image")[size]
    def get_playcount(self):
        """Returns the number of plays on the network."""
        return _number(_extract(self._request("artist.getInfo", True), "playcount"))
    def get_mbid(self):
        """Returns the MusicBrainz ID of this artist."""
        doc = self._request("artist.getInfo", True)
        return _extract(doc, "mbid")
    def get_listener_count(self):
        """Returns the number of listeners on the network."""
        return _number(_extract(self._request("artist.getInfo", True), "listeners"))
    def is_streamable(self):
        """Returns True if the artist is streamable."""
        return bool(_number(_extract(self._request("artist.getInfo", True), "streamable")))
    def get_bio_published_date(self):
        """Returns the date on which the artist's biography was published."""
        return _extract(self._request("artist.getInfo", True), "published")
    def get_bio_summary(self):
        """Returns the summary of the artist's biography."""
        return _extract(self._request("artist.getInfo", True), "summary")
    def get_bio_content(self):
        """Returns the content of the artist's biography."""
        return _extract(self._request("artist.getInfo", True), "content")
    def get_upcoming_events(self):
        """Returns a list of the upcoming Events for this artist."""
        doc = self._request('artist.getEvents', True)
        ids = _extract_all(doc, 'id')
        events = []
        for e_id in ids:
            events.append(Event(e_id, self.network))
        return events
    def get_similar(self, limit = None):
        """Returns the similar artists on the network.
        * limit: maximum number of artists to request from the service.
        """
        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)
        doc = self._request('artist.getSimilar', True, params)
        names = _extract_all(doc, "name")
        matches = _extract_all(doc, "match")
        artists = []
        for i in range(0, len(names)):
            artists.append(SimilarItem(Artist(names[i], self.network), _number(matches[i])))
        return artists
    def get_top_albums(self):
        """Returns a list of the top albums."""
        doc = self._request('artist.getTopAlbums', True)
        seq = []
        for node in doc.getElementsByTagName("album"):
            name = _extract(node, "name")
            artist = _extract(node, "name", 1)
            playcount = _extract(node, "playcount")
            seq.append(TopItem(Album(artist, name, self.network), playcount))
        return seq
    def get_top_tracks(self):
        """Returns a list of the most played Tracks by this artist."""
        doc = self._request("artist.getTopTracks", True)
        seq = []
        for track in doc.getElementsByTagName('track'):
            title = _extract(track, "name")
            artist = _extract(track, "name", 1)
            playcount = _number(_extract(track, "playcount"))
            seq.append( TopItem(Track(artist, title, self.network), playcount) )
        return seq
    def get_top_fans(self, limit = None):
        """Returns a list of the Users who played this artist the most.
        # Parameters:
            * limit int: Max elements.
        """
        doc = self._request('artist.getTopFans', True)
        seq = []
        elements = doc.getElementsByTagName('user')
        for element in elements:
            if limit and len(seq) >= limit:
                break
            name = _extract(element, 'name')
            weight = _number(_extract(element, 'weight'))
            seq.append(TopItem(User(name, self.network), weight))
        return seq
    def share(self, users, message = None):
        """Shares this artist (sends out recommendations).
        # Parameters:
            * users [User|str,]: A list that can contain usernames, emails, User objects, or all of them.
            * message str: A message to include in the recommendation message.
        """
        # last.fm currently accepts a max of 10 recipients at a time
        while(len(users) > 10):
            # Fixed off-by-one: send full batches of 10 recipients (was 9).
            section = users[0:10]
            users = users[10:]
            self.share(section, message)
        nusers = []
        for user in users:
            if isinstance(user, User):
                nusers.append(user.get_name())
            else:
                nusers.append(user)
        params = self._get_params()
        recipients = ','.join(nusers)
        params['recipient'] = recipients
        if message: params['message'] = _unicode(message)
        self._request('artist.share', False, params)
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the artist page on the network.
        # Parameters:
            * domain_name: The network's language domain. Possible values:
                o DOMAIN_ENGLISH
                o DOMAIN_GERMAN
                o DOMAIN_SPANISH
                o DOMAIN_FRENCH
                o DOMAIN_ITALIAN
                o DOMAIN_POLISH
                o DOMAIN_PORTUGUESE
                o DOMAIN_SWEDISH
                o DOMAIN_TURKISH
                o DOMAIN_RUSSIAN
                o DOMAIN_JAPANESE
                o DOMAIN_CHINESE
        """
        artist = _url_safe(self.get_name())
        return self.network._get_url(domain_name, "artist") %{'artist': artist}
    def get_images(self, order=IMAGES_ORDER_POPULARITY, limit=None):
        """
        Returns a sequence of Image objects
        if limit is None it will return all
        order can be IMAGES_ORDER_POPULARITY or IMAGES_ORDER_DATE
        """
        images = []
        params = self._get_params()
        params["order"] = order
        nodes = _collect_nodes(limit, self, "artist.getImages", True, params)
        for e in nodes:
            # The uploader's name may be absent in the response.
            if _extract(e, "name"):
                user = User(_extract(e, "name"), self.network)
            else:
                user = None
            images.append(Image(
                            _extract(e, "title"),
                            _extract(e, "url"),
                            _extract(e, "dateadded"),
                            _extract(e, "format"),
                            user,
                            ImageSizes(*_extract_all(e, "size")),
                            (_extract(e, "thumbsup"), _extract(e, "thumbsdown"))
                            )
                        )
        return images
    def get_shouts(self, limit=50):
        """
        Returns a sequence of Shout objects
        """
        shouts = []
        for node in _collect_nodes(limit, self, "artist.getShouts", False):
            shouts.append(Shout(
                            _extract(node, "body"),
                            User(_extract(node, "author"), self.network),
                            _extract(node, "date")
                            )
                        )
        return shouts
    def shout(self, message):
        """
        Post a shout
        """
        params = self._get_params()
        params["message"] = message
        # NOTE(review): "artist.Shout" breaks the lower-case method-name
        # convention used elsewhere in this file — confirm against the API.
        self._request("artist.Shout", False, params)
class Event(_BaseObject):
    """An event."""
    id = None
    def __init__(self, event_id, network):
        _BaseObject.__init__(self, network)
        self.id = _unicode(event_id)
    @_string_output
    def __repr__(self):
        return "Event #" + self.get_id()
    def __eq__(self, other):
        return self.get_id() == other.get_id()
    def __ne__(self, other):
        return self.get_id() != other.get_id()
    def _get_params(self):
        return {'event': self.get_id()}
    def attend(self, attending_status):
        """Sets the attending status.
        * attending_status: The attending status. Possible values:
            o EVENT_ATTENDING
            o EVENT_MAYBE_ATTENDING
            o EVENT_NOT_ATTENDING
        """
        params = self._get_params()
        params['status'] = _unicode(attending_status)
        self._request('event.attend', False, params)
    def get_attendees(self):
        """
        Get a list of attendees for an event
        """
        doc = self._request("event.getAttendees", False)
        users = []
        for name in _extract_all(doc, "name"):
            users.append(User(name, self.network))
        return users
    def get_id(self):
        """Returns the id of the event on the network. """
        return self.id
    def get_title(self):
        """Returns the title of the event. """
        doc = self._request("event.getInfo", True)
        return _extract(doc, "title")
    def get_headliner(self):
        """Returns the headliner of the event. """
        doc = self._request("event.getInfo", True)
        return Artist(_extract(doc, "headliner"), self.network)
    def get_artists(self):
        """Returns a list of the participating Artists. """
        doc = self._request("event.getInfo", True)
        names = _extract_all(doc, "artist")
        artists = []
        for name in names:
            artists.append(Artist(name, self.network))
        return artists
    def get_venue(self):
        """Returns the venue where the event is held."""
        doc = self._request("event.getInfo", True)
        v = doc.getElementsByTagName("venue")[0]
        venue_id = _number(_extract(v, "id"))
        return Venue(venue_id, self.network)
    def get_start_date(self):
        """Returns the date when the event starts."""
        doc = self._request("event.getInfo", True)
        return _extract(doc, "startDate")
    def get_description(self):
        """Returns the description of the event. """
        doc = self._request("event.getInfo", True)
        return _extract(doc, "description")
    def get_cover_image(self, size = COVER_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        doc = self._request("event.getInfo", True)
        return _extract_all(doc, "image")[size]
    def get_attendance_count(self):
        """Returns the number of attending people. """
        doc = self._request("event.getInfo", True)
        return _number(_extract(doc, "attendance"))
    def get_review_count(self):
        """Returns the number of available reviews for this event. """
        doc = self._request("event.getInfo", True)
        return _number(_extract(doc, "reviews"))
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the event page on the network.
        * domain_name: The network's language domain. Possible values:
            o DOMAIN_ENGLISH
            o DOMAIN_GERMAN
            o DOMAIN_SPANISH
            o DOMAIN_FRENCH
            o DOMAIN_ITALIAN
            o DOMAIN_POLISH
            o DOMAIN_PORTUGUESE
            o DOMAIN_SWEDISH
            o DOMAIN_TURKISH
            o DOMAIN_RUSSIAN
            o DOMAIN_JAPANESE
            o DOMAIN_CHINESE
        """
        return self.network._get_url(domain_name, "event") %{'id': self.get_id()}
    def share(self, users, message = None):
        """Shares this event (sends out recommendations).
        * users: A list that can contain usernames, emails, User objects, or all of them.
        * message: A message to include in the recommendation message.
        """
        # last.fm currently accepts a max of 10 recipients at a time
        while(len(users) > 10):
            # Fixed off-by-one: send full batches of 10 recipients (was 9).
            section = users[0:10]
            users = users[10:]
            self.share(section, message)
        nusers = []
        for user in users:
            if isinstance(user, User):
                nusers.append(user.get_name())
            else:
                nusers.append(user)
        params = self._get_params()
        recipients = ','.join(nusers)
        params['recipient'] = recipients
        if message: params['message'] = _unicode(message)
        self._request('event.share', False, params)
    def get_shouts(self, limit=50):
        """
        Returns a sequence of Shout objects
        """
        shouts = []
        for node in _collect_nodes(limit, self, "event.getShouts", False):
            shouts.append(Shout(
                            _extract(node, "body"),
                            User(_extract(node, "author"), self.network),
                            _extract(node, "date")
                            )
                        )
        return shouts
    def shout(self, message):
        """
        Post a shout
        """
        params = self._get_params()
        params["message"] = message
        # NOTE(review): "event.Shout" breaks the lower-case method-name
        # convention used elsewhere in this file — confirm against the API.
        self._request("event.Shout", False, params)
class Country(_BaseObject):
    """A country at Last.fm."""
    name = None
    def __init__(self, name, network):
        _BaseObject.__init__(self, network)
        self.name = name
    @_string_output
    def __repr__(self):
        return self.get_name()
    def __eq__(self, other):
        return self.get_name().lower() == other.get_name().lower()
    def __ne__(self, other):
        # Fixed: compare case-insensitively to mirror __eq__; previously two
        # names differing only in case tested both "equal" and "not equal".
        return self.get_name().lower() != other.get_name().lower()
    def _get_params(self):
        return {'country': self.get_name()}
    def _get_name_from_code(self, alpha2code):
        # TODO: Have this function lookup the alpha-2 code and return the country name.
        return alpha2code
    def get_name(self):
        """Returns the country name. """
        return self.name
    def get_top_artists(self):
        """Returns a sequence of the most played artists."""
        doc = self._request('geo.getTopArtists', True)
        seq = []
        for node in doc.getElementsByTagName("artist"):
            name = _extract(node, 'name')
            playcount = _extract(node, "playcount")
            seq.append(TopItem(Artist(name, self.network), playcount))
        return seq
    def get_top_tracks(self):
        """Returns a sequence of the most played tracks"""
        doc = self._request("geo.getTopTracks", True)
        seq = []
        for n in doc.getElementsByTagName('track'):
            title = _extract(n, 'name')
            artist = _extract(n, 'name', 1)
            playcount = _number(_extract(n, "playcount"))
            seq.append( TopItem(Track(artist, title, self.network), playcount))
        return seq
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the country page on the network.
        * domain_name: The network's language domain. Possible values:
            o DOMAIN_ENGLISH
            o DOMAIN_GERMAN
            o DOMAIN_SPANISH
            o DOMAIN_FRENCH
            o DOMAIN_ITALIAN
            o DOMAIN_POLISH
            o DOMAIN_PORTUGUESE
            o DOMAIN_SWEDISH
            o DOMAIN_TURKISH
            o DOMAIN_RUSSIAN
            o DOMAIN_JAPANESE
            o DOMAIN_CHINESE
        """
        country_name = _url_safe(self.get_name())
        return self.network._get_url(domain_name, "country") %{'country_name': country_name}
class Library(_BaseObject):
    """A user's Last.fm library."""
    user = None
    def __init__(self, user, network):
        _BaseObject.__init__(self, network)
        if isinstance(user, User):
            self.user = user
        else:
            self.user = User(user, self.network)
        self._albums_index = 0
        self._artists_index = 0
        self._tracks_index = 0
    @_string_output
    def __repr__(self):
        return repr(self.get_user()) + "'s Library"
    def _get_params(self):
        return {'user': self.user.get_name()}
    def get_user(self):
        """Returns the user who owns this library."""
        return self.user
    def add_album(self, album):
        """Add an album to this library."""
        params = self._get_params()
        # Fixed: get_artist is a method and was referenced without calling
        # it, which raised AttributeError on the bound-method object.
        params["artist"] = album.get_artist().get_name()
        params["album"] = album.get_name()
        self._request("library.addAlbum", False, params)
    def add_artist(self, artist):
        """Add an artist to this library."""
        params = self._get_params()
        params["artist"] = artist.get_name()
        self._request("library.addArtist", False, params)
    def add_track(self, track):
        """Add a track to this library."""
        params = self._get_params()
        params["track"] = track.get_title()
        self._request("library.addTrack", False, params)
    def get_albums(self, limit=50):
        """
        Returns a sequence of Album objects
        if limit==None it will return all (may take a while)
        """
        seq = []
        for node in _collect_nodes(limit, self, "library.getAlbums", True):
            name = _extract(node, "name")
            artist = _extract(node, "name", 1)
            playcount = _number(_extract(node, "playcount"))
            tagcount = _number(_extract(node, "tagcount"))
            seq.append(LibraryItem(Album(artist, name, self.network), playcount, tagcount))
        return seq
    def get_artists(self, limit=50):
        """
        Returns a sequence of Artist objects
        if limit==None it will return all (may take a while)
        """
        seq = []
        for node in _collect_nodes(limit, self, "library.getArtists", True):
            name = _extract(node, "name")
            playcount = _number(_extract(node, "playcount"))
            tagcount = _number(_extract(node, "tagcount"))
            seq.append(LibraryItem(Artist(name, self.network), playcount, tagcount))
        return seq
    def get_tracks(self, limit=50):
        """
        Returns a sequence of Track objects
        if limit==None it will return all (may take a while)
        """
        seq = []
        for node in _collect_nodes(limit, self, "library.getTracks", True):
            name = _extract(node, "name")
            artist = _extract(node, "name", 1)
            playcount = _number(_extract(node, "playcount"))
            tagcount = _number(_extract(node, "tagcount"))
            seq.append(LibraryItem(Track(artist, name, self.network), playcount, tagcount))
        return seq
class Playlist(_BaseObject):
    """A Last.fm user playlist."""
    id = None
    user = None
    def __init__(self, user, id, network):
        _BaseObject.__init__(self, network)
        if isinstance(user, User):
            self.user = user
        else:
            self.user = User(user, self.network)
        self.id = _unicode(id)
    @_string_output
    def __repr__(self):
        return repr(self.user) + "'s playlist # " + repr(self.id)
    def _get_info_node(self):
        """Returns the node from user.getPlaylists where this playlist's info is."""
        doc = self._request("user.getPlaylists", True)
        for node in doc.getElementsByTagName("playlist"):
            if _extract(node, "id") == str(self.get_id()):
                return node
    def _get_params(self):
        return {'user': self.user.get_name(), 'playlistID': self.get_id()}
    def get_id(self):
        """Returns the playlist id."""
        return self.id
    def get_user(self):
        """Returns the owner user of this playlist."""
        return self.user
    def get_tracks(self):
        """Returns a list of the tracks on this user playlist."""
        uri = u'lastfm://playlist/%s' %self.get_id()
        return XSPF(uri, self.network).get_tracks()
    def add_track(self, track):
        """Adds a Track to this Playlist."""
        params = self._get_params()
        params['artist'] = track.get_artist().get_name()
        params['track'] = track.get_title()
        self._request('playlist.addTrack', False, params)
    def get_title(self):
        """Returns the title of this playlist."""
        return _extract(self._get_info_node(), "title")
    def get_creation_date(self):
        """Returns the creation date of this playlist."""
        return _extract(self._get_info_node(), "date")
    def get_size(self):
        """Returns the number of tracks in this playlist."""
        return _number(_extract(self._get_info_node(), "size"))
    def get_description(self):
        """Returns the description of this playlist."""
        return _extract(self._get_info_node(), "description")
    def get_duration(self):
        """Returns the duration of this playlist in milliseconds."""
        return _number(_extract(self._get_info_node(), "duration"))
    def is_streamable(self):
        """Returns True if the playlist is streamable.
        For a playlist to be streamable, it needs at least 45 tracks by 15 different artists."""
        if _extract(self._get_info_node(), "streamable") == '1':
            return True
        else:
            return False
    def has_track(self, track):
        """Checks to see if track is already in the playlist.
        * track: Any Track object.
        """
        return track in self.get_tracks()
    def get_cover_image(self, size = COVER_LARGE):
        """
        Returns a uri to the cover image
        size can be one of:
            COVER_MEGA
            COVER_EXTRA_LARGE
            COVER_LARGE
            COVER_MEDIUM
            COVER_SMALL
        """
        # Fixed: use _extract_all like the other classes. _extract returns a
        # single url string, so indexing it with [size] yielded one character
        # of the first image url instead of the url for the requested size.
        return _extract_all(self._get_info_node(), "image")[size]
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the playlist on the network.
        * domain_name: The network's language domain. Possible values:
            o DOMAIN_ENGLISH
            o DOMAIN_GERMAN
            o DOMAIN_SPANISH
            o DOMAIN_FRENCH
            o DOMAIN_ITALIAN
            o DOMAIN_POLISH
            o DOMAIN_PORTUGUESE
            o DOMAIN_SWEDISH
            o DOMAIN_TURKISH
            o DOMAIN_RUSSIAN
            o DOMAIN_JAPANESE
            o DOMAIN_CHINESE
        """
        english_url = _extract(self._get_info_node(), "url")
        appendix = english_url[english_url.rfind("/") + 1:]
        return self.network._get_url(domain_name, "playlist") %{'appendix': appendix, "user": self.get_user().get_name()}
class Tag(_BaseObject):
    """A Last.fm object tag."""
    # TODO: getWeeklyArtistChart (too lazy, i'll wait for when someone requests it)
    # name: the tag's name on the network.
    name = None
    def __init__(self, name, network):
        _BaseObject.__init__(self, network)
        self.name = name
    def _get_params(self):
        return {'tag': self.get_name()}
    @_string_output
    def __repr__(self):
        return self.get_name()
    def __eq__(self, other):
        # Tag names compare case-insensitively.
        return self.get_name().lower() == other.get_name().lower()
    def __ne__(self, other):
        return self.get_name().lower() != other.get_name().lower()
    def get_name(self):
        """Returns the name of the tag. """
        return self.name
    def get_similar(self):
        """Returns the tags similar to this one, ordered by similarity. """
        doc = self._request('tag.getSimilar', True)
        seq = []
        names = _extract_all(doc, 'name')
        for name in names:
            seq.append(Tag(name, self.network))
        return seq
    def get_top_albums(self):
        """Returns a list of the top albums tagged with this tag."""
        doc = self._request('tag.getTopAlbums', True)
        seq = []
        for node in doc.getElementsByTagName("album"):
            name = _extract(node, "name")
            # Second <name> element inside the album node is the artist.
            artist = _extract(node, "name", 1)
            playcount = _extract(node, "playcount")
            seq.append(TopItem(Album(artist, name, self.network), playcount))
        return seq
    def get_top_tracks(self):
        """Returns a list of the most played Tracks with this tag."""
        doc = self._request("tag.getTopTracks", True)
        seq = []
        for track in doc.getElementsByTagName('track'):
            title = _extract(track, "name")
            # Second <name> element inside the track node is the artist.
            artist = _extract(track, "name", 1)
            playcount = _number(_extract(track, "playcount"))
            seq.append( TopItem(Track(artist, title, self.network), playcount) )
        return seq
    def get_top_artists(self):
        """Returns a sequence of the most played artists with this tag."""
        doc = self._request('tag.getTopArtists', True)
        seq = []
        for node in doc.getElementsByTagName("artist"):
            name = _extract(node, 'name')
            playcount = _extract(node, "playcount")
            seq.append(TopItem(Artist(name, self.network), playcount))
        return seq
    def get_weekly_chart_dates(self):
        """Returns a list of From and To tuples for the available charts."""
        doc = self._request("tag.getWeeklyChartList", True)
        seq = []
        for node in doc.getElementsByTagName("chart"):
            seq.append( (node.getAttribute("from"), node.getAttribute("to")) )
        return seq
    def get_weekly_artist_charts(self, from_date = None, to_date = None):
        """Returns the weekly artist charts for the week starting from the from_date value to the to_date value.
        Both dates must be given (as chart timestamps) or both omitted for the latest chart.
        """
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date
        doc = self._request("tag.getWeeklyArtistChart", True, params)
        seq = []
        for node in doc.getElementsByTagName("artist"):
            item = Artist(_extract(node, "name"), self.network)
            weight = _number(_extract(node, "weight"))
            seq.append(TopItem(item, weight))
        return seq
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the tag page on the network.
        * domain_name: The network's language domain. Possible values:
            o DOMAIN_ENGLISH
            o DOMAIN_GERMAN
            o DOMAIN_SPANISH
            o DOMAIN_FRENCH
            o DOMAIN_ITALIAN
            o DOMAIN_POLISH
            o DOMAIN_PORTUGUESE
            o DOMAIN_SWEDISH
            o DOMAIN_TURKISH
            o DOMAIN_RUSSIAN
            o DOMAIN_JAPANESE
            o DOMAIN_CHINESE
        """
        name = _url_safe(self.get_name())
        return self.network._get_url(domain_name, "tag") %{'name': name}
class Track(_BaseObject, _Taggable):
    """A Last.fm track."""
    artist = None
    title = None
    def __init__(self, artist, title, network):
        """
        Create a track instance.
        * artist: An artist name or an Artist object.
        * title: The track title.
        """
        _BaseObject.__init__(self, network)
        _Taggable.__init__(self, 'track')
        if isinstance(artist, Artist):
            self.artist = artist
        else:
            self.artist = Artist(artist, self.network)
        self.title = title
    @_string_output
    def __repr__(self):
        return self.get_artist().get_name() + ' - ' + self.get_title()
    def __eq__(self, other):
        # Case-insensitive on both the title and the artist name.
        return (self.get_title().lower() == other.get_title().lower()) and (self.get_artist().get_name().lower() == other.get_artist().get_name().lower())
    def __ne__(self, other):
        return (self.get_title().lower() != other.get_title().lower()) or (self.get_artist().get_name().lower() != other.get_artist().get_name().lower())
    def _get_params(self):
        return {'artist': self.get_artist().get_name(), 'track': self.get_title()}
    def get_artist(self):
        """Returns the associated Artist object."""
        return self.artist
    def get_title(self):
        """Returns the track title."""
        return self.title
    def get_name(self):
        """Returns the track title (alias to Track.get_title)."""
        return self.get_title()
    def get_id(self):
        """Returns the track id on the network."""
        doc = self._request("track.getInfo", True)
        return _extract(doc, "id")
    def get_duration(self):
        """Returns the track duration."""
        doc = self._request("track.getInfo", True)
        return _number(_extract(doc, "duration"))
    def get_mbid(self):
        """Returns the MusicBrainz ID of this track."""
        doc = self._request("track.getInfo", True)
        return _extract(doc, "mbid")
    def get_listener_count(self):
        """Returns the listener count."""
        doc = self._request("track.getInfo", True)
        return _number(_extract(doc, "listeners"))
    def get_playcount(self):
        """Returns the play count."""
        doc = self._request("track.getInfo", True)
        return _number(_extract(doc, "playcount"))
    def is_streamable(self):
        """Returns True if the track is available at Last.fm."""
        doc = self._request("track.getInfo", True)
        return _extract(doc, "streamable") == "1"
    def is_fulltrack_available(self):
        """Returns True if the fulltrack is available for streaming."""
        doc = self._request("track.getInfo", True)
        return doc.getElementsByTagName("streamable")[0].getAttribute("fulltrack") == "1"
    def get_album(self):
        """Returns the album object of this track, or None if not listed."""
        doc = self._request("track.getInfo", True)
        albums = doc.getElementsByTagName("album")
        if len(albums) == 0:
            return
        node = doc.getElementsByTagName("album")[0]
        return Album(_extract(node, "artist"), _extract(node, "title"), self.network)
    def get_wiki_published_date(self):
        """Returns the date of publishing this version of the wiki."""
        doc = self._request("track.getInfo", True)
        if len(doc.getElementsByTagName("wiki")) == 0:
            return
        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "published")
    def get_wiki_summary(self):
        """Returns the summary of the wiki."""
        doc = self._request("track.getInfo", True)
        if len(doc.getElementsByTagName("wiki")) == 0:
            return
        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "summary")
    def get_wiki_content(self):
        """Returns the content of the wiki."""
        doc = self._request("track.getInfo", True)
        if len(doc.getElementsByTagName("wiki")) == 0:
            return
        node = doc.getElementsByTagName("wiki")[0]
        return _extract(node, "content")
    def love(self):
        """Adds the track to the user's loved tracks. """
        self._request('track.love')
    def ban(self):
        """Ban this track from ever playing on the radio. """
        self._request('track.ban')
    def get_similar(self):
        """Returns similar tracks for this track on the network, based on listening data. """
        doc = self._request('track.getSimilar', True)
        seq = []
        for node in doc.getElementsByTagName("track"):
            title = _extract(node, 'name')
            artist = _extract(node, 'name', 1)
            match = _number(_extract(node, "match"))
            seq.append(SimilarItem(Track(artist, title, self.network), match))
        return seq
    def get_top_fans(self, limit = None):
        """Returns a list of the Users who played this track.
        * limit: max elements to return; None for all supplied by the service.
        """
        doc = self._request('track.getTopFans', True)
        seq = []
        elements = doc.getElementsByTagName('user')
        for element in elements:
            if limit and len(seq) >= limit:
                break
            name = _extract(element, 'name')
            weight = _number(_extract(element, 'weight'))
            seq.append(TopItem(User(name, self.network), weight))
        return seq
    def share(self, users, message = None):
        """Shares this track (sends out recommendations).
        * users: A list that can contain usernames, emails, User objects, or all of them.
        * message: A message to include in the recommendation message.
        """
        # last.fm currently accepts a max of 10 recipients at a time
        while(len(users) > 10):
            # Fixed off-by-one: send full batches of 10 recipients (was 9).
            section = users[0:10]
            users = users[10:]
            self.share(section, message)
        nusers = []
        for user in users:
            if isinstance(user, User):
                nusers.append(user.get_name())
            else:
                nusers.append(user)
        params = self._get_params()
        recipients = ','.join(nusers)
        params['recipient'] = recipients
        if message: params['message'] = _unicode(message)
        self._request('track.share', False, params)
    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the track page on the network.
        * domain_name: The network's language domain. Possible values:
            o DOMAIN_ENGLISH
            o DOMAIN_GERMAN
            o DOMAIN_SPANISH
            o DOMAIN_FRENCH
            o DOMAIN_ITALIAN
            o DOMAIN_POLISH
            o DOMAIN_PORTUGUESE
            o DOMAIN_SWEDISH
            o DOMAIN_TURKISH
            o DOMAIN_RUSSIAN
            o DOMAIN_JAPANESE
            o DOMAIN_CHINESE
        """
        artist = _url_safe(self.get_artist().get_name())
        title = _url_safe(self.get_title())
        return self.network._get_url(domain_name, "track") %{'domain': self.network._get_language_domain(domain_name), 'artist': artist, 'title': title}
    def get_shouts(self, limit=50):
        """
        Returns a sequence of Shout objects
        """
        shouts = []
        for node in _collect_nodes(limit, self, "track.getShouts", False):
            shouts.append(Shout(
                            _extract(node, "body"),
                            User(_extract(node, "author"), self.network),
                            _extract(node, "date")
                            )
                        )
        return shouts
    def shout(self, message):
        """
        Post a shout
        """
        params = self._get_params()
        params["message"] = message
        # NOTE(review): "track.Shout" breaks the lower-case method-name
        # convention used elsewhere in this file — confirm against the API.
        self._request("track.Shout", False, params)
class Group(_BaseObject):
    """A Last.fm group."""

    # The group's name, set in __init__.
    name = None

    def __init__(self, group_name, network):
        _BaseObject.__init__(self, network)
        self.name = group_name

    @_string_output
    def __repr__(self):
        return self.get_name()

    def __eq__(self, other):
        # Group names compare case-insensitively.
        return self.get_name().lower() == other.get_name().lower()

    def __ne__(self, other):
        # BUG FIX: the original compared case-sensitively here while __eq__
        # compared case-insensitively, so two groups differing only in case
        # were simultaneously "equal" and "not equal". Keep them consistent.
        return not self.__eq__(other)

    def _get_params(self):
        return {'group': self.get_name()}

    def get_name(self):
        """Returns the group name."""
        return self.name

    def get_weekly_chart_dates(self):
        """Returns a list of (from, to) tuples for the available charts."""
        doc = self._request("group.getWeeklyChartList", True)

        seq = []
        for node in doc.getElementsByTagName("chart"):
            seq.append( (node.getAttribute("from"), node.getAttribute("to")) )

        return seq

    def get_weekly_artist_charts(self, from_date = None, to_date = None):
        """Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("group.getWeeklyArtistChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("artist"):
            item = Artist(_extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_album_charts(self, from_date = None, to_date = None):
        """Returns the weekly album charts for the week starting from the from_date value to the to_date value."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("group.getWeeklyAlbumChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("album"):
            item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_track_charts(self, from_date = None, to_date = None):
        """Returns the weekly track charts for the week starting from the from_date value to the to_date value."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("group.getWeeklyTrackChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("track"):
            item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the group page on the network.

        * domain_name: The network's language domain; one of the DOMAIN_*
          constants (DOMAIN_ENGLISH, DOMAIN_GERMAN, DOMAIN_SPANISH,
          DOMAIN_FRENCH, DOMAIN_ITALIAN, DOMAIN_POLISH, DOMAIN_PORTUGUESE,
          DOMAIN_SWEDISH, DOMAIN_TURKISH, DOMAIN_RUSSIAN, DOMAIN_JAPANESE,
          DOMAIN_CHINESE).
        """
        name = _url_safe(self.get_name())

        return self.network._get_url(domain_name, "group") %{'name': name}

    def get_members(self, limit=50):
        """
        Returns a sequence of User objects.
        If limit==None it will return all.
        """
        nodes = _collect_nodes(limit, self, "group.getMembers", False)

        users = []
        for node in nodes:
            users.append(User(_extract(node, "name"), self.network))

        return users
class XSPF(_BaseObject):
    """A Last.fm XSPF playlist."""

    # The playlist's lastfm:// URI, set in __init__.
    uri = None

    def __init__(self, uri, network):
        _BaseObject.__init__(self, network)
        self.uri = uri

    def _get_params(self):
        return {'playlistURL': self.get_uri()}

    @_string_output
    def __repr__(self):
        return self.get_uri()

    def __eq__(self, other):
        return self.get_uri() == other.get_uri()

    def __ne__(self, other):
        return self.get_uri() != other.get_uri()

    def get_uri(self):
        """Returns the Last.fm playlist URI."""
        return self.uri

    def get_tracks(self):
        """Returns the tracks on this playlist as Track objects."""
        doc = self._request('playlist.fetch', True)
        return [Track(_extract(node, 'creator'), _extract(node, 'title'), self.network)
                for node in doc.getElementsByTagName('track')]
class User(_BaseObject):
    """A Last.fm user."""

    # The user name, set in __init__.
    name = None

    def __init__(self, user_name, network):
        _BaseObject.__init__(self, network)

        self.name = user_name

        # Pagination cursors used by the incremental-fetch helpers.
        self._past_events_index = 0
        self._recommended_events_index = 0
        self._recommended_artists_index = 0

    @_string_output
    def __repr__(self):
        return self.get_name()

    def __eq__(self, another):
        return self.get_name() == another.get_name()

    def __ne__(self, another):
        return self.get_name() != another.get_name()

    def _get_params(self):
        return {"user": self.get_name()}

    def get_name(self):
        """Returns the user name."""
        return self.name

    def get_upcoming_events(self):
        """Returns all the upcoming events for this user."""
        doc = self._request('user.getEvents', True)

        ids = _extract_all(doc, 'id')
        events = []

        for e_id in ids:
            events.append(Event(e_id, self.network))

        return events

    def get_friends(self, limit = 50):
        """Returns a list of the user's friends."""
        seq = []
        for node in _collect_nodes(limit, self, "user.getFriends", False):
            seq.append(User(_extract(node, "name"), self.network))

        return seq

    def get_loved_tracks(self, limit=50):
        """Returns this user's loved tracks as a sequence of LovedTrack objects
        in reverse order of their timestamp, all the way back to the first track.

        If limit==None, it will try to pull all the available data.

        This method uses caching. Enable caching only if you're pulling a
        large amount of data.

        Use extract_items() with the return of this function to
        get only a sequence of Track objects with no playback dates."""

        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)

        seq = []
        for track in _collect_nodes(limit, self, "user.getLovedTracks", True, params):
            title = _extract(track, "name")
            # The second <name> element inside a <track> node is the artist.
            artist = _extract(track, "name", 1)
            date = _extract(track, "date")
            timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")

            seq.append(LovedTrack(Track(artist, title, self.network), date, timestamp))

        return seq

    def get_neighbours(self, limit = 50):
        """Returns a list of the user's neighbours (the original docstring
        mistakenly said "friends")."""
        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)

        doc = self._request('user.getNeighbours', True, params)

        seq = []
        names = _extract_all(doc, 'name')

        for name in names:
            seq.append(User(name, self.network))

        return seq

    def get_past_events(self, limit=50):
        """
        Returns a sequence of Event objects.
        If limit==None it will return all.
        """
        seq = []
        for n in _collect_nodes(limit, self, "user.getPastEvents", False):
            seq.append(Event(_extract(n, "id"), self.network))

        return seq

    def get_playlists(self):
        """Returns a list of Playlists that this user owns."""
        doc = self._request("user.getPlaylists", True)

        playlists = []
        for playlist_id in _extract_all(doc, "id"):
            playlists.append(Playlist(self.get_name(), playlist_id, self.network))

        return playlists

    def get_now_playing(self):
        """Returns the currently playing track, or None if nothing is playing."""

        params = self._get_params()
        params['limit'] = '1'

        doc = self._request('user.getRecentTracks', False, params)

        # ROBUSTNESS FIX: a user with no scrobbled tracks used to raise
        # IndexError on the unconditional [0] here.
        tracks = doc.getElementsByTagName('track')
        if not tracks:
            return None
        e = tracks[0]

        if not e.hasAttribute('nowplaying'):
            return None

        artist = _extract(e, 'artist')
        title = _extract(e, 'name')

        return Track(artist, title, self.network)

    def get_recent_tracks(self, limit = 10):
        """Returns this user's played tracks as a sequence of PlayedTrack objects
        in reverse order of their playtime, all the way back to the first track.

        If limit==None, it will try to pull all the available data.

        This method uses caching. Enable caching only if you're pulling a
        large amount of data.

        Use extract_items() with the return of this function to
        get only a sequence of Track objects with no playback dates."""

        params = self._get_params()
        if limit:
            params['limit'] = _unicode(limit)

        seq = []
        for track in _collect_nodes(limit, self, "user.getRecentTracks", True, params):

            if track.hasAttribute('nowplaying'):
                continue    # prevent the now-playing track from sneaking in

            title = _extract(track, "name")
            artist = _extract(track, "artist")
            date = _extract(track, "date")
            timestamp = track.getElementsByTagName("date")[0].getAttribute("uts")

            seq.append(PlayedTrack(Track(artist, title, self.network), date, timestamp))

        return seq

    def get_top_albums(self, period = PERIOD_OVERALL):
        """Returns the top albums played by a user.
        * period: The period of time. Possible values:
          o PERIOD_OVERALL
          o PERIOD_3MONTHS
          o PERIOD_6MONTHS
          o PERIOD_12MONTHS
        """

        params = self._get_params()
        params['period'] = period

        doc = self._request('user.getTopAlbums', True, params)

        seq = []
        for album in doc.getElementsByTagName('album'):
            name = _extract(album, 'name')
            # The second <name> element inside an <album> node is the artist.
            artist = _extract(album, 'name', 1)
            playcount = _extract(album, "playcount")

            seq.append(TopItem(Album(artist, name, self.network), playcount))

        return seq

    def get_top_artists(self, period = PERIOD_OVERALL):
        """Returns the top artists played by a user.
        * period: The period of time. Possible values:
          o PERIOD_OVERALL
          o PERIOD_3MONTHS
          o PERIOD_6MONTHS
          o PERIOD_12MONTHS
        """

        params = self._get_params()
        params['period'] = period

        doc = self._request('user.getTopArtists', True, params)

        seq = []
        for node in doc.getElementsByTagName('artist'):
            name = _extract(node, 'name')
            playcount = _extract(node, "playcount")

            seq.append(TopItem(Artist(name, self.network), playcount))

        return seq

    def get_top_tags(self, limit = None):
        """Returns a sequence of the top tags used by this user with their counts as (Tag, tagcount).
        * limit: The limit of how many tags to return (None for all).
        """

        doc = self._request("user.getTopTags", True)

        seq = []
        for node in doc.getElementsByTagName("tag"):
            # BUG FIX: with the default limit=None the original comparison
            # was `len(seq) < None`, which is never true on Python 2, so
            # this method always returned an empty list.
            if limit is None or len(seq) < limit:
                seq.append(TopItem(Tag(_extract(node, "name"), self.network), _extract(node, "count")))

        return seq

    def get_top_tracks(self, period = PERIOD_OVERALL):
        """Returns the top tracks played by a user.
        * period: The period of time. Possible values:
          o PERIOD_OVERALL
          o PERIOD_3MONTHS
          o PERIOD_6MONTHS
          o PERIOD_12MONTHS
        """

        params = self._get_params()
        params['period'] = period

        doc = self._request('user.getTopTracks', True, params)

        seq = []
        for track in doc.getElementsByTagName('track'):
            name = _extract(track, 'name')
            # The second <name> element inside a <track> node is the artist.
            artist = _extract(track, 'name', 1)
            playcount = _extract(track, "playcount")

            seq.append(TopItem(Track(artist, name, self.network), playcount))

        return seq

    def get_weekly_chart_dates(self):
        """Returns a list of (from, to) tuples for the available charts."""
        doc = self._request("user.getWeeklyChartList", True)

        seq = []
        for node in doc.getElementsByTagName("chart"):
            seq.append( (node.getAttribute("from"), node.getAttribute("to")) )

        return seq

    def get_weekly_artist_charts(self, from_date = None, to_date = None):
        """Returns the weekly artist charts for the week starting from the from_date value to the to_date value."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("user.getWeeklyArtistChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("artist"):
            item = Artist(_extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_album_charts(self, from_date = None, to_date = None):
        """Returns the weekly album charts for the week starting from the from_date value to the to_date value."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("user.getWeeklyAlbumChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("album"):
            item = Album(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def get_weekly_track_charts(self, from_date = None, to_date = None):
        """Returns the weekly track charts for the week starting from the from_date value to the to_date value."""
        params = self._get_params()
        if from_date and to_date:
            params["from"] = from_date
            params["to"] = to_date

        doc = self._request("user.getWeeklyTrackChart", True, params)

        seq = []
        for node in doc.getElementsByTagName("track"):
            item = Track(_extract(node, "artist"), _extract(node, "name"), self.network)
            weight = _number(_extract(node, "playcount"))
            seq.append(TopItem(item, weight))

        return seq

    def compare_with_user(self, user, shared_artists_limit = None):
        """Compare this user with another Last.fm user.
        Returns a sequence (tasteometer_score, (shared_artist1, shared_artist2, ...))
        user: A User object or a username string/unicode object.
        """

        if isinstance(user, User):
            user = user.get_name()

        params = self._get_params()
        if shared_artists_limit:
            params['limit'] = _unicode(shared_artists_limit)
        params['type1'] = 'user'
        params['type2'] = 'user'
        params['value1'] = self.get_name()
        params['value2'] = user

        doc = self._request('tasteometer.compare', False, params)

        score = _extract(doc, 'score')

        artists = doc.getElementsByTagName('artists')[0]
        shared_artists_names = _extract_all(artists, 'name')

        shared_artists_seq = []
        for name in shared_artists_names:
            shared_artists_seq.append(Artist(name, self.network))

        return (score, shared_artists_seq)

    def get_url(self, domain_name = DOMAIN_ENGLISH):
        """Returns the url of the user page on the network.

        * domain_name: The network's language domain; one of the DOMAIN_*
          constants (DOMAIN_ENGLISH, DOMAIN_GERMAN, DOMAIN_SPANISH,
          DOMAIN_FRENCH, DOMAIN_ITALIAN, DOMAIN_POLISH, DOMAIN_PORTUGUESE,
          DOMAIN_SWEDISH, DOMAIN_TURKISH, DOMAIN_RUSSIAN, DOMAIN_JAPANESE,
          DOMAIN_CHINESE).
        """
        name = _url_safe(self.get_name())

        return self.network._get_url(domain_name, "user") %{'name': name}

    def get_library(self):
        """Returns the associated Library object."""
        return Library(self, self.network)

    def get_shouts(self, limit=50):
        """
        Returns a sequence of Shout objects
        """
        shouts = []
        for node in _collect_nodes(limit, self, "user.getShouts", False):
            shouts.append(Shout(
                _extract(node, "body"),
                User(_extract(node, "author"), self.network),
                _extract(node, "date")
                )
            )
        return shouts

    def shout(self, message):
        """
        Post a shout
        """
        params = self._get_params()
        params["message"] = message

        self._request("user.Shout", False, params)
class AuthenticatedUser(User):
    """The user whose credentials authenticate the current session."""

    def __init__(self, network):
        User.__init__(self, "", network)

    def _get_params(self):
        return {"user": self.get_name()}

    def get_name(self):
        """Returns the name of the authenticated user."""
        doc = self._request("user.getInfo", True, {"user": ""})    # hack

        self.name = _extract(doc, "name")
        return self.name

    def get_id(self):
        """Returns the user id."""
        return _extract(self._request("user.getInfo", True), "id")

    def get_cover_image(self):
        """Returns the user's avatar."""
        return _extract(self._request("user.getInfo", True), "image")

    def get_language(self):
        """Returns the language code of the language used by the user."""
        return _extract(self._request("user.getInfo", True), "lang")

    def get_country(self):
        """Returns the name of the country of the user."""
        country_name = _extract(self._request("user.getInfo", True), "country")
        return Country(country_name, self.network)

    def get_age(self):
        """Returns the user's age."""
        return _number(_extract(self._request("user.getInfo", True), "age"))

    def get_gender(self):
        """Returns the user's gender. Either USER_MALE or USER_FEMALE."""
        value = _extract(self._request("user.getInfo", True), "gender")
        if value == 'm':
            return USER_MALE
        if value == 'f':
            return USER_FEMALE
        return None

    def is_subscriber(self):
        """Returns whether the user is a subscriber or not. True or False."""
        return _extract(self._request("user.getInfo", True), "subscriber") == "1"

    def get_playcount(self):
        """Returns the user's playcount so far."""
        return _number(_extract(self._request("user.getInfo", True), "playcount"))

    def get_recommended_events(self, limit=50):
        """
        Returns a sequence of Event objects.
        If limit==None it will return all.
        """
        return [Event(_extract(node, "id"), self.network)
                for node in _collect_nodes(limit, self, "user.getRecommendedEvents", False)]

    def get_recommended_artists(self, limit=50):
        """
        Returns a sequence of Artist objects.
        If limit==None it will return all.
        """
        return [Artist(_extract(node, "name"), self.network)
                for node in _collect_nodes(limit, self, "user.getRecommendedArtists", False)]
class _Search(_BaseObject):
    """An abstract class. Use one of its derivatives."""

    def __init__(self, ws_prefix, search_terms, network):
        _BaseObject.__init__(self, network)

        self._ws_prefix = ws_prefix
        self.search_terms = search_terms

        self._last_page_index = 0

    def _get_params(self):
        # A shallow copy keeps callers from mutating the stored terms.
        return dict(self.search_terms)

    def get_total_result_count(self):
        """Returns the total count of all the results."""
        doc = self._request(self._ws_prefix + ".search", True)
        return _extract(doc, "opensearch:totalResults")

    def _retreive_page(self, page_index):
        """Returns the node of matches to be processed.

        (Method name misspelling kept for compatibility.)
        """
        params = self._get_params()
        params["page"] = str(page_index)
        doc = self._request(self._ws_prefix + ".search", True, params)
        return doc.getElementsByTagName(self._ws_prefix + "matches")[0]

    def _retrieve_next_page(self):
        self._last_page_index += 1
        return self._retreive_page(self._last_page_index)
class AlbumSearch(_Search):
    """Search for an album by name."""

    def __init__(self, album_name, network):
        _Search.__init__(self, "album", {"album": album_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Album objects."""
        matches = self._retrieve_next_page()
        return [Album(_extract(node, "artist"), _extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("album")]
class ArtistSearch(_Search):
    """Search for an artist by artist name."""

    def __init__(self, artist_name, network):
        _Search.__init__(self, "artist", {"artist": artist_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Artist objects."""
        matches = self._retrieve_next_page()
        return [Artist(_extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("artist")]
class TagSearch(_Search):
    """Search for a tag by tag name."""

    def __init__(self, tag_name, network):
        _Search.__init__(self, "tag", {"tag": tag_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Tag objects."""
        matches = self._retrieve_next_page()
        return [Tag(_extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("tag")]
class TrackSearch(_Search):
    """Search for a track by track title. To avoid narrowing the results
    down by artist name, pass an empty string as the artist."""

    def __init__(self, artist_name, track_title, network):
        _Search.__init__(self, "track", {"track": track_title, "artist": artist_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Track objects."""
        matches = self._retrieve_next_page()
        return [Track(_extract(node, "artist"), _extract(node, "name"), self.network)
                for node in matches.getElementsByTagName("track")]
class VenueSearch(_Search):
    """Search for a venue by its name. To avoid narrowing the results
    down by country, pass an empty string as the country."""

    def __init__(self, venue_name, country_name, network):
        _Search.__init__(self, "venue", {"venue": venue_name, "country": country_name}, network)

    def get_next_page(self):
        """Returns the next page of results as a sequence of Venue objects."""
        matches = self._retrieve_next_page()
        return [Venue(_extract(node, "id"), self.network)
                for node in matches.getElementsByTagName("venue")]
class Venue(_BaseObject):
    """A venue where events are held."""

    # TODO: waiting for a venue.getInfo web service to use.

    # Numeric venue id, set in __init__.
    id = None

    def __init__(self, id, network):
        _BaseObject.__init__(self, network)

        self.id = _number(id)

    @_string_output
    def __repr__(self):
        return "Venue #" + str(self.id)

    def __eq__(self, other):
        return self.get_id() == other.get_id()

    def __ne__(self, other):
        # Added for consistency with __eq__; without it, != fell back to
        # identity comparison under Python 2.
        return not self.__eq__(other)

    def _get_params(self):
        return {"venue": self.get_id()}

    def get_id(self):
        """Returns the numeric id of the venue."""
        return self.id

    def get_upcoming_events(self):
        """Returns the upcoming events in this venue."""
        doc = self._request("venue.getEvents", True)

        seq = []
        for node in doc.getElementsByTagName("event"):
            seq.append(Event(_extract(node, "id"), self.network))

        return seq

    def get_past_events(self):
        """Returns the past events held in this venue."""
        # BUG FIX: this was a verbatim copy of get_upcoming_events and
        # queried "venue.getEvents"; past events live under the
        # venue.getPastEvents web-service method.
        doc = self._request("venue.getPastEvents", True)

        seq = []
        for node in doc.getElementsByTagName("event"):
            seq.append(Event(_extract(node, "id"), self.network))

        return seq
def md5(text):
    """Returns the md5 hash of a string."""
    # hashlib.md5 accepts the initial data directly, so no explicit
    # update() call is needed.
    return hashlib.md5(_string(text)).hexdigest()
def async_call(sender, call, callback = None, call_args = None, callback_args = None):
    """Set up an asynchronous operation on a background thread.

    * call: The function to call asynchronously.
    * callback: Called once the operation completes, as
      callback(sender, output[, param1, param2, ...]).
    * call_args: A sequence of args to be passed to call.
    * callback_args: A sequence of args to be passed to callback.
    """
    _ThreadedCall(sender, call, call_args, callback, callback_args).start()
def _unicode(text):
    """Coerce *text* to a unicode object (Python 2 semantics).

    Strings are assumed to be UTF-8 encoded.
    """
    if type(text) == unicode:
        return text
    elif type(text) == int:
        return unicode(text)
    else:
        return unicode(text, "utf-8")
def _string(text):
    """Coerce *text* to a UTF-8 encoded byte string."""
    if type(text) == str:
        return text
    elif type(text) == int:
        return str(text)
    else:
        return text.encode("utf-8")
def _collect_nodes(limit, sender, method_name, cacheable, params=None):
    """
    Returns a sequence of dom.Node objects about as close to
    limit as possible, paging through the web service until either
    the limit is reached or the last page has been fetched.
    """
    # A falsy limit means "no limit": collect until the pages run out.
    if not limit: limit = sys.maxint
    if not params: params = sender._get_params()

    nodes = []
    page = 1
    end_of_pages = False

    while len(nodes) < limit and not end_of_pages:
        params["page"] = str(page)
        doc = sender._request(method_name, cacheable, params)

        # NOTE(review): childNodes[1] assumes the payload element is always
        # the second child of the document element -- confirm against the
        # responses this is used with.
        main = doc.documentElement.childNodes[1]

        # The attribute casing varies between web-service methods.
        if main.hasAttribute("totalPages"):
            total_pages = _number(main.getAttribute("totalPages"))
        elif main.hasAttribute("totalpages"):
            total_pages = _number(main.getAttribute("totalpages"))
        else:
            raise Exception("No total pages attribute")

        # Collect element children only; skip whitespace text nodes.
        for node in main.childNodes:
            if not node.nodeType == xml.dom.Node.TEXT_NODE and len(nodes) < limit:
                nodes.append(node)

        if page >= total_pages:
            end_of_pages = True

        page += 1

    return nodes
def _extract(node, name, index = 0):
    """Return the unescaped, stripped text of the index-th <name> element
    under *node*, or None when the element is absent or empty."""
    matches = node.getElementsByTagName(name)

    if not len(matches):
        return None

    child = matches[index].firstChild
    if not child:
        return None

    return _unescape_htmlentity(child.data.strip())
def _extract_all(node, name, limit_count = None):
    """Extract every <name> value under *node* into a list, optionally
    capped at limit_count entries."""
    seq = []
    total = len(node.getElementsByTagName(name))

    for i in range(0, total):
        if len(seq) == limit_count:
            break
        seq.append(_extract(node, name, i))

    return seq
def _url_safe(text):
    """Does all kinds of tricks on a text to make it safe to use in a url."""
    # UTF-8 encode first, then quote twice and lower-case the result.
    encoded = text.encode('utf-8') if type(text) == unicode else text
    return urllib.quote_plus(urllib.quote_plus(encoded)).lower()
def _number(string):
    """
    Extracts an int from a string, falling back to float for values
    such as "2.5". Returns 0 if None or an empty string was passed.
    """
    # `not string` already covers both None and "" -- the original had a
    # separate, unreachable `string == ""` branch.
    if not string:
        return 0

    try:
        return int(string)
    except ValueError:
        return float(string)
def _unescape_htmlentity(string):
    """Replace named HTML entities (e.g. &amp;) in *string* with the
    unicode characters they stand for."""
    string = _unicode(string)

    for name, codepoint in htmlentitydefs.name2codepoint.items():
        string = string.replace("&%s;" % name, unichr(codepoint))

    return string
def extract_items(topitems_or_libraryitems):
    """Extracts a sequence of items from a sequence of TopItem or LibraryItem objects."""
    return [entry.item for entry in topitems_or_libraryitems]
class ScrobblingError(Exception):
    """Base class for errors reported by the scrobbling submission server."""
    def __init__(self, message):
        Exception.__init__(self)
        # Human-readable failure reason, surfaced through __str__.
        self.message = message

    @_string_output
    def __str__(self):
        return self.message
class BannedClientError(ScrobblingError):
    """Raised when the server reports that this client version is banned."""
    def __init__(self):
        ScrobblingError.__init__(self, "This version of the client has been banned")

class BadAuthenticationError(ScrobblingError):
    """Raised when the server rejects the authentication token."""
    def __init__(self):
        ScrobblingError.__init__(self, "Bad authentication token")

class BadTimeError(ScrobblingError):
    """Raised when the submitted timestamp is too far from server time."""
    def __init__(self):
        ScrobblingError.__init__(self, "Time provided is not close enough to current time")

class BadSessionError(ScrobblingError):
    """Raised when the scrobbling session id is stale; re-handshake."""
    def __init__(self):
        ScrobblingError.__init__(self, "Bad session id, consider re-handshaking")
class _ScrobblerRequest(object):
    """A single HTTP request against an audioscrobbler-protocol endpoint."""

    def __init__(self, url, params, network, type="POST"):
        self.params = params
        self.type = type
        (self.hostname, self.subdir) = urllib.splithost(url[len("http:"):])
        self.network = network

    def execute(self):
        """Returns a string response of this request."""
        connection = httplib.HTTPConnection(self.hostname)

        # Build an application/x-www-form-urlencoded body by hand.
        data = []
        for name in self.params.keys():
            value = urllib.quote_plus(self.params[name])
            data.append('='.join((name, value)))
        data = "&".join(data)

        headers = {
            "Content-type": "application/x-www-form-urlencoded",
            "Accept-Charset": "utf-8",
            "User-Agent": "pylast" + "/" + __version__,
            "HOST": self.hostname
            }

        if self.type == "GET":
            connection.request("GET", self.subdir + "?" + data, headers = headers)
        else:
            connection.request("POST", self.subdir, data, headers)

        response = connection.getresponse().read()

        self._check_response_for_errors(response)

        return response

    def _check_response_for_errors(self, response):
        """When passed a string response it checks for errors, raising
        any exceptions as necessary."""
        lines = response.split("\n")
        status_line = lines[0]

        if status_line == "OK":
            return
        elif status_line == "BANNED":
            raise BannedClientError()
        elif status_line == "BADAUTH":
            raise BadAuthenticationError()
        elif status_line == "BADTIME":
            # BUG FIX: the submission protocol returns the literal status
            # words "BADTIME" and "BADSESSION"; the original compared
            # against the Python exception class names ("BadTimeError",
            # "BadSessionError"), so these failures were never detected.
            raise BadTimeError()
        elif status_line == "BADSESSION":
            raise BadSessionError()
        elif status_line.startswith("FAILED "):
            reason = status_line[status_line.find("FAILED ") + len("FAILED "):]
            raise ScrobblingError(reason)
class Scrobbler(object):
    """A class for scrobbling tracks to Last.fm"""

    # Handshake state, cached across submissions; filled by _do_handshake().
    session_id = None
    nowplaying_url = None
    submissions_url = None

    def __init__(self, network, client_id, client_version):
        self.client_id = client_id
        self.client_version = client_version
        self.username = network.username
        self.password = network.password_hash
        self.network = network

    def _do_handshake(self):
        """Handshakes with the server"""

        timestamp = str(int(time.time()))

        if self.password and self.username:
            # Standard authentication: auth token derived from the stored
            # password hash and the current timestamp.
            token = md5(self.password + timestamp)
        elif self.network.api_key and self.network.api_secret and self.network.session_key:
            if not self.username:
                self.username = self.network.get_authenticated_user().get_name()
            token = md5(self.network.api_secret + timestamp)
        # NOTE(review): if neither credential combination is available,
        # `token` is unbound below and a NameError is raised -- confirm
        # callers always supply one of the two.

        params = {"hs": "true", "p": "1.2.1", "c": self.client_id,
            "v": self.client_version, "u": self.username, "t": timestamp,
            "a": token}

        if self.network.session_key and self.network.api_key:
            params["sk"] = self.network.session_key
            params["api_key"] = self.network.api_key

        server = self.network.submission_server
        response = _ScrobblerRequest(server, params, self.network, "GET").execute().split("\n")

        # Handshake response lines: status, session id, now-playing URL,
        # submissions URL.
        self.session_id = response[1]
        self.nowplaying_url = response[2]
        self.submissions_url = response[3]

    def _get_session_id(self, new = False):
        """Returns a handshake. If new is true, then it will be requested from the server
        even if one was cached."""

        if not self.session_id or new:
            self._do_handshake()

        return self.session_id

    def report_now_playing(self, artist, title, album = "", duration = "", track_number = "", mbid = ""):
        """Report the given track as now playing.

        Protocol field names: s=session, a=artist, t=title, b=album,
        l=length, n=track number, m=MusicBrainz id.
        """

        params = {"s": self._get_session_id(), "a": artist, "t": title,
            "b": album, "l": duration, "n": track_number, "m": mbid}

        try:
            _ScrobblerRequest(self.nowplaying_url, params, self.network).execute()
        except BadSessionError:
            # Stale session: re-handshake once and retry the report.
            self._do_handshake()
            self.report_now_playing(artist, title, album, duration, track_number, mbid)

    def scrobble(self, artist, title, time_started, source, mode, duration, album="", track_number="", mbid=""):
        """Scrobble a track. parameters:
            artist: Artist name.
            title: Track title.
            time_started: UTC timestamp of when the track started playing.
            source: The source of the track
                SCROBBLE_SOURCE_USER: Chosen by the user (the most common value, unless you have a reason for choosing otherwise, use this).
                SCROBBLE_SOURCE_NON_PERSONALIZED_BROADCAST: Non-personalised broadcast (e.g. Shoutcast, BBC Radio 1).
                SCROBBLE_SOURCE_PERSONALIZED_BROADCAST: Personalised recommendation except Last.fm (e.g. Pandora, Launchcast).
                SCROBBLE_SOURCE_LASTFM: Last.fm (any mode). In this case, the 5-digit recommendation_key value must be set.
                SCROBBLE_SOURCE_UNKNOWN: Source unknown.
            mode: The submission mode
                SCROBBLE_MODE_PLAYED: The track was played.
                SCROBBLE_MODE_LOVED: The user manually loved the track (implies a listen)
                SCROBBLE_MODE_SKIPPED: The track was skipped (Only if source was Last.fm)
                SCROBBLE_MODE_BANNED: The track was banned (Only if source was Last.fm)
            duration: Track duration in seconds.
            album: The album name.
            track_number: The track number on the album.
            mbid: MusicBrainz ID.
        """

        # The [0] suffixes are the protocol's batch indices; a single track
        # is submitted per request here.
        params = {"s": self._get_session_id(), "a[0]": _string(artist), "t[0]": _string(title),
            "i[0]": str(time_started), "o[0]": source, "r[0]": mode, "l[0]": str(duration),
            "b[0]": _string(album), "n[0]": track_number, "m[0]": mbid}

        _ScrobblerRequest(self.submissions_url, params, self.network).execute()
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 18, 2010
@author: ivan
'''
import urllib2
import re
from base64 import encode
import urllib
from string import replace
def _engine_search(value):
value = replace(value, " ", "+")
host = "http://en.vpleer.ru/?q=" + value
print host
data = urllib2.urlopen(host)
return data.read()
def get_song_path(line):
    """Return the list of mp3 URLs found in *line*, or None when none match.

    BUG FIX: the dot before "mp3" was unescaped in the original pattern, so
    any character (e.g. "trackXmp3") satisfied it; it is now escaped.
    """
    path = re.findall(r"href=\"([\w#!:.?+=&%@!\-\/]*\.mp3)", line)
    if path:
        return path
def get_auname(line):
    """Return the names captured right after class="auname", or None."""
    found = re.findall(r"class=\"auname\">(\w*)", line)
    if found:
        return found
def get_ausong(line):
    """Return the song titles captured between class="auname"> and '<', or None."""
    found = re.findall(r"class=\"auname\">(\w*)<", line)
    if found:
        return found
def find_song_urls(song_title):
    """Search vpleer for *song_title* and return the mp3 URLs found (or None)."""
    html = _engine_search(song_title)
    return get_song_path(html)
#print "Result:", find_song_urls("Ария - Антихрист")
| Python |
# -*- coding: utf-8 -*-
# lyricwiki.py
#
# Copyright 2009 Amr Hassan <amr.hassan@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import simplejson, urllib, os, hashlib, time
def _download(args):
    """
    Encode *args* as a UTF-8 query string, perform the API request and
    return the raw response body.
    """
    base = "http://lyrics.wikia.com/api.php?"

    encoded_args = {}
    for key in args:
        encoded_args[key] = args[key].encode("utf-8")

    query = urllib.urlencode(encoded_args)
    return urllib.urlopen(base + query).read()
def _get_page_titles(artist, title):
    """
    Returns a list of candidate wiki page titles for the given song,
    starting with the two conventional "Artist:Title" forms and followed
    by the wiki's own search results.
    """
    args = {
        "action": "query",
        "list": "search",
        "srsearch": artist + " " + title,
        "format": "json",
    }

    titles = ["%s:%s" % (artist, title),
              "%s:%s" % (artist.title(), title.title())]

    content = simplejson.loads(_download(args))
    titles.extend(result["title"] for result in content["query"]["search"])

    return titles
def _get_lyrics(artist, title):
    # Try each candidate wiki page title until one yields lyrics;
    # returns None implicitly when none does.
    for page_title in _get_page_titles(artist, title):
        args = {"action": "query",
            "prop": "revisions",
            "rvprop": "content",
            "titles": page_title,
            "format": "json",
            }

        # popitem() grabs the single page entry without knowing its page id.
        revisions = simplejson.loads(_download(args))["query"]["pages"].popitem()[1]

        if not "revisions" in revisions:
            continue

        content = revisions["revisions"][0]["*"]

        if content.startswith("#Redirect"):
            # Follow a wiki redirect of the form: #Redirect [[Artist:Title]]
            n_title = content[content.find("[[") + 2:content.rfind("]]")]
            return _get_lyrics(*n_title.split(":"))

        # The lyrics body is wrapped in either <lyrics> or <lyric> tags.
        if "<lyrics>" in content:
            return content[content.find("<lyrics>") + len("<lyrics>") : content.find("</lyrics>")].strip()
        elif "<lyric>" in content:
            return content[content.find("<lyric>") + len("<lyric>") : content.find("</lyric>")].strip()
def get_lyrics(artist, title, cache_dir=None):
    """
    Get lyrics by artist and title
    set cache_dir to a valid (existing) directory
    to enable caching.
    """
    path = None
    if cache_dir and os.path.exists(cache_dir):
        # Cache key: sha1 of the lower-cased artist + title.
        digest = hashlib.sha1(artist.lower().encode("utf-8") + title.lower().encode("utf-8")).hexdigest()
        path = os.path.join(cache_dir, digest)
        if os.path.exists(path):
            # Bug fix: the cache file was opened but never closed.
            fp = open(path)
            try:
                return simplejson.load(fp)["lyrics"].strip()
            finally:
                fp.close()
    lyrics = _get_lyrics(artist, title)
    if path and lyrics:
        fp = open(path, "w")
        try:
            simplejson.dump({"time": time.time(), "artist": artist, "title": title,
                             "source": "lyricwiki", "lyrics": lyrics}, fp, indent=4)
        finally:
            fp.close()
    return lyrics
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 30, 2010
@author: ivan
'''
import gtk
import time
from foobnix.util import LOG
class PrefListModel():
    """Single-column gtk.ListStore of the user's virtual play-list names.

    Rows are editable in place; a rename moves the song list in the
    backing ``prefListMap`` dict to the new key. Row 0 (the default
    list) is protected from renaming.
    """
    # Column index of the list name.
    POS_NAME = 0
    def __init__(self, widget, prefListMap):
        # widget: gtk.TreeView to attach to; prefListMap: name -> list of beans.
        self.widget = widget
        self.prefListMap = prefListMap
        self.model = gtk.ListStore(str)
        renderer = gtk.CellRendererText()
        renderer.connect('edited', self.editRow)
        renderer.set_property('editable', True)
        column = gtk.TreeViewColumn(_("My play lists"), renderer, text=0, font=2)
        column.set_resizable(True)
        widget.append_column(column)
        widget.set_model(self.model)
    def removeSelected(self):
        """Remove the currently selected row (no-op when nothing is selected)."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        if selected:
            self.model.remove(selected)
    def editRow(self, w, event, value):
        """'edited' signal handler: rename the selected list.

        Only non-empty, unique names on rows > 0 are accepted; the songs
        are re-keyed from the old name to the new one in prefListMap.
        """
        # Capture the old name BEFORE the model cell is overwritten.
        beforeRename = unicode(self.getSelected())
        if value:
            i = self.getSelectedIndex()
            if i > 0 and not self.isContain(value):
                self.model[i][self.POS_NAME] = value
                """copy songs with new name"""
                print "beforeRename ", beforeRename, self.prefListMap.keys()
                datas = self.prefListMap[beforeRename]
                print datas
                del self.prefListMap[beforeRename]
                self.prefListMap[value] = datas
    def getSelectedIndex(self):
        """Return the selected top-level row index as int, or None."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        if selected:
            i = model.get_string_from_iter(selected)
            # A ':' in the string path would mean a nested row; this list is flat.
            if i.find(":") == -1:
                LOG.info("Selected index is ", i)
                return int(i)
        return None
    def isContain(self, name):
        """True when a row with the given name already exists."""
        for i in xrange(len(self.model)):
            if str(self.model[i][self.POS_NAME]) == name:
                return True
        return False
    def getSelected(self):
        """Return the selected list name, or None."""
        selection = self.widget.get_selection()
        model, selected = selection.get_selected()
        if selected:
            return model.get_value(selected, self.POS_NAME)
        else:
            return None
    def clear(self):
        """Drop all rows."""
        self.model.clear()
    def append(self, name):
        """Add a new list-name row at the end."""
        self.model.append([name])
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
from foobnix.model.entity import CommonBean
class VirturalLIstCntr():
    """Flat list of CommonBean items backing a virtual play list.

    Beans are stored in display order: a folder bean is followed by its
    children, which point back to it through ``bean.parent``.
    """
    def __init__(self):
        self.items = []
    def get_items(self):
        """Return the underlying item list (not a copy)."""
        return self.items
    def get_item_by_index(self, index):
        """Return the bean at *index* (raises IndexError when out of range)."""
        return self.items[index]
    def append(self, item):
        """Append a bean to the end of the list."""
        self.items.append(item)
    def getState(self):
        """State used for persistence: the item list itself."""
        return self.items
    def setState(self, items):
        """Restore a previously saved item list."""
        self.items = items
    def remove(self, index):
        """Remove the bean at *index*; out-of-range indexes are ignored.

        Bug fixes: the old bound check was ``index > len(self.items)``, so
        ``index == len`` (and negative indexes) slipped through and raised
        IndexError; the "INDEX TOO BIG" message was a dead string literal.
        """
        if index < 0 or index >= len(self.items):
            print("INDEX OUT OF RANGE " + str(index))
            return
        item = self.items.pop(index)
        print("DELETE " + str(item.name))
    def remove_with_childrens(self, index, parent=None):
        """Remove the bean at *index*; when it is a container (folder or
        google-help entry) also remove every following bean until one that
        belongs to the same *parent* is reached.

        Bug fix: the old for-loop kept indexing ``items[index]`` while the
        list shrank, raising IndexError when the container's children
        extended to the end of the list.
        """
        bean_type = self.get_item_by_index(index).type
        print(bean_type)
        if bean_type not in [CommonBean.TYPE_FOLDER, CommonBean.TYPE_GOOGLE_HELP]:
            self.remove(index)
            return
        self.remove(index)
        while index < len(self.items):
            print("index " + str(index) + " " + str(self.items[index].parent))
            if self.items[index].parent == parent:
                return
            self.remove(index)
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
import gobject
from foobnix.model.entity import CommonBean
class DirectoryModel():
POS_NAME = 0
POS_PATH = 1
POS_FONT = 2
POS_VISIBLE = 3
POS_TYPE = 4
POS_INDEX = 5
POS_PARENT = 5
def __init__(self, widget):
self.widget = widget
self.model = gtk.TreeStore(str, str, str, gobject.TYPE_BOOLEAN, str, int,str)
renderer = gtk.CellRendererText()
#renderer.connect('edited', self.editRow)
#renderer.set_property('editable', True)
print "ATTTR", renderer.get_property("attributes")
column = gtk.TreeViewColumn(_("Title"), renderer, text=0, font=2)
column.set_resizable(True)
widget.append_column(column)
filter = self.model.filter_new()
filter.set_visible_column(self.POS_VISIBLE)
widget.set_model(filter)
def editRow(self, w, event, value):
if value:
selection = self.widget.get_selection()
model, selected = selection.get_selected()
print "VAlue", value
print selected
i = model.get_string_from_iter(selected)
print "I ", i
if i.find(":") == -1:
print i
self.model[int(i)][self.POS_NAME] = value
def append(self, level, bean):
return self.model.append(level, [bean.name, bean.path, bean.font, bean.is_visible, bean.type, bean.index, bean.parent])
def clear(self):
self.model.clear()
def getModel(self):
return self.model
def setModel(self, model):
self.model = model
def getSelectedBean(self):
selection = self.widget.get_selection()
model, selected = selection.get_selected()
return self._getBeanByIter(model, selected)
def deleteSelected(self):
model, iter = self.widget.get_selection().get_selected()
if iter:
model.remove(iter)
def _getBeanByIter(self, model, iter):
if iter:
bean = CommonBean()
bean.name = model.get_value(iter, self.POS_NAME)
bean.path = model.get_value(iter, self.POS_PATH)
bean.font = model.get_value(iter, self.POS_FONT)
bean.visible = model.get_value(iter, self.POS_VISIBLE)
bean.type = model.get_value(iter, self.POS_TYPE)
bean.index = model.get_value(iter, self.POS_INDEX)
bean.parent = model.get_value(iter, self.POS_PARENT)
return bean
return None
def getBeenByPosition(self, position):
bean = CommonBean()
bean.name = self.model[position][ self.POS_NAME]
bean.path = self.model[position][ self.POS_PATH]
bean.type = self.model[position][ self.POS_TYPE]
bean.visible = self.model[position][ self.POS_VISIBLE]
bean.font = self.model[position][ self.POS_FONT]
bean.parent = self.model[position][self.POS_PARENT]
return bean
def getAllSongs(self):
result = []
for i in xrange(len(self.model)):
been = self.getBeenByPosition(i)
if been.type in [CommonBean.TYPE_MUSIC_FILE, CommonBean.TYPE_MUSIC_URL, CommonBean.TYPE_RADIO_URL]:
result.append(been)
return result
def getChildSongBySelected(self):
selection = self.widget.get_selection()
model, selected = selection.get_selected()
n = model.iter_n_children(selected)
iterch = model.iter_children(selected)
results = []
for i in xrange(n):
song = self._getBeanByIter(model, iterch)
if song.type != CommonBean.TYPE_FOLDER:
results.append(self._getBeanByIter(model, iterch))
iterch = model.iter_next(iterch)
return results
def filterByName(self, string):
if len(string.strip()) > 0:
for line in self.model:
name = line[self.POS_NAME].lower()
string = string.strip().lower()
if name.find(string) >= 0:
print "FIND :", name, string
line[self.POS_VISIBLE] = True
else:
line[self.POS_VISIBLE] = False
else:
for line in self.model:
line[self.POS_VISIBLE] = True
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 11, 2010
@author: ivan
'''
import os
from foobnix.util import LOG
from foobnix.directory.directory_model import DirectoryModel
from foobnix.model.entity import CommonBean
from foobnix.util.confguration import FConfiguration
from foobnix.util.file_utils import isDirectory, getExtenstion
from foobnix.util.mouse_utils import is_double_click
import gtk
from foobnix.directory.virtuallist_controller import VirturalLIstCntr
import copy
from foobnix.directory.pref_list_model import PrefListModel
import thread
import gettext
from threading import Lock
import time
gettext.install("foobnix", unicode=True)
class DirectoryCntr():
    """Controller of the left-hand directory tree.

    The same treeview serves three view modes selected by a combo box:
    the local music library (artist/album folders), the saved radio
    stations, and the user-defined virtual play lists. Activated items
    are pushed into the play list controller.
    """
    # Combo-box indexes for the three view modes.
    VIEW_ARTIST_ALBUM = 0
    VIEW_RADIO_STATION = 1
    VIEW_VIRTUAL_LISTS = 2
    # The always-present virtual list (row 0, cannot be renamed/removed).
    DEFAULT_LIST = "Default list";
    #DEFAULT_LIST_NAME = _("Default list");
    def __init__(self, gxMain, playlistCntr, radioListCntr, virtualListCntr):
        self.playlistCntr = playlistCntr
        self.radioListCntr = radioListCntr
        self.virtualListCntr = virtualListCntr
        widget = gxMain.get_widget("direcotry_treeview")
        self.model = DirectoryModel(widget)
        widget.connect("button-press-event", self.onMouseClick)
        widget.connect("key-release-event", self.onTreeViewDeleteItem)
        #widget.connect("drag-begin", self.all)
        #widget.connect("drag-data-get", self.all)
        #widget.connect("drag-data-received", self.all)
        #widget.connect("drag-drop", self.all)
        widget.connect("drag-end", self.on_drag_get)
        #widget.connect("drag-failed", self.all)
        #widget.connect("drag-leave", self.all)
        "Pref lists "
        # Mapping: virtual list name -> list of CommonBean songs.
        self.prefListMap = {self.DEFAULT_LIST : []}
        self.currentListMap = self.DEFAULT_LIST
        prefList = gxMain.get_widget("treeview3")
        prefList.connect("button-press-event", self.onPreflListMouseClick)
        prefList.connect("key-release-event", self.onPreflListDeleteItem)
        prefList.connect("cursor-changed", self.onPreflListSelect)
        self.prefModel = PrefListModel(prefList, self.prefListMap)
        self.mainNoteBook = gxMain.get_widget("main_notebook")
        self.leftNoteBook = gxMain.get_widget("left_notebook")
        self.filter = gxMain.get_widget("filter-combobox-entry")
        #self.filter.connect("key-press-event", self.onFiltering)
        self.filter.connect("key-release-event", self.onFiltering)
        # View-mode combo box with a single text column.
        self.view_list = gxMain.get_widget("view_list_combobox")
        cell = gtk.CellRendererText()
        self.view_list.pack_start(cell, True)
        self.view_list.add_attribute(cell, 'text', 0)
        liststore = gtk.ListStore(str)
        self.view_list.set_model(liststore)
        self.view_list.append_text(_("by artist/album"))
        self.view_list.append_text(_("by radio/stations"))
        self.view_list.append_text(_("by play lists"))
        self.view_list.set_active(0)
        self.view_list.connect("changed", self.onChangeView)
        self.saved_model = None
    def getState(self):
        """State persisted between runs: the whole name -> songs map."""
        return self.prefListMap
    def setState(self, preflists):
        """Restore the saved virtual lists and list their names."""
        self.prefListMap = preflists
        self.prefModel.prefListMap = preflists
        for key in self.prefListMap:
            LOG.info("add key to virtual list", unicode(key))
            self.prefModel.append(key)
    def getPrefListBeans(self, preflist=DEFAULT_LIST):
        """Songs of the named virtual list, or None for an unknown name."""
        if preflist in self.prefListMap:
            return self.prefListMap[preflist]
        return None
    def appendToPrefListBeans(self, beans, preflist=DEFAULT_LIST):
        """Append *beans* to the named list, creating the list when missing."""
        if not preflist in self.prefListMap:
            print "Key not found"
            self.prefListMap[preflist] = []
        if beans:
            for bean in beans:
                self.prefListMap[preflist].append(bean)
    def clearPrefList(self, listName):
        """Empty (but keep) the named virtual list."""
        if listName in self.prefListMap:
            self.prefListMap[listName] = []
    def onPreflListSelect(self, *args):
        """Cursor moved in the names list: show that virtual list."""
        self.view_list.set_active(self.VIEW_VIRTUAL_LISTS)
        self.currentListMap = self.prefModel.getSelected()
        if self.currentListMap in self.prefListMap:
            beans = self.prefListMap[self.currentListMap]
            self.display_virtual(beans)
        else:
            self.clear()
    def onPreflListMouseClick(self, w, event):
        """Right double-click creates a new, empty play list."""
        if event.button == 3 and event.type == gtk.gdk._2BUTTON_PRESS: #@UndefinedVariable
            LOG.debug("Create new paly list")
            unknownListName = _("New play list")
            if not self.prefModel.isContain(unknownListName):
                self.prefModel.append(unknownListName)
                self.prefListMap[unknownListName] = []
    def onPreflListDeleteItem(self, w, event):
        """Delete key removes the selected list (row 0 is protected)."""
        if event.type == gtk.gdk.KEY_RELEASE: #@UndefinedVariable
            #Enter pressed
            print event.keyval
            print event.hardware_keycode
            # NOTE(review): 119/107 are hardware Delete keycodes on this
            # layout; hardware codes are keyboard-dependent -- confirm.
            if event.hardware_keycode == 119 or event.hardware_keycode == 107:
                if self.prefModel.getSelectedIndex() > 0:
                    del self.prefListMap[unicode(self.prefModel.getSelected())]
                    self.prefModel.removeSelected()
                    self.clear()
    def all(self, *args):
        """Debug helper: dump the arguments of any signal."""
        for arg in args:
            print args
    def getModel(self):
        """The DirectoryModel wrapper around the tree store."""
        return self.model
    def on_drag_get(self, *args):
        """Dragging out of the tree appends the selection to the play list."""
        self.populate_playlist(append=True)
    def set_active_view(self, view_type):
        """Programmatically select one of the VIEW_* modes."""
        self.view_list.set_active(view_type)
    def onChangeView(self, *args):
        """Rebuild the tree for the newly selected view mode."""
        self.leftNoteBook.set_current_page(0)
        active_index = self.view_list.get_active()
        if active_index == self.VIEW_ARTIST_ALBUM:
            self.clear()
            self.addAll()
        elif active_index == self.VIEW_RADIO_STATION:
            self.clear()
            beans = self.radioListCntr.getState()[0]
            print beans
            for bean in beans:
                # NOTE(review): positional args map to CommonBean(name, path,
                # type, is_visible, color) -- so type becomes "normal" and
                # color becomes TYPE_MUSIC_URL; looks unintended, confirm.
                self.model.append(None, CommonBean(bean.name, bean.path, "normal", True, CommonBean.TYPE_MUSIC_URL))
        elif active_index == self.VIEW_VIRTUAL_LISTS:
            items = self.getPrefListBeans(self.DEFAULT_LIST)
            self.display_virtual(items)
    def append_virtual(self, beans=None):
        """Append *beans* to the current virtual list and redisplay it."""
        LOG.debug("Current virtual list", self.currentListMap)
        if not self.currentListMap:
            self.currentListMap = self.DEFAULT_LIST
        self.appendToPrefListBeans(beans, self.currentListMap)
        items = self.getPrefListBeans(self.currentListMap)
        self.display_virtual(items)
    def display_virtual(self, items):
        """Render a virtual list: a bold "[name]" label row first, then the
        items; folder items become parents of the rows that follow them."""
        self.clear()
        "Displya list title"
        self.model.append(None, CommonBean(name="[" + self.currentListMap + "]", path=None, font="bold", is_visible=True, type=CommonBean.TYPE_LABEL, parent=None, index=0))
        if not items:
            return None
        parent = None
        i = 1
        for item in items:
            print item
            if item.parent == None:
                parent = self.model.append(None, CommonBean(name=item.name, path=item.path, font="normal", is_visible=True, type=item.type, parent=item.parent, index=i))
            else:
                self.model.append(parent, CommonBean(name=item.name, path=item.path, font="normal", is_visible=True, type=item.type, parent=item.parent, index=i))
            i += 1
    def onTreeViewDeleteItem(self, w, event):
        """Delete key in the tree removes the selected virtual-list entry
        (and its children via the virtual list controller)."""
        if self.view_list.get_active() != self.VIEW_VIRTUAL_LISTS:
            return
        print event
        if event.type == gtk.gdk.KEY_RELEASE: #@UndefinedVariable
            #Enter pressed
            print event.keyval
            print event.hardware_keycode
            if event.hardware_keycode == 119 or event.hardware_keycode == 107:
                print "Delete"
                bean = self.model.getSelectedBean()
                print bean.index
                # index 0 is the "[list name]" label row -- never removable.
                if bean.index > 0:
                    self.virtualListCntr.items = self.prefListMap[self.currentListMap]
                    self.virtualListCntr.remove_with_childrens(bean.index - 1, bean.parent)
                    self.append_virtual()
    def onFiltering(self, *args):
        """Key released in the search entry: filter the tree by name."""
        text = self.filter.get_children()[0].get_text()
        print "filtering by text", text
        self.model.filterByName(text)
    def onMouseClick(self, w, event):
        """Left double-click plays the selection; right double-click is reserved."""
        if event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS: #@UndefinedVariable
            self.populate_playlist()
        if event.button == 3 and event.type == gtk.gdk._2BUTTON_PRESS: #@UndefinedVariable
            print "Create new"
            #self.append_virtual([CommonBean(name="New Artist", type=CommonBean.TYPE_FOLDER, parent=None)])
    def populate_playlist(self, append=False):
        """Send the current selection to the play list controller.

        Folders contribute their child songs, the label row contributes
        every song in the view, anything else contributes itself. With
        append=False the play list is replaced, otherwise extended.
        """
        print "Drug begin"
        directoryBean = self.model.getSelectedBean()
        if not directoryBean:
            return
        print "Select: ", directoryBean.name, directoryBean.type
        print "Drug type", directoryBean.type
        if directoryBean.type in [CommonBean.TYPE_FOLDER, CommonBean.TYPE_GOOGLE_HELP] :
            songs = self.model.getChildSongBySelected()
            print "Select songs", songs
            if not songs:
                return
            if append:
                self.playlistCntr.appendPlaylist(songs)
            else:
                self.playlistCntr.setPlaylist(songs)
        elif directoryBean.type == CommonBean.TYPE_LABEL:
            songs = self.model.getAllSongs()
            if append:
                self.playlistCntr.appendPlaylist(songs)
            else:
                self.playlistCntr.setPlaylist(songs)
        else:
            if append:
                self.playlistCntr.appendPlaylist([directoryBean])
            else:
                self.playlistCntr.setPlaylist([directoryBean])
        #print "PAGE", self.leftNoteBook.get_current_page()
        print "SET PAGE", self.mainNoteBook.set_current_page(0)
    def getALLChildren(self, row, string):
        # NOTE(review): references self.POS_NAME / self.POS_VISIBLE which are
        # defined on DirectoryModel, not on this controller -- calling this
        # would raise AttributeError; appears to be dead code.
        for child in row.iterchildren():
            name = child[self.POS_NAME].lower()
            if name.find(string) >= 0:
                print "FIND SUB :", name, string
                child[self.POS_VISIBLE] = True
            else:
                child[self.POS_VISIBLE] = False
    def updateDirectoryByPath(self, path):
        """Point the library view at *path* and rebuild the tree."""
        print "Update path", path
        self.musicFolder = path
        self.model.clear()
        self.addAll()
    def clear(self):
        """Drop every row from the tree model."""
        self.model.clear()
    def getAllSongsByPath(self, path):
        """CommonBeans for the supported audio files directly inside *path*
        (non-recursive), in sorted name order."""
        dir = os.path.abspath(path)
        list = os.listdir(dir)
        list = sorted(list)
        result = []
        for file_name in list:
            if getExtenstion(file_name) not in FConfiguration().supportTypes:
                continue
            full_path = path + "/" + file_name
            if not isDirectory(full_path):
                bean = CommonBean(name=file_name, path=full_path, type=CommonBean.TYPE_MUSIC_FILE)
                result.append(bean)
        LOG.debug(result)
        return result
    # Disabled cache for the built tree (see commented code in addAllThread).
    cachModel = []
    def addAllThread(self):
        """Build the library tree from self.musicFolder.

        NOTE(review): self.musicFolder is only set by updateDirectoryByPath;
        calling addAll() before that raises AttributeError -- confirm the
        intended call order.
        """
        """
        if self.cachModel:
            for bean in self.cachModel:
                self.model.append(None, bean)
            return True
        """
        level = None;
        self.go_recursive(self.musicFolder, level)
        if not len(self.model.getModel()):
            self.model.append(level, CommonBean(name=_("Music not found in ") + FConfiguration().mediaLibraryPath, path=None, font="bold", is_visible=True, type=CommonBean.TYPE_FOLDER, parent=level))
        else:
            """
            for i in xrange(len(self.model.getModel())):
                bean = self.model.getBeenByPosition(i)
                self.cachModel.append(bean)
            """
    def addAll(self):
        """Populate the library tree (synchronously; threading disabled)."""
        #thread.start_new_thread(self.addAllThread, ())
        self.addAllThread()
    def sortedDirsAndFiles(self, path, list):
        """Order *list* so all directories (sorted) precede all files (sorted)."""
        files = []
        directories = []
        #First add dirs
        for file in list:
            full_path = path + "/" + file
            if isDirectory(full_path):
                directories.append(file)
            else:
                files.append(file)
        return sorted(directories) + sorted(files)
    def isDirectoryWithMusic(self, path):
        """True when *path* is a directory that (transitively) contains at
        least one file with a supported audio extension."""
        if isDirectory(path):
            dir = os.path.abspath(path)
            list = None
            try:
                list = os.listdir(dir)
            except OSError, e:
                # Unreadable directory: treat as empty.
                print "Can'r get list of dir", e
            if not list:
                return False
            for file in list:
                full_path = path + "/" + file
                if isDirectory(full_path):
                    if self.isDirectoryWithMusic(full_path):
                        return True
                else:
                    if getExtenstion(file) in FConfiguration().supportTypes:
                        return True
        return False
    def go_recursive(self, path, level):
        """Depth-first walk: add a bold folder row for every directory that
        holds music, and a normal row for every supported audio file."""
        dir = os.path.abspath(path)
        list = os.listdir(dir)
        list = self.sortedDirsAndFiles(path, list)
        for file in list:
            full_path = path + "/" + file
            # Skip plain files with unsupported extensions early.
            if not isDirectory(full_path) and getExtenstion(file) not in FConfiguration().supportTypes:
                continue
            if self.isDirectoryWithMusic(full_path):
                #LOG.debug("directory", file)
                sub = self.model.append(level, CommonBean(name=file, path=full_path, font="bold", is_visible=True, type=CommonBean.TYPE_FOLDER, parent=level))
                self.go_recursive(full_path, sub)
            else:
                if not isDirectory(full_path):
                    self.model.append(level, CommonBean(name=file, path=full_path, font="normal", is_visible=True, type=CommonBean.TYPE_MUSIC_FILE, parent=level))
                    #LOG.debug("file", file)
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
from mutagen.easyid3 import EasyID3
from mutagen.mp3 import MP3, HeaderNotFoundError
from mutagen import File
import os
import gtk
class CommonBean():
    """Universal item of every list/tree in the application: a local song,
    a stream URL, a radio station, a folder or a plain label row.

    The ``name`` of a song conventionally has the form "Artist - Title".
    """
    TYPE_FOLDER = "TYPE_FOLDER"
    TYPE_LABEL = "TYPE_LABEL"
    TYPE_GOOGLE_HELP = "TYPE_GOOGLE_HELP"
    TYPE_MUSIC_FILE = "TYPE_MUSIC_FILE"
    TYPE_MUSIC_URL = "TYPE_MUSIC_URL"
    TYPE_RADIO_URL = "TYPE_RADIO_URL"
    # Song tag attributes, filled lazily by _getMp3Tags(); class-level
    # defaults so unscanned beans still expose empty strings.
    album = ""
    artist = ""
    title = ""
    date = ""
    genre = ""
    tracknumber = ""
    def __init__(self, name=None, path=None, type=None, is_visible=True, color=None, font="normal", index= -1, parent=None):
        self.name = name
        self.path = path
        self.type = type
        self.icon = None
        self.color = color
        self.index = index
        self.time = None
        self.is_visible = is_visible
        self.font = font
        self.parent = parent
        #self._getMp3Tags()
    def getArtist(self):
        """Artist part of "Artist - Title", or None when there is no separator."""
        s = self.name.split(" - ")
        if len(s) > 1:
            return s[0]
        return None
    def getTitle(self):
        """Title part of "Artist - Title", or None when there is no separator."""
        s = self.name.split(" - ")
        if len(s) > 1:
            return s[1]
    def setIconPlaying(self):
        """Mark the bean as currently playing."""
        self.icon = gtk.STOCK_GO_FORWARD
    def setIconErorr(self):
        """Mark the bean as failed to play."""
        self.icon = gtk.STOCK_DIALOG_ERROR
    def setIconNone(self):
        """Clear any state icon."""
        self.icon = None
    def getTitleDescription(self):
        """Window-title text built from the ID3 tags, falling back to name."""
        if self.title and self.artist and self.album:
            return self.artist + " - [" + self.album + "] #" + self.tracknumber + " " + self.title
        else:
            return self.name
    def getPlayListDescription(self):
        """Play-list row text built from the ID3 tags, falling back to name.

        Bug fix: parent is usually None, and concatenating it raised
        TypeError; an empty string is used instead.
        """
        if self.title and self.album:
            return self.name + " - " + self.title + " (" + self.album + ")" + (self.parent or "")
        return self.name
    def _getMp3Tags(self):
        """Best-effort ID3 tag extraction for local music files via mutagen."""
        audio = None
        if not self.path:
            return
        if not self.type:
            return
        if self.type != self.TYPE_MUSIC_FILE:
            return
        if not os.path.exists(self.path):
            return
        try:
            audio = MP3(self.path, ID3=EasyID3)
        except HeaderNotFoundError:
            try:
                # Fall back to mutagen's generic loader.
                audio = File(self.path)
            except HeaderNotFoundError:
                pass
        if audio:
            # `in` replaces the deprecated dict.has_key().
            for tag in ("album", "artist", "title", "date", "genre", "tracknumber"):
                if tag in audio:
                    setattr(self, tag, audio[tag][0])
    def __str__(self):
        return "Common Bean :" + self.__contcat(
            "name:", self.name,
            "path:", self.path,
            "type:", self.type,
            "icon:", self.icon,
            "color:", self.color,
            "index:", self.index,
            "time:", self.time,
            "is_visible:", self.is_visible,
            "font:", self.font,
            "parent", self.parent)
    def __contcat(self, *args):
        """Space-join the str() of every argument."""
        result = ""
        for arg in args:
            result += " " + str(arg)
        return result
| Python |
import gtk
def is_double_click(event):
    """True when *event* is a left-button (button 1) double click."""
    return event.button == 1 and event.type == gtk.gdk._2BUTTON_PRESS #@UndefinedVariable
'''
Created on Feb 26, 2010
@author: ivan
'''
def debug(*args):
    """Print a DEBUG line with the arguments space-joined.

    Bug fix: the old ``print "DEBUG:", args`` printed the raw tuple repr
    (e.g. "DEBUG: ('x', 1)") instead of the message text.
    """
    print("DEBUG: " + " ".join(str(a) for a in args))
def info(*args):
    """Print an INFO line with the arguments space-joined.

    Bug fix: the old ``print "INFO:", args`` printed the raw tuple repr
    instead of the message text.
    """
    print("INFO: " + " ".join(str(a) for a in args))
def error(*args):
    """Print an ERROR line with the arguments space-joined.

    Bug fix: the old ``print "ERROR:", args`` printed the raw tuple repr
    instead of the message text.
    """
    print("ERROR: " + " ".join(str(a) for a in args))
| Python |
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
    """
    A generic daemon class.
    Usage: subclass the Daemon class and override the run() method
    """
    def __init__(self, pidfile):
        # Path of the pid file used to detect an already-running instance.
        self.pidfile = pidfile
    def daemonize(self):
        """
        do the UNIX double-fork magic, see Stevens' "Advanced
        Programming in the UNIX Environment" for details (ISBN 0201563177)
        http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        """
        try:
            pid = os.fork()
            if pid > 0:
                # exit first parent
                # NOTE(review): the classic recipe calls sys.exit(0) here; with
                # 'pass' the parent process keeps running alongside the child.
                # The commented sys.exit below suggests this is deliberate for
                # this app -- confirm before "fixing".
                pass
        except OSError, e:
            # NOTE(review): on fork failure execution continues anyway;
            # the recipe exits here -- confirm intended.
            sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
        # decouple from parent environment
        os.chdir("/")
        os.setsid()
        os.umask(0)
        # do second fork
        try:
            pid = os.fork()
            if pid > 0:
                # exit from second parent
                #sys.exit(0)
                pass
        except OSError, e:
            sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
        # write pidfile
        atexit.register(self.delpid)
        pid = str(os.getpid())
        file(self.pidfile, 'w+').write("%s\n" % pid)
    def delpid(self):
        # atexit hook: drop the pid file on interpreter shutdown.
        os.remove(self.pidfile)
    def start(self):
        """
        Start the daemon
        """
        # Check for a pidfile to see if the daemon already runs
        try:
            pf = file(self.pidfile, 'r')
            pid = int(pf.read().strip())
            pf.close()
            print "running"
        except IOError:
            # No pid file: not running yet.
            pid = None
        if pid:
            message = "pidfile %s already exist. Foobnix already running?\n"
            sys.stderr.write(message % self.pidfile)
            print "rutting"
            return False
        # Start the daemon
        self.daemonize()
        #self.run()
        print "OK"
        return True
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
import os
def isDirectory(path):
    """True when *path* names an existing directory."""
    result = os.path.isdir(path)
    return result
def getExtenstion(fileName):
    """Return the lower-cased extension of *fileName*, dot included.

    Bug fix: the old ``fileName[-4:]`` dropped the dot of five-character
    extensions, so ".flac" files produced "flac" and never matched the
    dotted entries in FConfiguration().supportTypes. For a name without
    an extension an empty string is returned instead of arbitrary tail
    characters.
    """
    return os.path.splitext(fileName)[1].lower()
| Python |
'''
Created on Feb 26, 2010
@author: ivan
'''
def convert_ns(time_int):
    """Convert a gstreamer time in nanoseconds to a "H:MM:SS" / "MM:SS" string.

    Hours appear only for durations >= 1 hour; minutes and seconds are
    zero-padded to two digits.  Floor division is used explicitly so the
    function also behaves correctly under ``from __future__ import
    division`` (the old ``/`` relied on Python 2 integer division).
    """
    time_int = time_int // 1000000000
    time_str = ""
    if time_int >= 3600:
        _hours = time_int // 3600
        time_int = time_int - (_hours * 3600)
        time_str = str(_hours) + ":"
    if time_int >= 600:
        _mins = time_int // 60
        time_int = time_int - (_mins * 60)
        time_str = time_str + str(_mins) + ":"
    elif time_int >= 60:
        # Single-digit minutes get a leading zero.
        _mins = time_int // 60
        time_int = time_int - (_mins * 60)
        time_str = time_str + "0" + str(_mins) + ":"
    else:
        time_str = time_str + "00:"
    if time_int > 9:
        time_str = time_str + str(time_int)
    else:
        time_str = time_str + "0" + str(time_int)
    return time_str
'''
Created on Mar 3, 2010
@author: ivan
'''
import urllib2
def getStationPath(url):
    """Resolve a playlist (.pls/.m3u) *url* to its first stream URL.

    Returns None for an empty url, an unreachable url, or a playlist
    with no File entries.

    Bug fix: the old unconditional ``return urls[0]`` raised IndexError
    whenever the download failed or the playlist was empty.
    """
    print("get station")
    if not url:
        return None
    urls = []
    try:
        connect = urllib2.urlopen(url)
        data = connect.read()
        urls = getStations(data, urls)
    except:
        # Deliberate best-effort: any network/parse failure is just reported.
        print("INCORRECT URL ERROR .... " + str(url))
    if not urls:
        return None
    return urls[0]
def getStations(data, urls):
    """Append every "File*=URL" entry found in playlist *data* to *urls*.

    Tokens are whitespace-separated; the "File" prefix is matched
    case-insensitively.

    Bug fix: the old code lowercased the whole line before slicing, so
    the appended URL itself was mangled -- URL paths are case-sensitive.
    Only the prefix comparison is lowercased now.
    """
    for token in data.rsplit():
        if token.lower().startswith("file"):
            index = token.find("=")
            url = token[index + 1:]
            print(url)
            urls.append(url)
    return urls
def getPlsName(_file_url):
    """Return the file-name part of *_file_url*: everything after the
    last '/', or the whole string when there is no slash."""
    return _file_url.rsplit("/", 1)[-1]
def getFirst(self, urls):
    """Return the first element of *urls*, or None when it is empty/None.

    NOTE: the leading ``self`` parameter is a leftover on this
    module-level function; it is kept for interface compatibility.
    """
    return urls[0] if urls else None
| Python |
# -*- coding: utf-8 -*-
'''
Created on Feb 27, 2010
@author: ivan
'''
import pickle
import os
from foobnix.util import LOG
import tempfile
VERSION = "0.1.0"
class Singleton(type):
    """Metaclass that caches a single instance per class.

    The first call of the class constructs and stores the instance in
    the class attribute ``instance``; every later call returns it.
    """
    def __init__(cls, name, bases, namespace):
        super(Singleton, cls).__init__(name, bases, namespace)
        cls.instance = None
    def __call__(cls, *args, **kwargs):
        if cls.instance is None:
            cls.instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls.instance
class FConfiguration:
__metaclass__ = Singleton
CFG_FILE = os.getenv("HOME") + "/foobnix_conf.pkl"
def __init__(self, is_load_file=True):
self.mediaLibraryPath = tempfile.gettempdir()
self.onlineMusicPath = tempfile.gettempdir()
self.supportTypes = [".mp3", ".ogg", ".ape", ".flac",".wma"]
self.isRandom = False
self.isRepeat = True
self.isPlayOnStart = False
self.savedPlayList = []
self.savedRadioList = []
self.savedSongIndex = 0
self.volumeValue = 50.0
self.vpanelPostition = 500
self.hpanelPostition = 350
self.playlistState = None
self.radiolistState = None
self.virtualListState = {"Default list" : []}
self.is_save_online = False
self.song_source_relevance_algorithm = 0
self.online_tab_show_by = 0
self.vk_login = "qax@bigmir.net"
self.vk_password = "foobnix"
self.lfm_login = "foobnix"
self.lfm_password = "foobnix"
self.API_KEY = "bca6866edc9bdcec8d5e8c32f709bea1"
self.API_SECRET = "800adaf46e237805a4ec2a81404b3ff2"
instance = self._loadCfgFromFile(is_load_file)
if instance:
try:
self.virtualListState = instance.virtualListState
self.playlistState = instance.playlistState
self.radiolistState = instance.radiolistState
self.mediaLibraryPath = instance.mediaLibraryPath
self.isRandom = instance.isRandom
self.isRepeat = instance.isRepeat
self.isPlayOnStart = instance.isPlayOnStart
self.savedPlayList = instance.savedPlayList
self.savedSongIndex = instance.savedSongIndex
self.volumeValue = instance.volumeValue
self.vpanelPostition = instance.vpanelPostition
self.hpanelPostition = instance.hpanelPostition
self.savedRadioList = instance.savedRadioList
self.is_save_online = instance.is_save_online
self.onlineMusicPath = instance.onlineMusicPath
self.vk_login = instance.vk_login
self.vk_password = instance.vk_password
self.lfm_login = instance.lfm_login
self.lfm_password = instance.lfm_password
except AttributeError:
LOG.debug("Configuraton attributes are changed")
os.remove(self.CFG_FILE)
print "LOAD CONFIGS"
self.printArttibutes()
def save(self):
print "SAVE CONFIGS"
self.printArttibutes()
FConfiguration()._saveCfgToFile()
def printArttibutes(self):
for i in dir(self):
if not i.startswith("__"):
print i, getattr(self, i)
def _saveCfgToFile(self):
#conf = FConfiguration()
save_file = file(self.CFG_FILE, 'w')
pickle.dump(self, save_file)
save_file.close()
LOG.debug("Save configuration")
def _loadCfgFromFile(self, is_load_file):
if not is_load_file:
return
try:
load_file = file(self.CFG_FILE, 'r')
except IOError:
LOG.debug("file not exists")
return None
try:
conf = pickle.load(load_file)
except type:
print type
LOG.debug("Error loading configuration")
load_file.close()
LOG.debug("Delete file")
os.remove(self.CFG_FILE)
conf = FConfiguration(False)
return conf
load_file.close()
LOG.debug("Load configuration")
return conf
| Python |
# -*- coding: utf-8 -*-
'''
Created on Mar 11, 2010
@author: ivan
'''
from foobnix.lyric.lyr import get_lyrics
import thread
import gtk
from foobnix.util import LOG
from foobnix.util.confguration import FConfiguration
class PlayerWidgetsCntl():
    '''
    Controller for the static playback widgets: volume slider, seek bar,
    lyrics text view, status bar and the transport buttons.
    '''
    def __init__(self, gxMain, playerCntr):
        self.playerCntr = playerCntr
        self.volume = gxMain.get_widget("volume_hscale")
        self.volume.connect("change-value",self.onVolumeChange)
        self.seek = gxMain.get_widget("seek_eventbox")
        self.seek.connect("button-press-event",self.onSeek)
        self.seekBar = gxMain.get_widget("seek_progressbar")
        # NOTE(review): timeLabel is bound to the same "seek_progressbar"
        # widget as seekBar -- looks like a copy/paste slip; confirm.
        self.timeLabel = gxMain.get_widget("seek_progressbar")
        self.vpanel = gxMain.get_widget("vpaned1")
        self.hpanel = gxMain.get_widget("hpaned1")
        self.lyric = gxMain.get_widget("lyric_textview")
        self.textbuffer = self.lyric.get_buffer()
        self.statusbar = gxMain.get_widget("statusbar")
        self.lyric.set_editable(False)
        # Glade signal-name -> handler map for the transport buttons.
        navigationEvents = {
            "on_play_button_clicked" :self.onPlayButton,
            "on_stop_button_clicked" :self.onStopButton,
            "on_pause_button_clicked" :self.onPauseButton,
            "on_prev_button_clicked" :self.onPrevButton,
            "on_next_button_clicked": self.onNextButton
        }
        gxMain.signal_autoconnect(navigationEvents)
    def setStatusText(self, text):
        """Push *text* onto the status bar (context id 0)."""
        self.statusbar.push(0,text)
    def setLiric(self, song):
        """Fetch and display lyrics for *song* on a background thread."""
        thread.start_new_thread(self._setLiricThread, (song,))
    def _setLiricThread(self, song):
        """Worker: look up lyrics and fill the text buffer."""
        # Strip a known audio extension from the title before the lookup.
        # NOTE(review): getTitle() may return None for names without
        # " - ", making ""+None raise TypeError here -- confirm callers.
        title = ""+song.getTitle()
        for extension in FConfiguration().supportTypes:
            if title.endswith(extension):
                title = title.replace(extension,"")
                break
        print "Get lirics for:", song.getArtist(), title
        if song.getArtist() and song.getTitle():
            try:
                text = get_lyrics(song.getArtist(), title)
            except:
                # Best-effort: any network/parse failure is only reported.
                self.setStatusText(_("Connection lyrics error"))
                LOG.error("Connection lyrics error")
                return None
            if text:
                self.textbuffer.set_text("*** "+ song.getArtist() +" - " +title +" ***\n" +text)
            else:
                self.textbuffer.set_text("Not Found lyrics for "+song.getArtist() +" - "+ title + "\n")
    def onPlayButton(self, *a):
        """Play button -> switch the player to the playing state."""
        self.playerCntr.playState()
    def onStopButton(self, *a):
        """Stop button -> stop playback."""
        self.playerCntr.stopState()
    def onPauseButton(self, *a):
        """Pause button -> pause playback."""
        self.playerCntr.pauseState()
    def onPrevButton(self, *a):
        """Previous button -> previous song."""
        self.playerCntr.prev()
    def onNextButton(self, *a):
        """Next button -> next song."""
        self.playerCntr.next()
    def onSeek(self, widget, event):
        """Left click on the seek bar: jump to the clicked percentage."""
        if event.button == 1:
            width = self.seek.allocation.width
            x = event.x
            seekValue = (x + 0.0) / width * 100
            print seekValue
            self.playerCntr.setSeek(seekValue);
    def onVolumeChange(self, widget, obj3, volume):
        """Volume slider moved: map the 0-100 scale to the player's 0.0-1.0."""
        self.playerCntr.setVolume(volume / 100)
    pass # end of class
'''
Created on Mar 11, 2010
@author: ivan
'''
import gst
import gtk
import time
import thread
from foobnix.util.time_utils import convert_ns
from foobnix.model.entity import CommonBean
from foobnix.util import LOG
from foobnix.util.confguration import FConfiguration
class PlayerController:
MODE_RADIO = "RADIO"
MODE_PLAY_LIST = "PLAY_LIST"
MODE_ONLINE_LIST = "ONLINE_LIST"
    def __init__(self):
        """Set up a local-file playbin, an empty queue and play-list mode."""
        self.player = self.playerLocal()
        self.songs = []
        self.cIndex = 0
        self.time_format = gst.Format(gst.FORMAT_TIME)
        self.volume = 0
        self.mode = self.MODE_PLAY_LIST
        pass
    def set_mode(self, mode):
        """Switch between MODE_PLAY_LIST / MODE_RADIO / MODE_ONLINE_LIST."""
        self.mode = mode
    def registerWindowController(self, windowController):
        """Late-bound collaborator: used for window-title updates."""
        self.windowController = windowController
    def registerTrayIcon(self, trayIcon):
        """Late-bound collaborator: used for tray tooltip updates."""
        self.trayIcon = trayIcon
    def registerPlaylistCntr(self, playlistCntr):
        """Late-bound collaborator: source of next/prev songs in list mode."""
        self.playlistCntr = playlistCntr
    def registerOnlineCntr(self, onlineCntr):
        """Late-bound collaborator: song source/resolver in online mode."""
        self.onlineCntr = onlineCntr
    def registerWidgets(self, widgets):
        """Late-bound collaborator: the PlayerWidgetsCntl for UI feedback."""
        self.widgets = widgets
count = 0
    def playSong(self, song):
        """Stop anything playing and start *song*.

        Resolves a missing path through the online controller, skips to
        the next song when resolution fails (at most 5 times in a row),
        then builds the right gst pipeline for the bean type and starts
        the progress thread for seekable sources.
        """
        print "play song"
        self.stopState()
        if not song:
            LOG.info("NULL song can't playing")
            return
        print "Path before", song.path
        #Try to set resource
        if song.path == None or song.path == "":
            print "PL CNTR SET PATH"
            self.onlineCntr.setSongResource(song)
        print "Path after", song.path
        if song.path == None or song.path == "":
            # Resolution failed: count consecutive failures and give up
            # after 5 so a dead list does not loop forever.
            self.count += 1
            print "SONG NOT FOUND", song.name
            print "Count is", self.count
            if self.count > 5:
                return
            return self.next()
        self.count = 0
        self.widgets.setLiric(song)
        print "Type", song.type
        print "MODE", self.mode
        print "Name", song.name
        if song.type == CommonBean.TYPE_MUSIC_FILE:
            self.player = self.playerLocal()
            self.player.set_property("uri", "file://" + song.path)
            self.playerThreadId = thread.start_new_thread(self.playThread, (song,))
        elif song.type == CommonBean.TYPE_RADIO_URL:
            # Endless streams: no duration, no progress thread.
            print "URL PLAYING", song.path
            self.player = self.playerHTTP()
            self.player.set_property("uri", song.path)
            self.widgets.seekBar.set_text("Url Playing...")
        elif song.type == CommonBean.TYPE_MUSIC_URL:
            print "URL PLAYING", song.path
            self.player = self.playerHTTP()
            self.player.set_property("uri", song.path)
            self.playerThreadId = thread.start_new_thread(self.playThread, (song,))
        else:
            self.widgets.seekBar.set_text("Error playing...")
            return
        self.playState()
        self.setVolume(self.volume)
        self.windowController.setTitle(song.getTitleDescription())
        self.trayIcon.setText1(song.getTitleDescription())
    def pauseState(self):
        """Pause the gst pipeline."""
        self.player.set_state(gst.STATE_PAUSED)
    def playState(self):
        """Start/resume the gst pipeline."""
        self.player.set_state(gst.STATE_PLAYING)
    def stopState(self):
        """Stop playback: rewind, reset the seek bar, end the progress thread."""
        self.setSeek(0.0)
        self.widgets.seekBar.set_fraction(0.0)
        self.widgets.seekBar.set_text("00:00 / 00:00")
        # Clearing the id makes playThread's loop condition fail and exit.
        self.playerThreadId = None
        self.player.set_state(gst.STATE_NULL)
    def setVolume(self, volumeValue):
        """Remember and apply the playbin volume (coerced to float via + 0.0)."""
        self.volume = volumeValue
        self.player.set_property('volume', volumeValue + 0.0)
def getVolume(self):
    """Return the last volume value passed to setVolume()."""
    return self.volume
def playerHTTP(self):
    """Build a gst 'playbin' pipeline for remote (HTTP/stream) sources."""
    LOG.info("Player For remote files")
    playbin = gst.element_factory_make("playbin", "player")
    self.playbin = playbin
    # Route pipeline messages (EOS, errors) to onBusMessage.
    message_bus = playbin.get_bus()
    message_bus.add_signal_watch()
    message_bus.connect("message", self.onBusMessage)
    return playbin
def playerLocal(self):
    """Build a gst 'playbin2' pipeline for local files."""
    LOG.info("Player Local Files")
    playbin = gst.element_factory_make("playbin2", "player")
    self.playbin = playbin
    # Route pipeline messages (EOS, errors) to onBusMessage.
    message_bus = playbin.get_bus()
    message_bus.add_signal_watch()
    message_bus.connect("message", self.onBusMessage)
    return playbin
def next(self, *a):
    """Fetch the next song from the active source and play it, if any."""
    provider = self.onlineCntr if self.mode == self.MODE_ONLINE_LIST else self.playlistCntr
    candidate = provider.getNextSong()
    if candidate:
        self.playSong(candidate)
def prev(self):
    """Fetch the previous song from the active source and play it, if any.

    Mirrors next(): a None result (empty/exhausted playlist) is ignored.
    Previously a None song still reached playSong(), whose unconditional
    stopState() call halted playback before the None check.
    """
    if self.mode == self.MODE_ONLINE_LIST:
        song = self.onlineCntr.getPrevSong()
    else:
        song = self.playlistCntr.getPrevSong()
    if song:
        self.playSong(song)
def _isStatusNull(self):
    # get_state() returns a tuple; index 1 is the current state.
    return self.player.get_state()[1] == gst.STATE_NULL
def setSeek(self, persentValue):
    """Seek to *persentValue* percent (0-100) of the current track.

    No-op (returns None) when the pipeline is in NULL state; in that case
    the progress thread id is also invalidated so the thread exits.
    """
    if self._isStatusNull():
        self.playerThreadId = None
        return None
    # query_duration returns a tuple; index 0 is the total duration in ns.
    pos_max = self.player.query_duration(self.time_format, None)[0]
    seek_ns = pos_max * persentValue / 100;
    self.player.seek_simple(self.time_format, gst.SEEK_FLAG_FLUSH, seek_ns)
def playThread(self, song=None):
    """Background progress thread: updates the seek bar for the current song.

    First loop: poll until the pipeline reports a duration, then show it.
    Second loop: update position/fraction twice a second until
    self.playerThreadId changes (set to None by stopState / a new song).
    All widget access is bracketed by gtk.gdk threads_enter/leave.
    """
    LOG.info("Starts playing thread")
    # One-shot flag: the download kick (bottom of the loop) fires only once.
    flag = True
    # Snapshot the id; a mismatch with self.playerThreadId ends both loops.
    play_thread_id = self.playerThreadId
    gtk.gdk.threads_enter()#@UndefinedVariable
    self.widgets.seekBar.set_text("00:00 / 00:00")
    gtk.gdk.threads_leave() #@UndefinedVariable
    while play_thread_id == self.playerThreadId:
        try:
            print "Try"
            time.sleep(0.2)
            # Duration in ns; raises until the pipeline has prerolled.
            dur_int = self.player.query_duration(self.time_format, None)[0]
            #self.currentSong= dur_int / 1000000000
            dur_str = convert_ns(dur_int)
            gtk.gdk.threads_enter() #@UndefinedVariable
            self.widgets.seekBar.set_text("00:00 / " + dur_str)
            gtk.gdk.threads_leave() #@UndefinedVariable
            break
        except:
            print "Error"
            pass
        time.sleep(0.2)
    # NOTE(review): if the first loop exits because the thread id changed
    # (not via break), dur_int/dur_str are unbound; the second loop would
    # raise NameError at its first body iteration — but the same id change
    # makes its condition False, so in practice it never runs then.
    while play_thread_id == self.playerThreadId:
        pos_int = 0
        try:
            # Current position in ns; falls back to 0 on QueryError.
            pos_int = self.player.query_position(self.time_format, None)[0]
        except gst.QueryError:
            print "QueryError error..."
        pos_str = convert_ns(pos_int)
        if play_thread_id == self.playerThreadId:
            gtk.gdk.threads_enter() #@UndefinedVariable
            timeStr = pos_str + " / " + dur_str
            # + 0.0 forces float division under Python 2.
            timePersent = (pos_int + 0.0) / dur_int
            self.widgets.seekBar.set_text(timeStr)
            self.widgets.seekBar.set_fraction(timePersent)
            gtk.gdk.threads_leave() #@UndefinedVariable
        time.sleep(0.5)
        "Download only if you listen this music"
        # Kick the download once the listener has heard >25% of a remote track.
        # NOTE(review): timePersent is unbound here if the inner `if` above
        # was skipped on this iteration — confirm the id cannot change
        # between the loop test and that check.
        if flag and song.type == CommonBean.TYPE_MUSIC_URL and timePersent > 0.25:
            flag = False
            self.onlineCntr.dowloadThread(song)
def onBusMessage(self, bus, message):
type = message.type
if type == gst.MESSAGE_EOS:
print "MESSAGE_EOS"
self.stopState()
self.playerThreadId = None
self.next()
elif type == gst.MESSAGE_ERROR:
print "MESSAGE_ERROR"
err, debug = message.parse_error()
print "Error: %s" % err, debug
self.stopState()
self.playerThreadId = None
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.model.entity import CommonBean
from foobnix.util import LOG
class PlaylistModel:
POS_ICON = 0
POS_TRACK_NUMBER = 1
POS_NAME = 2
POS_PATH = 3
POS_COLOR = 4
POS_INDEX = 5
POS_TYPE = 6
def __init__(self, widget):
self.widget = widget
self.model = gtk.ListStore(str, str, str, str, str, int, str)
cellpb = gtk.CellRendererPixbuf()
cellpb.set_property('cell-background', 'yellow')
iconColumn = gtk.TreeViewColumn(_('Icon'), cellpb, stock_id=0, cell_background=4)
numbetColumn = gtk.TreeViewColumn(_('N'), gtk.CellRendererText(), text=1, background=4)
descriptionColumn = gtk.TreeViewColumn(_("Music List"), gtk.CellRendererText(), text=2, background=4)
widget.append_column(iconColumn)
widget.append_column(numbetColumn)
widget.append_column(descriptionColumn)
widget.set_model(self.model)
def getBeenByPosition(self, position):
if position >= len(self.model):
LOG.error("Song index too much", position)
return None
bean = CommonBean()
bean.icon = self.model[position][ self.POS_ICON]
bean.tracknumber = self.model[position][ self.POS_TRACK_NUMBER]
bean.name = self.model[position][ self.POS_NAME]
bean.path = self.model[position][ self.POS_PATH]
bean.color = self.model[position][ self.POS_COLOR]
bean.index = self.model[position][ self.POS_INDEX]
bean.type = self.model[position][ self.POS_TYPE]
return bean
def get_all_beans(self):
beans = []
for i in xrange(len(self.model)):
beans.append(self.getBeenByPosition(i))
return beans
def set_all_beans(self, beans):
self.clear()
for bean in beans:
self.append(bean)
def append_all_beans(self, beans):
for bean in beans:
self.append(bean)
def getSelectedBean(self):
selection = self.widget.get_selection()
model, selected = selection.get_selected()
if selected:
bean = CommonBean()
bean.icon = model.get_value(selected, self.POS_ICON)
bean.tracknumber = model.get_value(selected, self.POS_TRACK_NUMBER)
bean.name = model.get_value(selected, self.POS_NAME)
bean.path = model.get_value(selected, self.POS_PATH)
bean.color = model.get_value(selected, self.POS_COLOR)
bean.index = model.get_value(selected, self.POS_INDEX)
bean.type = model.get_value(selected, self.POS_TYPE)
return bean
def clear(self):
self.model.clear()
def append(self, bean):
self.model.append([bean.icon, bean.tracknumber, bean.name, bean.path, bean.color, bean.index, bean.type])
def __del__(self, *a):
print "del"
| Python |
'''
Created on Mar 11, 2010
@author: ivan
'''
import gtk
from foobnix.playlist.playlist_model import PlaylistModel
from foobnix.model.entity import CommonBean
from foobnix.util.mouse_utils import is_double_click
from foobnix.player.player_controller import PlayerController
from random import random, randint
from foobnix.util.confguration import FConfiguration
from foobnix.directory.directory_controller import DirectoryCntr
from foobnix.util import LOG
class PlaylistCntr():
def __init__(self, widget, playerCntr):
self.model = PlaylistModel(widget)
self.playerCntr = playerCntr
widget.connect("button-press-event", self.onPlaySong)
self.index = 0;
widget.connect("drag-end", self.onDrugBean)
def registerDirectoryCntr(self, directoryCntr):
self.directoryCntr=directoryCntr
def onDrugBean(self, *ars):
selected = self.model.getSelectedBean()
LOG.info("Drug song", selected, selected.type)
self.directoryCntr.set_active_view(DirectoryCntr.VIEW_VIRTUAL_LISTS)
if selected.type in [CommonBean.TYPE_MUSIC_URL, CommonBean.TYPE_MUSIC_FILE]:
selected.parent = None
self.directoryCntr.append_virtual([selected])
self.directoryCntr.leftNoteBook.set_current_page(0)
def getState(self):
return [self.get_playlist_beans(), self.index]
def get_playlist_beans(self):
return self.model.get_all_beans()
def set_playlist_beans(self, beans):
return self.model.set_all_beans(beans)
def setState(self, state):
self.set_playlist_beans(state[0])
self.index = state[1]
if self.get_playlist_beans():
self.repopulate(self.get_playlist_beans(), self.index);
#self.playerCntr.playSong(self.get_playlist_beans()[self.index])
def clear(self):
self.model.clear()
def onPlaySong(self, w, e):
if is_double_click(e):
playlistBean = self.model.getSelectedBean()
self.repopulate(self.get_playlist_beans(), playlistBean.index);
self.index = playlistBean.index
self.playerCntr.set_mode(PlayerController.MODE_PLAY_LIST)
self.playerCntr.playSong(playlistBean)
def getNextSong(self):
if FConfiguration().isRandom:
self.index = randint(0,len(self.get_playlist_beans()))
else:
self.index += 1
if self.index >= len(self.get_playlist_beans()):
self.index = 0
if not FConfiguration().isRepeat:
self.index = len(self.get_playlist_beans())
return None
playlistBean = self.model.getBeenByPosition(self.index)
if not playlistBean:
return None
self.repopulate(self.get_playlist_beans(), playlistBean.index);
return playlistBean
def getPrevSong(self):
if FConfiguration().isRandom:
self.index = randint(0,len(self.get_playlist_beans()))
else:
self.index -= 1
if self.index < 0:
self.index = len(self.get_playlist_beans()) - 1
playlistBean = self.model.getBeenByPosition(self.index)
self.repopulate(self.get_playlist_beans(), playlistBean.index);
return playlistBean
def setPlaylist(self, entityBeans):
print "Set play list"
self.clear()
self.set_playlist_beans(entityBeans)
self.index = 0
if entityBeans:
self.playerCntr.playSong(entityBeans[0])
self.repopulate(entityBeans, self.index);
def appendPlaylist(self, entityBeans):
print "Append play list"
self.model.append_all_beans(entityBeans)
#if self.get_playlist_beans():
#self.playerCntr.playSong(self.get_playlist_beans()[index])
self.repopulate(self.get_playlist_beans(), self.index);
def repopulate(self, entityBeans, index):
self.model.clear()
for i in range(len(entityBeans)):
songBean = entityBeans[i]
songBean.name = songBean.getPlayListDescription()
songBean.color = self.getBackgroundColour(i)
songBean.index = i
if i == index:
songBean.setIconPlaying()
self.model.append(songBean)
else:
songBean.setIconNone()
self.model.append(songBean)
def getBackgroundColour(self, i):
if i % 2 :
return "#F2F2F2"
else:
return "#FFFFE5"
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.