bugged
stringlengths 4
228k
| fixed
stringlengths 0
96.3M
| __index_level_0__
int64 0
481k
|
|---|---|---|
def jsProcessData (self, data): """process data produced by document.write() JavaScript""" self._debug(NIGHTMARE, "JS: document.write", `data`) self.js_output += 1 # parse recursively self.js_html.feed(data)
|
def jsProcessData (self, data): """process data produced by document.write() JavaScript""" self.js_output += 1 # parse recursively self.js_html.feed(data)
| 16,500
|
def jsProcessPopup (self): """process javascript popup""" self._debug(NIGHTMARE, "JS: popup") self.js_popup += 1
|
def jsProcessPopup (self): """process javascript popup""" self.js_popup += 1
| 16,501
|
def buf_append_data (self, data): """we have to make sure that we have no two following DATA things in the tag buffer. Why? To be 100% sure that an ENCLOSED match really matches enclosed data. """ self._debug(NIGHTMARE, "buf_append_data") if data[0]==DATA and self.buf and self.buf[-1][0]==DATA: self.buf[-1][1] += data[1] else: self.buf.append(data)
|
def buf_append_data (self, data): """we have to make sure that we have no two following DATA things in the tag buffer. Why? To be 100% sure that an ENCLOSED match really matches enclosed data. """ if data[0]==DATA and self.buf and self.buf[-1][0]==DATA: self.buf[-1][1] += data[1] else: self.buf.append(data)
| 16,502
|
def flushbuf (self): """clear and return the output buffer""" self._debug(NIGHTMARE, "flushbuf") data = self.outbuf.getvalue() self.outbuf.close() self.outbuf = StringIO() return data
|
def flushbuf (self): """clear and return the output buffer""" data = self.outbuf.getvalue() self.outbuf.close() self.outbuf = StringIO() return data
| 16,503
|
def _debugbuf (self): """print debugging information about data buffer status""" self._debug(NIGHTMARE, "self.outbuf", `self.outbuf.getvalue()`) self._debug(NIGHTMARE, "self.buf", `self.buf`) self._debug(NIGHTMARE, "self.waitbuf", `self.waitbuf`) self._debug(NIGHTMARE, "self.inbuf", `self.inbuf.getvalue()`)
|
def _debugbuf (self): """print debugging information about data buffer status""" self._debug(NIGHTMARE, "self.outbuf", `self.outbuf.getvalue()`) self._debug(NIGHTMARE, "self.buf", `self.buf`) self._debug(NIGHTMARE, "self.waitbuf", `self.waitbuf`) self._debug(NIGHTMARE, "self.inbuf", `self.inbuf.getvalue()`)
| 16,504
|
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() + data self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data self._debug(NIGHTMARE, "feed", `data`) self.parser.feed(data) else: self._debug(NIGHTMARE, "feed") pass else: # wait state --> put in input buffer self._debug(NIGHTMARE, "wait") self.inbuf.write(data)
|
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() + data self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data self.parser.feed(data) else: self._debug(NIGHTMARE, "feed") pass else: # wait state --> put in input buffer self._debug(NIGHTMARE, "wait") self.inbuf.write(data)
| 16,505
|
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() + data self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data self._debug(NIGHTMARE, "feed", `data`) self.parser.feed(data) else: self._debug(NIGHTMARE, "feed") pass else: # wait state --> put in input buffer self._debug(NIGHTMARE, "wait") self.inbuf.write(data)
|
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() + data self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data self._debug(NIGHTMARE, "feed", `data`) self.parser.feed(data) else: pass else: # wait state --> put in input buffer self._debug(NIGHTMARE, "wait") self.inbuf.write(data)
| 16,506
|
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() + data self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data self._debug(NIGHTMARE, "feed", `data`) self.parser.feed(data) else: self._debug(NIGHTMARE, "feed") pass else: # wait state --> put in input buffer self._debug(NIGHTMARE, "wait") self.inbuf.write(data)
|
def feed (self, data): """feed some data to the parser""" if self.state=='parse': # look if we must replay something if self.waited: self.waited = 0 waitbuf, self.waitbuf = self.waitbuf, [] self.replay(waitbuf) if self.state!='parse': return data = self.inbuf.getvalue() + data self.inbuf.close() self.inbuf = StringIO() if data: # only feed non-empty data self._debug(NIGHTMARE, "feed", `data`) self.parser.feed(data) else: self._debug(NIGHTMARE, "feed") pass else: # wait state --> put in input buffer self.inbuf.write(data)
| 16,507
|
def flush (self): self._debug(HURT_ME_PLENTY, "flush") # flushing in wait state raises a filter exception if self.state=='wait': raise FilterWait("HtmlParser[%d]: waiting for data"%self.level) self.parser.flush()
|
def flush (self): # flushing in wait state raises a filter exception if self.state=='wait': raise FilterWait("HtmlParser[%d]: waiting for data"%self.level) self.parser.flush()
| 16,508
|
def replay (self, waitbuf): """call the handler functions again with buffer data""" self._debug(NIGHTMARE, "replay", waitbuf) for item in waitbuf: if item[0]==DATA: self._data(item[1]) elif item[0]==STARTTAG: self.startElement(item[1], item[2]) elif item[0]==ENDTAG: self.endElement(item[1]) elif item[0]==COMMENT: self.comment(item[1])
|
def replay (self, waitbuf): """call the handler functions again with buffer data""" for item in waitbuf: if item[0]==DATA: self._data(item[1]) elif item[0]==STARTTAG: self.startElement(item[1], item[2]) elif item[0]==ENDTAG: self.endElement(item[1]) elif item[0]==COMMENT: self.comment(item[1])
| 16,509
|
def cdata (self, data): """character data""" self._debug(NIGHTMARE, "cdata", `data`) return self._data(data)
|
def cdata (self, data): """character data""" return self._data(data)
| 16,510
|
def characters (self, data): """characters""" self._debug(NIGHTMARE, "characters", `data`) return self._data(data)
|
def characters (self, data): """characters""" return self._data(data)
| 16,511
|
def comment (self, data): """a comment; accept only non-empty comments""" self._debug(NIGHTMARE, "comment", `data`) item = [COMMENT, data] if self.state=='wait': return self.waitbuf.append(item) if self.comments and data: self.buf.append(item)
|
def comment (self, data): """a comment; accept only non-empty comments""" item = [COMMENT, data] if self.state=='wait': return self.waitbuf.append(item) if self.comments and data: self.buf.append(item)
| 16,512
|
def doctype (self, data): self._debug(NIGHTMARE, "doctype", `data`) return self._data("<!DOCTYPE%s>"%data)
|
def doctype (self, data): return self._data("<!DOCTYPE%s>"%data)
| 16,513
|
def pi (self, data): self._debug(NIGHTMARE, "pi", `data`) return self._data("<?%s?>"%data)
|
def pi (self, data): return self._data("<?%s?>"%data)
| 16,514
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug(NIGHTMARE, "startElement", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state=='wait': return self.waitbuf.append(item) rulelist = [] filtered = 0 if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`)) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = "True" if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug(NIGHTMARE, "put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buf) self.rulestack.append((pos, rulelist)) if filtered: self.buf_append_data(item) elif self.js_filter: # if its not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: self.buf.append(item) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.js_filter: self.buf2data()
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state=='wait': return self.waitbuf.append(item) rulelist = [] filtered = 0 if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`)) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = "True" if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug(NIGHTMARE, "put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buf) self.rulestack.append((pos, rulelist)) if filtered: self.buf_append_data(item) elif self.js_filter: # if its not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: self.buf.append(item) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.js_filter: self.buf2data()
| 16,515
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug(NIGHTMARE, "startElement", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state=='wait': return self.waitbuf.append(item) rulelist = [] filtered = 0 if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`)) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = "True" if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug(NIGHTMARE, "put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buf) self.rulestack.append((pos, rulelist)) if filtered: self.buf_append_data(item) elif self.js_filter: # if its not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: self.buf.append(item) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.js_filter: self.buf2data()
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug(NIGHTMARE, "startElement", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state=='wait': return self.waitbuf.append(item) rulelist = [] filtered = 0 if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = "True" if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug(NIGHTMARE, "put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buf) self.rulestack.append((pos, rulelist)) if filtered: self.buf_append_data(item) elif self.js_filter: # if its not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: self.buf.append(item) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.js_filter: self.buf2data()
| 16,516
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug(NIGHTMARE, "startElement", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state=='wait': return self.waitbuf.append(item) rulelist = [] filtered = 0 if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`)) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = "True" if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug(NIGHTMARE, "put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buf) self.rulestack.append((pos, rulelist)) if filtered: self.buf_append_data(item) elif self.js_filter: # if its not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: self.buf.append(item) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.js_filter: self.buf2data()
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug(NIGHTMARE, "startElement", `tag`) tag = check_spelling(tag, self.url) item = [STARTTAG, tag, attrs] if self.state=='wait': return self.waitbuf.append(item) rulelist = [] filtered = 0 if tag=="meta" and \ attrs.get('http-equiv', '').lower() =='pics-label': labels = resolve_html_entities(attrs.get('content', '')) # note: if there are no pics rules, this loop is empty for rule in self.pics: msg = check_pics(rule, labels) if msg: raise FilterPics(msg) # first labels match counts self.pics = [] elif tag=="body": # headers finished if self.pics: # no pics data found self.pics = [] # look for filter rules which apply for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug(NIGHTMARE, "matched rule %s on tag %s" % (`rule.title`, `tag`)) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = "True" if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.buf) self.rulestack.append((pos, rulelist)) if filtered: self.buf_append_data(item) elif self.js_filter: # if its not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: self.buf.append(item) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.js_filter: self.buf2data()
| 16,517
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
| 16,518
|
def jsStartElement (self, tag, attrs): """Check popups for onmouseout and onmouseover. Inline extern javascript sources""" changed = 0 self.js_src = None self.js_output = 0 self.js_popup = 0 for name in ('onmouseover', 'onmouseout'): if attrs.has_key(name) and self.jsPopup(attrs, name): self._debug(NIGHTMARE, "JS: del", `name`, "from", `tag`) del attrs[name] changed = 1 if tag=='form': name = attrs.get('name', attrs.get('id')) self.jsForm(name, attrs.get('action', ''), attrs.get('target', '')) elif tag=='script': lang = attrs.get('language', '').lower() url = attrs.get('src', '') scrtype = attrs.get('type', '').lower() is_js = scrtype=='text/javascript' or \ lang.startswith('javascript') or \ not (lang or scrtype) if is_js and url: return self.jsScriptSrc(url, lang) self.buf.append([STARTTAG, tag, attrs])
|
def jsStartElement (self, tag, attrs): """Check popups for onmouseout and onmouseover. Inline extern javascript sources""" changed = 0 self.js_src = None self.js_output = 0 self.js_popup = 0 for name in ('onmouseover', 'onmouseout'): if attrs.has_key(name) and self.jsPopup(attrs, name): del attrs[name] changed = 1 if tag=='form': name = attrs.get('name', attrs.get('id')) self.jsForm(name, attrs.get('action', ''), attrs.get('target', '')) elif tag=='script': lang = attrs.get('language', '').lower() url = attrs.get('src', '') scrtype = attrs.get('type', '').lower() is_js = scrtype=='text/javascript' or \ lang.startswith('javascript') or \ not (lang or scrtype) if is_js and url: return self.jsScriptSrc(url, lang) self.buf.append([STARTTAG, tag, attrs])
| 16,519
|
def jsPopup (self, attrs, name): """check if attrs[name] javascript opens a popup window""" self._debug(NIGHTMARE, "JS: jsPopup") val = resolve_html_entities(attrs[name]) if not val: return self.js_env.attachListener(self) try: self.js_env.executeScriptAsFunction(val, 0.0) except jslib.error, msg: pass self.js_env.detachListener(self) res = self.js_popup self.js_popup = 0 return res
|
def jsPopup (self, attrs, name): """check if attrs[name] javascript opens a popup window""" val = resolve_html_entities(attrs[name]) if not val: return self.js_env.attachListener(self) try: self.js_env.executeScriptAsFunction(val, 0.0) except jslib.error, msg: pass self.js_env.detachListener(self) res = self.js_popup self.js_popup = 0 return res
| 16,520
|
def jsForm (self, name, action, target): """when hitting a (named) form, notify the JS engine about that""" if not name: return self._debug(HURT_ME_PLENTY, "jsForm", `name`, `action`, `target`) self.js_env.addForm(name, action, target)
|
def jsForm (self, name, action, target): """when hitting a (named) form, notify the JS engine about that""" if not name: return self.js_env.addForm(name, action, target)
| 16,521
|
def jsScriptData (self, data, url, ver): """Callback for loading <script src=""> data in the background If downloading is finished, data is None""" assert self.state=='wait' if data is None: if not self.js_script: print >> sys.stderr, "HtmlParser[%d]: empty JS src"%self.level, url else: self.buf.append([STARTTAG, "script", {'type': 'text/javascript'}]) script = "<!--\n%s\n//-->"%escape_js(self.js_script) self.buf.append([DATA, script]) # Note: <script src=""> could be missing an end tag, # but now we need one. Look later for a duplicate </script>. self.buf.append([ENDTAG, "script"]) self.js_script = '' self.state = 'parse' self._debug(NIGHTMARE, "switching back to parse with") self._debugbuf() else: self._debug(HURT_ME_PLENTY, "JS read", len(data), "<=", url) self.js_script += data
|
def jsScriptData (self, data, url, ver): """Callback for loading <script src=""> data in the background If downloading is finished, data is None""" assert self.state=='wait' if data is None: if not self.js_script: print >> sys.stderr, "HtmlParser[%d]: empty JS src"%self.level, url else: self.buf.append([STARTTAG, "script", {'type': 'text/javascript'}]) script = "<!--\n%s\n//-->"%escape_js(self.js_script) self.buf.append([DATA, script]) # Note: <script src=""> could be missing an end tag, # but now we need one. Look later for a duplicate </script>. self.buf.append([ENDTAG, "script"]) self.js_script = '' self.state = 'parse' self._debugbuf() else: self._debug(HURT_ME_PLENTY, "JS read", len(data), "<=", url) self.js_script += data
| 16,522
|
def jsScriptData (self, data, url, ver): """Callback for loading <script src=""> data in the background If downloading is finished, data is None""" assert self.state=='wait' if data is None: if not self.js_script: print >> sys.stderr, "HtmlParser[%d]: empty JS src"%self.level, url else: self.buf.append([STARTTAG, "script", {'type': 'text/javascript'}]) script = "<!--\n%s\n//-->"%escape_js(self.js_script) self.buf.append([DATA, script]) # Note: <script src=""> could be missing an end tag, # but now we need one. Look later for a duplicate </script>. self.buf.append([ENDTAG, "script"]) self.js_script = '' self.state = 'parse' self._debug(NIGHTMARE, "switching back to parse with") self._debugbuf() else: self._debug(HURT_ME_PLENTY, "JS read", len(data), "<=", url) self.js_script += data
|
def jsScriptData (self, data, url, ver): """Callback for loading <script src=""> data in the background If downloading is finished, data is None""" assert self.state=='wait' if data is None: if not self.js_script: print >> sys.stderr, "HtmlParser[%d]: empty JS src"%self.level, url else: self.buf.append([STARTTAG, "script", {'type': 'text/javascript'}]) script = "<!--\n%s\n//-->"%escape_js(self.js_script) self.buf.append([DATA, script]) # Note: <script src=""> could be missing an end tag, # but now we need one. Look later for a duplicate </script>. self.buf.append([ENDTAG, "script"]) self.js_script = '' self.state = 'parse' self._debug(NIGHTMARE, "switching back to parse with") self._debugbuf() else: self.js_script += data
| 16,523
|
def jsScriptSrc (self, url, language): """Start a background download for <script src=""> tags""" assert self.state=='parse' ver = 0.0 if language: mo = re.search(r'(?i)javascript(?P<num>\d\.\d)', language) if mo: ver = float(mo.group('num')) url = urlparse.urljoin(self.url, url) self._debug(HURT_ME_PLENTY, "JS jsScriptSrc", `url`, `ver`) if _has_ws(url): print >> sys.stderr, "HtmlParser[%d]: broken JS url"%self.level,\ `url`, "at", `self.url` return self.state = 'wait' self.waited = 'True' self.js_src = 'True' client = HttpProxyClient(self.jsScriptData, (url, ver)) ClientServerMatchmaker(client, "GET %s HTTP/1.1" % url, #request {}, #headers '', #content {'nofilter': None}, # nofilter 'identity', # compress mime = "application/x-javascript", )
|
def jsScriptSrc (self, url, language): """Start a background download for <script src=""> tags""" assert self.state=='parse' ver = 0.0 if language: mo = re.search(r'(?i)javascript(?P<num>\d\.\d)', language) if mo: ver = float(mo.group('num')) url = urlparse.urljoin(self.url, url) if _has_ws(url): print >> sys.stderr, "HtmlParser[%d]: broken JS url"%self.level,\ `url`, "at", `self.url` return self.state = 'wait' self.waited = 'True' self.js_src = 'True' client = HttpProxyClient(self.jsScriptData, (url, ver)) ClientServerMatchmaker(client, "GET %s HTTP/1.1" % url, #request {}, #headers '', #content {'nofilter': None}, # nofilter 'identity', # compress mime = "application/x-javascript", )
| 16,524
|
def jsScript (self, script, ver, item): """execute given script with javascript version ver""" self._debug(NIGHTMARE, "JS: jsScript", ver, `script`) assert self.state == 'parse' assert len(self.buf) >= 2 self.js_output = 0 self.js_env.attachListener(self) # start recursive html filter (used by jsProcessData) self.js_html = FilterHtmlParser(self.rules, self.pics, self.url, comments=self.comments, javascript=self.js_filter, level=self.level+1) # execute self.js_env.executeScript(unescape_js(script), ver) self.js_env.detachListener(self) # wait for recursive filter to finish self.jsEndScript(item)
|
def jsScript (self, script, ver, item): """execute given script with javascript version ver""" assert self.state == 'parse' assert len(self.buf) >= 2 self.js_output = 0 self.js_env.attachListener(self) # start recursive html filter (used by jsProcessData) self.js_html = FilterHtmlParser(self.rules, self.pics, self.url, comments=self.comments, javascript=self.js_filter, level=self.level+1) # execute self.js_env.executeScript(unescape_js(script), ver) self.js_env.detachListener(self) # wait for recursive filter to finish self.jsEndScript(item)
| 16,525
|
def jsEndScript (self, item): self._debug(NIGHTMARE, "JS: endScript") assert len(self.buf) >= 2 if self.js_output: try: self.js_html.feed('') self.js_html.flush() except FilterWait: self.state = 'wait' self.waited = 'True' make_timer(0.1, lambda : self.jsEndScript(item)) return self.js_html._debugbuf() assert not self.js_html.inbuf.getvalue() assert not self.js_html.waitbuf assert len(self.buf) >= 2 self.buf[-2:-2] = [[DATA, self.js_html.outbuf.getvalue()]]+self.js_html.buf self.js_html = None if (self.js_popup + self.js_output) > 0: # delete old script del self.buf[-1] del self.buf[-1] elif not self.filterEndElement(item[1]): self.buf.append(item) self._debug(NIGHTMARE, "JS: switching back to parse with") self._debugbuf() self.state = 'parse'
|
def jsEndScript (self, item): assert len(self.buf) >= 2 if self.js_output: try: self.js_html.feed('') self.js_html.flush() except FilterWait: self.state = 'wait' self.waited = 'True' make_timer(0.1, lambda : self.jsEndScript(item)) return self.js_html._debugbuf() assert not self.js_html.inbuf.getvalue() assert not self.js_html.waitbuf assert len(self.buf) >= 2 self.buf[-2:-2] = [[DATA, self.js_html.outbuf.getvalue()]]+self.js_html.buf self.js_html = None if (self.js_popup + self.js_output) > 0: # delete old script del self.buf[-1] del self.buf[-1] elif not self.filterEndElement(item[1]): self.buf.append(item) self._debug(NIGHTMARE, "JS: switching back to parse with") self._debugbuf() self.state = 'parse'
| 16,526
|
def jsEndScript (self, item): self._debug(NIGHTMARE, "JS: endScript") assert len(self.buf) >= 2 if self.js_output: try: self.js_html.feed('') self.js_html.flush() except FilterWait: self.state = 'wait' self.waited = 'True' make_timer(0.1, lambda : self.jsEndScript(item)) return self.js_html._debugbuf() assert not self.js_html.inbuf.getvalue() assert not self.js_html.waitbuf assert len(self.buf) >= 2 self.buf[-2:-2] = [[DATA, self.js_html.outbuf.getvalue()]]+self.js_html.buf self.js_html = None if (self.js_popup + self.js_output) > 0: # delete old script del self.buf[-1] del self.buf[-1] elif not self.filterEndElement(item[1]): self.buf.append(item) self._debug(NIGHTMARE, "JS: switching back to parse with") self._debugbuf() self.state = 'parse'
|
def jsEndScript (self, item): self._debug(NIGHTMARE, "JS: endScript") assert len(self.buf) >= 2 if self.js_output: try: self.js_html.feed('') self.js_html.flush() except FilterWait: self.state = 'wait' self.waited = 'True' make_timer(0.1, lambda : self.jsEndScript(item)) return self.js_html._debugbuf() assert not self.js_html.inbuf.getvalue() assert not self.js_html.waitbuf assert len(self.buf) >= 2 self.buf[-2:-2] = [[DATA, self.js_html.outbuf.getvalue()]]+self.js_html.buf self.js_html = None if (self.js_popup + self.js_output) > 0: # delete old script del self.buf[-1] del self.buf[-1] elif not self.filterEndElement(item[1]): self.buf.append(item) self._debugbuf() self.state = 'parse'
| 16,527
|
def __init__ (self, socket, addr): Connection.__init__(self, socket) self.addr = addr self.state = 'request' self.server = None self.request = '' self.headers = None self.bytes_remaining = None # for content only self.content = '' if self.addr[0] not in config['allowed_hosts']: self.close()
|
def __init__ (self, socket, addr): Connection.__init__(self, socket) self.addr = addr self.state = 'request' self.server = None self.request = '' self.headers = None self.bytes_remaining = None # for content only self.content = '' if not config['allowedhosts'].has_key(self.addr[0]): self.close()
| 16,528
|
def __init__ (self, sid=None, titles=None, descriptions=None, disable=0, tag=u"a", attrs=None, enclosed=u"", part=wc.filter.html.COMPLETE, replacement=u""): """ Initialize rule data. """ super(HtmlrewriteRule, self).__init__(sid=sid, titles=titles, descriptions=descriptions, disable=disable) self.tag = tag self.tag_ro = None if attrs is None: self.attrs = {} else: self.attrs = attrs self.attrs_ro = {} self.part = part self.replacement = replacement self.enclosed = enclosed self.enclosed_ro = None if self.enclosed and self.tag in NO_CLOSE_TAGS: raise ValueError, "reading rule %r: tag %r has no end tag, " \ "so specifying an enclose value is invalid." % \ (self.titles['en'], tag) self.attrnames.append('tag')
|
def __init__ (self, sid=None, titles=None, descriptions=None, disable=0, tag=u"a", attrs=None, enclosed=u"", part=wc.filter.html.COMPLETE, replacement=u""): """ Initialize rule data. """ super(HtmlrewriteRule, self).__init__(sid=sid, titles=titles, descriptions=descriptions, disable=disable) self.tag = tag self.tag_ro = None if attrs is None: self.attrs = {} else: self.attrs = attrs self.attrs_ro = {} self.part = part self.replacement = replacement self.enclosed = enclosed self.enclosed_ro = None self.attrnames.append('tag')
| 16,529
|
def matches_starttag (self): """ See if this rule matches start tags. """ if self.tag in NO_CLOSE_TAGS: return True return self.part not in [ wc.filter.html.ENCLOSED, wc.filter.html.COMPLETE, ]
|
def matches_starttag (self): """ See if this rule matches start tags. """ for tag in NO_CLOSE_TAGS: if self.match_tag(tag): return True return self.part not in [ wc.filter.html.ENCLOSED, wc.filter.html.COMPLETE, ]
| 16,530
|
def matches_endtag (self): """ See if this rule matches end tags. """ if self.tag in NO_CLOSE_TAGS: return False return self.part not in [ wc.filter.html.ATTR, wc.filter.html.ATTRVAL, wc.filter.html.ATTRNAME, ]
|
def matches_endtag (self): """ See if this rule matches end tags. """ for tag in NO_CLOSE_TAGS: if self.match_tag(tag): return False return self.part not in [ wc.filter.html.ATTR, wc.filter.html.ATTRVAL, wc.filter.html.ATTRNAME, ]
| 16,531
|
def XtestScriptSrc1 (self): self.filt(
|
def testScriptSrc1 (self): self.filt(
| 16,532
|
def XtestScriptSrc2 (self): self.filt(
|
def testScriptSrc2 (self): self.filt(
| 16,533
|
def XtestScriptSrc3 (self): """missing </script>""" self.filt(
|
def testScriptSrc3 (self): """missing </script>""" self.filt(
| 16,534
|
def parse_headers (): headers = [] try: s = get_data("/headers/") #debug(BRING_IT_ON, "headers data", s) except (IOError, ValueError): print >> sys.stderr, _("WebCleaner is not running") return headers if s=="-": return headers lines = s.split("\n") for l in lines: print "line", `l` # strip off paranthesis l = l[1:-1] # split into three parts url, io, hlist = l.split(", ", 2) # split headers hlist = (hlist.strip())[2:-2].split("', '") # strip headers hlist = map(lambda x: x.replace("\\r", ""), hlist) hlist = map(lambda x: x.replace("\\n", ""), hlist) hlist = map(lambda x: x.split(":", 1), hlist) # append headers.append([url[1:-1], int(io), hlist]) return headers
|
def parse_headers (): headers = [] try: s = get_data("/headers/") #debug(BRING_IT_ON, "headers data", s) except (IOError, ValueError): print >> sys.stderr, _("WebCleaner is not running") return headers if s=="-": return headers lines = s.split("\n") for l in lines: # strip off paranthesis l = l[1:-1] # split into three parts url, io, hlist = l.split(", ", 2) # split headers hlist = (hlist.strip())[2:-2].split("', '") # strip headers hlist = map(lambda x: x.replace("\\r", ""), hlist) hlist = map(lambda x: x.replace("\\n", ""), hlist) hlist = map(lambda x: x.split(":", 1), hlist) # append headers.append([url[1:-1], int(io), hlist]) return headers
| 16,535
|
def processData (self, data): print >>sys.stderr, "JS:", data # XXX parse recursively
|
def processData (self, data): print >>sys.stderr, "JS:", data # XXX parse recursively
| 16,536
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
| 16,537
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
| 16,538
|
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
|
def jsEndElement (self, tag): """parse generated html for scripts""" if len(self.buffer)<2: print >>sys.stderr, "short buffer on </script>", self.buffer return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
| 16,539
|
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
|
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return if self.buffer[-1][0]!=DATA or self.buffer[-2][0]!=STARTTAG: print >>sys.stderr, "missing tags for </script>", self.buffer[-2:] return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
| 16,540
|
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
|
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = self.buffer[-1][1].strip() self.buffer[-2:] = [] if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
| 16,541
|
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsEnv.executeScriptAsFunction(val, 0.0) self.jsEnv.detachListener(self)
|
def jsEndElement (self, tag): """parse generated html for scripts""" if tag!='script': return if not self.buffer: print >>sys.stderr, "empty buffer on </script>" return last = self.buffer[-1] if last[0]!=DATA: print >>sys.stderr, "missing body for </script>", last return script = last[1].strip() if script.startswith("<!--"): script = script[4:].strip() self.jsEnv.attachListener(self) self.jsfilter = HtmlFilter(self.rules, self.document, comments=self.comments, javascript=self.javascript) self.jsEnv.executeScriptAsFunction(script, 0.0) self.jsEnv.detachListener(self)
| 16,542
|
def finish (self, data, attrs): """ Feed image data to buffer, then convert it and return result. """ if self.init_image_reducer: self.set_ctype_header(attrs) self.init_image_reducer = False if not attrs.has_key('imgreducer_buf'): return data p = attrs['imgreducer_buf'] if data: p.write(data) p.seek(0) try: img = Image.open(p) data = StringIO.StringIO() if attrs.get('imgreducer_convert'): img = img.convert() img.save(data, "JPEG", quality=self.quality, optimize=1) except IOError, msg: # return original image data on error wc.log.warn(wc.LOG_FILTER, "I/O error reading image data: %s", str(msg)) # XXX the content type is pretty sure wrong return p.getvalue() return data.getvalue()
|
def finish (self, data, attrs): """ Feed image data to buffer, then convert it and return result. """ if self.init_image_reducer: self.set_ctype_header(attrs) self.init_image_reducer = False if not attrs.has_key('imgreducer_buf'): return data p = attrs['imgreducer_buf'] if data: p.write(data) p.seek(0) try: img = Image.open(p) data = StringIO.StringIO() if img.mode not in ('RGB', 'L'): img.draft("RGB", img.size) img = img.convert("RGB") img.save(data, "JPEG", quality=self.quality, optimize=1) except IOError, msg: # return original image data on error wc.log.warn(wc.LOG_FILTER, "I/O error reading image data: %s", str(msg)) # XXX the content type is pretty sure wrong return p.getvalue() return data.getvalue()
| 16,543
|
def finish (self, data, attrs): """ Feed image data to buffer, then convert it and return result. """ if self.init_image_reducer: self.set_ctype_header(attrs) self.init_image_reducer = False if not attrs.has_key('imgreducer_buf'): return data p = attrs['imgreducer_buf'] if data: p.write(data) p.seek(0) try: img = Image.open(p) data = StringIO.StringIO() if attrs.get('imgreducer_convert'): img = img.convert() img.save(data, "JPEG", quality=self.quality, optimize=1) except IOError, msg: # return original image data on error wc.log.warn(wc.LOG_FILTER, "I/O error reading image data: %s", str(msg)) # XXX the content type is pretty sure wrong return p.getvalue() return data.getvalue()
|
def finish (self, data, attrs): """ Feed image data to buffer, then convert it and return result. """ if self.init_image_reducer: self.set_ctype_header(attrs) self.init_image_reducer = False if not attrs.has_key('imgreducer_buf'): return data p = attrs['imgreducer_buf'] if data: p.write(data) p.seek(0) try: img = Image.open(p) data = StringIO.StringIO() if attrs.get('imgreducer_convert'): img = img.convert() img.save(data, "JPEG", quality=self.quality, optimize=1) except IOError, msg: # return original image data on error wc.log.warn(wc.LOG_FILTER, "I/O error reading image data %r: %s", attrs['url'], str(msg)) # XXX the content type is pretty sure wrong return p.getvalue() return data.getvalue()
| 16,544
|
def get_attrs (self, url, localhost, stages, headers): """ Initialize image reducer buffer and flags. """ if not self.applies_to_stages(stages): return {} # don't filter tiny images d = super(ImageReducer, self).get_attrs(url, localhost, stages, headers) # weed out the rules that don't apply to this url rules = [ rule for rule in self.rules if rule.applies_to_url(url) ] if rules: if len(rules) > 1: wc.log.warn(wc.LOG_FILTER, "more than one rule matched %r: %s", url, str(rules)) # first rule wins quality = rules[0].quality minimal_size_bytes = rules[0].minimal_size_bytes else: quality = self.quality minimal_size_bytes = self.minimal_size_bytes try: length = int(headers['server'].get('Content-Length', 0)) except ValueError: wc.log.warn(wc.LOG_FILTER, "invalid content length at %r", url) return d if length < 0: wc.log.warn(wc.LOG_FILTER, "negative content length at %r", url) return d if length == 0: wc.log.warn(wc.LOG_FILTER, "missing content length at %r", url) elif 0 < length < minimal_size_bytes: return d d['imgreducer_buf'] = StringIO.StringIO() # some images have to be convert()ed before saving ctype = headers['server'].get('Content-Type') d['imgreducer_convert'] = convert(ctype) return d
|
def get_attrs (self, url, localhost, stages, headers): """ Initialize image reducer buffer and flags. """ if not self.applies_to_stages(stages): return {} # don't filter tiny images d = super(ImageReducer, self).get_attrs(url, localhost, stages, headers) # weed out the rules that don't apply to this url rules = [ rule for rule in self.rules if rule.applies_to_url(url) ] if rules: if len(rules) > 1: wc.log.warn(wc.LOG_FILTER, "more than one rule matched %r: %s", url, str(rules)) # first rule wins quality = rules[0].quality minimal_size_bytes = rules[0].minimal_size_bytes else: quality = self.quality minimal_size_bytes = self.minimal_size_bytes try: length = int(headers['server'].get('Content-Length', 0)) except ValueError: wc.log.warn(wc.LOG_FILTER, "invalid content length at %r", url) return d if length < 0: wc.log.warn(wc.LOG_FILTER, "negative content length at %r", url) return d if length == 0: wc.log.warn(wc.LOG_FILTER, "missing content length at %r", url) elif 0 < length < minimal_size_bytes: return d d['imgreducer_buf'] = StringIO.StringIO() # some images have to be convert()ed before saving ctype = headers['server'].get('Content-Type') return d
| 16,545
|
def get_attrs (self, url, localhost, stages, headers): """ Initialize image reducer buffer and flags. """ if not self.applies_to_stages(stages): return {} # don't filter tiny images d = super(ImageReducer, self).get_attrs(url, localhost, stages, headers) # weed out the rules that don't apply to this url rules = [ rule for rule in self.rules if rule.applies_to_url(url) ] if rules: if len(rules) > 1: wc.log.warn(wc.LOG_FILTER, "more than one rule matched %r: %s", url, str(rules)) # first rule wins quality = rules[0].quality minimal_size_bytes = rules[0].minimal_size_bytes else: quality = self.quality minimal_size_bytes = self.minimal_size_bytes try: length = int(headers['server'].get('Content-Length', 0)) except ValueError: wc.log.warn(wc.LOG_FILTER, "invalid content length at %r", url) return d if length < 0: wc.log.warn(wc.LOG_FILTER, "negative content length at %r", url) return d if length == 0: wc.log.warn(wc.LOG_FILTER, "missing content length at %r", url) elif 0 < length < minimal_size_bytes: return d d['imgreducer_buf'] = StringIO.StringIO() # some images have to be convert()ed before saving ctype = headers['server'].get('Content-Type') d['imgreducer_convert'] = convert(ctype) return d
|
def get_attrs (self, url, localhost, stages, headers): """ Initialize image reducer buffer and flags. """ if not self.applies_to_stages(stages): return {} # don't filter tiny images d = super(ImageReducer, self).get_attrs(url, localhost, stages, headers) # weed out the rules that don't apply to this url rules = [ rule for rule in self.rules if rule.applies_to_url(url) ] if rules: if len(rules) > 1: wc.log.warn(wc.LOG_FILTER, "more than one rule matched %r: %s", url, str(rules)) # first rule wins quality = rules[0].quality minimal_size_bytes = rules[0].minimal_size_bytes else: quality = self.quality minimal_size_bytes = self.minimal_size_bytes try: length = int(headers['server'].get('Content-Length', 0)) except ValueError: wc.log.warn(wc.LOG_FILTER, "invalid content length at %r", url) return d if length < 0: wc.log.warn(wc.LOG_FILTER, "negative content length at %r", url) return d if length == 0: wc.log.warn(wc.LOG_FILTER, "missing content length at %r", url) elif 0 < length < minimal_size_bytes: return d d['imgreducer_buf'] = StringIO.StringIO() # some images have to be convert()ed before saving ctype = headers['server'].get('Content-Type') d['imgreducer_convert'] = convert(ctype) return d
| 16,546
|
def construct_request_data (self, request): """ Construct valid HTTP request data string. """ lines = [] version = "HTTP/%d.%d" % request.version lines.append("%s %s %s" % (request.method, request.uri, version)) lines.extend(request.headers) # an empty line ends the headers lines.extend(("", "")) data = "\r\n".join(lines) if request.content: data += content return data
|
def construct_request_data (self, request): """ Construct valid HTTP request data string. """ lines = [] version = "HTTP/%d.%d" % request.version lines.append("%s %s %s" % (request.method, request.uri, version)) lines.extend(request.headers) # an empty line ends the headers lines.extend(("", "")) data = "\r\n".join(lines) if request.content: data += request.content return data
| 16,547
|
def get_localaddrs (): """all active interfaces' ip addresses""" addrs = sets.Set() try: # search interfaces key = wc.winreg.key_handle(wc.winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces") for subkey in key.subkeys(): if subkey.get('EnableDHCP')==1: ip = subkey.get('DhcpIPAddress') else: ip = subkey.get('IPAddress') if ip: addrs.add(ip) except EnvironmentError: pass return addrs
|
def get_localaddrs (): """all active interfaces' ip addresses""" addrs = sets.Set() try: # search interfaces key = wc.winreg.key_handle(wc.winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces") for subkey in key.subkeys(): if subkey.get('EnableDHCP'): ip = subkey.get('DhcpIPAddress', '') else: ip = subkey.get('IPAddress') if ip: addrs.add(ip) except EnvironmentError: pass return addrs
| 16,548
|
def get_localaddrs (): """all active interfaces' ip addresses""" addrs = sets.Set() try: # search interfaces key = wc.winreg.key_handle(wc.winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces") for subkey in key.subkeys(): if subkey.get('EnableDHCP')==1: ip = subkey.get('DhcpIPAddress') else: ip = subkey.get('IPAddress') if ip: addrs.add(ip) except EnvironmentError: pass return addrs
|
def get_localaddrs (): """all active interfaces' ip addresses""" addrs = sets.Set() try: # search interfaces key = wc.winreg.key_handle(wc.winreg.HKEY_LOCAL_MACHINE, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces") for subkey in key.subkeys(): if subkey.get('EnableDHCP')==1: ip = subkey.get('DhcpIPAddress') else: ip = subkey.get('IPAddress', '') if not (isinstance(ip, basestring) and ip): continue addrs.add(str(ip)) except EnvironmentError: pass return addrs
| 16,549
|
def _main (): """USAGE: test/run.sh test/parsefile.py test.html""" import sys if len(sys.argv)!=2: print _main.__doc__ sys.exit(1) if sys.argv[1]=='-': f = sys.stdin else: f = file(sys.argv[1]) from wc.parser.htmllib import HtmlPrinter from wc.parser import htmlsax p = htmlsax.parser(HtmlPrinter()) p.debug(1) size = 1024 #size = 1 data = f.read(size) while data: p.feed(data) data = f.read(size) p.flush()
|
def _main (): """USAGE: test/run.sh test/parsefile.py test.html""" import sys if len(sys.argv)!=2: print _main.__doc__ sys.exit(1) if sys.argv[1]=='-': f = sys.stdin else: f = file(sys.argv[1]) from wc.parser.htmllib import HtmlPrinter from wc.parser import htmlsax p = htmlsax.parser(HtmlPrinter()) size = 1024 #size = 1 data = f.read(size) while data: p.feed(data) data = f.read(size) p.flush()
| 16,550
|
def fname (name): return os.path.join("wc", "dns", "tests", name)
|
def fname (name): return os.path.join("wc", "dns", "tests", name)
| 16,551
|
def __init__ (self, client, url, form, protocol, status=200, msg=i18n._('Ok'), context={}, headers={'Content-Type': 'text/html'}): self.client = client # we pretend to be the server self.connected = True try: lang = i18n.get_headers_lang(headers) # get the template filename path, dirs, lang = get_template_url(url, lang) # do not rely on content-type header value if path.endswith('.html'): headers['Content-Type'] = 'text/html' f = file(path) # get TAL context context = get_context(dirs, form, context, lang) # get translator translator = gettext.translation(Name, LocaleDir, [lang]) # expand template data = expand_template(f, context, translator=translator) else: f = file(path, 'rb') data = f.read() except IOError, e: exception(GUI, "Wrong path `%s'", url) # XXX this can actually lead to a maximum recursion # error when client.error caused the exception return client.error(404, i18n._("Not Found")) except: # catch all other exceptions and report internal error exception(GUI, "Template error") return client.error(500, i18n._("Internal Error")) f.close() # write response self.put_response(data, protocol, status, msg, headers)
|
def __init__ (self, client, url, form, protocol, status=200, msg=i18n._('Ok'), context={}, headers={'Content-Type': 'text/html'}): self.client = client # we pretend to be the server self.connected = True try: lang = i18n.get_headers_lang(headers) # get the template filename path, dirs, lang = get_template_url(url, lang) # do not rely on content-type header value if path.endswith('.html'): headers['Content-Type'] = 'text/html' f = file(path) # get TAL context context = get_context(dirs, form, context, lang) # get translator if lang!='en': translator = gettext.translation(Name, LocaleDir, [lang]) else: translator = None # expand template data = expand_template(f, context, translator=translator) else: f = file(path, 'rb') data = f.read() except IOError, e: exception(GUI, "Wrong path `%s'", url) # XXX this can actually lead to a maximum recursion # error when client.error caused the exception return client.error(404, i18n._("Not Found")) except: # catch all other exceptions and report internal error exception(GUI, "Template error") return client.error(500, i18n._("Internal Error")) f.close() # write response self.put_response(data, protocol, status, msg, headers)
| 16,552
|
def iswritable (fname): """return True if given file is writable""" if os.path.isdir(fname) or os.path.islink(fname): return False try: if os.path.exists(fname): open(fname, 'a').close() return True else: open(fname, 'w').close() os.remove(fname) return True except IOError: pass return False
|
def iswritable (fname): """return True if given file is writable""" if os.path.isdir(fname) or os.path.islink(fname): return False try: if os.path.exists(fname): open(fname, 'a').close() return True else: open(fname, 'w').close() os.remove(fname) return True except IOError: pass return False
| 16,553
|
def _broken (): p = HtmlPrinter() p.feed("""<a b="c"><""") p.feed("""d>""")
|
def _broken (): p = HtmlPrinter() s = """<h1>bla</h1>""" for c in s: p.feed(c) p.flush()
| 16,554
|
def rating_range (value): """parse value as range; return tuple (rmin, rmax) or None on error""" mo = _range_re.match(value) if not mo: return None return (mo.group(1), mo.group(2))
|
def rating_range (value): """parse value as range; return tuple (rmin, rmax) or None on error""" mo = _range_re.match(value) if not mo: return None return (mo.group(1), mo.group(2))
| 16,555
|
def server_response (self, response, statuscode, headers): """the server got a response""" # Okay, transfer control over to the real client if self.client.connected: config['requests']['valid'] += 1 self.server.client = self.client self.client.server_response(self.server, response, statuscode, headers) else: self.server.client_abort()
|
def server_response (self, response, statuscode, status, headers): """the server got a response""" # Okay, transfer control over to the real client if self.client.connected: config['requests']['valid'] += 1 self.server.client = self.client self.client.server_response(self.server, response, statuscode, headers) else: self.server.client_abort()
| 16,556
|
def server_response (self, response, statuscode, headers): """the server got a response""" # Okay, transfer control over to the real client if self.client.connected: config['requests']['valid'] += 1 self.server.client = self.client self.client.server_response(self.server, response, statuscode, headers) else: self.server.client_abort()
|
def server_response (self, response, statuscode, headers): """the server got a response""" # Okay, transfer control over to the real client if self.client.connected: config['requests']['valid'] += 1 self.server.client = self.client self.client.server_response(self.server, response, status, headers) else: self.server.client_abort()
| 16,557
|
def check_headers (self): """add missing content-type and/or encoding headers""" # 304 Not Modified does not send any type or encoding info, # because this info was cached if self.statuscode == '304': return # check content-type against our own guess i = self.document.find('?') if i>0: document = self.document[:i] else: document = self.document gm = mimetypes.guess_type(document, None) ct = self.headers.get('Content-Type', None) if self.mime: if ct is None: warn(PROXY, i18n._("add Content-Type %s in %s"), `self.mime`, `self.url`) self.headers['Content-Type'] = "%s\r"%self.mime elif not ct.startswith(self.mime): i = ct.find(';') if i== -1: val = self.mime else: val = self.mime + ct[i:] warn(PROXY, i18n._("set Content-Type from %s to %s in %s"), `str(ct)`, `val`, `self.url`) self.headers['Content-Type'] = "%s\r"%val elif gm[0]: # guessed an own content type if ct is None: warn(PROXY, i18n._("add Content-Type %s to %s"), `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] # fix some content types elif not ct.startswith(gm[0]) and \ gm[0] in _fix_content_types: warn(PROXY, i18n._("change Content-Type from %s to %s in %s"), `ct`, `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] if gm[1] and gm[1] in _fix_content_encodings: ce = self.headers.get('Content-Encoding', None) # guessed an own encoding type if ce is None: self.headers['Content-Encoding'] = "%s\r"%gm[1] warn(PROXY, i18n._("add Content-Encoding %s to %s"), `gm[1]`, `self.url`) elif ce != gm[1]: warn(PROXY, i18n._("change Content-Encoding from %s to %s in %s"), `ce`, `gm[1]`, `self.url`) self.headers['Content-Encoding'] = "%s\r"%gm[1] # hmm, fix application/x-httpd-php* if self.headers.get('Content-Type', '').lower().startswith('application/x-httpd-php'): warn(PROXY, i18n._("fix x-httpd-php Content-Type")) self.headers['Content-Type'] = 'text/html\r'
|
def check_headers (self): """add missing content-type and/or encoding headers""" # 304 Not Modified does not send any type or encoding info, # because this info was cached if self.statuscode == '304': return # check content-type against our own guess i = self.document.find('?') if i>0: document = self.document[:i] else: document = self.document gm = mimetypes.guess_type(document, None) ct = self.headers.get('Content-Type', None) if self.mime: if ct is None: warn(PROXY, i18n._("add Content-Type %s in %s"), `self.mime`, `self.url`) self.headers['Content-Type'] = "%s\r"%self.mime elif not ct.startswith(self.mime): i = ct.find(';') if i== -1: val = self.mime else: val = self.mime + ct[i:] warn(PROXY, i18n._("set Content-Type from %s to %s in %s"), `str(ct)`, `val`, `self.url`) self.headers['Content-Type'] = "%s\r"%val elif gm[0]: # guessed an own content type if ct is None: warn(PROXY, i18n._("add Content-Type %s to %s"), `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] # fix some content types elif not ct.startswith(gm[0]) and \ gm[0] in _fix_content_types: warn(PROXY, i18n._("change Content-Type from %s to %s in %s"), `ct`, `gm[0]`, `self.url`) self.headers['Content-Type'] = "%s\r"%gm[0] if gm[1] and gm[1] in _fix_content_encodings: ce = self.headers.get('Content-Encoding', None) # guessed an own encoding type if ce is None: self.headers['Content-Encoding'] = "%s\r"%gm[1] warn(PROXY, i18n._("add Content-Encoding %s to %s"), `gm[1]`, `self.url`) elif ce != gm[1]: warn(PROXY, i18n._("change Content-Encoding from %s to %s in %s"), `ce`, `gm[1]`, `self.url`) self.headers['Content-Encoding'] = "%s\r"%gm[1] # hmm, fix application/x-httpd-php* if self.headers.get('Content-Type', '').lower().startswith('application/x-httpd-php'): warn(PROXY, i18n._("fix x-httpd-php Content-Type")) self.headers['Content-Type'] = 'text/html\r'
| 16,558
|
def get_request_headers (self, content): port = self.server.socket.getsockname()[1] headers = [ "Host: localhost:%d" % port, "Proxy-Connection: close", ] if content: headers.append("Content-Length: %d" % len(content)) headers.append("Content-Length: %d" % len(content)-5) return headers
|
def get_request_headers (self, content): port = self.server.socket.getsockname()[1] headers = [ "Host: localhost:%d" % port, "Proxy-Connection: close", ] if content: headers.append("Content-Length: %d" % len(content)) headers.append("Content-Length: %d" % (len(content)-5)) return headers
| 16,559
|
def get_response_headers (self, content): return [ "Content-Type: text/plain", "Content-Length: %d" % len(content), "Content-Length: %d" % len(content)-5, ]
|
def get_response_headers (self, content): return [ "Content-Type: text/plain", "Content-Length: %d" % len(content), "Content-Length: %d" % (len(content)-5), ]
| 16,560
|
def generate_sids (): for rule in _rules_without_sid: rule.sid = generate_unique_sid("wc") del _rules_without_sid[:]
|
def generate_sids (prefix="wc"): for rule in _rules_without_sid: rule.sid = generate_unique_sid("wc") del _rules_without_sid[:]
| 16,561
|
def generate_sids (): for rule in _rules_without_sid: rule.sid = generate_unique_sid("wc") del _rules_without_sid[:]
|
def generate_sids (): for rule in _rules_without_sid: rule.sid = generate_unique_sid(prefix) del _rules_without_sid[:]
| 16,562
|
def write_proxyconf (self): """write proxy configuration""" f = file(self['configfile'], 'w') f.write("""<?xml version="1.0" encoding="%s"?>
|
def write_proxyconf (self): """write proxy configuration""" f = file(self['configfile'], 'w') f.write("""<?xml version="1.0" encoding="%s"?>
| 16,563
|
def error(self, code, msg): ServerHandleDirectly( self.client, 'HTTP/1.0 %d Use different host\r\n', 'Content-type: text/html\r\n' 'Location: http://%s\r\n' '\r\n' % (code, new_url), msg)
|
def error(self, code, msg): ServerHandleDirectly( self.client, 'HTTP/1.0 %d %s\r\n', 'Server: WebCleaner Proxy\r\n' 'Content-type: text/html\r\n' 'Location: http://%s\r\n' '\r\n' % (code, new_url), msg)
| 16,564
|
def error(self, code, msg): ServerHandleDirectly( self.client, 'HTTP/1.0 %d Use different host\r\n', 'Content-type: text/html\r\n' 'Location: http://%s\r\n' '\r\n' % (code, new_url), msg)
|
def error(self, code, msg): ServerHandleDirectly( self.client, 'HTTP/1.0 %d Use different host\r\n', 'Content-type: text/html\r\n' '\r\n' '<html><head>' '<title>WebCleaner Proxy Error %d %s</title>' '</head><body bgcolor=" 'WebCleaner Proxy Error %d %s<br>' '%s<br></center></body></html>' % (code, msg, code, msg), msg)
| 16,565
|
def handle_dns(self, hostname, answer): assert self.state == 'dns' if answer.isError(): self.error(400, _(answer.data)) return self.state = 'server' self.ipaddr = socket.gethostbyname(self.hostname)
|
def handle_dns(self, hostname, answer): assert self.state == 'dns' if answer.isError(): self.error(400, _(answer.data)) return self.state = 'server' self.ipaddr = socket.gethostbyname(self.hostname)
| 16,566
|
def classify (self, f): if not self.entries: raise StandardError("Not initialised properly") # Are we still looking for the ruleset to apply or are we in a rule found_rule = False # When we found the rule, what is the level that we successfull passed in_level = 0 # If we failed part of the rule there is no point looking for higher level subrule allow_next = 0 # String provided by the successfull rule result = ""
|
def classify (self, f): if not self.entries: raise StandardError("Not initialised properly") # Are we still looking for the ruleset to apply or are we in a rule found_rule = False # When we found the rule, what is the level that we successfull passed in_level = 0 # If we failed part of the rule there is no point looking for higher level subrule allow_next = 0 # String provided by the successfull rule result = ""
| 16,567
|
def handle_read (self): """read data from connection, put it into recv_buffer and call process_read""" assert self.connected debug(PROXY, '%s handle_read', self)
|
def handle_read (self): """read data from connection, put it into recv_buffer and call process_read""" assert self.connected debug(PROXY, '%s handle_read', self)
| 16,568
|
def __init__ (self, client, request, headers, content, nofilter,compress): self.client = client self.request = request self.headers = headers self.compress = compress self.content = content self.nofilter = nofilter debug(HURT_ME_PLENTY, "request", `self.request`) self.method, self.url, protocol = self.request.split() scheme, hostname, port, document = spliturl(self.url) # fix missing trailing / if not document: document = '/' # fix missing host headers for HTTP/1.1 if protocol=='HTTP/1.1' and not self.headers.has_key('host'): self.headers['Host'] = hostname if port!=80: self.headers['Host'] += ":%d"%port debug(HURT_ME_PLENTY, "Proxy: splitted url", scheme, hostname, port, document) if scheme=='file': # a blocked url is a local file:// link # this means we should _not_ use this proxy for local # file links :) mtype = mimetypes.guess_type(self.url)[0] config['requests']['valid'] += 1 config['requests']['blocked'] += 1 ServerHandleDirectly(self.client, 'HTTP/1.0 200 OK\r\n', 'Content-Type: %s\r\n\r\n'%(mtype or 'application/octet-stream'), open(document, 'rb').read()) return
|
def __init__ (self, client, request, headers, content, nofilter,compress): self.client = client self.request = request self.headers = headers self.compress = compress self.content = content self.nofilter = nofilter self.method, self.url, protocol = self.request.split() scheme, hostname, port, document = spliturl(self.url) # fix missing trailing / if not document: document = '/' # fix missing host headers for HTTP/1.1 if protocol=='HTTP/1.1' and not self.headers.has_key('host'): self.headers['Host'] = hostname if port!=80: self.headers['Host'] += ":%d"%port debug(HURT_ME_PLENTY, "Proxy: splitted url", scheme, hostname, port, document) if scheme=='file': # a blocked url is a local file:// link # this means we should _not_ use this proxy for local # file links :) mtype = mimetypes.guess_type(self.url)[0] config['requests']['valid'] += 1 config['requests']['blocked'] += 1 ServerHandleDirectly(self.client, 'HTTP/1.0 200 OK\r\n', 'Content-Type: %s\r\n\r\n'%(mtype or 'application/octet-stream'), open(document, 'rb').read()) return
| 16,569
|
def update_filter (wconfig, dryrun=False, log=None): """Update the given configuration object with .zap files found at baseurl. If dryrun is True, only print out the changes but do nothing throws IOError on error """ chg = False baseurl = wconfig['baseurl']+"filter/" url = baseurl+"filter-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, "error fetching %s:"%url, msg return chg # remember all local config files filemap = {} for filename in wc.filterconf_files(): filemap[os.path.basename(filename)] = filename # read md5sums for line in page.read().splitlines(): if "<" in line: print >>log, "error fetching", url return chg if not line: continue md5sum, filename = line.split() assert filename.endswith('.zap') fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if filemap.has_key(filename): f = file(fullname) data = f.read() digest = list(md5.new(data).digest()) f.close() digest = "".join([ "%0.2x"%ord(c) for c in digest ]) if digest==md5sum: print >>log, i18n._("filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating filter %s")%filename else: print >>log, i18n._("adding new filter %s"), filename # parse new filter url = baseurl+filename+".gz" page = open_url(url) p = wc.ZapperParser(fullname, wconfig, compile_data=False) p.parse(fp=page) page.close() chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg url = baseurl+"extern-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg return chg lines = page.read().splitlines() page.close() for line in lines: if "<" in line: print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content") return chg if not line: continue md5sum, filename = line.split() # XXX UNIX-generated md5sum filenames with subdirs are not portable fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if os.path.exists(fullname): f = file(fullname) data = f.read() digest = 
list(md5.new(data).digest()) f.close() digest = "".join([ "%0.2x"%ord(c) for c in digest ]) if digest==md5sum: print >>log, i18n._("extern filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating extern filter %s")%filename else: print >>log, i18n._("adding new extern filter %s")%filename chg = True if not dryrun: url = baseurl+filename try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg continue data = page.read() if not data: print >>log, i18n._("error fetching %s:")%url, i18n._("got no data") continue f = file(fullname, 'wb') f.write(data) f.close() return chg
|
def update_filter (wconfig, dryrun=False, log=None): """Update the given configuration object with .zap files found at baseurl. If dryrun is True, only print out the changes but do nothing throws IOError on error """ chg = False baseurl = wconfig['baseurl']+"filter/" url = baseurl+"filter-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, "error fetching %s:"%url, msg return chg # remember all local config files filemap = {} for filename in wc.filterconf_files(): filemap[os.path.basename(filename)] = filename # read md5sums for line in page.read().splitlines(): if "<" in line: print >>log, "error fetching", url return chg if not line: continue md5sum, filename = line.split() assert filename.endswith('.zap') fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if filemap.has_key(filename): f = file(fullname) data = f.read() digest = list(md5.new(data).digest()) f.close() digest = "".join([ "%0.2x"%ord(c) for c in digest ]) if digest==md5sum: print >>log, i18n._("filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating filter %s")%filename else: print >>log, i18n._("adding new filter %s"), filename # parse new filter url = baseurl+filename page = open_url(url) p = wc.ZapperParser(fullname, wconfig, compile_data=False) p.parse(fp=page) page.close() chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg url = baseurl+"extern-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg return chg lines = page.read().splitlines() page.close() for line in lines: if "<" in line: print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content") return chg if not line: continue md5sum, filename = line.split() # XXX UNIX-generated md5sum filenames with subdirs are not portable fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if os.path.exists(fullname): f = file(fullname) data = f.read() digest = list(md5.new(data).digest()) 
f.close() digest = "".join([ "%0.2x"%ord(c) for c in digest ]) if digest==md5sum: print >>log, i18n._("extern filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating extern filter %s")%filename else: print >>log, i18n._("adding new extern filter %s")%filename chg = True if not dryrun: url = baseurl+filename try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg continue data = page.read() if not data: print >>log, i18n._("error fetching %s:")%url, i18n._("got no data") continue f = file(fullname, 'wb') f.write(data) f.close() return chg
| 16,570
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() self.statuscode = None if response: self.statuscode = response[1] if self.statuscode == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) self.check_headers() # will content be rewritten? rewrite = self.is_rewrite() # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.get('Content-Length') is not None: self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None # add decoders self.decoders = [] # Chunked encoded if self.headers.get('Transfer-Encoding') is not None: #debug(BRING_IT_ON, 'S/Transfer-encoding:', `self.headers['transfer-encoding']`) self.decoders.append(UnchunkStream()) # remove encoding header to_remove = ["Transfer-Encoding"] if self.headers.get("Content-Length") is not None: print >>sys.stderr, _('Warning: chunked encoding should not have Content-Length') to_remove.append("Content-Length") self.bytes_remaining = None remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" # Compressed content (uncompress only for 
rewriting modules) encoding = self.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate') and rewrite: if encoding=='deflate': self.decoders.append(DeflateStream()) else: self.decoders.append(GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if self.headers.get('Cache-Control', '').lower()=='no-transform': to_remove.append('Cache-Control') remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" elif encoding and encoding!='identity' and rewrite: print >>sys.stderr, _("Warning: unsupported encoding:"),`encoding` # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) # initStateObject can modify headers (see Compress.py)! self.attrs = initStateObjects(self.headers, self.url) if self.headers.get('Content-Length') is None: self.headers['Connection'] = 'close' #debug(HURT_ME_PLENTY, "S/Headers filtered", `self.headers.headers`) wc.proxy.HEADERS.append((self.url, 1, self.headers.headers)) self.client.server_response(self.response, self.headers) self.attrs['nofilter'] = self.nofilter['nofilter'] if ((response and response[1] in ('204', '304')) or \ self.method == 'HEAD'): # These response codes indicate no content self.state = 'recycle' else: self.state = 'content'
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() self.statuscode = None if response: self.statuscode = response[1] if self.statuscode == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) self.check_headers() # will content be rewritten? rewrite = self.is_rewrite() # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.get('Content-Length') is not None: self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None # add decoders self.decoders = [] # Chunked encoded if self.headers.get('Transfer-Encoding') is not None: #debug(BRING_IT_ON, 'S/Transfer-encoding:', `self.headers['transfer-encoding']`) self.decoders.append(UnchunkStream()) # remove encoding header to_remove = ["Transfer-Encoding"] if self.headers.get("Content-Length") is not None: print >>sys.stderr, _('Warning: chunked encoding should not have Content-Length') to_remove.append("Content-Length") self.bytes_remaining = None remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" # Compressed content (uncompress only for 
rewriting modules) encoding = self.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate'): if encoding=='deflate': self.decoders.append(DeflateStream()) else: self.decoders.append(GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if self.headers.get('Cache-Control', '').lower()=='no-transform': to_remove.append('Cache-Control') remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" elif encoding and encoding!='identity' and rewrite: print >>sys.stderr, _("Warning: unsupported encoding:"),`encoding` # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) # initStateObject can modify headers (see Compress.py)! self.attrs = initStateObjects(self.headers, self.url) if self.headers.get('Content-Length') is None: self.headers['Connection'] = 'close' #debug(HURT_ME_PLENTY, "S/Headers filtered", `self.headers.headers`) wc.proxy.HEADERS.append((self.url, 1, self.headers.headers)) self.client.server_response(self.response, self.headers) self.attrs['nofilter'] = self.nofilter['nofilter'] if ((response and response[1] in ('204', '304')) or \ self.method == 'HEAD'): # These response codes indicate no content self.state = 'recycle' else: self.state = 'content'
| 16,571
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() self.statuscode = None if response: self.statuscode = response[1] if self.statuscode == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) self.check_headers() # will content be rewritten? rewrite = self.is_rewrite() # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.get('Content-Length') is not None: self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None # add decoders self.decoders = [] # Chunked encoded if self.headers.get('Transfer-Encoding') is not None: #debug(BRING_IT_ON, 'S/Transfer-encoding:', `self.headers['transfer-encoding']`) self.decoders.append(UnchunkStream()) # remove encoding header to_remove = ["Transfer-Encoding"] if self.headers.get("Content-Length") is not None: print >>sys.stderr, _('Warning: chunked encoding should not have Content-Length') to_remove.append("Content-Length") self.bytes_remaining = None remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" # Compressed content (uncompress only for 
rewriting modules) encoding = self.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate') and rewrite: if encoding=='deflate': self.decoders.append(DeflateStream()) else: self.decoders.append(GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if self.headers.get('Cache-Control', '').lower()=='no-transform': to_remove.append('Cache-Control') remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" elif encoding and encoding!='identity' and rewrite: print >>sys.stderr, _("Warning: unsupported encoding:"),`encoding` # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) # initStateObject can modify headers (see Compress.py)! self.attrs = initStateObjects(self.headers, self.url) if self.headers.get('Content-Length') is None: self.headers['Connection'] = 'close' #debug(HURT_ME_PLENTY, "S/Headers filtered", `self.headers.headers`) wc.proxy.HEADERS.append((self.url, 1, self.headers.headers)) self.client.server_response(self.response, self.headers) self.attrs['nofilter'] = self.nofilter['nofilter'] if ((response and response[1] in ('204', '304')) or \ self.method == 'HEAD'): # These response codes indicate no content self.state = 'recycle' else: self.state = 'content'
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() self.statuscode = None if response: self.statuscode = response[1] if self.statuscode == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) self.check_headers() # will content be rewritten? rewrite = self.is_rewrite() # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.get('Content-Length') is not None: self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None # add decoders self.decoders = [] # Chunked encoded if self.headers.get('Transfer-Encoding') is not None: #debug(BRING_IT_ON, 'S/Transfer-encoding:', `self.headers['transfer-encoding']`) self.decoders.append(UnchunkStream()) # remove encoding header to_remove = ["Transfer-Encoding"] if self.headers.get("Content-Length") is not None: print >>sys.stderr, _('Warning: chunked encoding should not have Content-Length') to_remove.append("Content-Length") self.bytes_remaining = None remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" # Compressed content (uncompress only for 
rewriting modules) encoding = self.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate') and rewrite: if encoding=='deflate': self.decoders.append(DeflateStream()) else: self.decoders.append(GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if self.headers.get('Cache-Control', '').lower()=='no-transform': to_remove.append('Cache-Control') remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" elif encoding and encoding!='identity': print >>sys.stderr, _("Warning: unsupported encoding:"),`encoding` # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) # initStateObject can modify headers (see Compress.py)! self.attrs = initStateObjects(self.headers, self.url) if self.headers.get('Content-Length') is None: self.headers['Connection'] = 'close' #debug(HURT_ME_PLENTY, "S/Headers filtered", `self.headers.headers`) wc.proxy.HEADERS.append((self.url, 1, self.headers.headers)) self.client.server_response(self.response, self.headers) self.attrs['nofilter'] = self.nofilter['nofilter'] if ((response and response[1] in ('204', '304')) or \ self.method == 'HEAD'): # These response codes indicate no content self.state = 'recycle' else: self.state = 'content'
| 16,572
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() self.statuscode = None if response: self.statuscode = response[1] if self.statuscode == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) self.check_headers() # will content be rewritten? rewrite = self.is_rewrite() # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.get('Content-Length') is not None: self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None # add decoders self.decoders = [] # Chunked encoded if self.headers.get('Transfer-Encoding') is not None: #debug(BRING_IT_ON, 'S/Transfer-encoding:', `self.headers['transfer-encoding']`) self.decoders.append(UnchunkStream()) # remove encoding header to_remove = ["Transfer-Encoding"] if self.headers.get("Content-Length") is not None: print >>sys.stderr, _('Warning: chunked encoding should not have Content-Length') to_remove.append("Content-Length") self.bytes_remaining = None remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" # Compressed content (uncompress only for 
rewriting modules) encoding = self.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate') and rewrite: if encoding=='deflate': self.decoders.append(DeflateStream()) else: self.decoders.append(GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if self.headers.get('Cache-Control', '').lower()=='no-transform': to_remove.append('Cache-Control') remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" elif encoding and encoding!='identity' and rewrite: print >>sys.stderr, _("Warning: unsupported encoding:"),`encoding` # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) # initStateObject can modify headers (see Compress.py)! self.attrs = initStateObjects(self.headers, self.url) if self.headers.get('Content-Length') is None: self.headers['Connection'] = 'close' #debug(HURT_ME_PLENTY, "S/Headers filtered", `self.headers.headers`) wc.proxy.HEADERS.append((self.url, 1, self.headers.headers)) self.client.server_response(self.response, self.headers) self.attrs['nofilter'] = self.nofilter['nofilter'] if ((response and response[1] in ('204', '304')) or \ self.method == 'HEAD'): # These response codes indicate no content self.state = 'recycle' else: self.state = 'content'
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() self.statuscode = None if response: self.statuscode = response[1] if self.statuscode == '100': # it's a Continue request, so go back to waiting for headers # XXX for HTTP/1.1 clients, forward this self.state = 'response' return # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) #debug(HURT_ME_PLENTY, "S/Headers", `self.headers.headers`) self.check_headers() # will content be rewritten? rewrite = self.is_rewrite() # add client accept-encoding value self.headers['Accept-Encoding'] = self.client.compress if self.headers.get('Content-Length') is not None: self.bytes_remaining = int(self.headers['Content-Length']) #debug(HURT_ME_PLENTY, "%d bytes remaining"%self.bytes_remaining) if rewrite: remove_headers(self.headers, ['Content-Length']) else: self.bytes_remaining = None # add decoders self.decoders = [] # Chunked encoded if self.headers.get('Transfer-Encoding') is not None: #debug(BRING_IT_ON, 'S/Transfer-encoding:', `self.headers['transfer-encoding']`) self.decoders.append(UnchunkStream()) # remove encoding header to_remove = ["Transfer-Encoding"] if self.headers.get("Content-Length") is not None: print >>sys.stderr, _('Warning: chunked encoding should not have Content-Length') to_remove.append("Content-Length") self.bytes_remaining = None remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" # Compressed content (uncompress only for 
rewriting modules) encoding = self.headers.get('Content-Encoding', '').lower() if encoding in ('gzip', 'x-gzip', 'deflate') and rewrite: if encoding=='deflate': self.decoders.append(DeflateStream()) else: self.decoders.append(GunzipStream()) # remove encoding because we unzip the stream to_remove = ['Content-Encoding'] # remove no-transform cache control if self.headers.get('Cache-Control', '').lower()=='no-transform': to_remove.append('Cache-Control') remove_headers(self.headers, to_remove) # add warning self.headers['Warning'] = "214 WebCleaner Transformation applied" elif encoding and encoding!='identity' and rewrite: print >>sys.stderr, _("Warning: unsupported encoding:"),`encoding` # do not disable filtering for unknown content-encodings # this could result in a DoS attack (server sending garbage # as content-encoding) # initStateObject can modify headers (see Compress.py)! self.attrs = initStateObjects(self.headers, self.url) if self.headers.get('Content-Length') is None: self.headers['Connection'] = 'close' #debug(HURT_ME_PLENTY, "S/Headers filtered", `self.headers.headers`) wc.proxy.HEADERS.append((self.url, 1, self.headers.headers)) self.client.server_response(self.response, self.headers) self.attrs['nofilter'] = self.nofilter['nofilter'] if ((response and response[1] in ('204', '304')) or \ self.method == 'HEAD'): # These response codes indicate no content self.state = 'recycle' else: self.state = 'content'
| 16,573
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() if response and response[1] == '100': # it's a Continue request, so go back to waiting for headers self.state = 'response' return # check for unusual compressed files if self.document.endswith(".bz2") or \ self.document.endswith(".tgz") or \ self.document.endswith(".gz"): gm = mimetypes.guess_type(self.document, False) if gm[1]: self.headers['Content-Encoding'] = gm[1] if gm[0]: self.headers['Content-Type'] = gm[0] # filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) # will content be rewritten? rewrite = False for ro in config['mime_content_rewriting']: if ro.match(self.headers.get('Content-Type', "")): rewrite = True break #debug(HURT_ME_PLENTY, "S/Headers ", `self.headers.headers`) if self.headers.has_key('Content-Length'): if rewrite: remove_headers(self.headers, ['Content-Length']) self.bytes_remaining = None else: self.bytes_remaining = int(self.headers['Content-Length']) else: self.bytes_remaining = None
|
def process_headers (self): # Headers are terminated by a blank line .. now in the regexp, # we want to say it's either a newline at the beginning of # the document, or it's a lot of headers followed by two newlines. # The cleaner alternative would be to read one line at a time # until we get to a blank line... m = re.match(r'^((?:[^\r\n]+\r?\n)*\r?\n)', self.recv_buffer) if not m: return # handle continue requests (XXX should be in process_response?) response = self.response.split() if response and response[1] == '100': # it's a Continue request, so go back to waiting for headers self.state = 'response' return # check for unusual compressed files# filter headers self.headers = applyfilter(FILTER_RESPONSE_HEADER, rfc822.Message(StringIO(self.read(m.end()))), attrs=self.nofilter) # will content be rewritten? rewrite = False for ro in config['mime_content_rewriting']: if ro.match(self.headers.get('Content-Type', "")): rewrite = True break #debug(HURT_ME_PLENTY, "S/Headers ", `self.headers.headers`) if self.headers.has_key('Content-Length'): if rewrite: remove_headers(self.headers, ['Content-Length']) self.bytes_remaining = None else: self.bytes_remaining = int(self.headers['Content-Length']) else: self.bytes_remaining = None
| 16,574
|
def _broken (): p = HtmlPrinter() p.feed("""<a><t""") p.feed("""r>""") p.flush()
|
def _broken ():
    """Regression fixture: feed one large real-world HTML page (with
    commented-out script blocks, unclosed quotes and other malformed
    markup) through HtmlPrinter in a single chunk, then flush."""
    p = HtmlPrinter()
    # the fixture markup below is reproduced verbatim; do not "fix" it,
    # its brokenness is the point of the test
    p.feed("""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"> <HTML> <HEAD> <META NAME="Description" CONTENT="Obsession Development: Products"> <META NAME="Resource-Type" CONTENT="document"> <META NAME="Content-Type" CONTENT="text/html, charset=iso-8859-1"> <META HTTP-EQUIV="Content-Style-Type" CONTENT="text/css"> <LINK REL="Stylesheet" HREF="../obsession.css" TYPE="text/css"> <TITLE>Obsession Development: gentoo</TITLE> <!-- SCRIPT LANGUAGE="JavaScript"> <!-- if (document.images) { var obsessionoff = new Image() obsessionoff.src = "../ObsessionOff.GIF" var obsessionon = new Image() obsessionon.src = "../ObsessionOn.GIF" var contactoff = new Image() contactoff.src = "../ContactOff.GIF" var contacton = new Image() contacton.src = "../ContactOn.GIF" var projectsoff = new Image() projectsoff.src = "../ProjectsOff.GIF" var projectson = new Image() projectson.src = "../ProjectsOn.GIF" } function actMenuItem(img1,img2) { act(img1) } function inactMenuItem(img1, img2) { inact(img1) } function act(imgName) { if (document.images) document[imgName].src = eval(imgName + 'on.src') window.status = "Click me!" } function inact(imgName) { if (document.images) document[imgName].src = eval(imgName + 'off.src') window.status = "Obsession - Just ideas." 
} // --> </SCRIPT --> </HEAD> <BODY BACKGROUND="../bk14.gif" BGCOLOR=" <TABLE COLS=3 ROWS=2 CELLPADDING=0 CELLSPACING=0 BORDER=0> <TR> <TD WIDTH=164 HEIGHT=36><IMG CLASS="Hemlig" SRC="../spacer.GIF" WIDTH=1 HEIGHT=36 BORDER="0"></TD><TD></TD> </TR> <TR> <TD></TD><TD> <TABLE COLS=2 CELLPADDING=0 CELLSPACING=0 BORDER=0> <TR> <TD WIDTH=127 VALIGN=Top ALIGN="CENTER"> <IMG SRC="../OD_Logo-Small.GIF" WIDTH=127 HEIGHT=136 ALT="Magic" BORDER="0"> <BR> <A HREF="MAILTO:emil@obsession.se?subject=[gentoo]"><IMG SRC="../Junk.GIF" WIDTH=25 HEIGHT=19 ALT="E-mail Emil" BORDER="0"></A> <BR> <FONT FACE="Verdana, Arial, Helvetica, sans-serif" SIZE="1"><B><A HREF="MAILTO:emil@obsession.se?subject=[gentoo]">E-mail Author</A> <BR><BR><BR> <P align="center" class="Margin"> <B>Download</B><BR> <A href="http://prdownloads.sourceforge.net/gentoo/gentoo-0.11.25.tar.gz?download" title="Download gentoo"> <IMG border=0 height=17 src="../download.gif" width="19"> <B>Download<BR> gentoo 0.11.25 (http)</A> </B> <BR>[711 KB, tar.gz] <BR>Requires GTK+ 1.2.x </P> <P align=center class=Margin> <B>Patch</B><BR> <A href="http://prdownloads.sourceforge.net/gentoo/diff-0.11.24-to-0.11.25.gz?download" title="Download Patch"> <IMG border=0 height=17 src="../download.gif" width=19> <B>Download<BR>0.11.24 to 0.11.25 (http)</A> </B> <BR>[28 KB diff -ruN patch, gzipped] </P><BR> <P align=center class=Margin> <P align=center class=Margin>Packages<BR> <A href="ftp://ftp.falsehope.com/pub/gentoo">Red Hat RPMs</A><BR> [Maintainer: <A href="mailto:ryanw@infohwy.com">Ryan Weaver</A>] <BR> <BR> <A href="http://www.debian.org/Packages/unstable/x11/gentoo.html">Debian DEBs</A><BR> [Maintainer: <A href="mailto:jrodin@jagor.srce.hr">Josip Rodin</A>]<BR> <BR> Gentoo Linux users, type<BR> <TT>emerge app-misc/gentoo</TT> <BR> <BR> <A HREF="http://www.openbsd.org/cgi-bin/cvsweb/ports/x11/gentoo">OpenBSD Port</A><BR> [Maintainer: Jim Geovedi]<BR> </P> <P align=center class=Margin>AppIndex<BR> New releases of gentoo are 
announced on <A href="http://freshmeat.net/">FreshMeat</A> You can go directly to gentoo's <A href="http://freshmeat.net/appindex/1998/09/24/906621975.html">AppIndex page</A>. </P></FONT> </TD> <TD WIDTH=16></TD> <TD WIDTH=256> <TABLE WIDTH=512 COLS=2 CELLPADDING=0 CELLSPACING=0 BORDER="0"> <TR> <TD WIDTH="8" ROWSPAN="2"></TD <TD WIDTH="512" HEIGHT="88"> <IMG NAME="obsession" SRC="../spacer.GIF" WIDTH=32 HEIGHT=32 BORDER="0" HSPACE=8> <BR CLEAR=All> <IMG SRC="../Just02.GIF" WIDTH=256 HEIGHT=88 ALT="gentoo logo" BORDER="0"></TD> </TR ><TR> <TD> <BR><FONT FACE="Arial, Geneva, Helvetica, sans-serif" SIZE="2"> <P> gentoo is a modern, powerful, flexible, and utterly configurable file manager for UNIX systems, written using the GTK+ toolkit. It aims to be 100% graphically configurable; there's no need to edit config files by hand and then restart the application. gentoo is somewhat inspired in its look & feel by the classic Amiga program DirectoryOpus. It has been successfully tested on a variety of platforms, including Linux/x86, Linux/Alpha, Solaris, FreeBSD and OpenBSD. </P> <P> (If you came here looking for the <A href="http://www.gentoo.org/">Gentoo Linux</A> distribution, you know where to click. Then come back and download gentoo to manage your files with! :) </P> <B>Features</B> <P>Some of the main features of gentoo are: <UL> <FONT face="Arial, Geneva, Helvetica, sans-serif" size="2"> <LI>Written from scratch, using ANSI C and the GTK+ toolkit. <LI>Aims to be 100% graphically configurable, and comes pretty close, too. <LI>Very cool (!) file typing and -styling systems allows you to configure how files of different types are shown (with colors and icons), and what happens when you doubleclick them (spawn image viewers, music players, etc). <LI>Includes more than 120 original pixmaps icons (16x15 pixels). <LI>Internal support for most file operations (copy, move, rename, rename, makedir etc). 
</FONT> </UL> </P> <B>Requirements</B> <P> The most modern (0.11.x) releases of <B>gentoo</B> require GTK+ 1.2.x. As is normal with GTK+ applications, gentoo also requires the GDK and glib libraries. If you have a working GTK+ installation, you will have these too, so don't worry. If your system does <B>not</B> have GTK+ installed, you need to download it (and glib) from <A href="http://www.gtk.org/">http://www.gtk.org/</A>. </P> <P> It is nice, but not required, to have the <CODE>file(1)</CODE> command installed, since gentoo can use it to identify filetypes. Please be aware that not all <CODE>file</CODE> commands supplied with commercial Un*xes are good enough to be used with gentoo (this is the case with Sun's <CODE>file</CODE> implementation, for example). You might want to look for a replacement. The version found <A href="http://freshmeat.net/projects/file/?highlight=file">here</A> is recommended. </P> <P> A few <B>screenshots</B> of gentoo are also available (Shots show gentoo version 0.11.11, running under <A href="http://www.windowmaker.org/">Window Maker</A> and were taken on 2000-01-04): <UL> <FONT face="Arial, Geneva, Helvetica, sans-serif"> <LI><A href="/gentoo/main.gif" title="Screenshot of gentoo">Main Window</A> [41 KB GIF] <LI><A href="/gentoo/cfg_dirpane.gif" title="Screenshot of gentoo">Dir Pane Config</A> [21 KB GIF] <LI><A href="/gentoo/cfg_styles.gif" title="Screenshot of gentoo">File Style Config</A> [15 KB GIF] <LI><A href="/gentoo/cfg_types.gif" title="Screenshot of gentoo">File Type Config</A> [18 KB GIF] <LI><A href="/gentoo/cfg_buttons.gif" title="Screenshot of gentoo">Action Button Config</A> [16 KB GIF] </FONT> </UL> <P></P> <P> <B>User-Contributed Screenshots</B> <UL> <FONT face="Arial, Geneva, Helvetica, sans-serif"> <LI><A href="/gentoo/contrib/Stefan_Eiserman.gif">Main Window</A> [By Stefan Eiserman, 77 KB GIF]</LI> <LI><A href="/gentoo/contrib/Stefan_Eiserman2.jpg">Main Window, big</A> [Also by Stefan Eiserman, 333 KB JPG]</LI> 
<LI><A href="/gentoo/contrib/theduke_dockicon.xpm">Window Maker dock icon</A> [By Kris, <<A href="mailto:theduke@planetinternet.be">theduke@planetinternet.be</A>>, 21 KB XPM]</LI> <LI><A HREF="/gentoo/contrib/Stefan_Nicolin.jpg">Main Window (themed GTK+)</A> [By Stefan Nicolin, 75 KB JPG]</LI> <LI><A HREF="/gentoo/contrib/Johannes_Tevessen.gif">Main Window (themed GTK+)</A> [By Johannes Tevessen, 58 KB GIF]</LI> <LI><A HREF="/gentoo/contrib/Erik_Sittmann.jpg">Main Window (Cygwin/Win32)</A> [By Erik Sittmann, 388 KB JPG]</LI> </FONT> </UL> </P> </TD> </TR> </TABLE> </TD> </TR> </TABLE> </TD> </TR> </TABLE> </BODY> </HTML> """)
    p.flush()
| 16,575
|
def _form_ratings (form):
    """Check category value validity.

    Validates every submitted "category_*" form field against its
    category definition, records valid values in the module-global
    `values` dict, and sets error['categoryvalue'] and returns False on
    the first invalid one.
    """
    for catname, value in _get_prefix_vals(form, 'category_'):
        category = _get_category(catname)
        if category is None:
            # unknown category
            error['categoryvalue'] = True
            return False
        # fixed: non-iterable (range) categories submit a string that
        # must be parsed before validation; previously the raw string
        # was passed to valid_value()
        if category.iterable:
            realvalue = value
        else:
            realvalue = _intrange_from_string(value)
        if not category.valid_value(realvalue):
            error['categoryvalue'] = True
            return False
        if category.iterable:
            # tick the chosen value and clear the "none" marker
            values[catname]['none'] = False
            values[catname][value] = True
        else:
            values[catname] = value
    return True
|
def _form_ratings (form):
    """Validate every submitted rating category value, recording the
    accepted values and flagging an error on the first invalid one."""
    for catname, value in _get_prefix_vals(form, 'category_'):
        category = _get_category(catname)
        if category is None:
            # unknown category
            error['categoryvalue'] = True
            return False
        # range categories submit a string that must be parsed first
        if not category.iterable:
            realvalue = _intrange_from_string(value)
        else:
            realvalue = value
        if not category.valid_value(realvalue):
            error['categoryvalue'] = True
            return False
        if not category.iterable:
            values[catname] = value
        else:
            values[catname]['none'] = False
            values[catname][value] = True
    return True
| 16,576
|
def read_ids (filename, ids):
    """Parse the given .zap filter file and record the folder's
    sid/oid/configversion plus the sid of each domains/urls rule
    into the ids mapping."""
    parser = wc.configuration.ZapperParser(filename)
    parser.parse()
    folder_ids = ids['folder']
    folder_ids['sid'] = str(parser.folder.sid)
    folder_ids['oid'] = parser.folder.oid
    folder_ids['configversion'] = str(parser.folder.configversion)
    for rule in parser.folder.rules:
        # rules named "...domains" / "...urls" carry the sid for that type
        for ftype in ('domains', 'urls'):
            if rule.name.endswith(ftype):
                ids[ftype]['sid'] = str(rule.sid)
|
def read_ids (filename, ids):
    """Parse the given .zap filter file and record the folder's
    sid/oid/configversion plus the sid of each domains/urls rule
    into the ids mapping."""
    p = wc.configuration.confparse.ZapperParser(filename)
    p.parse()
    ids['folder']['sid'] = str(p.folder.sid)
    ids['folder']['oid'] = p.folder.oid
    ids['folder']['configversion'] = str(p.folder.configversion)
    for rule in p.folder.rules:
        # rules named "...domains" / "...urls" carry the sid for that type
        for ftype in ('domains', 'urls'):
            if rule.name.endswith(ftype):
                ids[ftype]['sid'] = str(rule.sid)
| 16,577
|
def blacklist (fname, extract_to="extracted"): source = os.path.join("downloads", fname) # extract tar if fname.endswith(".tar.gz") or fname.endswith(".tgz"): print "extracting archive", fname f = tarfile.TarFile.gzopen(source) for m in f: a, b = os.path.split(m.name) a = os.path.basename(a) if b in myfiles and a in mycats: print m.name f.extract(m, extract_to) f.close() read_blacklists(extract_to) rm_rf(extract_to) elif fname.endswith(".gz"): print "gunzip..." f = gzip.open(source) fname = "extracted/"+fname[:-3] os.makedirs(os.path.dirname(fname), 0722) w = file(fname, 'wb') w.write(f.read()) w.close() f.close() read_data(fname, "domains", domains)
|
def blacklist (fname, extract_to="extracted"): source = os.path.join("downloads", fname) # extract tar if fname.endswith(".tar.gz") or fname.endswith(".tgz"): print "extracting archive", fname f = tarfile.TarFile.gzopen(source) for m in f: a, b = os.path.split(m.name) a = os.path.basename(a) if b in myfiles and a in mycats: print m.name f.extract(m, extract_to) f.close() read_blacklists(extract_to) rm_rf(extract_to) elif fname.endswith(".gz"): print "gunzip..." f = gzip.open(source) fname = os.path.join(extract_to, fname[:-3]) os.makedirs(os.path.dirname(fname), 0722) w = file(fname, 'wb') w.write(f.read()) w.close() f.close() read_data(fname, "domains", domains)
| 16,578
|
def jsScriptData (self, data, url, ver):
    """Callback for loading <script src=""> data in the background.

    data -- a chunk of the downloaded script, or None when the
            download is finished
    url  -- the script source URL (used for logging and error text)
    ver  -- JavaScript version; unused here, kept for the callback
            signature
    """
    # this callback must only fire while the parser is suspended
    assert self.htmlparser.state[0] == 'wait', "non-wait state"
    wc.log.debug(wc.LOG_JS, "%s jsScriptData %r", self, data)
    if data is None:
        # download finished: emit the accumulated script as an inline
        # <script> element in the tag buffer
        if not self.js_script:
            wc.log.warn(wc.LOG_JS, "empty JavaScript src %s", url)
            self.js_script = u"// "+\
                _("error fetching script from %r") % url
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.STARTTAG, u"script",
             {'type': 'text/javascript'}])
        # norm html comments
        script = wc.js.remove_html_comments(self.js_script)
        script = u"\n<!--\n%s\n//-->\n" % wc.js.escape_js(script)
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.DATA, script])
        # Note: <script src=""> could be missing an end tag,
        # but now we need one. Look later for a duplicate </script>.
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.ENDTAG, u"script"])
        self.js_script = u''
        # resume normal HTML parsing
        self.htmlparser.state = ('parse',)
        wc.log.debug(wc.LOG_JS, "%s switching back to parse with", self)
        self.htmlparser.debugbuf(wc.LOG_JS)
    else:
        # another chunk arrived; keep accumulating
        wc.log.debug(wc.LOG_JS, "JS read %d <= %s", len(data), url)
        self.js_script += data
|
def jsScriptData (self, data, url, ver):
    """Callback for loading <script src=""> data in the background.

    data -- a chunk of the downloaded script, or None when the
            download is finished
    url  -- the script source URL (used for logging and error text)
    ver  -- JavaScript version; unused here, kept for the callback
            signature
    """
    # this callback must only fire while the parser is suspended
    assert self.htmlparser.state[0] == 'wait', "non-wait state"
    wc.log.debug(wc.LOG_JS, "%s jsScriptData %r", self, data)
    if data is None:
        # download finished: emit the accumulated script as an inline
        # <script> element in the tag buffer
        if not self.js_script:
            wc.log.warn(wc.LOG_JS, "empty JavaScript src %s", url)
            self.js_script = u"// "+\
                _("error fetching script from %r") % url
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.STARTTAG, u"script",
             {'type': 'text/javascript'}])
        # norm html comments
        script = wc.js.remove_html_comments(self.js_script)
        script = u"\n<!--\n%s\n//-->\n" % wc.js.escape_js(script)
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.DATA, script])
        # Note: <script src=""> could be missing an end tag,
        # but now we need one. Look later for a duplicate </script>.
        self.htmlparser.tagbuf.append(
            [wc.filter.rules.RewriteRule.ENDTAG, u"script"])
        self.js_script = u''
        # resume normal HTML parsing
        self.htmlparser.state = ('parse',)
        wc.log.debug(wc.LOG_JS, "%s switching back to parse with", self)
        self.htmlparser.debugbuf(wc.LOG_JS)
    else:
        # another chunk arrived; keep accumulating
        wc.log.debug(wc.LOG_JS, "JS read %d <= %s", len(data), url)
        self.js_script += data
| 16,579
|
def _form_reset ():
    """Reset the module-global info/error flag dictionaries and the
    filterenabled/filterdisabled display strings."""
    global filterenabled, filterdisabled
    filterenabled = u""
    filterdisabled = u""
    for key in info.keys():
        info[key] = False
    for key in error.keys():
        error[key] = False
    # removed dead code: a trailing "res = [None]" assigned a local
    # that was never used
|
def _form_reset ():
    """reset info/error and global vars"""
    global filterenabled, filterdisabled
    filterenabled = u""
    filterdisabled = u""
    # clear every status flag in both dictionaries
    for flags in (info, error):
        for key in flags.keys():
            flags[key] = False
| 16,580
|
def update (wconfig, baseurl, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at
    baseurl.  If dryrun is True, only print out the changes but do
    nothing.  Throws IOError on error.
    """
    chg = False
    # first pass: .zap filter files, via their published md5 checksums
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.filterconf_files():
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        # a "<" means we got an HTML error page instead of checksums
        if "<" in line:
            print >>log, "error fetching", url
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating filter %s")%filename
        else:
            # NOTE(review): the comma prints the raw format string
            # followed by the filename; sibling messages use "%" --
            # looks like a bug, confirm intended output
            print >>log, i18n._("adding new filter %s"), filename
        # parse new filter
        url = baseurl+filename+".gz"
        page = open_url(url)
        p = wc.ZapperParser(fullname, compile_data=False)
        p.parse(page, wconfig)
        page.close()
        chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg
    # second pass: externally maintained filter data files
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, i18n._("error fetching %s:")%url, msg
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("extern filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating extern filter %s")%filename
        else:
            print >>log, i18n._("adding new extern filter %s")%filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, i18n._("error fetching %s:")%url, msg
                continue
            data = page.read()
            if not data:
                print >>log, i18n._("error fetching %s:")%url, i18n._("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    return chg
|
def update (wconfig, dryrun=False, log=None): """Update the given configuration object with .zap files found at baseurl. If dryrun is True, only print out the changes but do nothing throws IOError on error """ chg = False url = baseurl+"filter-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, "error fetching %s:"%url, msg return chg # remember all local config files filemap = {} for filename in wc.filterconf_files(): filemap[os.path.basename(filename)] = filename # read md5sums for line in page.read().splitlines(): if "<" in line: print >>log, "error fetching", url return chg if not line: continue md5sum, filename = line.split() assert filename.endswith('.zap') fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if filemap.has_key(filename): f = file(fullname) data = f.read() digest = list(md5.new(data).digest()) f.close() digest = "".join([ "%0.2x"%ord(c) for c in digest ]) if digest==md5sum: print >>log, i18n._("filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating filter %s")%filename else: print >>log, i18n._("adding new filter %s"), filename # parse new filter url = baseurl+filename+".gz" page = open_url(url) p = wc.ZapperParser(fullname, compile_data=False) p.parse(page, wconfig) page.close() chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg url = baseurl+"extern-md5sums.txt" try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg return chg lines = page.read().splitlines() page.close() for line in lines: if "<" in line: print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content") return chg if not line: continue md5sum, filename = line.split() # XXX UNIX-generated md5sum filenames with subdirs are not portable fullname = os.path.join(wc.ConfigDir, filename) # compare checksums if os.path.exists(fullname): f = file(fullname) data = f.read() digest = list(md5.new(data).digest()) f.close() digest = "".join([ "%0.2x"%ord(c) 
for c in digest ]) if digest==md5sum: print >>log, i18n._("extern filter %s not changed, ignoring")%filename continue print >>log, i18n._("updating extern filter %s")%filename else: print >>log, i18n._("adding new extern filter %s")%filename chg = True if not dryrun: url = baseurl+filename try: page = open_url(url) except IOError, msg: print >>log, i18n._("error fetching %s:")%url, msg continue data = page.read() if not data: print >>log, i18n._("error fetching %s:")%url, i18n._("got no data") continue f = file(fullname, 'wb') f.write(data) f.close() return chg
| 16,581
|
def update (wconfig, baseurl, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at
    baseurl.  If dryrun is True, only print out the changes but do
    nothing.  Throws IOError on error.
    """
    chg = False
    # first pass: .zap filter files, via their published md5 checksums
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.filterconf_files():
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        # a "<" means we got an HTML error page instead of checksums
        if "<" in line:
            print >>log, "error fetching", url
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating filter %s")%filename
        else:
            # NOTE(review): the comma prints the raw format string
            # followed by the filename; sibling messages use "%" --
            # looks like a bug, confirm intended output
            print >>log, i18n._("adding new filter %s"), filename
        # parse new filter
        url = baseurl+filename+".gz"
        page = open_url(url)
        p = wc.ZapperParser(fullname, compile_data=False)
        p.parse(page, wconfig)
        page.close()
        chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg
    # second pass: externally maintained filter data files
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, i18n._("error fetching %s:")%url, msg
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("extern filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating extern filter %s")%filename
        else:
            print >>log, i18n._("adding new extern filter %s")%filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, i18n._("error fetching %s:")%url, msg
                continue
            data = page.read()
            if not data:
                print >>log, i18n._("error fetching %s:")%url, i18n._("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    return chg
|
def update (wconfig, baseurl, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at
    baseurl.  If dryrun is True, only print out the changes but do
    nothing.  Throws IOError on error.
    """
    chg = False
    # first pass: .zap filter files, via their published md5 checksums
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.filterconf_files():
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        # a "<" means we got an HTML error page instead of checksums
        if "<" in line:
            print >>log, "error fetching", url
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating filter %s")%filename
        else:
            # NOTE(review): the comma prints the raw format string
            # followed by the filename; sibling messages use "%" --
            # looks like a bug, confirm intended output
            print >>log, i18n._("adding new filter %s"), filename
        # parse new filter
        # (this variant passes wconfig to the parser constructor and
        # the page as a keyword argument)
        url = baseurl+filename+".gz"
        page = open_url(url)
        p = wc.ZapperParser(fullname, wconfig, compile_data=False)
        p.parse(fp=page)
        page.close()
        chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg
    # second pass: externally maintained filter data files
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, i18n._("error fetching %s:")%url, msg
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("extern filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating extern filter %s")%filename
        else:
            print >>log, i18n._("adding new extern filter %s")%filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, i18n._("error fetching %s:")%url, msg
                continue
            data = page.read()
            if not data:
                print >>log, i18n._("error fetching %s:")%url, i18n._("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    return chg
| 16,582
|
def update (wconfig, baseurl, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at
    baseurl.  If dryrun is True, only print out the changes but do
    nothing.  Throws IOError on error.
    """
    chg = False
    # first pass: .zap filter files, via their published md5 checksums
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.filterconf_files():
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        # a "<" means we got an HTML error page instead of checksums
        if "<" in line:
            print >>log, "error fetching", url
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating filter %s")%filename
        else:
            # NOTE(review): the comma prints the raw format string
            # followed by the filename; sibling messages use "%" --
            # looks like a bug, confirm intended output
            print >>log, i18n._("adding new filter %s"), filename
        # parse new filter
        url = baseurl+filename+".gz"
        page = open_url(url)
        p = wc.ZapperParser(fullname, compile_data=False)
        p.parse(page, wconfig)
        page.close()
        chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg
    # second pass: externally maintained filter data files
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, i18n._("error fetching %s:")%url, msg
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("extern filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating extern filter %s")%filename
        else:
            print >>log, i18n._("adding new extern filter %s")%filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, i18n._("error fetching %s:")%url, msg
                continue
            data = page.read()
            if not data:
                print >>log, i18n._("error fetching %s:")%url, i18n._("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    return chg
|
def update (wconfig, baseurl, dryrun=False, log=None):
    """Update the given configuration object with .zap files found at
    baseurl.  If dryrun is True, only print out the changes but do
    nothing.  Throws IOError on error.
    """
    chg = False
    # first pass: .zap filter files, via their published md5 checksums
    url = baseurl+"filter-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, "error fetching %s:"%url, msg
        return chg
    # remember all local config files
    filemap = {}
    for filename in wc.filterconf_files():
        filemap[os.path.basename(filename)] = filename
    # read md5sums
    for line in page.read().splitlines():
        # a "<" means we got an HTML error page instead of checksums
        if "<" in line:
            print >>log, "error fetching", url
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        assert filename.endswith('.zap')
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if filemap.has_key(filename):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating filter %s")%filename
        else:
            # NOTE(review): the comma prints the raw format string
            # followed by the filename; sibling messages use "%" --
            # looks like a bug, confirm intended output
            print >>log, i18n._("adding new filter %s"), filename
        # parse new filter
        url = baseurl+filename+".gz"
        page = open_url(url)
        p = wc.ZapperParser(fullname, compile_data=False)
        p.parse(page, wconfig)
        page.close()
        chg = wconfig.merge_folder(p.folder, dryrun=dryrun, log=log) or chg
    # second pass: externally maintained filter data files
    url = baseurl+"extern-md5sums.txt"
    try:
        page = open_url(url)
    except IOError, msg:
        print >>log, i18n._("error fetching %s:")%url, msg
        return chg
    lines = page.read().splitlines()
    page.close()
    for line in lines:
        if "<" in line:
            print >>log, i18n._("error fetching %s:")%url, i18n._("invalid content")
            return chg
        if not line:
            continue
        md5sum, filename = line.split()
        # XXX UNIX-generated md5sum filenames with subdirs are not portable
        fullname = os.path.join(wc.ConfigDir, filename)
        # compare checksums
        if os.path.exists(fullname):
            f = file(fullname)
            data = f.read()
            digest = list(md5.new(data).digest())
            f.close()
            digest = "".join([ "%0.2x"%ord(c) for c in digest ])
            if digest==md5sum:
                print >>log, i18n._("extern filter %s not changed, ignoring")%filename
                continue
            print >>log, i18n._("updating extern filter %s")%filename
        else:
            print >>log, i18n._("adding new extern filter %s")%filename
        chg = True
        if not dryrun:
            url = baseurl+filename
            try:
                page = open_url(url)
            except IOError, msg:
                print >>log, i18n._("error fetching %s:")%url, msg
                continue
            data = page.read()
            if not data:
                print >>log, i18n._("error fetching %s:")%url, i18n._("got no data")
                continue
            f = file(fullname, 'wb')
            f.write(data)
            f.close()
    return chg
| 16,583
|
def new_instance (self, opts):
    """Create a new HtmlFilter sharing this filter's rules, ratings
    and url.

    opts -- dict of keyword options expanded into the constructor.
    NOTE(review): a sibling definition takes **opts instead of a dict
    argument; confirm which calling convention callers actually use.
    """
    return HtmlFilter(self.rules, self.ratings, self.url, **opts)
|
def new_instance (self, **opts):
    """Create a new HtmlFilter sharing this filter's rules, ratings
    and url; keyword options are passed through to the constructor."""
    return HtmlFilter(self.rules, self.ratings, self.url, **opts)
| 16,584
|
def cdata (self, data):
    """character data"""
    self._debug("cdata %r", data)
    # CDATA is passed through unchanged as ordinary document data
    return self._data(data)
|
def cdata (self, data):
    """character data"""
    debug(FILTER, "%s cdata %r", self, data)
    # CDATA is passed through unchanged as ordinary document data
    return self._data(data)
| 16,585
|
def characters (self, data):
    """characters"""
    self._debug("characters %r", data)
    # plain text is passed through unchanged as document data
    return self._data(data)
|
def characters (self, data):
    """characters"""
    debug(FILTER, "%s characters %r", self, data)
    # plain text is passed through unchanged as document data
    return self._data(data)
| 16,586
|
def comment (self, data):
    """Buffer a comment; empty comments, or any comment while comment
    filtering is disabled, are silently dropped."""
    if self.comments and data:
        self._debug("comment %r", data)
        self.htmlparser.tagbuf.append([COMMENT, data])
|
def comment (self, data):
    """Buffer a comment; empty comments, or any comment while comment
    filtering is disabled, are silently dropped."""
    if self.comments and data:
        debug(FILTER, "%s comment %r", self, data)
        self.htmlparser.tagbuf.append([COMMENT, data])
| 16,587
|
def doctype (self, data):
    """Handle a DOCTYPE declaration; it is re-emitted verbatim."""
    self._debug("doctype %r", data)
    return self._data("<!DOCTYPE%s>"%data)
|
def doctype (self, data):
    """Handle a DOCTYPE declaration; it is re-emitted verbatim."""
    debug(FILTER, "%s doctype %r", self, data)
    return self._data("<!DOCTYPE%s>"%data)
| 16,588
|
def pi (self, data):
    """Handle a processing instruction; it is re-emitted verbatim."""
    self._debug("pi %r", data)
    return self._data("<?%s?>"%data)
|
def pi (self, data):
    """Handle a processing instruction; it is re-emitted verbatim."""
    debug(FILTER, "%s pi %r", self, data)
    return self._data("<?%s?>"%data)
| 16,589
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug("startElement %r", tag) tag = check_spelling(tag, self.url) if self.stackcount: if self.stackcount[-1][0]==tag: self.stackcount[-1][1] += 1 if tag=="meta": if attrs.get('http-equiv', '').lower() =='content-rating': rating = resolve_html_entities(attrs.get('content', '')) url, rating = rating_import(url, rating) # note: always put this in the cache, since this overrides # any http header setting, and page content changes more # often rating_add(url, rating) elif tag=="body": if self.ratings: # headers finished, check rating data for rule in self.ratings: msg = rating_allow(self.url, rule) if msg: raise FilterRating(msg) self.ratings = [] elif tag=="base" and attrs.has_key('href'): self.base_url = attrs['href'] # some base urls are just the host name, eg. www.imadoofus.com if not urllib.splittype(self.base_url)[0]: self.base_url = "%s://%s" % \ (urllib.splittype(self.url)[0], self.base_url) self._debug("using base url %r", self.base_url) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self) # look for filter rules which apply self._filterStartElement(tag, attrs) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.javascript: self.htmlparser.tagbuf2data()
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data debug(FILTER, "%s startElement %r", self, tag) if self._is_waiting([STARTTAG, tag, attrs]): return tag = check_spelling(tag, self.url) if self.stackcount: if self.stackcount[-1][0]==tag: self.stackcount[-1][1] += 1 if tag=="meta": if attrs.get('http-equiv', '').lower() =='content-rating': rating = resolve_html_entities(attrs.get('content', '')) url, rating = rating_import(url, rating) # note: always put this in the cache, since this overrides # any http header setting, and page content changes more # often rating_add(url, rating) elif tag=="body": if self.ratings: # headers finished, check rating data for rule in self.ratings: msg = rating_allow(self.url, rule) if msg: raise FilterRating(msg) self.ratings = [] elif tag=="base" and attrs.has_key('href'): self.base_url = attrs['href'] # some base urls are just the host name, eg. www.imadoofus.com if not urllib.splittype(self.base_url)[0]: self.base_url = "%s://%s" % \ (urllib.splittype(self.url)[0], self.base_url) self._debug("using base url %r", self.base_url) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self) # look for filter rules which apply self._filterStartElement(tag, attrs) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.javascript: self.htmlparser.tagbuf2data()
| 16,590
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug("startElement %r", tag) tag = check_spelling(tag, self.url) if self.stackcount: if self.stackcount[-1][0]==tag: self.stackcount[-1][1] += 1 if tag=="meta": if attrs.get('http-equiv', '').lower() =='content-rating': rating = resolve_html_entities(attrs.get('content', '')) url, rating = rating_import(url, rating) # note: always put this in the cache, since this overrides # any http header setting, and page content changes more # often rating_add(url, rating) elif tag=="body": if self.ratings: # headers finished, check rating data for rule in self.ratings: msg = rating_allow(self.url, rule) if msg: raise FilterRating(msg) self.ratings = [] elif tag=="base" and attrs.has_key('href'): self.base_url = attrs['href'] # some base urls are just the host name, eg. www.imadoofus.com if not urllib.splittype(self.base_url)[0]: self.base_url = "%s://%s" % \ (urllib.splittype(self.url)[0], self.base_url) self._debug("using base url %r", self.base_url) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self) # look for filter rules which apply self._filterStartElement(tag, attrs) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.javascript: self.htmlparser.tagbuf2data()
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug("startElement %r", tag) tag = check_spelling(tag, self.url) if self.stackcount: if self.stackcount[-1][0]==tag: self.stackcount[-1][1] += 1 if tag=="meta": if attrs.get('http-equiv', '').lower() =='content-rating': rating = resolve_html_entities(attrs.get('content', '')) url, rating = rating_import(url, rating) # note: always put this in the cache, since this overrides # any http header setting, and page content changes more # often rating_add(url, rating) elif tag=="body": if self.ratings: # headers finished, check rating data for rule in self.ratings: msg = rating_allow(self.url, rule) if msg: raise FilterRating(msg) self.ratings = [] elif tag=="base" and attrs.has_key('href'): self.base_url = attrs['href'] # some base urls are just the host name, eg. www.imadoofus.com if not urllib.splittype(self.base_url)[0]: self.base_url = "%s://%s" % \ (urllib.splittype(self.url)[0], self.base_url) debug(FILTER, "%s using base url %r", self, self.base_url) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self) # look for filter rules which apply self._filterStartElement(tag, attrs) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.javascript: self.htmlparser.tagbuf2data()
| 16,591
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug("startElement %r", tag) tag = check_spelling(tag, self.url) if self.stackcount: if self.stackcount[-1][0]==tag: self.stackcount[-1][1] += 1 if tag=="meta": if attrs.get('http-equiv', '').lower() =='content-rating': rating = resolve_html_entities(attrs.get('content', '')) url, rating = rating_import(url, rating) # note: always put this in the cache, since this overrides # any http header setting, and page content changes more # often rating_add(url, rating) elif tag=="body": if self.ratings: # headers finished, check rating data for rule in self.ratings: msg = rating_allow(self.url, rule) if msg: raise FilterRating(msg) self.ratings = [] elif tag=="base" and attrs.has_key('href'): self.base_url = attrs['href'] # some base urls are just the host name, eg. www.imadoofus.com if not urllib.splittype(self.base_url)[0]: self.base_url = "%s://%s" % \ (urllib.splittype(self.url)[0], self.base_url) self._debug("using base url %r", self.base_url) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self) # look for filter rules which apply self._filterStartElement(tag, attrs) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.javascript: self.htmlparser.tagbuf2data()
|
def startElement (self, tag, attrs): """We get a new start tag. New rules could be appended to the pending rules. No rules can be removed from the list.""" # default data self._debug("startElement %r", tag) tag = check_spelling(tag, self.url) if self.stackcount: if self.stackcount[-1][0]==tag: self.stackcount[-1][1] += 1 if tag=="meta": if attrs.get('http-equiv', '').lower() =='content-rating': rating = resolve_html_entities(attrs.get('content', '')) url, rating = rating_import(url, rating) # note: always put this in the cache, since this overrides # any http header setting, and page content changes more # often rating_add(url, rating) elif tag=="body": if self.ratings: # headers finished, check rating data for rule in self.ratings: msg = rating_allow(self.url, rule) if msg: raise FilterRating(msg) self.ratings = [] elif tag=="base" and attrs.has_key('href'): self.base_url = attrs['href'] # some base urls are just the host name, eg. www.imadoofus.com if not urllib.splittype(self.base_url)[0]: self.base_url = "%s://%s" % \ (urllib.splittype(self.url)[0], self.base_url) self._debug("using base url %r", self.base_url) # search for and prevent known security flaws in HTML self.security.scan_start_tag(tag, attrs, self) # look for filter rules which apply self.filterStartElement(tag, attrs) # if rule stack is empty, write out the buffered data if not self.rulestack and not self.javascript: self.htmlparser.tagbuf2data()
| 16,592
|
def _filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug("matched rule %r on tag %r", rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug("put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self._jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
|
def filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug("matched rule %r on tag %r", rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug("put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self._jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
| 16,593
|
def _filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug("matched rule %r on tag %r", rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug("put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self._jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
|
def _filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): debug(FILTER, "%s matched rule %r on tag %r", self, rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug("put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self._jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
| 16,594
|
def _filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug("matched rule %r on tag %r", rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug("put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self._jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
|
def _filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug("matched rule %r on tag %r", rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: debug(FILTER, "%s put rule %r on buffer", self, rule.title) rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self._jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
| 16,595
|
def _filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug("matched rule %r on tag %r", rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug("put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self._jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
|
def _filterStartElement (self, tag, attrs): """filter the start element according to filter rules""" rulelist = [] filtered = False item = [STARTTAG, tag, attrs] for rule in self.rules: if rule.match_tag(tag) and rule.match_attrs(attrs): self._debug("matched rule %r on tag %r", rule.title, tag) if rule.start_sufficient: item = rule.filter_tag(tag, attrs) filtered = True if item[0]==STARTTAG and item[1]==tag: foo,tag,attrs = item # give'em a chance to replace more than one attribute continue else: break else: self._debug("put on buffer") rulelist.append(rule) if rulelist: # remember buffer position for end tag matching pos = len(self.htmlparser.tagbuf) self.rulestack.append((pos, rulelist)) self.stackcount.append([tag, 1]) if filtered: # put filtered item on tag buffer self.htmlparser.tagbuf.append(item) elif self.javascript: # if it's not yet filtered, try filter javascript self.jsStartElement(tag, attrs) else: # put original item on tag buffer self.htmlparser.tagbuf.append(item)
| 16,596
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
| 16,597
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
|
defendElement(self,tag):"""Weknowthefollowing:ifarulematches,itmustbetheoneonthetopofthestack.Sowelookonlyatthetoprule.
| 16,598
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
|
def endElement (self, tag): """We know the following: if a rule matches, it must be the one on the top of the stack. So we look only at the top rule.
| 16,599
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.