| bugged (string, 4-228k) | fixed (string, 0-96.3M) | __index_level_0__ (int64, 0-481k) |
|---|---|---|
def testRdfDescription2 (self): self.filt("""<?xml version="1.0" encoding="ISO-8859-1"?>
|
def testRdfDescription2 (self): self.filt("""<?xml version="1.0" encoding="ISO-8859-1"?>
| 15,800
|
def testRdfDescription3 (self): self.filt("""<?xml version="1.0" encoding="ISO-8859-1"?>
|
def testRdfDescription3 (self): self.filt("""<?xml version="1.0" encoding="ISO-8859-1"?>
| 15,801
|
def filter (self, data, **attrs): """compress the string s. Note that compression state is saved outside of this function in the compression object. """ if not attrs.has_key('compressobj'): return data compobj = attrs['compressobj'] if compobj: header = compobj['header'] if header: compobj['header'] = '' wc.log.debug(wc.LOG_FILTER, 'writing gzip header') compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data)) return data
|
def filter (self, data, **attrs): """Compress the string s. Note that compression state is saved outside of this function in the compression object. """ if not attrs.has_key('compressobj'): return data compobj = attrs['compressobj'] if compobj: header = compobj['header'] if header: compobj['header'] = '' wc.log.debug(wc.LOG_FILTER, 'writing gzip header') compobj['size'] += len(data) compobj['crc'] = zlib.crc32(data, compobj['crc']) data = "%s%s" % (header, compobj['compressor'].compress(data)) return data
| 15,802
|
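
Row 15,802 above fixes a streaming gzip filter whose compression state deliberately lives outside the function, in a `compressobj` dict passed through `attrs`. A minimal sketch of that pattern with hypothetical names (not the WebCleaner API): the caller owns the dict holding the compressor, the running CRC, and a byte count, and each call folds one chunk through it.

```python
import zlib

def make_compobj():
    # hypothetical reduction of the state dict used in the row above
    return {
        'compressor': zlib.compressobj(9, zlib.DEFLATED, -zlib.MAX_WBITS),
        'crc': zlib.crc32(b"") & 0xffffffff,
        'size': 0,
    }

def compress_chunk(compobj, data):
    # fold one chunk through the shared state, as filter() does
    compobj['size'] += len(data)
    compobj['crc'] = zlib.crc32(data, compobj['crc']) & 0xffffffff
    return compobj['compressor'].compress(data)

c = make_compobj()
out = compress_chunk(c, b"hello ") + compress_chunk(c, b"world")
out += c['compressor'].flush()
assert zlib.decompress(out, -zlib.MAX_WBITS) == b"hello world"
```
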
def server_connected (self, server): assert self.state == 'connect' if not self.client.connected: # The client has aborted, so let's return this server # connection to the pool server.reuse() return self.server = server addr = (self.ipaddr, self.port) # check expectations expect = self.headers.get('Expect', '').lower().strip() docontinue = expect.startswith('100-continue') or \ expect.startswith('0100-continue') if docontinue and serverpool.http_versions.get(addr, 1.1) < 1.1: self.client.error(417, i18n._("Expectation failed"), i18n._("Server does not understand HTTP/1.1")) return if expect: self.client.error(417, i18n._("Expectation failed"), i18n._("Unsupported expectation `%s'")%expect) return # ok, assign server object self.state = 'response' # At this point, we tell the server that we are the client. # Once we get a response, we transfer to the real client. self.server.client_send_request(self.method, self.hostname, self.document, self.headers, self.content, self, self.nofilter, self.url)
|
def server_connected (self, server): assert self.state == 'connect' if not self.client.connected: # The client has aborted, so let's return this server # connection to the pool server.reuse() return self.server = server addr = (self.ipaddr, self.port) # check expectations expect = self.headers.get('Expect', '').lower().strip() docontinue = expect.startswith('100-continue') or \ expect.startswith('0100-continue') if docontinue: if serverpool.http_versions.get(addr, 1.1) < 1.1: self.client.error(417, i18n._("Expectation failed"), i18n._("Server does not understand HTTP/1.1")) return elif expect: self.client.error(417, i18n._("Expectation failed"), i18n._("Unsupported expectation `%s'")%expect) return # ok, assign server object self.state = 'response' # At this point, we tell the server that we are the client. # Once we get a response, we transfer to the real client. self.server.client_send_request(self.method, self.hostname, self.document, self.headers, self.content, self, self.nofilter, self.url)
| 15,803
|
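
The diff in row 15,803 is easy to misread once flattened: the bugged version combines the two checks as `docontinue and version < 1.1`, then falls straight into the generic `if expect:` error, so even a valid `Expect: 100-continue` is answered with 417. Reduced to a toy, with status codes standing in for `self.client.error`:

```python
def bugged(expect, docontinue, server_is_http11):
    if docontinue and not server_is_http11:
        return 417
    if expect:          # also fires for a perfectly good 100-continue
        return 417
    return 200

def fixed(expect, docontinue, server_is_http11):
    if docontinue:
        if not server_is_http11:
            return 417
    elif expect:        # only *unsupported* expectations are rejected
        return 417
    return 200

assert bugged("100-continue", True, True) == 417   # the bug
assert fixed("100-continue", True, True) == 200    # passes through
assert fixed("x-weird", False, True) == 417        # still rejected
```
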
def build_extensions (self): # For gcc 3.x we can add -std=gnu99 to get rid of warnings. extra = [] if self.compiler.compiler_type == 'unix': option = "-std=gnu99" if cc_supports_option(self.compiler.compiler, option): extra.append(option) # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) for ext in self.extensions: ext.extra_compile_args.extend(extra) self.build_extension(ext)
|
def build_extensions (self): # For gcc 3.x we can add -std=gnu99 to get rid of warnings. extra = [] if self.compiler.compiler_type == 'unix': option = "-std=gnu99" if cc_supports_option(self.compiler.compiler, option): extra.append(option) # First, sanity-check the 'extensions' list self.check_extensions_list(self.extensions) for ext in self.extensions: for opt in extra: if opt not in ext.extra_compile_args: ext.extra_compile_args.append(opt) self.build_extension(ext)
| 15,804
|
def filter_tag (self, tag, attrs): #debug(NIGHTMARE, "rule %s filter_tag" % self.title) part = self.replace[0] #debug(NIGHTMARE, "original tag", `tag`, "attrs", attrs) #debug(NIGHTMARE, "replace", num_part(part), "with", `self.replace[1]`) if part==TAGNAME: return (STARTTAG, self.replace[1], attrs) if part==TAG: return (DATA, self.replace[1]) if part==ENCLOSED: return (STARTTAG, tag, attrs) if part==COMPLETE: return [DATA, ""] newattrs = {} # look for matching tag attributes for attr,val in attrs.items(): ro = self.attrs.get(attr) if ro: mo = ro.search(val) if mo: if part==ATTR: # replace complete attr if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else: # part has to be ATTRVAL # Python has named submatches, and we can use them # the name 'replace' replaces the value, # all other names are given as format strings dict = mo.groupdict() if dict.has_key('replace'): newattrs[attr] = dict['replace'] else: newattrs[attr] = self.replace[1] % dict continue # nothing matched, just append the attribute as is newattrs[attr] = val #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs) return (STARTTAG, tag, newattrs)
|
def filter_tag (self, tag, attrs): #debug(NIGHTMARE, "rule %s filter_tag" % self.title) part = self.replace[0] #debug(NIGHTMARE, "original tag", `tag`, "attrs", attrs) #debug(NIGHTMARE, "replace", num_part(part), "with", `self.replace[1]`) if part==TAGNAME: return (STARTTAG, self.replace[1], attrs) if part==TAG: return (DATA, self.replace[1]) if part==ENCLOSED: return (STARTTAG, tag, attrs) newattrs = {} # look for matching tag attributes for attr,val in attrs.items(): ro = self.attrs.get(attr) if ro: mo = ro.search(val) if mo: if part==ATTR: # replace complete attr if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else: # part has to be ATTRVAL # Python has named submatches, and we can use them # the name 'replace' replaces the value, # all other names are given as format strings dict = mo.groupdict() if dict.has_key('replace'): newattrs[attr] = dict['replace'] else: newattrs[attr] = self.replace[1] % dict continue # nothing matched, just append the attribute as is newattrs[attr] = val #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs) return (STARTTAG, tag, newattrs)
| 15,805
|
def filter_tag (self, tag, attrs): #debug(NIGHTMARE, "rule %s filter_tag" % self.title) part = self.replace[0] #debug(NIGHTMARE, "original tag", `tag`, "attrs", attrs) #debug(NIGHTMARE, "replace", num_part(part), "with", `self.replace[1]`) if part==TAGNAME: return (STARTTAG, self.replace[1], attrs) if part==TAG: return (DATA, self.replace[1]) if part==ENCLOSED: return (STARTTAG, tag, attrs) if part==COMPLETE: return [DATA, ""] newattrs = {} # look for matching tag attributes for attr,val in attrs.items(): ro = self.attrs.get(attr) if ro: mo = ro.search(val) if mo: if part==ATTR: # replace complete attr if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else: # part has to be ATTRVAL # Python has named submatches, and we can use them # the name 'replace' replaces the value, # all other names are given as format strings dict = mo.groupdict() if dict.has_key('replace'): newattrs[attr] = dict['replace'] else: newattrs[attr] = self.replace[1] % dict continue # nothing matched, just append the attribute as is newattrs[attr] = val #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs) return (STARTTAG, tag, newattrs)
|
def filter_tag (self, tag, attrs): #debug(NIGHTMARE, "rule %s filter_tag" % self.title) part = self.replace[0] #debug(NIGHTMARE, "original tag", `tag`, "attrs", attrs) #debug(NIGHTMARE, "replace", num_part(part), "with", `self.replace[1]`) if part==TAGNAME: return (STARTTAG, self.replace[1], attrs) if part==TAG: return (DATA, self.replace[1]) if part==ENCLOSED: return (STARTTAG, tag, attrs) if part==COMPLETE: return [DATA, ""] newattrs = {} # look for matching tag attributes for attr,val in attrs.items(): ro = self.attrs.get(attr) if ro: mo = ro.search(val) if mo: if part==ATTR: # replace complete attr if self.replace[1]: newattrs[self.replace[1][0]] = self.replace[1][1] else: # part has to be ATTRVAL # Python has named submatches, and we can use them # the name 'replace' replaces the value, # all other names are given as format strings dict = mo.groupdict() if dict.has_key('replace'): newattrs[attr] = dict['replace'] else: newattrs[attr] = self.replace[1] % dict continue # nothing matched, just append the attribute as is newattrs[attr] = val #debug(NIGHTMARE, "filtered tag", tag, "attrs", newattrs) return (STARTTAG, tag, newattrs)
| 15,806
|
def toxml (self): s = UrlRule.toxml(self) if self.tag!='a': s += '\n tag="%s"' % self.tag if not (self.attrs or self.replace or self.enclosed): return s+"/>\n" s += ">\n" for key,val in self.attrs.items(): s += "<attr" if key!='href': s += ' name="%s"' % key if val: s += ">"+xmlify(val)+"</attr>\n" else: s += "/>\n" if self.enclosed: s += "<enclosed>"+xmlify(self.enclosed)+"</enclosed>\n" if not self.replace[0]==COMPLETE or self.replace[1]: s += "<replace" if self.replace[0]!=COMPLETE: s += ' part="%s"' % num_part(self.replace[0]) if self.replace[1]: if self.replace[0]==ATTR: val = self.replace[0][0]+'="'+self.replace[0][1]+'"' else: val = self.replace[1] s += '>'+xmlify(val)+"</replace>\n" else: s += "/>\n" return s + "</rewrite>"
|
def toxml (self): s = UrlRule.toxml(self) if self.tag!='a': s += '\n tag="%s"' % self.tag if not (self.attrs or self.replace or self.enclosed): return s+"/>\n" s += ">\n" for key,val in self.attrs.items(): s += "<attr" if key!='href': s += ' name="%s"' % key if val: s += ">"+xmlify(val)+"</attr>\n" else: s += "/>\n" if self.enclosed: s += "<enclosed>"+xmlify(self.enclosed)+"</enclosed>\n" if not self.replace[0]==COMPLETE or self.replace[1]: s += "<replace" if self.replace[0]!=COMPLETE: s += ' part="%s"' % num_part(self.replace[0]) if self.replace[1]: s += '>'+xmlify(self.replace[1])+"</replace>\n" else: s += "/>\n" return s + "</rewrite>"
| 15,807
|
def _form_selrule (index): """ Select a rule. """ try: index = int(index) global currule currule = [r for r in curfolder.rules if r.oid == index][0] print "XXX", currule # fill ruletype flags for rt in rulenames: ruletype[rt] = (currule.name == rt) # XXX this side effect is bad :( # fill part flags if currule.name == u"htmlrewrite": global curparts curparts = {} for i, part in enumerate(partvalnames): curparts[part] = (currule.part == i) elif currule.name == u"xmlrewrite": global curreplacetypes curreplacetypes = {} for name, num in replacetypenums.items(): curreplacetypes[name] = (currule.replacetypenum == num) elif currule.name == u"header": global curfilterstage, curheaderaction curfilterstage = { u'both': currule.filterstage == u'both', u'request': currule.filterstage == u'request', u'response': currule.filterstage == u'response', } curheaderaction = { u'add': currule.action == u'add', u'replace': currule.action == u'replace', u'remove': currule.action == u'remove', } except (ValueError, IndexError, OverflowError): error['ruleindex'] = True
|
def _form_selrule (index): """ Select a rule. """ try: index = int(index) global currule currule = [r for r in curfolder.rules if r.oid == index][0] # fill ruletype flags for rt in rulenames: ruletype[rt] = (currule.name == rt) # XXX this side effect is bad :( # fill part flags if currule.name == u"htmlrewrite": global curparts curparts = {} for i, part in enumerate(partvalnames): curparts[part] = (currule.part == i) elif currule.name == u"xmlrewrite": global curreplacetypes curreplacetypes = {} for name, num in replacetypenums.items(): curreplacetypes[name] = (currule.replacetypenum == num) elif currule.name == u"header": global curfilterstage, curheaderaction curfilterstage = { u'both': currule.filterstage == u'both', u'request': currule.filterstage == u'request', u'response': currule.filterstage == u'response', } curheaderaction = { u'add': currule.action == u'add', u'replace': currule.action == u'replace', u'remove': currule.action == u'remove', } except (ValueError, IndexError, OverflowError): error['ruleindex'] = True
| 15,808
|
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(self.request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
|
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(self.request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
| 15,809
|
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(self.request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
|
def process_request (self): """read request, split it up and filter it""" # One newline ends request i = self.recv_buffer.find('\r\n') if i < 0: return # self.read(i) is not including the newline self.request = self.read(i) # basic request checking (more will be done below) try: self.method, self.url, protocol = self.request.split() except ValueError: self.error(400, _("Can't parse request")) return if not self.allow.method(self.method): self.error(405, _("Method Not Allowed")) return # fix broken url paths self.url = wc.url.url_norm(self.url)[0] if not self.url: self.error(400, _("Empty URL")) return self.protocol = wc.proxy.fix_http_version(protocol) self.http_ver = wc.proxy.get_http_version(self.protocol) # build request self.request = "%s %s %s" % (self.method, self.url, self.protocol) wc.log.debug(wc.LOG_PROXY, "%s request %r", self, self.request) # filter request attrs = wc.filter.get_filterattrs(self.url, wc.filter.STAGE_REQUEST) self.request = wc.filter.applyfilter(request, "finish", attrs) # final request checking if not self.fix_request(): return wc.log.info(wc.LOG_ACCESS, '%s - %s - %s', self.addr[0], time.ctime(time.time()), self.request) self.state = 'headers'
| 15,810
|
def process_content (self): """read and filter client request content""" data = self.read(self.bytes_remaining) if self.bytes_remaining is not None: # Just pass everything through to the server # NOTE: It's possible to have 'chunked' encoding here, # and then the current system of counting bytes remaining # won't work; we have to deal with chunks self.bytes_remaining -= len(data) is_closed = False for decoder in self.decoders: data = decoder.decode(data) if not is_closed: is_closed = decoder.closed for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter(data, "filter", attrs) self.content += data underflow = self.bytes_remaining is not None and \ self.bytes_remaining < 0 if underflow: wc.log.warn(wc.LOG_PROXY, "client received %d bytes more than content-length", -self.bytes_remaining) if is_closed or self.bytes_remaining <= 0: for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter("", "finish", attrs) self.content += data if self.content and not self.headers.has_key('Content-Length'): self.headers['Content-Length'] = "%d\r" % len(self.content) # We're done reading content self.state = 'receive' is_local = self.hostname in \ wc.proxy.dns_lookups.resolver.localhosts and \ self.port in (wc.configuration.config['port'], wc.configuration.config['sslport']) if is_local: is_public_doc = self.allow.public_document(self.document) if wc.configuration.config['adminuser'] and \ not wc.configuration.config['adminpass']: if is_local and is_public_doc: self.handle_local(is_public_doc=is_public_doc) else: # ignore request, must init admin password self.headers['Location'] = \ "http://%s:%d/adminpass.html\r" % \ (self.socket.getsockname()[0], wc.configuration.config['port']) self.error(302, _("Moved Temporarily")) elif is_local: # this is a direct proxy call self.handle_local(is_public_doc=is_public_doc) else: self.server_request()
|
def process_content (self): """read and filter client request content""" data = self.read(self.bytes_remaining) if self.bytes_remaining is not None: # Just pass everything through to the server # NOTE: It's possible to have 'chunked' encoding here, # and then the current system of counting bytes remaining # won't work; we have to deal with chunks self.bytes_remaining -= len(data) is_closed = False for decoder in self.decoders: data = decoder.decode(data) if not is_closed: is_closed = decoder.closed for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter(data, "filter", attrs) self.content += data underflow = self.bytes_remaining is not None and \ self.bytes_remaining < 0 if underflow: wc.log.warn(wc.LOG_PROXY, "client received %d bytes more than content-length", -self.bytes_remaining) if is_closed or self.bytes_remaining <= 0: for stage in FilterStages: attrs = wc.filter.get_filterattrs(self.url, stage, clientheaders=self.clientheaders, headers=self.headers) data = wc.filter.applyfilter(data, "finish", attrs) self.content += data if self.content and not self.headers.has_key('Content-Length'): self.headers['Content-Length'] = "%d\r" % len(self.content) # We're done reading content self.state = 'receive' is_local = self.hostname in \ wc.proxy.dns_lookups.resolver.localhosts and \ self.port in (wc.configuration.config['port'], wc.configuration.config['sslport']) if is_local: is_public_doc = self.allow.public_document(self.document) if wc.configuration.config['adminuser'] and \ not wc.configuration.config['adminpass']: if is_local and is_public_doc: self.handle_local(is_public_doc=is_public_doc) else: # ignore request, must init admin password self.headers['Location'] = \ "http://%s:%d/adminpass.html\r" % \ (self.socket.getsockname()[0], wc.configuration.config['port']) self.error(302, _("Moved Temporarily")) elif is_local: # this is a direct proxy call self.handle_local(is_public_doc=is_public_doc) else: self.server_request()
| 15,811
|
def suffix2mask (n): "return a mask of n bits as a long integer" return (2L<<n-1)-1
|
def suffix2mask (n): "return a mask of n bits as a long integer" return (1L << (32 - n)) - 1
| 15,812
|
def mask2suffix (mask): """return suff for given bit mask""" return int(math.log(mask+1, 2))
|
def mask2suffix (mask): """return suff for given bit mask""" return 32 - int(math.log(mask+1, 2))
| 15,813
|
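
Rows 15,812 and 15,813 only make sense together: the fixed `suffix2mask` returns the host-part mask of an n-bit CIDR prefix, and the fixed `mask2suffix` inverts it. A round-trip self-check, using an integer-exact `bit_length()` in place of the original's `math.log` (and plain ints instead of Python 2 longs):

```python
def suffix2mask(n):
    # host-part bit mask for an n-bit prefix, per the fixed version
    return (1 << (32 - n)) - 1

def mask2suffix(mask):
    # integer-exact equivalent of the fixed 32 - log2(mask + 1)
    return 32 - ((mask + 1).bit_length() - 1)

assert suffix2mask(24) == 0x000000ff        # a /24 leaves 8 host bits
assert all(mask2suffix(suffix2mask(n)) == n for n in range(33))
```
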
def _test (): hosts = ["192.168.1.1/16"] hostmap = hosts2map(hosts) print hostmap print map2hosts(hostmap)
|
def _test (): hosts, nets = hosts2map([ "192.168.2.1", "192.168.2.1/32", "192.168.2.1/31", "192.168.2.1/30", "192.168.2.1/29", "192.168.2.1/28", "192.168.2.1/27", "192.168.2.1/26", "192.168.2.1/25", "192.168.2.1/24", "192.168.2.1/23", "192.168.2.1/22", "192.168.2.1/21", "192.168.2.1/20", "192.168.2.1/19", "192.168.2.1/18", "192.168.2.1/17", "192.168.2.1/16", "192.168.2.1/15", "192.168.2.1/14", "192.168.2.1/13", "192.168.2.1/12", "192.168.2.1/11", "192.168.2.1/10", "192.168.2.1/9", "192.168.2.1/8", "192.168.2.1/7", "192.168.2.1/6", "192.168.2.1/5", "192.168.2.1/4", "192.168.2.1/3", "192.168.2.1/2", "192.168.2.1/1", "127.0.0.1/8" ]) for host in hosts: print "host: %s" % (host) for net, mask in nets: print "net: %s %s => %s/%s" % (net, mask, num2dq(net), mask2suffix(mask)) maps = map2hosts([hosts, nets]) for map in maps: print "map: %s" % (map)
| 15,814
|
def filter (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) if buf.len >= self.minimal_size_bytes: return self.recognize(buf, attrs) return ''
|
def filter (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) if buf.tell() >= self.minimal_size_bytes: return self.recognize(buf, attrs) return ''
| 15,815
|
def fix_request (self): # refresh with filtered request data self.method, self.url, self.protocol = self.request.split() # enforce a maximum url length if len(self.url) > 2048: wc.log.error(wc.LOG_PROXY, "%s request url length %d chars is too long", self, len(self.url)) self.error(400, _("URL too long"), txt=_('URL length limit is %d bytes.') % 2048) return False if len(self.url) > 255: wc.log.warn(wc.LOG_PROXY, "%s request url length %d chars is very long", self, len(self.url)) # and unquote again self.url = wc.url.url_norm(self.url)[0] self.scheme, self.hostname, self.port, self.document = \ wc.url.url_split(self.url) # fix missing trailing / if not self.document: self.document = '/' # some clients send partial URI's without scheme, hostname # and port to clients, so we have to handle this if not self.scheme: self.scheme = "https" if not self.allow.scheme(self.scheme): wc.log.warn(wc.LOG_PROXY, "%s forbidden scheme %r encountered", self, self.scheme) self.error(403, _("Forbidden")) return False # request is ok return True
|
def fix_request (self): # refresh with filtered request data self.method, self.url, self.protocol = self.request.split() # enforce a maximum url length if len(self.url) > 2048: wc.log.error(wc.LOG_PROXY, "%s request url length %d chars is too long", self, len(self.url)) self.error(400, _("URL too long"), txt=_('URL length limit is %d bytes.') % 2048) return False if len(self.url) > 255: wc.log.warn(wc.LOG_PROXY, "%s request url length %d chars is very long", self, len(self.url)) # and unquote again self.url = wc.url.url_norm(self.url)[0] self.scheme, self.hostname, self.port, self.document = \ wc.url.url_split(self.url) # fix missing trailing / if not self.document: self.document = '/' # some clients send partial URI's without scheme, hostname # and port to clients, so we have to handle this if not self.scheme: self.scheme = "https" if not self.allow.is_allowed(self.method, self.scheme, self.port): wc.log.warn(wc.LOG_PROXY, "Unallowed request %s", self.url) self.error(403, _("Forbidden")) return False # request is ok return True
| 15,816
|
def server_response (self, server, response, status, headers): """ Follow redirects, and finish on errors. For HTTP status 2xx continue. """ self.server = server assert self.server.connected wc.log.debug(wc.LOG_PROXY, '%s server_response %r', self, response) version, status, msg = \ wc.http.parse_http_response(response, self.args[0]) # XXX check version wc.log.debug(wc.LOG_PROXY, '%s response %s %d %s', self, version, status, msg) if status in (302, 301): self.isredirect = True elif not (200 <= status < 300): wc.log.error(wc.LOG_PROXY, "%s got %s status %d %r", self, version, status, msg) self.finish() if headers.has_key('Transfer-Encoding'): # XXX don't look at value, assume chunked encoding for now wc.log.debug(wc.LOG_PROXY, '%s Transfer-encoding %r', self, headers['Transfer-encoding']) unchunker = wc.proxy.decoder.UnchunkStream.UnchunkStream(self) self.decoders.append(unchunker)
|
def server_response (self, server, response, status, headers): """ Follow redirects, and finish on errors. For HTTP status 2xx continue. """ self.server = server assert self.server.connected wc.log.debug(wc.LOG_PROXY, '%s server_response %r', self, response) version, status, msg = \ wc.http.parse_http_response(response, self.args[0]) # XXX check version wc.log.debug(wc.LOG_PROXY, '%s response %s %d %s', self, version, status, msg) if status in (302, 301): self.isredirect = True elif not (200 <= status < 300): wc.log.debug(wc.LOG_PROXY, "%s got %s status %d %r", self, version, status, msg) self.finish() if headers.has_key('Transfer-Encoding'): # XXX don't look at value, assume chunked encoding for now wc.log.debug(wc.LOG_PROXY, '%s Transfer-encoding %r', self, headers['Transfer-encoding']) unchunker = wc.proxy.decoder.UnchunkStream.UnchunkStream(self) self.decoders.append(unchunker)
| 15,817
|
def execute (pythonw, script, args): """execute given script""" cargs = " ".join(args) _in, _out = os.popen4("%s %s %s" % (pythonw, script, cargs)) line = _out.readline() while line: print line line = _out.readline() _in.close() _out.close()
|
def execute (pythonw, script, args): """ Execute given script. """ cargs = " ".join(args) _in, _out = os.popen4("%s %s %s" % (pythonw, script, cargs)) line = _out.readline() while line: print line line = _out.readline() _in.close() _out.close()
| 15,818
|
def fix_configdata (): """fix install and config paths in the config file""" name = "_webcleaner2_configdata.py" conffile = os.path.join(sys.prefix, "Lib", "site-packages", name) lines = [] for line in file(conffile): if line.startswith("install_") or line.startswith("config_"): lines.append(fix_install_path(line)) else: lines.append(line) f = file(conffile, "w") f.write("".join(lines)) f.close()
|
def fix_configdata (): """ Fix install and config paths in the config file. """ name = "_webcleaner2_configdata.py" conffile = os.path.join(sys.prefix, "Lib", "site-packages", name) lines = [] for line in file(conffile): if line.startswith("install_") or line.startswith("config_"): lines.append(fix_install_path(line)) else: lines.append(line) f = file(conffile, "w") f.write("".join(lines)) f.close()
| 15,819
|
def fix_configdata (): """fix install and config paths in the config file""" name = "_webcleaner2_configdata.py" conffile = os.path.join(sys.prefix, "Lib", "site-packages", name) lines = [] for line in file(conffile): if line.startswith("install_") or line.startswith("config_"): lines.append(fix_install_path(line)) else: lines.append(line) f = file(conffile, "w") f.write("".join(lines)) f.close()
|
def fix_configdata (): """fix install and config paths in the config file""" name = "_webcleaner2_configdata.py" conffile = os.path.join(sys.prefix, "Lib", "site-packages", name) lines = [] for line in file(conffile): if line.startswith("install_") or \ line.startswith("config_") or \ line.startswith("template_"): lines.append(fix_install_path(line)) else: lines.append(line) f = file(conffile, "w") f.write("".join(lines)) f.close()
| 15,820
|
def fix_install_path (line): """Replace placeholders written by bdist_wininst with those specified in win_path_scheme.""" key, eq, val = line.split() # unescape string (do not use eval()) val = val[1:-1].replace("\\\\", "\\") for d in win_path_scheme.keys(): # look for placeholders to replace oldpath, newpath = win_path_scheme[d] oldpath = "%s%s" % (os.sep, oldpath) if oldpath in val: val = val.replace(oldpath, newpath) val = os.path.join(sys.prefix, val) return "%s = %r%s" % (key, val, os.linesep)
|
def fix_install_path (line): """ Replace placeholders written by bdist_wininst with those specified in win_path_scheme. """ key, eq, val = line.split() # unescape string (do not use eval()) val = val[1:-1].replace("\\\\", "\\") for d in win_path_scheme.keys(): # look for placeholders to replace oldpath, newpath = win_path_scheme[d] oldpath = "%s%s" % (os.sep, oldpath) if oldpath in val: val = val.replace(oldpath, newpath) val = os.path.join(sys.prefix, val) return "%s = %r%s" % (key, val, os.linesep)
| 15,821
|
def do_install (): """install shortcuts and NT service""" fix_configdata() import wc # initialize i18n wc.init_i18n() install_shortcuts() install_certificates() install_service() restart_service() open_browser_config()
|
def do_install (): """ Install shortcuts and NT service. """ fix_configdata() import wc # initialize i18n wc.init_i18n() install_shortcuts() install_certificates() install_service() restart_service() open_browser_config()
| 15,822
|
def install_shortcuts (): """create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string) """ try: prg = get_special_folder_path("CSIDL_COMMON_PROGRAMS") except OSError: try: prg = get_special_folder_path("CSIDL_PROGRAMS") except OSError, reason: # give up - cannot install shortcuts print _("Cannot install shortcuts: %s") % reason sys.exit() lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1) dest_dir = os.path.join(prg, "WebCleaner") try: os.mkdir(dest_dir) directory_created(dest_dir) except OSError: pass target = os.path.join(sys.prefix, "RemoveWebCleaner.exe") path = os.path.join(dest_dir, "Uninstall WebCleaner.lnk") arguments = "-u " + os.path.join(sys.prefix, "WebCleaner-wininst.log") create_shortcut(target, _("Uninstall WebCleaner"), path, arguments) file_created(path)
|
def install_shortcuts (): """ create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string) """ try: prg = get_special_folder_path("CSIDL_COMMON_PROGRAMS") except OSError: try: prg = get_special_folder_path("CSIDL_PROGRAMS") except OSError, reason: # give up - cannot install shortcuts print _("Cannot install shortcuts: %s") % reason sys.exit() lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1) dest_dir = os.path.join(prg, "WebCleaner") try: os.mkdir(dest_dir) directory_created(dest_dir) except OSError: pass target = os.path.join(sys.prefix, "RemoveWebCleaner.exe") path = os.path.join(dest_dir, "Uninstall WebCleaner.lnk") arguments = "-u " + os.path.join(sys.prefix, "WebCleaner-wininst.log") create_shortcut(target, _("Uninstall WebCleaner"), path, arguments) file_created(path)
| 15,823
|
def install_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") import wc script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["install"])
|
def install_certificates (): """ Generate SSL certificates for SSL gateway functionality. """ pythonw = os.path.join(sys.prefix, "pythonw.exe") import wc script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["install"])
| 15,824
|
def state_nt_service (name): """return status of NT service""" try: return win32serviceutil.QueryServiceStatus(name)[1] except pywintypes.error, msg: print _("Service status error: %s") % str(msg) return None
|
def state_nt_service (name): """ Return status of NT service. """ try: return win32serviceutil.QueryServiceStatus(name)[1] except pywintypes.error, msg: print _("Service status error: %s") % str(msg) return None
| 15,825
|
def install_service (): """install WebCleaner as NT service""" import wc import wc.win32start oldargs = sys.argv print _("Installing %s service...") % wc.AppName sys.argv = ['webcleaner', 'install'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs
|
def install_service (): """ Install WebCleaner as NT service. """ import wc import wc.win32start oldargs = sys.argv print _("Installing %s service...") % wc.AppName sys.argv = ['webcleaner', 'install'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs
| 15,826
|
def restart_service (): """restart WebCleaner NT service""" stop_service() start_service()
|
def restart_service (): """ Restart WebCleaner NT service. """ stop_service() start_service()
| 15,827
|
def stop_service (): """stop WebCleaner NT service (if it is running)""" import wc import wc.win32start print _("Stopping %s proxy...") % wc.AppName oldargs = sys.argv state = state_nt_service(wc.AppName) while state==win32service.SERVICE_START_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) if state==win32service.SERVICE_RUNNING: sys.argv = ['webcleaner', 'stop'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) state = state_nt_service(wc.AppName) while state==win32service.SERVICE_STOP_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) sys.argv = oldargs
|
def stop_service (): """ Stop WebCleaner NT service (if it is running). """ import wc import wc.win32start print _("Stopping %s proxy...") % wc.AppName oldargs = sys.argv state = state_nt_service(wc.AppName) while state==win32service.SERVICE_START_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) if state==win32service.SERVICE_RUNNING: sys.argv = ['webcleaner', 'stop'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) state = state_nt_service(wc.AppName) while state==win32service.SERVICE_STOP_PENDING: time.sleep(1) state = state_nt_service(wc.AppName) sys.argv = oldargs
| 15,828
|
def start_service (): """start WebCleaner NT service""" import wc import wc.win32start print _("Starting %s proxy...") % wc.AppName oldargs = sys.argv sys.argv = ['webcleaner', 'start'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs
|
def start_service (): """ Start WebCleaner NT service. """ import wc import wc.win32start print _("Starting %s proxy...") % wc.AppName oldargs = sys.argv sys.argv = ['webcleaner', 'start'] win32serviceutil.HandleCommandLine(wc.win32start.ProxyService) sys.argv = oldargs
| 15,829
|
def do_remove (): """stop and remove the installed NT service""" import wc # initialize i18n wc.init_i18n() stop_service() remove_service() remove_certificates() remove_tempfiles()
|
def do_remove (): """ Stop and remove the installed NT service. """ import wc # initialize i18n wc.init_i18n() stop_service() remove_service() remove_certificates() remove_tempfiles()
| 15,830
|
def remove_certificates (): """generate SSL certificates for SSL gateway functionality""" import wc pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["remove"])
|
def remove_certificates (): """ Generate SSL certificates for SSL gateway functionality. """ import wc pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["remove"])
| 15,831
|
def remove_tempfiles (): """remove log files and magic(1) cache file""" import wc remove_file(os.path.join(wc.ConfigDir, "magic.mime.mgc")) remove_file(os.path.join(wc.ConfigDir, "webcleaner.log")) remove_file(os.path.join(wc.ConfigDir, "webcleaner-access.log"))
|
def remove_tempfiles (): """ Remove log files and magic(1) cache file. """ import wc remove_file(os.path.join(wc.ConfigDir, "magic.mime.mgc")) remove_file(os.path.join(wc.ConfigDir, "webcleaner.log")) remove_file(os.path.join(wc.ConfigDir, "webcleaner-access.log"))
| 15,832
|
def remove_file (fname): """Remove a single file if it exists. Errors are printed to stdout""" if os.path.exists(fname): try: os.remove(fname) except OSError, msg: print _("Could not remove %r: %s") % (fname, str(msg))
|
def remove_file (fname): """ Remove a single file if it exists. Errors are printed to stdout. """ if os.path.exists(fname): try: os.remove(fname) except OSError, msg: print _("Could not remove %r: %s") % (fname, str(msg))
| 15,833
|
def _form_newfolder (foldername): if not foldername: error['newfolder'] = True return fd, filename = tempfile.mkstemp(".zap", "local_", ConfigDir, text=True) # select the new folder global curfolder curfolder = _FolderRule(title=foldername, desc="", disable=0, filename=filename) _register_rule(curfolder) _generate_sids(prefix="lc") curfolder.oid = len(config['folderrules']) curfolder.write() config['folderrules'].append(curfolder) _recalc_up_down(config['folderrules']) info['newfolder'] = True
|
def _form_newfolder (foldername): if not foldername: error['newfolder'] = True return fd, filename = tempfile.mkstemp(".zap", "local_", ConfigDir, text=True) # select the new folder global curfolder curfolder = _FolderRule(title=foldername, desc="", disable=0, filename=filename) _register_rule(curfolder) _generate_sids(prefix="lc") if not config['folderrules']: curfolder.oid = 0 else: curfolder.oid = config['folderrules'][-1].oid+1 curfolder.write() config['folderrules'].append(curfolder) _recalc_up_down(config['folderrules']) info['newfolder'] = True
| 15,834
|
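
Row 15,834 swaps `len(config['folderrules'])` for `last.oid + 1` when numbering a new folder. The two only differ once something has been deleted, at which point `len()` can collide with a surviving oid; a toy illustration with bare ints standing in for rules:

```python
oids = [0, 1, 3]             # oids left after the folder with oid 2 was removed
next_oid_bad = len(oids)     # 3 -- collides with an existing oid
next_oid_ok = oids[-1] + 1   # 4 -- guaranteed fresh
assert next_oid_bad in oids and next_oid_ok not in oids
```
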
def _form_removerule (rule): # XXX error handling curfolder.rules.remove(rule) global currule currule = None curfolder.write() info['removerule'] = True
|
def _form_removerule (rule): # XXX error handling rules = curfolder.rules rules.remove(rule) for i in range(rule.oid, len(rules)): rules[i].oid = i curfolder.write() global currule currule = None curfolder.write() info['removerule'] = True
| 15,835
|
def _form_removerule (rule): # XXX error handling curfolder.rules.remove(rule) global currule currule = None curfolder.write() info['removerule'] = True
|
def _form_removerule (rule): # XXX error handling curfolder.rules.remove(rule) global currule currule = None info['removerule'] = True
| 15,836
|
def create_tcp_socket (self, sockinfo): """create tcp socket, connect to it and return socket object""" host = self.get('TCPAddr', 'localhost') port = int(self['TCPSocket']) sockinfo = get_sockinfo(host, port=port) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect(sockinfo[0][4]) except socket.error: sock.close() raise return sock
|
def create_tcp_socket (self): """create tcp socket, connect to it and return socket object""" host = self.get('TCPAddr', 'localhost') port = int(self['TCPSocket']) sockinfo = get_sockinfo(host, port=port) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.connect(sockinfo[0][4]) except socket.error: sock.close() raise return sock
| 15,837
|
def write_filters (res, filename): if os.path.exists(filename): remove(filename) zapfile = file(filename, 'w') d = { "charset": wc.ConfigCharset, "title_en": wc.XmlUtils.xmlquote("AdZapper filters"), "title_de": wc.XmlUtils.xmlquote("AdZapper Filter"), "desc_en": wc.XmlUtils.xmlquote("Automatically generated by adzap2wc.py from %s on %s"%(ADZAPPER_URL, date)), "desc_de": wc.XmlUtils.xmlquote("Automatisch erzeugt von adzap2wc.py aus %s am %s"%(ADZAPPER_URL, date)), } zapfile.write("""<?xml version="1.0" encoding="%(charset)s"?>
|
def write_filters (res, filename): if os.path.exists(filename): remove(filename) zapfile = file(filename, 'w') d = { "charset": wc.configuration.ConfigCharset, "title_en": wc.XmlUtils.xmlquote("AdZapper filters"), "title_de": wc.XmlUtils.xmlquote("AdZapper Filter"), "desc_en": wc.XmlUtils.xmlquote("Automatically generated by adzap2wc.py from %s on %s"%(ADZAPPER_URL, date)), "desc_de": wc.XmlUtils.xmlquote("Automatisch erzeugt von adzap2wc.py aus %s am %s"%(ADZAPPER_URL, date)), } zapfile.write("""<?xml version="1.0" encoding="%(charset)s"?>
| 15,838
|
def _broken (): p = HtmlPrinter() p.feed("""<!------>""") p.flush()
|
def _broken (): p = HtmlPrinter() s = """< a>""" for c in s: p.feed(c) p.flush()
| 15,839
|
def testITSVuln (self): """Microsoft Internet Explorer ITS Protocol Zone Bypass Vulnerability""" # To avoid virus alarms we obfuscate the exploit URL. This # code is harmless. data_url = "ms-its:mhtml:file://"+ "C:\\foo.mht!${PATH}/"+ "EXPLOIT.CHM::"+ "/exploit.htm" self.filt("""<object data="%s">""" % data_url, """<object data="ms-its:mhtml:file:/C:/foo.mht">""")
|
def testITSVuln (self): """Microsoft Internet Explorer ITS Protocol Zone Bypass Vulnerability""" # To avoid virus alarms we obfuscate the exploit URL. This # code is harmless. data_url = "ms-its:mhtml:file://"+ "C:\\foo.mht!${PATH}/"+ "EXPLOIT.CHM::"+ "/exploit.htm" self.filt("""<object data="%s">""" % data_url, """<object data="ms-its:mhtml:file:/C:/foo.mht">""")
| 15,840
|
def applies_to_mime (self, mime): """ Ask if this filter applies to a mime type. """ if mime not in self.mime_cache: if not self.mimes: self.mime_cache[mime] = True elif mime is None: self.mime_cache[mime] = False else: for ro in self.mimes: if ro.match(mime): self.mime_cache[mime] = True else: self.mime_cache[mime] = False return self.mime_cache[mime]
|
def applies_to_mime (self, mime): """ Ask if this filter applies to a mime type. """ if mime not in self.mime_cache: if not self.mimes: self.mime_cache[mime] = True elif mime is None: self.mime_cache[mime] = False else: self.mime_cache[mime] = \ [ro for ro in self.mimes if ro.match(mime)] return self.mime_cache[mime]
| 15,841
|
def handle_read (self): if not self.connected: # It's been closed (presumably recently) return
|
def handle_read (self): if not self.connected: # It's been closed (presumably recently) return
| 15,842
|
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError((406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
|
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError(406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
| 15,843
|
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError((406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
|
def size_error (self): """ Raise an exceptionto cause a 406 HTTP return code. """ wc.log.warn(wc.LOG_FILTER, "Virus filter size exceeded.") raise wc.filter.FilterProxyError((406, _("Not acceptable"), _("Maximum data size (%s) exceeded") % \ wc.strformat.strsize(VirusFilter.MAX_FILE_BYTES)))
| 15,844
|
def _form_newrule (rtype, lang): if rtype not in rulenames: error['newrule'] = True return # add new rule rule = _GetRuleFromName(rtype) rule.parent = curfolder rule.titles[lang] = _("No title") # compile data and register rule.compile_data() if config['development']: prefix = u"wc" else: prefix = u"lc" _generate_sids(prefix) curfolder.append_rule(rule) _recalc_up_down(curfolder.rules) curfolder.write() _reinit_filters() # select new rule _form_selrule(rule.oid) info['newrule'] = True print "XXX new rule", rule
|
def _form_newrule (rtype, lang): if rtype not in rulenames: error['newrule'] = True return # add new rule rule = _GetRuleFromName(rtype) rule.parent = curfolder rule.titles[lang] = _("No title") # compile data and register rule.compile_data() if config['development']: prefix = u"wc" else: prefix = u"lc" _generate_sids(prefix) curfolder.append_rule(rule) _recalc_up_down(curfolder.rules) curfolder.write() _reinit_filters() # select new rule _form_selrule(rule.oid) info['newrule'] = True
| 15,845
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() if server.headers.has_key('Transfer-Encoding'): to_remove.add('Transfer-Encoding') tencs = server.headers['Transfer-Encoding'].lower() for tenc in tencs.split(","): tenc = tenc.strip() if ";" in tenc: tenc = tenc.split(";", 1)[0] if not tenc or tenc == 'identity': continue if tenc == 'chunked': server.decoders.append(UnchunkStream.UnchunkStream(server)) elif tenc in ('x-gzip', 'gzip'): server.decoders.append(GunzipStream.GunzipStream(server)) elif tenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream(server)) else: wc.log.warn(wc.LOG_PROXY, "unsupported transfer encoding in %r", tencs) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'Transfer-Encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None if rewrite: to_remove.add('Content-Length') remove_headers(server.headers, to_remove) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' if not rewrite: # only decompress on rewrite return bytes_remaining to_remove = sets.Set() #if server.protocol == "HTTP/1.1": # # To make pipelining possible, enable chunked encoding. # server.headers['Transfer-Encoding'] = "chunked\r" # server.encoders.append(ChunkStream.ChunkStream(server)) # Compressed content (uncompress only for rewriting modules) if server.headers.has_key('Content-Encoding'): to_remove.add('Content-Encoding') cencs = server.headers['Content-Encoding'].lower() for cenc in cencs.split(","): cenc = cenc.strip() if ";" in cenc: cenc = cenc.split(";", 1)[0] if not cenc or cenc == 'identity': continue if filename is not None and \ (filename.endswith(".gz") or filename.endswith(".tgz")): continue # note: do not gunzip .gz files if cenc in ('gzip', 'x-gzip'): server.decoders.append(GunzipStream.GunzipStream()) elif cenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: wc.log.warn(wc.LOG_PROXY, "unsupported content encoding in %r", encoding) # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.add('Cache-Control') # add warning server.headers['Warning'] = "214 Transformation applied\r" remove_headers(server.headers, to_remove) return bytes_remaining
|
def server_set_encoding_headers (server, filename=None): """ Set encoding headers. """ rewrite = server.is_rewrite() bytes_remaining = get_content_length(server.headers) to_remove = sets.Set() if server.headers.has_key('Transfer-Encoding'): to_remove.add('Transfer-Encoding') tencs = server.headers['Transfer-Encoding'].lower() for tenc in tencs.split(","): tenc = tenc.strip() if ";" in tenc: tenc = tenc.split(";", 1)[0] if not tenc or tenc == 'identity': continue if tenc == 'chunked': server.decoders.append(UnchunkStream.UnchunkStream(server)) elif tenc in ('x-gzip', 'gzip'): server.decoders.append(GunzipStream.GunzipStream(server)) elif tenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream(server)) else: wc.log.warn(wc.LOG_PROXY, "unsupported transfer encoding in %r", tencs) if server.headers.has_key("Content-Length"): wc.log.warn(wc.LOG_PROXY, 'Transfer-Encoding should not have Content-Length') to_remove.add("Content-Length") bytes_remaining = None if rewrite: to_remove.add('Content-Length') remove_headers(server.headers, to_remove) if not server.headers.has_key('Content-Length'): server.headers['Connection'] = 'close\r' if not rewrite: # only decompress on rewrite return bytes_remaining to_remove = sets.Set() #if server.protocol == "HTTP/1.1": # # To make pipelining possible, enable chunked encoding. # server.headers['Transfer-Encoding'] = "chunked\r" # server.encoders.append(ChunkStream.ChunkStream(server)) # Compressed content (uncompress only for rewriting modules) if server.headers.has_key('Content-Encoding'): to_remove.add('Content-Encoding') cencs = server.headers['Content-Encoding'].lower() for cenc in cencs.split(","): cenc = cenc.strip() if ";" in cenc: cenc = cenc.split(";", 1)[0] if not cenc or cenc == 'identity': continue if filename is not None and \ (filename.endswith(".gz") or filename.endswith(".tgz")): continue # note: do not gunzip .gz files if cenc in ('gzip', 'x-gzip'): server.decoders.append(GunzipStream.GunzipStream()) elif cenc == 'deflate': server.decoders.append(DeflateStream.DeflateStream()) else: wc.log.warn(wc.LOG_PROXY, "unsupported content encoding in %r", cenc) # remove no-transform cache control if server.headers.get('Cache-Control', '').lower() == 'no-transform': to_remove.add('Cache-Control') # add warning server.headers['Warning'] = "214 Transformation applied\r" remove_headers(server.headers, to_remove) return bytes_remaining
| 15,846
|
def flush (self): """ Flush data of decoders (if any) and filters and write it to the client. return True if flush was successful. """ assert None == wc.log.debug(wc.LOG_PROXY, "%s HttpServer.flush", self) if not self.statuscode and self.method != 'CONNECT': wc.log.warn(wc.LOG_PROXY, "%s flush without status", self) return True data = self.flush_coders(self.decoders) try: for stage in FilterStages: data = wc.filter.applyfilter(stage, data, "finish", self.attrs) except wc.filter.FilterWait, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterWait %s", self, msg) # the filter still needs some data # to save CPU time make connection unreadable for a while self.set_unreadable(1.0) return False except wc.filter.FilterRating, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterRating from content %s", self, msg) self._show_rating_deny(str(msg)) return True data = self.flush_coders(self.encoders, data=data) # the client might already have closed if not self.client: return if self.defer_data: self.defer_data = False self.client.server_response(self, self.response, self.statuscode, self.headers) if not self.client: return if data and self.statuscode != 407: self.client.server_content(data) return True
|
def flush (self): """ Flush data of decoders (if any) and filters and write it to the client. return True if flush was successful. """ assert None == wc.log.debug(wc.LOG_PROXY, "%s HttpServer.flush", self) if not self.statuscode and self.method != 'CONNECT': wc.log.warn(wc.LOG_PROXY, "%s flush without status", self) return True data = self.flush_coders(self.decoders) try: for stage in FilterStages: data = wc.filter.applyfilter(stage, data, "finish", self.attrs) except wc.filter.FilterWait, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterWait %s", self, msg) # the filter still needs some data # to save CPU time make connection unreadable for a while self.set_unreadable(1.0) return False except wc.filter.FilterRating, msg: assert None == wc.log.debug(wc.LOG_PROXY, "%s FilterRating from content %s", self, msg) self._show_rating_deny(str(msg)) return True data = self.flush_coders(self.encoders, data=data) # the client might already have closed if not self.client: return if self.defer_data: self.defer_data = False self.client.server_response(self, self.response, self.statuscode, self.headers) if not self.client: return if data and self.statuscode != 407 and hasattr(self.client, "server_content"): self.client.server_content(data) return True
| 15,847
|
def filter (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] p.feed(data) return p.flush()
|
def filter (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] p.feed(data) return p.flush()
| 15,848
|
def finish (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] # note: feed even if data is empty p.feed(data) return p.flush(finish=True)
|
def finish (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] # note: feed even if data is empty p.feed(data) return p.flush(finish=True)
| 15,849
|
def finish (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] # note: feed even if data is empty p.feed(data) return p.flush(finish=True)
|
def finish (self, data, **attrs): if not attrs.has_key('rewriter_filter'): return data p = attrs['rewriter_filter'] # note: feed even if data is empty p.feed(data) return p.flush(finish=True)
| 15,850
|
def _resolve_html_entity (mo): """resolve html entity, helper function for resolve_html_entities""" ent = mo.group("entity") s = mo.group() return unicode(htmlentitydefs.entitydefs.get(ent, s))
|
def _resolve_html_entity (mo): """resolve html entity, helper function for resolve_html_entities""" ent = mo.group("entity") s = mo.group() entdef = htmlentitydefs.entitydefs.get(ent) if entdef is None: return s return entdef.decode("iso8859-1")
| 15,851
|
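
Row 15,851's fix avoids `unicode(...)` mis-decoding non-ASCII entity definitions by decoding explicitly from Latin-1, the charset `htmlentitydefs.entitydefs` uses. A hypothetical driver showing the helper in context (Python 2, like the surrounding code; the regular expression is an assumption, not taken from the source):

```python
import re
import htmlentitydefs

_entity_re = re.compile(r"&(?P<entity>[A-Za-z]+);")

def _resolve_html_entity(mo):
    entdef = htmlentitydefs.entitydefs.get(mo.group("entity"))
    if entdef is None:
        return mo.group()   # unknown entity: keep the literal text
    return entdef.decode("iso8859-1")

def resolve_html_entities(s):
    return _entity_re.sub(_resolve_html_entity, s)

assert resolve_html_entities(u"M&uuml;nchen") == u"M\xfcnchen"
```
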
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
|
def do_GET (self): """serve JavaScript files""" self.server.log.write("server got request path %r\n"%self.path) if not jsfiles.has_key(self.path): data = "HTTP/1.1 404 Oops\r\n" body = "" else: data = 'HTTP/1.1 200 OK\r\n' body = jsfiles[self.path] data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
| 15,852
|
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
|
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
| 15,853
|
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
|
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += body self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
| 15,854
|
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
|
def do_GET (self): """send chunk data""" body = random_chars(self.body_length) data = 'HTTP/1.1 200 OK\r\n' data += "Date: %s\r\n" % self.date_time_string() data += "Transfer-Encoding: chunked\r\n" data += "Connection: close\r\n" data += "\r\n" data += "0000000000%s\r\n" % hex(self.body_length)[2:] data += "%s\r\n" % body data += "0\r\n\r\n" self.server.log.write("server will send %d bytes\n" % len(data)) self.print_lines(data) self.wfile.write(data)
| 15,855
|
def name (self): return 'chunked-leading-zeros'
|
def name (self): return 'chunked-leading-zeros'
| 15,856
|
def name (self): return 'chunked-leading-zeros'
|
def name (self): return 'chunked-leading-zeros'
| 15,857
|
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
|
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
| 15,858
|
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
|
def __init__ (self, methodName='runTest'): ProxyTest.__init__(self, methodName=methodName) request = ChunkRequest() self.addTest(request, handler_class=ChunkRequestHandler)
| 15,859
|
def finish (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) return self.recognize(buf)
|
def finish (self, data, **attrs): """feed data to recognizer""" if not attrs.has_key('mimerecognizer_buf'): return data buf = attrs['mimerecognizer_buf'] buf.write(data) return self.recognize(buf)
| 15,860
|
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if mime != attrs['mime']: wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() del attrs['mimerecognizer_buf'] return data
|
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if not attrs['mime'].startswith(mime): wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() del attrs['mimerecognizer_buf'] return data
| 15,861
|
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if mime != attrs['mime']: wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() del attrs['mimerecognizer_buf'] return data
|
def recognize (self, buf, attrs): # note: recognizing a mime type fixes exploits like # CVE-2002-0025 and CVE-2002-0024 try: mime = wc.magic.classify(buf) if mime != attrs['mime']: wc.log.warn(wc.LOG_FILTER, "Adjusting MIME %r -> %r", attrs['mime'], mime) attrs['headers']['data']['Content-Type'] = "%s\r" % mime except StandardError, msg: wc.log.exception(wc.LOG_FILTER, "Mime recognize error") data = buf.getvalue() buf.close() return data
| 15,862
|
def install_shortcuts (): """create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string) """ try: prg = get_special_folder_path("CSIDL_COMMON_PROGRAMS") except OSError: try: prg = get_special_folder_path("CSIDL_PROGRAMS") except OSError, reason: # give up - cannot install shortcuts print "cannot install shortcuts: %s" % reason sys.exit() lib_dir = distutils.get_python_lib(plat_specific=1) dest_dir = os.path.join(prg, "WebCleaner") try: os.mkdir(dest_dir) directory_created(dest_dir) except OSError: pass target = os.path.join(sys.prefix, "RemoveWebCleaner.exe") path = os.path.join(dest_dir, "Uninstall WebCleaner.lnk") arguments = "-u " + os.path.join(sys.prefix, "WebCleaner-wininst.log") create_shortcut(target, "Uninstall WebCleaner", path, arguments) file_created(path)
|
def install_shortcuts (): """create_shortcut(target, description, filename[, arguments[, \ workdir[, iconpath[, iconindex]]]]) file_created(path) - register 'path' so that the uninstaller removes it directory_created(path) - register 'path' so that the uninstaller removes it get_special_folder_location(csidl_string) """ try: prg = get_special_folder_path("CSIDL_COMMON_PROGRAMS") except OSError: try: prg = get_special_folder_path("CSIDL_PROGRAMS") except OSError, reason: # give up - cannot install shortcuts print "cannot install shortcuts: %s" % reason sys.exit() lib_dir = distutils.sysconfig.get_python_lib(plat_specific=1) dest_dir = os.path.join(prg, "WebCleaner") try: os.mkdir(dest_dir) directory_created(dest_dir) except OSError: pass target = os.path.join(sys.prefix, "RemoveWebCleaner.exe") path = os.path.join(dest_dir, "Uninstall WebCleaner.lnk") arguments = "-u " + os.path.join(sys.prefix, "WebCleaner-wininst.log") create_shortcut(target, "Uninstall WebCleaner", path, arguments) file_created(path)
| 15,863
|
def install_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(script_dir, "webcleaner-certificates") execute(pythonw, script, ["install"])
|
def install_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["install"])
| 15,864
|
def remove_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(script_dir, "webcleaner-certificates") execute(pythonw, script, ["remove"])
|
def remove_certificates (): """generate SSL certificates for SSL gateway functionality""" pythonw = os.path.join(sys.prefix, "pythonw.exe") script = os.path.join(wc.ScriptDir, "webcleaner-certificates") execute(pythonw, script, ["remove"])
| 15,865
|
def js_end_element (self, item): """ Parse generated html for scripts. """ wc.log.debug(wc.LOG_JS, "%s js_end_element buf %r", self, self.htmlparser.tagbuf) if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.warn(wc.LOG_JS, "JS syntax error, self.tagbuf %r", self.htmlparser.tagbuf) return if self.js_src: wc.log.debug(wc.LOG_JS, "JS src, self.tagbuf %r", self.htmlparser.tagbuf) del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.warn(wc.LOG_JS, "JS end, self.tagbuf %s", self.htmlparser.tagbuf) return if len(self.htmlparser.tagbuf) > 2 and \ self.htmlparser.tagbuf[-3][0] == \ wc.filter.html.STARTTAG and \ self.htmlparser.tagbuf[-3][1] == 'script': del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2 or \ self.htmlparser.tagbuf[-1][0] != \ wc.filter.html.DATA or \ self.htmlparser.tagbuf[-2][0] != \ wc.filter.html.STARTTAG or \ self.htmlparser.tagbuf[-2][1] != 'script': # syntax error, ignore return js_ok, js_lang = wc.js.get_js_data(self.htmlparser.tagbuf[-2][2]) if not js_ok: # no JavaScript, add end tag and ignore self.htmlparser.tagbuf.append(item) return ver = wc.js.get_js_ver(js_lang) # get script data script = self.htmlparser.tagbuf[-1][1].strip() # remove html comments script = wc.js.remove_html_comments(script) if not script: # again, ignore an empty script del self.htmlparser.tagbuf[-1] del self.htmlparser.tagbuf[-1] return # put correctly quoted script data into buffer script = wc.js.clean(script, jscomments=self.jscomments) self.htmlparser.tagbuf[-1][1] = script # execute script self.jsScript(script, ver, item)
|
def js_end_element (self, item): """ Parse generated html for scripts. """ wc.log.debug(wc.LOG_JS, "%s js_end_element buf %r", self, self.htmlparser.tagbuf) if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.debug(wc.LOG_JS, "JS syntax error, self.tagbuf %r", self.htmlparser.tagbuf) return if self.js_src: wc.log.debug(wc.LOG_JS, "JS src, self.tagbuf %r", self.htmlparser.tagbuf) del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2: # syntax error, ignore wc.log.debug(wc.LOG_JS, "JS end, self.tagbuf %s", self.htmlparser.tagbuf) return if len(self.htmlparser.tagbuf) > 2 and \ self.htmlparser.tagbuf[-3][0] == \ wc.filter.html.STARTTAG and \ self.htmlparser.tagbuf[-3][1] == 'script': del self.htmlparser.tagbuf[-1] if len(self.htmlparser.tagbuf)<2 or \ self.htmlparser.tagbuf[-1][0] != \ wc.filter.html.DATA or \ self.htmlparser.tagbuf[-2][0] != \ wc.filter.html.STARTTAG or \ self.htmlparser.tagbuf[-2][1] != 'script': # syntax error, ignore return js_ok, js_lang = wc.js.get_js_data(self.htmlparser.tagbuf[-2][2]) if not js_ok: # no JavaScript, add end tag and ignore self.htmlparser.tagbuf.append(item) return ver = wc.js.get_js_ver(js_lang) # get script data script = self.htmlparser.tagbuf[-1][1].strip() # remove html comments script = wc.js.remove_html_comments(script) if not script: # again, ignore an empty script del self.htmlparser.tagbuf[-1] del self.htmlparser.tagbuf[-1] return # put correctly quoted script data into buffer script = wc.js.clean(script, jscomments=self.jscomments) self.htmlparser.tagbuf[-1][1] = script # execute script self.jsScript(script, ver, item)
| 15,866
|
def parse_adzapper_file (filename): res = {} is_comment = re.compile('^\s*(#.*)?$').match content = False # skip content until __DATA__ marker for line in open(filename): if not content: content = line.startswith('__DATA__') elif not is_comment(line): parse_adzapper_line(line.strip(), res) return res
|
def parse_adzapper_file (filename): res = [] is_comment = re.compile('^\s*(#.*)?$').match content = False # skip content until __DATA__ marker for line in open(filename): if not content: content = line.startswith('__DATA__') elif not is_comment(line): parse_adzapper_line(line.strip(), res) return res
| 15,867
|
def parse_adzapper_line (line, res): adclass, pattern = line.split(None, 1) res.setdefault(adclass.lower(), []).append(pattern)
|
def parse_adzapper_line (line, res): adclass, pattern = line.split(None, 1) res.setdefault(adclass.lower(), []).append(pattern)
| 15,868
|
def write_filters (ads): filename = os.path.join("config", "adzapper.zap") if os.path.exists(filename): remove(filename) zapfile = file(filename, 'w') d = {"title": xmlify("AdZapper filters"), "desc": xmlify("Automatically generated on %s" % date), } zapfile.write("""<?xml version="1.0"?>
|
def write_filters (ads): filename = os.path.join("config", "adzapper.zap") if os.path.exists(filename): remove(filename) zapfile = file(filename, 'w') d = {"title": xmlify("AdZapper filters"), "desc": xmlify("Automatically generated on %s" % date), } zapfile.write("""<?xml version="1.0"?>
| 15,869
|
def convert_adzapper_pattern (pattern): pattern = pattern.replace(".", "\\.") pattern = pattern.replace("?", "\\?") pattern = pattern.replace("**", ".*?") pattern = re.sub(r"[^.]*[^?]", pattern, "[^/]*") return pattern
|
def convert_adzapper_pattern (pattern): pattern = pattern.replace(".", "\\.") pattern = pattern.replace("?", "\\?") pattern = pattern.replace("**", ".*?") pattern = re.sub(r"([^.])\*([^?])", r"\1[^/]*\2", pattern) return pattern
| 15,870
|
def convert_adzapper_replace (replace): # replace Perl back references with Python ones replace = re.sub(r"$(\d)", replace, r"\\1") return replace
|
def convert_adzapper_replace (replace): # replace Perl back references with Python ones replace = re.sub(r"\$(\d)", r"\\1", replace) return replace
| 15,871
|
def write_allow (zapfile, pattern): title = "AdZapper PASS filter" desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
|
def write_allow (zapfile, adclass, pattern): d = get_rule_dict(adclass, pattern) zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
| 15,872
|
def write_allow (zapfile, pattern): title = "AdZapper PASS filter" desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
|
def write_allow (zapfile, pattern): title = "AdZapper PASS filter" desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<allow title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s"/>
| 15,873
|
def write_block (zapfile, adclass, pattern, replacement=None): title = "AdZapper %s filter" % adclass desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<block title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d) if replacement: zapfile.write(">%(replacement)s</block>" % d) else: zapfile.write("/>") zapfile.write("\n")
|
def write_block (zapfile, adclass, pattern, replacement=None): d = get_rule_dict(adclass, pattern) zapfile.write("""<block title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d) if replacement: zapfile.write(">%(replacement)s</block>" % d) else: zapfile.write("/>") zapfile.write("\n")
| 15,874
|
def write_block (zapfile, adclass, pattern, replacement=None): title = "AdZapper %s filter" % adclass desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<block title="%(title)s" desc="%(desc)s" scheme="%(scheme)s" host="%(host)s" path="%(path)s" query="%(query)s" fragment="%(fragment)s" """ % d) if replacement: zapfile.write(">%(replacement)s</block>" % d) else: zapfile.write("/>") zapfile.write("\n")
|
def write_block (zapfile, adclass, pattern, replacement=None): title = "AdZapper %s filter" % adclass desc = "Automatically generated, you should not edit this filter." scheme, host, path, query, fragment = urlparse.urlsplit(pattern) d = locals() for key, value in d: d[key] = xmlify(value) zapfile.write("""<block title="%(title)s" desc="%(desc)s" url="%(url)s" """ % d) if replacement is not None: zapfile.write(">%s</block>" % xmlify(replacement)) else: zapfile.write("/>") zapfile.write("\n")
| 15,875
|
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
|
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file(), 'w') f.write("""<?xml version="1.0"?>
| 15,876
|
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
|
def write_proxyconf (self): """write proxy configuration""") f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
| 15,877
|
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
|
def write_proxyconf (self): """write proxy configuration""" f = file(proxyconf_file()) f.write("""<?xml version="1.0"?>
| 15,878
|
def read_filterconf (self): """read filter rules""" from glob import glob # filter configuration for f in filterconf_files(): ZapperParser().parse(f, self) for f in self['rules']: f.sort() self['rules'].sort() filter.rules.FolderRule.recalc_oids(self['rules'])
|
def read_filterconf (self): """read filter rules""" # filter configuration for f in filterconf_files(): ZapperParser().parse(f, self) for f in self['rules']: f.sort() self['rules'].sort() filter.rules.FolderRule.recalc_oids(self['rules'])
| 15,879
|
def is_allowed (self, method, scheme, port): if not self.method(method): wc.log.warn(wc.LOG_PROXY, "illegal method %s", method) return False if scheme not in self.schemes: wc.log.warn(wc.LOG_PROXY, "illegal scheme %s", scheme) return False if method == 'CONNECT': # CONNECT method sanity if port not in self.connect_ports: wc.log.warn(wc.LOG_PROXY, "illegal CONNECT port %d", port) return False if self.scheme != 'https': wc.log.warn(wc.LOG_PROXY, "illegal CONNECT scheme %d", scheme) return False else: # all other methods if port not in self.http_ports: wc.log.warn(wc.LOG_PROXY, "illegal port %d", port) return False return True
|
def is_allowed (self, method, scheme, port): if not self.method(method): wc.log.warn(wc.LOG_PROXY, "illegal method %s", method) return False if scheme not in self.schemes: wc.log.warn(wc.LOG_PROXY, "illegal scheme %s", scheme) return False if method == 'CONNECT': # CONNECT method sanity if port not in self.connect_ports: wc.log.warn(wc.LOG_PROXY, "illegal CONNECT port %d", port) return False if scheme != 'https': wc.log.warn(wc.LOG_PROXY, "illegal CONNECT scheme %d", scheme) return False else: # all other methods if port not in self.http_ports: wc.log.warn(wc.LOG_PROXY, "illegal port %d", port) return False return True
| 15,880
|
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
|
defp.feed("<hTml>") _test():p.feed("<hTml>") pp.feed("<hTml>") =p.feed("<hTml>") HtmlPrinter()p.feed("<hTml>") #p.feed("<hTml>")p.feed("<hTml>") p.feed("<ap.feed("<hTml>") href>")p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href=''>")p.feed("<hTml>") #p.feed('<ap.feed("<hTml>") href="">')p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href='a'>")p.feed("<hTml>") #p.feed('<ap.feed("<hTml>") href="a">')p.feed("<hTml>") p.feed("<ap.feed("<hTml>") href=a>")p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href='\"'>")p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href=\"'\">")p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href='p.feed("<hTml>") '>")p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href=ap.feed("<hTml>") href=b>")p.feed("<hTml>") #p.feed("<a/>")p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href/>")p.feed("<hTml>") #p.feed("<ap.feed("<hTml>") href=ap.feed("<hTml>") />")p.feed("<hTml>") #p.feed("</a>")p.feed("<hTml>") #p.feed("<?blap.feed("<hTml>") foo?>")p.feed("<hTml>") #p.feed("<?bla?>")p.feed("<hTml>") #p.feed("<!--p.feed("<hTml>") -p.feed("<hTml>") commentp.feed("<hTml>") -->")p.feed("<hTml>") #p.feed("<!---->")p.feed("<hTml>") #p.feed("<!DOCTYPEp.feed("<hTml>") \"vlap.feed("<hTml>") foo>")p.feed("<hTml>") p.flush()p.feed("<hTml>")
| 15,881
|
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
|
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
| 15,882
|
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
|
def _test(): p = HtmlPrinter() #p.feed("<hTml>") p.feed("<a href>") #p.feed("<a href=''>") #p.feed('<a href="">') #p.feed("<a href='a'>") #p.feed('<a href="a">') p.feed("<a href=a>") #p.feed("<a href='\"'>") #p.feed("<a href=\"'\">") #p.feed("<a href=' '>") #p.feed("<a href=a href=b>") #p.feed("<a/>") #p.feed("<a href/>") #p.feed("<a href=a />") #p.feed("</a>") #p.feed("<?bla foo?>") #p.feed("<?bla?>") #p.feed("<!-- - comment -->") #p.feed("<!---->") #p.feed("<!DOCTYPE \"vla foo>") p.flush()
| 15,883
|
def _broken (): p = HtmlPrinter() p.feed("") p.flush()
|
def _broken (): p = HtmlPrinter() p.feed("<img bo\\\nrder=0>") p.flush()
| 15,884
|
def _broken (): p = HtmlPrinter() p.feed("") p.flush()
|
def _broken (): p = HtmlPrinter() p.feed("") p.flush()
| 15,885
|
def parse_ntlm_challenge (challenge): """parse both type0 and type2 challenges""" res = {} if not challenge.startswith('NTLMSSP\x00'): return res, challenge res['nonce'] = challenge[24:32] return res, challenge[40:]
|
def parse_ntlm_challenge (challenge): """parse both type0 and type2 challenges""" res = {} if "," in challenge: chal, remainder = challenge.split(",", 1) else: chal, remainder = challenge, "" chal = base64.decodestring(chal.strip()) if not chal.startswith('NTLMSSP\x00'): res['type'] = 0 return res, challenge res['nonce'] = challenge[24:32] return res, challenge[40:]
| 15,886
|
def parse_ntlm_challenge (challenge): """parse both type0 and type2 challenges""" res = {} if not challenge.startswith('NTLMSSP\x00'): return res, challenge res['nonce'] = challenge[24:32] return res, challenge[40:]
|
def parse_ntlm_challenge (challenge): """parse both type0 and type2 challenges""" res = {} if not challenge.startswith('NTLMSSP\x00'): return res, challenge res['nonce'] = chal[24:32] res['type'] = 2 return res, remainder.strip()
| 15,887
|
def parse_ntlm_credentials (credentials): """parse both type1 and type3 credentials""" # XXX pass
|
def parse_ntlm_credentials (credentials): """parse both type1 and type3 credentials""" # XXX res = {} if "," in credentials: creds, remainder = credentials.split(",", 1) else: creds, remainder = credentials, "" creds = base64.decodestring(creds.strip()) if not creds.startswith('NTLMSSP\x00'): return res, remainder.strip() type = creds[8] if type==1: res['type'] = 1 domain_len = int(creds[16:18]) domain_off = int(creds[20:22]) host_len = int(creds[24:26]) host_off = int(creds[28:30]) res['host'] = creds[host_off:host_off+host_len] res['domain'] = creds[domain_off:domain_off+domain_len] elif type==3: res['type'] = 3 lm_res_len = int(creds[12:14]) else: return res, remainder.strip() return res, remainder.strip()
| 15,888
|
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
|
def create_message2 (): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
| 15,889
|
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
|
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' zero2 = '\x00'*2 flags="\x82\x01" nonce = "%08d" % (random.random()*100000000) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
| 15,890
|
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero2 = '\x00' * 2 zero7 = '\x00' * 7 zero8 = '\x00' * 8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(nonce)s%(zero8)s" % locals()
|
def create_message2 (flags="\x82\x01"): protocol = 'NTLMSSP\x00' #name type = '\x02' msglen = '\x28' nonce = "%08f" % (random.random()*10) assert nonce not in nonces nonces[nonce] = None zero8 = '\x00'*8 return "%(protocol)s%(type)s%(zero7)s%(msglen)s%(zero2)s%(flags)s%(zero2)s%(nonce)s%(zero8)s" % locals()
| 15,891
|
def create_message3 (nonce, domain, username, host, flags="\x82\x01", lm_hashed_pw=None, nt_hashed_pw=None, ntlm_mode=0): protocol = 'NTLMSSP\000' #name type = '\003\000' #type 3 head = protocol + type + '\000\000' domain_rec = record(domain) user_rec = record(username) host_rec = record(host) additional_rec = record('') if lm_hashed_pw: lm_rec = record(ntlm_procs.calc_resp(lm_hashed_pw, nonce)) else: lm_rec = record('') if nt_hashed_pw: nt_rec = record(ntlm_procs.calc_resp(nt_hashed_pw, nonce)) else: nt_rec = record('') # length of the head and five infos for LM, NT, Domain, User, Host domain_offset = len(head) + 5 * 8 # and unknown record info and flags' lenght if nltm_mode == 0: domain_offset = domain_offset + 8 + len(flags) # create info fields domain_rec.create_record_info(domain_offset) user_rec.create_record_info(domain_rec.next_offset) host_rec.create_record_info(user_rec.next_offset) lm_rec.create_record_info(host_rec.next_offset) nt_rec.create_record_info(lm_rec.next_offset) additional_rec.create_record_info(nt_rec.next_offset) # data part of the message 3 data_part = domain_rec.data + user_rec.data + host_rec.data + lm_rec.data + nt_rec.data # build message 3 m3 = head + lm_rec.record_info + nt_rec.record_info + \ domain_rec.record_info + user_rec.record_info + host_rec.record_info # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.record_info + flags m3 += data_part # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.data return m3
|
def create_message3 (nonce, domain, username, host, flags="\x82\x01", lm_hashed_pw=None, nt_hashed_pw=None, ntlm_mode=0): protocol = 'NTLMSSP\000' #name type = '\003\000' #type 3 head = protocol + type + '\000\000' domain_rec = record(domain) user_rec = record(username) host_rec = record(host) additional_rec = record('') if lm_hashed_pw: lm_rec = record(ntlm_procs.calc_resp(lm_hashed_pw, nonce)) else: lm_rec = record('') if nt_hashed_pw: nt_rec = record(ntlm_procs.calc_resp(nt_hashed_pw, nonce)) else: nt_rec = record('') # length of the head and five infos for LM, NT, Domain, User, Host domain_offset = len(head) + 5 * 8 # and unknown record info and flags' lenght if nltm_mode == 0: domain_offset = domain_offset + 8 + len(flags) # create info fields domain_rec.create_record_info(domain_offset) user_rec.create_record_info(domain_rec.next_offset) host_rec.create_record_info(user_rec.next_offset) lm_rec.create_record_info(host_rec.next_offset) nt_rec.create_record_info(lm_rec.next_offset) additional_rec.create_record_info(nt_rec.next_offset) # data part of the message 3 data_part = domain_rec.data + user_rec.data + host_rec.data + lm_rec.data + nt_rec.data # build message 3 m3 = head + lm_rec.record_info + nt_rec.record_info + \ domain_rec.record_info + user_rec.record_info + host_rec.record_info # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.record_info + flags m3 += data_part # Experimental feature !!! if ntlm_mode == 0: m3 += additional_rec.data return m3
| 15,892
|
def parse_message2 (msg2): msg2 = base64.decodestring(msg2) # protocol = msg2[0:7] # msg_type = msg2[7:9] nonce = msg2[24:32] return nonce
|
def parse_message2 (msg2): msg2 = base64.decodestring(msg2) # protocol = msg2[0:7] # msg_type = msg2[7:9] nonce = msg2[24:32] return nonce
| 15,893
|
def unknown_part (bin_str): res = 'Hex : %s\n' % utils.str2hex(bin_str, ' ') res += 'String : %s\n' % utils.str2prn_str(bin_str, ' ') res += 'Decimal: %s\n' % utils.str2dec(bin_str, ' ') return res
|
def unknown_part (bin_str): res = 'Hex : %s\n' % utils.str2hex(bin_str, ' ') res += 'String : %s\n' % utils.str2prn_str(bin_str, ' ') res += 'Decimal: %s\n' % utils.str2dec(bin_str, ' ') return res
| 15,894
|
def size_number (text): base = which_base(text) if base == 0: return 0 length = len(text) size = size_base(base) end = size+1 while end < length and text[end] in _hex[:base]: end += 1 return end
|
def size_number (text): base = which_base(text) if base == 0: return 0 length = len(text) size = size_base(base) end = size+1 while end < length and text[end] in _hex[:base]: end += 1 return end-size
| 15,895
|
def convert (text): base = which_base(text) start = size_base(base) end = size_number(text) return base10(text[start:end], base)
|
def convert (text): base = which_base(text) start = size_base(base) end = start+size_number(text) return base10(text[start:end], base)
| 15,896
|
def local4 (number): if sys.byteorder == 'big': return big4(number) return little4(number)
|
def local4 (number): if sys.byteorder == 'big': return big4(number) return little4(number)
| 15,897
|
def handle_read (self): """read data from connection, put it into recv_buffer and call process_read""" assert self.connected
|
def handle_read (self): """read data from connection, put it into recv_buffer and call process_read""" assert self.connected
| 15,898
|
def doit (self, data, **args): # note: data is the complete request method, url, httpver = data.split() debug(FILTER, "block filter working on url %s", `url`) if self.allowed(url): return data blocked = self.strict_whitelist or self.blocked(url) if blocked: debug(FILTER, "blocked url %s", url) if isinstance(blocked, basestring): doc = blocked # index 3, not 2! elif is_image(url): doc = self.block_image else: # XXX hmmm, what about CGI images? # make HTTP HEAD request? doc = self.block_url port = config['port'] if method=='CONNECT': return 'CONNECT https://localhost:%d%s HTTP/1.1'%(port, doc) return 'GET http://localhost:%d%s HTTP/1.1'%(port, doc) return data
|
def doit (self, data, **args): # note: data is the complete request method, url, httpver = data.split() debug(FILTER, "block filter working on url %s", `url`) if self.allowed(url): return data blocked = self.strict_whitelist or self.blocked(url) if blocked: debug(FILTER, "blocked url %s: %s", url, str(blocked)) if isinstance(blocked, basestring): doc = blocked # index 3, not 2! elif is_image(url): doc = self.block_image else: # XXX hmmm, what about CGI images? # make HTTP HEAD request? doc = self.block_url port = config['port'] if method=='CONNECT': return 'CONNECT https://localhost:%d%s HTTP/1.1'%(port, doc) return 'GET http://localhost:%d%s HTTP/1.1'%(port, doc) return data
| 15,899